Dataset columns:
  hexsha              string   (length 40)
  size                int64    (4 to 1.05M)
  content             string   (length 4 to 1.05M)
  avg_line_length     float64  (1.33 to 100)
  max_line_length     int64    (1 to 1k)
  alphanum_fraction   float64  (0.25 to 1)
hexsha: ed4779750a87bffa2d7aef7043d1cc95e0cc51b2
size: 163 · avg_line_length: 27.166667 · max_line_length: 60 · alphanum_fraction: 0.674847

/// Represents a landform that will be written to a GPX file
pub struct Landform {
    pub name: String,
    pub latitude: String,
    pub longitude: String,
}
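A minimal usage sketch for the struct above; the sample values are illustrative only, since the file defines nothing beyond the struct itself:

// Minimal sketch: build a Landform as defined above. Coordinates are kept
// as strings, so callers format them before construction (the sample data
// here is hypothetical).
fn example() -> Landform {
    Landform {
        name: String::from("Mount Example"),
        latitude: String::from("46.8523"),
        longitude: String::from("-121.7603"),
    }
}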
hexsha: 26c5b36f47bafc392f729c57a11536382225e404
size: 322 · avg_line_length: 15.333333 · max_line_length: 20 · alphanum_fraction: 0.76087

pub mod command;
pub mod debugger;
pub mod sourcemap;
pub mod subroutine;

// commands
pub mod backtrace;
pub mod breakpoint;
pub mod disassemble;
pub mod expression;
pub mod frame;
pub mod global;
pub mod list;
pub mod local;
pub mod memory;
pub mod process;
pub mod run;
pub mod settings;
pub mod stack;
pub mod thread;
hexsha: 3933110e1e183309753195320653a9d917f14477
size: 1,675 · avg_line_length: 27.016129 · max_line_length: 83 · alphanum_fraction: 0.605373

#![no_std]
#![no_main]
#![feature(abi_avr_interrupt)]

use panic_halt as _;

use core::sync::atomic::{AtomicBool, Ordering};

use arduino_hal::port::{mode, Pin};
use either::*;

static REVERSED: AtomicBool = AtomicBool::new(false);

fn is_reversed() -> bool {
    REVERSED.load(Ordering::SeqCst)
}

#[avr_device::interrupt(atmega328p)]
fn INT0() {
    let current = REVERSED.load(Ordering::SeqCst);
    REVERSED.store(!current, Ordering::SeqCst);
}

fn blink_for_range(range: impl Iterator<Item = u16>, leds: &mut [Pin<mode::Output>]) {
    range.map(|i| i * 100).for_each(|ms| {
        let iter = if is_reversed() {
            Left(leds.iter_mut().rev())
        } else {
            Right(leds.iter_mut())
        };
        iter.for_each(|led| {
            led.toggle();
            arduino_hal::delay_ms(ms as u16);
        })
    });
}

#[arduino_hal::entry]
fn main() -> ! {
    let dp = arduino_hal::Peripherals::take().unwrap();
    let pins = arduino_hal::pins!(dp);

    // thanks to tsemczyszyn and Rahix: https://github.com/Rahix/avr-hal/issues/240
    // Configure INT0 for falling edge. 0x03 would be rising edge.
    dp.EXINT.eicra.modify(|_, w| w.isc0().bits(0x02));
    // Enable the INT0 interrupt source.
    dp.EXINT.eimsk.modify(|_, w| w.int0().set_bit());

    let mut leds: [Pin<mode::Output>; 4] = [
        pins.d3.into_output().downgrade(),
        pins.d4.into_output().downgrade(),
        pins.d5.into_output().downgrade(),
        pins.d6.into_output().downgrade(),
    ];

    unsafe { avr_device::interrupt::enable() };

    loop {
        // Ramp the blink delay up, then back down; `10..0` would be an empty
        // range in Rust, so the descending leg uses `.rev()`.
        blink_for_range(0..10, &mut leds);
        blink_for_range((0..10).rev(), &mut leds);
    }
}
hexsha: f78c1fa7dea99906f8914b67f6ba59a530b6afd6
size: 5,034 · avg_line_length: 33.785235 · max_line_length: 95 · alphanum_fraction: 0.604887

use thiserror::Error;

use crate::dic::lexicon::word_infos::WordInfo;
use crate::dic::lexicon::Lexicon;
use crate::prelude::*;

/// Sudachi error
#[derive(Error, Debug, Eq, PartialEq)]
pub enum LexiconSetError {
    #[error("too large word_id {0} in dict {1}")]
    TooLargeWordId(u32, usize),

    #[error("too large dictionary_id {0}")]
    TooLargeDictionaryId(usize),

    #[error("too many user dictionaries")]
    TooManyDictionaries,
}

/// Set of Lexicons
///
/// Handles multiple lexicons as one lexicon.
/// The first lexicon in the list must be from the system dictionary.
pub struct LexiconSet<'a> {
    lexicons: Vec<Lexicon<'a>>,
    pos_offsets: Vec<usize>,
}

impl<'a> LexiconSet<'a> {
    /// The first 4 bits of word_id are used to indicate which lexicon
    /// the word comes from, thus we can only hold 2^4 lexicons at the same time.
    const MAX_DICTIONARIES: usize = 16;

    /// Creates a LexiconSet given a lexicon
    ///
    /// This assumes that the given lexicon is from the system dictionary.
    pub fn new(system_lexicon: Lexicon) -> LexiconSet {
        LexiconSet {
            lexicons: vec![system_lexicon],
            pos_offsets: vec![0],
        }
    }

    /// Adds a lexicon to the lexicon list
    ///
    /// pos_offset: number of pos in the grammar
    pub fn append(
        &mut self,
        lexicon: Lexicon<'a>,
        pos_offset: usize,
    ) -> Result<(), LexiconSetError> {
        if self.is_full() {
            return Err(LexiconSetError::TooManyDictionaries);
        }

        self.lexicons.push(lexicon);
        self.pos_offsets.push(pos_offset);
        Ok(())
    }

    /// Returns whether the dictionary capacity is full
    pub fn is_full(&self) -> bool {
        self.lexicons.len() >= LexiconSet::MAX_DICTIONARIES
    }
}

impl LexiconSet<'_> {
    /// Returns a list of word_id and length of words that match the given input
    ///
    /// Searches the user dictionaries first and then the system dictionary.
    pub fn lookup(&self, input: &[u8], offset: usize) -> SudachiResult<Vec<(u32, usize)>> {
        let mut vs: Vec<(u32, usize)> = Vec::new();
        for (did, user_lexicon) in self.lexicons.iter().enumerate().skip(1) {
            vs.extend(
                user_lexicon
                    .lookup(input, offset)?
                    .iter()
                    .map(|(wid, l)| self.build_dictword_id(did, *wid).map(|dwid| (dwid, *l)))
                    .collect::<Result<Vec<_>, _>>()?,
            );
        }
        vs.extend(self.lexicons[0].lookup(input, offset)?);

        Ok(vs)
    }

    /// Returns word_info for the given word_id
    pub fn get_word_info(&self, dictword_id: u32) -> SudachiResult<WordInfo> {
        let (dict_id, word_id) = LexiconSet::decode_dictword_id(dictword_id);
        let mut word_info = self.lexicons[dict_id].get_word_info(word_id)?;
        let pos_id = word_info.pos_id;
        if dict_id > 0 && pos_id as usize >= self.pos_offsets[1] {
            // user-defined part-of-speech
            word_info.pos_id =
                (pos_id as usize - self.pos_offsets[1] + self.pos_offsets[dict_id]) as u16;
        }
        self.update_dict_id(&mut word_info.a_unit_split, dict_id)?;
        self.update_dict_id(&mut word_info.b_unit_split, dict_id)?;
        self.update_dict_id(&mut word_info.word_structure, dict_id)?;

        Ok(word_info)
    }

    /// Returns word_param for the given word_id
    pub fn get_word_param(&self, dictword_id: u32) -> SudachiResult<(i16, i16, i16)> {
        let (dict_id, word_id) = LexiconSet::decode_dictword_id(dictword_id);
        self.lexicons[dict_id].get_word_param(word_id)
    }

    /// Merges dict_id and word_id into one u32
    ///
    /// We use the top 4 bits for dict_id.
    fn build_dictword_id(&self, dict_id: usize, word_id: u32) -> Result<u32, LexiconSetError> {
        if word_id > 0x0FFFFFFF {
            return Err(LexiconSetError::TooLargeWordId(word_id, dict_id));
        }
        if dict_id > self.lexicons.len() {
            return Err(LexiconSetError::TooLargeDictionaryId(dict_id));
        }
        Ok((dict_id as u32) << 28 | word_id)
    }

    pub fn get_dictionary_id(dictword_id: u32) -> usize {
        (dictword_id >> 28) as usize
    }

    fn get_word_id(dictword_id: u32) -> u32 {
        dictword_id & 0x0FFFFFFF
    }

    fn decode_dictword_id(dictword_id: u32) -> (usize, u32) {
        let dict_id = LexiconSet::get_dictionary_id(dictword_id);
        let word_id = LexiconSet::get_word_id(dictword_id);
        (dict_id, word_id)
    }

    fn update_dict_id(&self, split: &mut Vec<u32>, dict_id: usize) -> SudachiResult<()> {
        for i in 0..split.len() {
            let (crr_dict_id, word_id) = LexiconSet::decode_dictword_id(split[i]);
            if crr_dict_id > 0 {
                // update if the target word is not in the system dictionary
                split[i] = self.build_dictword_id(dict_id, word_id)?;
            }
        }
        Ok(())
    }

    pub fn size(&self) -> u32 {
        self.lexicons.iter().fold(0, |acc, lex| acc + lex.size())
    }
}
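The 4-bit/28-bit id packing above is plain bit arithmetic; a standalone sketch of how build_dictword_id and decode_dictword_id round-trip, independent of the Lexicon types:

// Standalone sketch of the id packing used by LexiconSet: the top 4 bits
// carry the dictionary id, the low 28 bits carry the word id.
fn pack(dict_id: u32, word_id: u32) -> u32 {
    assert!(dict_id < 16 && word_id <= 0x0FFF_FFFF);
    (dict_id << 28) | word_id
}

fn unpack(dictword_id: u32) -> (u32, u32) {
    (dictword_id >> 28, dictword_id & 0x0FFF_FFFF)
}

fn main() {
    let id = pack(3, 42);
    assert_eq!(unpack(id), (3, 42)); // round-trips losslessly
    println!("packed: {:#010x}", id); // prints 0x3000002a
}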
hexsha: ccdc4dd46f00a4ea594dc32a77a984f97cff2b89
size: 3,156 · avg_line_length: 23.377778 · max_line_length: 94 · alphanum_fraction: 0.579531

//! General functions.

use ffi_utils::{self, Text};
use libc::{c_char, c_void};
use std::fmt::{self, Debug, Formatter};
use std::ffi::{CStr, CString};
use std::mem;
use std::ops::Deref;
use ui_sys::{self, uiInitOptions};
use windows::Window;

#[derive(Clone)]
pub struct InitOptions;

#[inline]
pub fn init(_: InitOptions) -> Result<(), InitError> {
    unsafe {
        let mut init_options = uiInitOptions {
            Size: mem::size_of::<uiInitOptions>(),
        };
        let err = ui_sys::uiInit(&mut init_options);
        if err.is_null() {
            ffi_utils::set_initialized();
            Ok(())
        } else {
            Err(InitError {
                ui_init_error: err,
            })
        }
    }
}

#[inline]
pub fn uninit() {
    unsafe {
        ffi_utils::unset_initialized();
        Window::destroy_all_windows();
        ui_sys::uiUninit();
    }
}

#[inline]
pub fn main() {
    unsafe { ui_sys::uiMain() }
}

#[inline]
pub fn quit() {
    unsafe { ui_sys::uiQuit() }
}

pub struct InitError {
    ui_init_error: *const c_char,
}

impl Drop for InitError {
    fn drop(&mut self) {
        unsafe { ui_sys::uiFreeInitError(self.ui_init_error) }
    }
}

impl Debug for InitError {
    fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
        (**self).fmt(f)
    }
}

impl Deref for InitError {
    type Target = str;
    fn deref(&self) -> &str {
        unsafe { CStr::from_ptr(self.ui_init_error).to_str().unwrap_or("") }
    }
}

#[inline]
pub fn queue_main(callback: Box<FnMut()>) {
    unsafe {
        let mut data: Box<Box<FnMut()>> = Box::new(callback);
        ui_sys::uiQueueMain(
            ffi_utils::void_void_callback,
            &mut *data as *mut Box<FnMut()> as *mut c_void,
        );
        mem::forget(data);
    }
}

#[inline]
pub fn on_should_quit(callback: Box<FnMut()>) {
    unsafe {
        let mut data: Box<Box<FnMut()>> = Box::new(callback);
        ui_sys::uiOnShouldQuit(
            ffi_utils::void_void_callback,
            &mut *data as *mut Box<FnMut()> as *mut c_void,
        );
        mem::forget(data);
    }
}

#[inline]
pub fn open_file(parent: &Window) -> Option<Text> {
    unsafe { Text::optional(ui_sys::uiOpenFile(parent.as_ui_window())) }
}

#[inline]
pub fn save_file(parent: &Window) -> Option<Text> {
    unsafe { Text::optional(ui_sys::uiSaveFile(parent.as_ui_window())) }
}

#[inline]
pub fn msg_box(parent: &Window, title: &str, description: &str) {
    unsafe {
        let c_title = CString::new(title.as_bytes().to_vec()).unwrap();
        let c_description = CString::new(description.as_bytes().to_vec()).unwrap();
        ui_sys::uiMsgBox(
            parent.as_ui_window(),
            c_title.as_ptr(),
            c_description.as_ptr(),
        )
    }
}

#[inline]
pub fn msg_box_error(parent: &Window, title: &str, description: &str) {
    unsafe {
        let c_title = CString::new(title.as_bytes().to_vec()).unwrap();
        let c_description = CString::new(description.as_bytes().to_vec()).unwrap();
        ui_sys::uiMsgBoxError(
            parent.as_ui_window(),
            c_title.as_ptr(),
            c_description.as_ptr(),
        )
    }
}
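The functions above must be called in a particular order around libui's main loop; a minimal lifecycle sketch using only the free functions defined in this file (window creation is elided, since the Window API is not shown here):

// Minimal lifecycle sketch, assuming this module's functions are in scope.
fn run() -> Result<(), InitError> {
    init(InitOptions)?;                  // must succeed before any other call
    on_should_quit(Box::new(|| quit())); // ask the main loop to stop on quit
    main();                              // blocks until quit() is called
    uninit();                            // tear down libui state afterwards
    Ok(())
}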
hexsha: 09b06159af3718371b5554aab37e24ce166ccbfd
size: 112,983
use crate::name_resolution::{DictEntry, FnKey, FnValue, TopLevelContext}; use crate::rustspec::*; use crate::util::check_vec; use crate::HacspecErrorEmitter; use im::{HashMap, HashSet}; use itertools::Itertools; use rustc_session::Session; use rustc_span::DUMMY_SP; // TODO: explain that we need typechecking inference to disambiguate method calls fn is_numeric(t: &Typ, top_ctxt: &TopLevelContext) -> bool { if (t.0).0 == Borrowing::Borrowed { return false; }; match &(t.1).0 { BaseTyp::UInt128 => true, BaseTyp::Int128 => true, BaseTyp::UInt64 => true, BaseTyp::Int64 => true, BaseTyp::UInt32 => true, BaseTyp::Int32 => true, BaseTyp::UInt16 => true, BaseTyp::Int16 => true, BaseTyp::UInt8 => true, BaseTyp::Int8 => true, BaseTyp::Usize => true, BaseTyp::Isize => true, BaseTyp::Named((name, _), None) => match top_ctxt.typ_dict.get(name) { Some((new_t1, dict_entry)) => { assert!((new_t1.0).0 == Borrowing::Consumed); match dict_entry { DictEntry::Alias => is_numeric(new_t1, top_ctxt), DictEntry::Enum => false, DictEntry::Array | DictEntry::NaturalInteger => true, } } None => match name.string.as_str() { "U8" | "U16" | "U32" | "U64" | "U128" | "I8" | "I16" | "I32" | "I64" | "I128" => { true } _ => false, }, }, BaseTyp::Array(_, _) => true, BaseTyp::Seq(t1) => is_numeric( &((Borrowing::Consumed, DUMMY_SP.into()), *t1.clone()), top_ctxt, ), _ => false, } } fn is_bool(t: &Typ, top_ctxt: &TopLevelContext) -> bool { if (t.0).0 == Borrowing::Borrowed { return false; }; match &(t.1).0 { BaseTyp::Bool => true, BaseTyp::Named((name, _), None) => match top_ctxt.typ_dict.get(name) { Some((new_t1, dict_entry)) => { assert!((new_t1.0).0 == Borrowing::Consumed); match dict_entry { DictEntry::Alias => is_numeric(new_t1, top_ctxt), DictEntry::Enum | DictEntry::Array | DictEntry::NaturalInteger => false, } } None => false, }, _ => false, } } fn is_copy(t: &BaseTyp, top_ctxt: &TopLevelContext) -> bool { match t { BaseTyp::Unit => true, BaseTyp::Bool => true, BaseTyp::UInt128 => true, BaseTyp::Int128 => true, BaseTyp::UInt64 => true, BaseTyp::Int64 => true, BaseTyp::UInt32 => true, BaseTyp::Int32 => true, BaseTyp::UInt16 => true, BaseTyp::Int16 => true, BaseTyp::UInt8 => true, BaseTyp::Int8 => true, BaseTyp::Usize => true, BaseTyp::Isize => true, BaseTyp::Seq(_) => false, BaseTyp::Str => false, BaseTyp::Array(_, _) => true, BaseTyp::Named((name, _), arg) => match top_ctxt.typ_dict.get(name) { Some((new_t1, _dict_entry)) => { debug_assert!((new_t1.0).0 == Borrowing::Consumed); is_copy(&(new_t1.1).0, top_ctxt) } None => match arg { None => match name.string.as_str() { "U8" | "U16" | "U32" | "U64" | "U128" | "I8" | "I16" | "I32" | "I64" | "I128" => true, _ => false, }, Some(_) => false, }, }, BaseTyp::Variable(_) => false, BaseTyp::Tuple(ts) => ts.iter().all(|(t, _)| is_copy(t, top_ctxt)), BaseTyp::Enum(ts, type_args) => { type_args.len() == 0 && ts.iter().all(|(_, t)| match t { None => true, Some((t, _)) => is_copy(t, top_ctxt), }) } BaseTyp::NaturalInteger(_, _, _) => true, } } fn is_array( sess: &Session, t: &Typ, top_ctxt: &TopLevelContext, span: &RustspecSpan, ) -> Result<(Option<Spanned<ArraySize>>, Spanned<BaseTyp>), ()> { match &(t.1).0 { BaseTyp::Seq(t1) => Ok((None, t1.as_ref().clone())), BaseTyp::Named(id, None) => { let name = &id.0; match top_ctxt.typ_dict.get(name) { Some((new_t, dict_entry)) => match dict_entry { DictEntry::Alias => is_array(sess, new_t, top_ctxt, span), DictEntry::Enum => { sess.span_rustspec_err( span.clone(), format!("expected an array but got type {}{}", &(t.0).0, &(t.1).0) .as_str(), 
); Err(()) } DictEntry::Array => match &(new_t.1).0 { BaseTyp::Array(size, cell_t) => { Ok((Some(size.clone()), cell_t.as_ref().clone())) } _ => panic!("should not happen"), }, DictEntry::NaturalInteger => { sess.span_rustspec_err( span.clone(), format!( "expected an array but got a natural integer type: {}{}", &(t.0).0, &(t.1).0 ) .as_str(), ); Err(()) } }, None => { sess.span_rustspec_err( span.clone(), format!("expected an array but got type {}{}", &(t.0).0, &(t.1).0).as_str(), ); Err(()) } } } BaseTyp::Named(_, Some(_)) => Err(()), BaseTyp::Array(len, cell_t) => Ok((Some(len.clone()), cell_t.as_ref().clone())), _ => { sess.span_rustspec_err( span.clone(), format!("expected an array but got type {}{}", &(t.0).0, &(t.1).0).as_str(), ); Err(()) } } } fn is_index(t: &BaseTyp, top_ctxt: &TopLevelContext) -> bool { match t { BaseTyp::UInt128 => true, BaseTyp::Int128 => true, BaseTyp::UInt64 => true, BaseTyp::Int64 => true, BaseTyp::UInt32 => true, BaseTyp::Int32 => true, BaseTyp::UInt16 => true, BaseTyp::Int16 => true, BaseTyp::UInt8 => true, BaseTyp::Int8 => true, BaseTyp::Usize => true, BaseTyp::Isize => true, BaseTyp::Named((name, _), None) => match top_ctxt.typ_dict.get(name) { Some((((Borrowing::Consumed, _), (new_ty, _)), DictEntry::Alias)) => { is_index(new_ty, top_ctxt) } _ => false, }, _ => false, } } fn is_castable_integer(t: &BaseTyp, top_ctxt: &TopLevelContext) -> bool { let t = dealias_type(t.clone(), top_ctxt); match t { BaseTyp::UInt128 => true, BaseTyp::Int128 => true, BaseTyp::UInt64 => true, BaseTyp::Int64 => true, BaseTyp::UInt32 => true, BaseTyp::Int32 => true, BaseTyp::UInt16 => true, BaseTyp::Int16 => true, BaseTyp::UInt8 => true, BaseTyp::Int8 => true, BaseTyp::Usize => true, BaseTyp::Isize => true, _ => false, } } fn is_safe_casting(t1: &BaseTyp, t2: &BaseTyp) -> bool { match (t2, t1) { (BaseTyp::UInt128, BaseTyp::UInt64) | (BaseTyp::UInt128, BaseTyp::UInt32) | (BaseTyp::UInt128, BaseTyp::UInt16) | (BaseTyp::UInt128, BaseTyp::Usize) | (BaseTyp::UInt128, BaseTyp::UInt8) => true, (BaseTyp::UInt64, BaseTyp::UInt32) | (BaseTyp::UInt64, BaseTyp::UInt16) | (BaseTyp::UInt64, BaseTyp::Usize) | (BaseTyp::UInt64, BaseTyp::UInt8) => true, (BaseTyp::UInt32, BaseTyp::UInt16) | (BaseTyp::UInt32, BaseTyp::Usize) | (BaseTyp::UInt32, BaseTyp::UInt8) => true, (BaseTyp::UInt16, BaseTyp::UInt8) => true, (BaseTyp::Usize, BaseTyp::UInt16) | (BaseTyp::Usize, BaseTyp::UInt8) => true, (BaseTyp::Int128, BaseTyp::Int64) | (BaseTyp::Int128, BaseTyp::Int32) | (BaseTyp::Int128, BaseTyp::Int16) | (BaseTyp::Int128, BaseTyp::Isize) | (BaseTyp::Int128, BaseTyp::Int8) => true, (BaseTyp::Int64, BaseTyp::Int32) | (BaseTyp::Int64, BaseTyp::Int16) | (BaseTyp::Int64, BaseTyp::Isize) | (BaseTyp::Int64, BaseTyp::Int8) => true, (BaseTyp::Int32, BaseTyp::Int16) | (BaseTyp::Int32, BaseTyp::Isize) | (BaseTyp::Int32, BaseTyp::Int8) => true, (BaseTyp::Int16, BaseTyp::Int8) => true, (BaseTyp::Isize, BaseTyp::UInt16) | (BaseTyp::Isize, BaseTyp::UInt8) => true, _ => false, } } type TypeVarCtx = HashMap<TypVar, BaseTyp>; // This function returns Err(_) if there are borrowing problems during unification, // Ok(None) if unification failed because of incompatible types, and // Ok(Some(_)) if unification succeeded fn unify_types( sess: &Session, t1: &Typ, t2: &Typ, typ_ctx: &TypeVarCtx, top_ctx: &TopLevelContext, ) -> TypecheckingResult<Option<TypeVarCtx>> { // We first have to remove all the aliases // We don't support generic aliases for now match &(t1.1).0 { BaseTyp::Named((name1, _), None) => match 
top_ctx.typ_dict.get(name1) { Some(((new_t1_borrow, new_t1), DictEntry::Alias)) => { let new_new_t1_borrow = match (&(t1.0).0, &new_t1_borrow.0) { (Borrowing::Borrowed, Borrowing::Borrowed) => { sess.span_rustspec_err( (t1.0).1.clone(), "double borrowing is forbidden in Hacspec!", ); return Err(()); } (Borrowing::Consumed, Borrowing::Borrowed) | (Borrowing::Borrowed, Borrowing::Consumed) => { (Borrowing::Borrowed, (t1.0).1.clone()) } _ => (Borrowing::Consumed, (t1.0).1.clone()), }; return unify_types( sess, &(new_new_t1_borrow, new_t1.clone()), t2, typ_ctx, top_ctx, ); } _ => (), }, _ => (), } //Same thing for t2 match &(t2.1).0 { BaseTyp::Named((name2, _), None) => match top_ctx.typ_dict.get(name2) { Some(((new_t2_borrow, new_t2), DictEntry::Alias)) => { let new_new_t2_borrow = match (&(t2.0).0, &new_t2_borrow.0) { (Borrowing::Borrowed, Borrowing::Borrowed) => { sess.span_rustspec_err( (t2.0).1.clone(), "double borrowing is forbidden in Hacspec!", ); return Err(()); } (Borrowing::Consumed, Borrowing::Borrowed) | (Borrowing::Borrowed, Borrowing::Consumed) => { (Borrowing::Borrowed, (t2.0).1.clone()) } _ => (Borrowing::Consumed, (t2.0).1.clone()), }; return unify_types( sess, t1, &(new_new_t2_borrow, new_t2.clone()), typ_ctx, top_ctx, ); } _ => (), }, _ => (), } match (&(t1.0).0, &(t2.0).0) { (Borrowing::Consumed, Borrowing::Consumed) | (Borrowing::Borrowed, Borrowing::Borrowed) => { match (&(t1.1).0, &(t2.1).0) { (BaseTyp::Unit, BaseTyp::Unit) => Ok(Some(typ_ctx.clone())), (BaseTyp::Bool, BaseTyp::Bool) => Ok(Some(typ_ctx.clone())), (BaseTyp::UInt128, BaseTyp::UInt128) => Ok(Some(typ_ctx.clone())), (BaseTyp::Int128, BaseTyp::Int128) => Ok(Some(typ_ctx.clone())), (BaseTyp::UInt64, BaseTyp::UInt64) => Ok(Some(typ_ctx.clone())), (BaseTyp::Int64, BaseTyp::Int64) => Ok(Some(typ_ctx.clone())), (BaseTyp::UInt32, BaseTyp::UInt32) => Ok(Some(typ_ctx.clone())), (BaseTyp::Int32, BaseTyp::Int32) => Ok(Some(typ_ctx.clone())), (BaseTyp::UInt16, BaseTyp::UInt16) => Ok(Some(typ_ctx.clone())), (BaseTyp::Int16, BaseTyp::Int16) => Ok(Some(typ_ctx.clone())), (BaseTyp::UInt8, BaseTyp::UInt8) => Ok(Some(typ_ctx.clone())), (BaseTyp::Int8, BaseTyp::Int8) => Ok(Some(typ_ctx.clone())), (BaseTyp::Usize, BaseTyp::Usize) => Ok(Some(typ_ctx.clone())), (BaseTyp::Isize, BaseTyp::Isize) => Ok(Some(typ_ctx.clone())), (BaseTyp::Seq(tc1), BaseTyp::Seq(tc2)) => unify_types( sess, &(((Borrowing::Consumed, (t1.1).1)), *tc1.clone()), &(((Borrowing::Consumed, (t2.1).1)), *tc2.clone()), typ_ctx, top_ctx, ), (BaseTyp::Named(name1, args1), BaseTyp::Named(name2, args2)) => { let (name1, name2) = match (&name1.0, &name2.0) { ( TopLevelIdent { string: name1, .. }, TopLevelIdent { string: name2, .. }, ) => (name1.clone(), name2.clone()), }; if name1 == name2 { match (args1, args2) { (None, None) => Ok(Some(typ_ctx.clone())), (Some(args1), Some(args2)) => { if args1.len() == args2.len() { args1.iter().zip(args2.iter()).fold( Ok(Some(typ_ctx.clone())), |typ_ctx, (arg1, arg2)| match typ_ctx? 
{ None => Ok(None), Some(typ_ctx) => unify_types( sess, &(((Borrowing::Consumed, arg1.1)), arg1.clone()), &(((Borrowing::Consumed, arg2.1)), arg2.clone()), &typ_ctx, top_ctx, ), }, ) } else { Ok(None) } } _ => Ok(None), } } else { Ok(None) } } (BaseTyp::Tuple(ts1), BaseTyp::Tuple(ts2)) => { if ts1.len() == ts2.len() { ts1.iter().zip(ts2.iter()).fold( Ok(Some(typ_ctx.clone())), |typ_ctx, (tc1, tc2)| { typ_ctx?.map_or(Ok(None), |typ_ctx| { unify_types( sess, &(((Borrowing::Consumed, (t1.1).1)), tc1.clone()), &(((Borrowing::Consumed, (t2.1).1)), tc2.clone()), &typ_ctx, top_ctx, ) }) }, ) } else { Ok(None) } } (BaseTyp::Variable(_), BaseTyp::Variable(_)) => Ok(None), (BaseTyp::Variable(id1), bt2) => Ok(Some(typ_ctx.update(id1.clone(), bt2.clone()))), (bt1, BaseTyp::Variable(id2)) => Ok(Some(typ_ctx.update(id2.clone(), bt1.clone()))), _ => Ok(None), } } // We don't need to unify the enum types since they're already dealt // with by the Named case (nominal typing) _ => Ok(None), } } fn unify_types_default_error_message( sess: &Session, t1: &Typ, t2: &Typ, typ_ctx: &TypeVarCtx, top_ctx: &TopLevelContext, ) -> TypecheckingResult<TypeVarCtx> { match unify_types(sess, t1, t2, typ_ctx, top_ctx) { Err(err) => Err(err), Ok(Some(x)) => Ok(x), Ok(None) => { sess.span_rustspec_err( (t1.1).1.clone(), format!( "error while unifying {}{} and {}{}", (t1.0).0, (t1.1).0, (t2.0).0, (t2.1).0 ) .as_str(), ); Err(()) } } } fn bind_variable_type( sess: &Session, ty: &Spanned<BaseTyp>, typ_ctx: &TypeVarCtx, ) -> TypecheckingResult<BaseTyp> { match &ty.0 { BaseTyp::Variable(id) => match typ_ctx.get(&id) { None => { sess.span_rustspec_err( ty.1.clone(), format!("type {} cannot be unified, Hacspec does not handle that kind of parametricity", ty.0).as_str(), ); Err(()) } Some(new_ty) => Ok(new_ty.clone()), }, BaseTyp::Seq(arg_ty) => Ok(BaseTyp::Seq(Box::new(( bind_variable_type(sess, arg_ty.as_ref(), typ_ctx)?, arg_ty.as_ref().1.clone(), )))), BaseTyp::Named(name, args) => Ok(BaseTyp::Named( name.clone(), match args .as_ref() .map::<Result<_, ()>, _>(|args: &Vec<Spanned<BaseTyp>>| { check_vec( args.iter() .map(|arg| { let new_ty: BaseTyp = bind_variable_type(sess, arg, typ_ctx)?; Ok((new_ty, arg.1.clone())) }) .collect(), ) }) { None => None, Some(Ok(x)) => Some(x), Some(Err(_)) => return Err(()), }, )), BaseTyp::Tuple(args) => Ok(BaseTyp::Tuple(check_vec( args.iter() .map(|(arg, span)| { Ok(( bind_variable_type(sess, &(arg.clone(), ty.1.clone()), typ_ctx)?, span.clone(), )) }) .collect(), )?)), _ => Ok(ty.0.clone()), } } type VarContext = HashMap<usize, (Typ, String)>; fn sig_args(sig: &FnValue) -> Vec<Typ> { match sig { FnValue::Local(sig) => sig.args.clone().into_iter().map(|(_, (x, _))| x).collect(), FnValue::External(sig) => sig.args.clone(), FnValue::ExternalNotInHacspec(_) => panic!("should not happen"), } } fn sig_ret(sig: &FnValue) -> BaseTyp { match sig { FnValue::Local(sig) => sig.ret.0.clone(), FnValue::External(sig) => sig.ret.clone(), FnValue::ExternalNotInHacspec(_) => panic!("should not happen"), } } fn find_func( sess: &Session, key1: &FnKey, top_level_context: &TopLevelContext, span: &RustspecSpan, ) -> TypecheckingResult<(FnValue, TypeVarCtx)> { let key1 = &match key1 { FnKey::Independent(_) => key1.clone(), FnKey::Impl(t1, n1) => FnKey::Impl(dealias_type(t1.clone(), top_level_context), n1.clone()), }; let candidates = top_level_context.functions.clone(); let mut has_err = false; let candidates: Vec<_> = candidates .iter() .filter_map(|(key2, sig)| match (key1, key2) { (FnKey::Independent(n1), 
FnKey::Independent(n2)) => match (n1, n2) { (TopLevelIdent { string: n1, .. }, TopLevelIdent { string: n2, .. }) => { if n1 == n2 { Some((HashMap::new(), sig)) } else { None } } }, (FnKey::Impl(t1, n1), FnKey::Impl(t2, n2)) => { let unification: TypecheckingResult<Option<TypeVarCtx>> = unify_types( sess, &( (Borrowing::Consumed, span.clone()), (t1.clone(), span.clone()), ), &( (Borrowing::Consumed, span.clone()), (dealias_type(t2.clone(), top_level_context), span.clone()), ), &HashMap::new(), top_level_context, ); match unification { Ok(Some(new_typ_ctx)) => match (n1, n2) { (TopLevelIdent { string: n1, .. }, TopLevelIdent { string: n2, .. }) => { if n1 == n2 { Some((new_typ_ctx, sig)) } else { None } } }, Ok(None) => None, Err(_) => { has_err = true; None } } } _ => None, }) .collect(); if has_err { return Err(()); } if candidates.len() == 0 { sess.span_rustspec_err(*span, format!("function {} cannot be found", key1).as_str()); return Err(()); } // TODO: figure out why we need this // https://github.com/hacspec/hacspec/issues/194 let candidates = if candidates.iter().all(|(_, candidate)| match candidate { FnValue::ExternalNotInHacspec(_) => true, _ => false, }) { // If all candidates are not in hacspec we return one candidates } else { // If not we discard the not in hacspec candidates and return // one in hacspec candidates .into_iter() .filter(|(_, candidate)| match candidate { FnValue::ExternalNotInHacspec(_) => false, _ => true, }) .collect() }; for (typ_ctx, sig) in candidates { return Ok((sig.clone(), typ_ctx)); } Err(()) } fn find_typ( x: &Ident, var_context: &VarContext, top_level_context: &TopLevelContext, ) -> Option<Typ> { match x { Ident::Unresolved(_) => panic!("name resolution should have already happened"), Ident::TopLevel(name) => top_level_context.consts.get(name).map(|(t, span)| { ( (Borrowing::Consumed, span.clone()), (t.clone(), span.clone()), ) }), Ident::Local(LocalIdent { name: _, id }) => var_context.get(id).map(|x| x.0.clone()), } } fn remove_var(x: &Ident, var_context: &VarContext) -> VarContext { match x { Ident::Local(LocalIdent { id, name: _ }) => var_context.without(id), _ => panic!("trying to lookup in the var context a non-local id"), } } fn add_var(x: &Ident, typ: &Typ, var_context: &VarContext) -> VarContext { match x { Ident::Local(LocalIdent { id, name }) => { var_context.update(id.clone(), (typ.clone(), name.clone())) } _ => panic!("trying to lookup in the var context a non-local id"), } } pub type TypecheckingResult<T> = Result<T, ()>; fn typecheck_expression( sess: &Session, (e, span): &Spanned<Expression>, top_level_context: &TopLevelContext, var_context: &VarContext, ) -> TypecheckingResult<(Expression, Typ, VarContext)> { match e { Expression::Tuple(args) => { let mut var_context = var_context.clone(); let new_and_typ_args = args .iter() .map(|arg| { let (new_arg, ((arg_typ_borrowing, _), arg_typ), new_var_context) = typecheck_expression(sess, arg, top_level_context, &var_context)?; var_context = new_var_context; match arg_typ_borrowing { Borrowing::Borrowed => { sess.span_rustspec_err( arg.1, "borrowed values are forbidden in Hacspec tuples", ); Err(()) } Borrowing::Consumed => Ok(((new_arg, arg.1.clone()), arg_typ)), } }) .collect(); let new_and_typ_args = check_vec(new_and_typ_args)?; let (new_args, typ_args): (Vec<_>, Vec<_>) = new_and_typ_args.into_iter().unzip(); Ok(( Expression::Tuple(new_args), ( (Borrowing::Consumed, span.clone()), (BaseTyp::Tuple(typ_args), span.clone()), ), var_context, )) } Expression::Named(id) => { let new_path = 
Expression::Named(id.clone()); match find_typ(&id, var_context, top_level_context) { None => { sess.span_rustspec_err( *span, format!("the variable {} is not present in the context", id).as_str(), ); Err(()) } Some(t) => { // This is where linearity kicks in if let Borrowing::Consumed = (t.0).0 { if is_copy(&(t.1).0, top_level_context) { Ok((new_path, t.clone(), var_context.clone())) } else { let new_var_context = remove_var(&id, var_context); Ok((new_path, t.clone(), new_var_context)) } } else { Ok((new_path, t.clone(), var_context.clone())) } } } } Expression::MatchWith(arg, arms) => { let (new_arg, t_arg, intermediate_var_context) = typecheck_expression(sess, arg, top_level_context, &var_context)?; let mut acc_var_context = intermediate_var_context.clone(); // First we retrieve the enum type that's being matched on as well // as diverse infos related to it let (mut t_arg_cases, t_arg_enum_name, t_arg_enum_args, enum_type_var_args) = match (t_arg.1).0.clone() { BaseTyp::Named((name, _), args) => { match top_level_context.typ_dict.get(&name) { Some(( ( (Borrowing::Consumed, _), (BaseTyp::Enum(cases, type_args_vars), _), ), DictEntry::Enum, )) => ( cases.clone(), name.clone(), args.clone(), type_args_vars.clone(), ), _ => { sess.span_rustspec_err( arg.1.clone(), format!( "expected an enum type, got {}{}", (t_arg.0).0, (t_arg.1).0 ) .as_str(), ); return Err(()); } } } _ => { sess.span_rustspec_err( arg.1.clone(), format!("expected an enum type, got {}{}", (t_arg.0).0, (t_arg.1).0) .as_str(), ); return Err(()); } }; let mut out_typ = None; // Then we typecheck each match arm let new_arms = check_vec( arms.into_iter() .map(|(arm_enum_ty, arm_case, arm_pattern, arm_exp)| { let arm_enum_ty = dealias_type(arm_enum_ty.clone(), top_level_context); // The enum name is repeated in each match arm so we // make sure it's the good one let (arm_enum_name, arm_enum_args) = match &arm_enum_ty { BaseTyp::Named((t_arm_ty_name, _), t_arm_ty_args) => { if &t_arg_enum_name != t_arm_ty_name { sess.span_rustspec_err( arm_case.1.clone(), format!( "expected {} type, got {}", t_arg_enum_name, arm_enum_ty ) .as_str(), ); return Err(()); } match top_level_context.typ_dict.get(&t_arm_ty_name) { Some((_, DictEntry::Enum)) => { (t_arm_ty_name.clone(), t_arm_ty_args.clone()) } _ => { sess.span_rustspec_err( arm_case.1.clone(), format!("expected an enum type, got {}", arm_enum_ty) .as_str(), ); return Err(()); } } } _ => { sess.span_rustspec_err( arm_case.1.clone(), format!("expected an enum type, got {}", arm_enum_ty).as_str(), ); return Err(()); } }; // Then we proceed with the typechecking, first // by checking correctness of the enum generic // arguments between those in the match arm and those // coming from the expression being matched let mut typ_var_ctx = HashMap::new(); match (&t_arg_enum_args, &arm_enum_args) { (None, None) => (), (Some(arg_args), Some(arms_args)) => { if arg_args.len() != arms_args.len() { sess.span_rustspec_err( arm_case.1.clone(), "discrepancy between the type arguments \ of the matched expression and those of the match arm", ); return Err(()); } for ((arg_arg, arm_arg), enum_type_var_arg) in arg_args .iter() .zip(arms_args) .zip(enum_type_var_args.iter()) { unify_types_default_error_message( sess, &((Borrowing::Consumed, DUMMY_SP.into()), arg_arg.clone()), &((Borrowing::Consumed, DUMMY_SP.into()), arm_arg.clone()), &HashMap::new(), top_level_context, )?; let new_typ_var_ctx = unify_types( sess, &((Borrowing::Consumed, DUMMY_SP.into()), arg_arg.clone()), &( (Borrowing::Consumed, 
DUMMY_SP.into()), ( BaseTyp::Variable(enum_type_var_arg.clone()), DUMMY_SP.into(), ), ), &HashMap::new(), top_level_context, )?; match new_typ_var_ctx { Some(new_typ_var_ctx) => { typ_var_ctx = typ_var_ctx.union(new_typ_var_ctx); } None => { sess.span_rustspec_err( arm_arg.1.clone(), format!( "expected {} type, got {}", arg_arg.0, arm_arg.0 ) .as_str(), ); return Err(()); } } } } _ => { sess.span_rustspec_err( arm_case.1.clone(), "discrepancy between the type arguments \ of the matched expression and those of the match arm", ); return Err(()); } }; // Then we finally proceed with typechecking the arm // expression, for that we retrieve the type of this arm's // payload let (case_index, case_typ) = match t_arg_cases.iter().enumerate().find( |(_, ((t_arg_case_name, _), _))| &arm_case.0 == t_arg_case_name, ) { Some((case_index, (_, t_arg_case_typ))) => { (case_index, t_arg_case_typ.clone()) } None => { sess.span_rustspec_err( arm_case.1.clone(), format!("enum case not found for {}", arm_enum_name.string) .as_str(), ); return Err(()); } }; let case_typ = match case_typ { Some(case_typ) => Some(( bind_variable_type(sess, &case_typ, &typ_var_ctx)?, case_typ.1.clone(), )), None => None, }; t_arg_cases.remove(case_index); // t_arg_cases stores the arms not covered by the match // yet let (new_arm_pattern, new_var_context) = match (arm_pattern, case_typ) { (None, None) => (None, HashMap::new()), (Some(arm_pattern), Some(case_typ)) => { let new_var_context = typecheck_pattern( sess, arm_pattern, &(t_arg.0.clone(), case_typ.clone()), top_level_context, )?; (Some(arm_pattern.clone()), new_var_context) } _ => { sess.span_rustspec_err( arm_case.1.clone(), format!("pattern not coherent with expected type").as_str(), ); return Err(()); } }; let (new_arm_exp, arm_typ, new_var_context) = typecheck_expression( sess, arm_exp, top_level_context, &intermediate_var_context.clone().union(new_var_context), )?; acc_var_context = acc_var_context.clone().intersection(new_var_context); match &out_typ { None => out_typ = Some(arm_typ), Some(out_typ) => { unify_types_default_error_message( sess, &arm_typ, out_typ, &HashMap::new(), top_level_context, )?; } }; Ok(( arm_enum_ty.clone(), arm_case.clone(), new_arm_pattern, (new_arm_exp, arm_exp.1.clone()), )) }) .collect(), )?; // Finally, we check whether all match arms have been included if t_arg_cases.len() > 0 { sess.span_rustspec_err( span.clone(), format!( "some cases are missing in the match: {}", t_arg_cases .into_iter() .map(|((t_case, _), _)| format!("{}", t_case)) .format(", ") ) .as_str(), ); return Err(()); } Ok(( Expression::MatchWith(Box::new((new_arg, arg.1.clone())), new_arms), out_typ.unwrap(), acc_var_context, )) } Expression::EnumInject(enum_ty, case_name, payload) => { let (enum_cases, enum_name, enum_args) = match enum_ty { BaseTyp::Named(enum_name, args) => { match top_level_context.typ_dict.get(&enum_name.0) { Some(( ((Borrowing::Consumed, _), (BaseTyp::Enum(cases, type_args), _)), DictEntry::Enum, )) => { if (args.is_none() && type_args.len() != 0) || (args.is_some() && args.as_ref().unwrap().len() != type_args.len()) { sess.span_rustspec_err( enum_name.1.clone(), format!( "wrong number of type arguments: got {:?}, expected {:?}", args, type_args ) .as_str(), ); return Err(()); } // No need to unify the type_args here (cases, enum_name, args) } _ => { sess.span_rustspec_err(enum_name.1.clone(), "enum not found"); return Err(()); } } } _ => panic!("should not happen"), }; let case_typ = match enum_cases .iter() .find(|((case_name_candidate, _), _)| 
case_name_candidate == &case_name.0) { Some((_, case_typ)) => case_typ, _ => { sess.span_rustspec_err( case_name.1.clone(), format!("enum case not found for {}", enum_name.0).as_str(), ); return Err(()); } }; let mut var_context = var_context.clone(); let new_payload = match (case_typ, payload) { (None, None) => None, (Some(case_typ), Some((payload, payload_span))) => { let (new_payload, payload_type, new_var_context) = typecheck_expression( sess, &(*payload.clone(), payload_span.clone()), top_level_context, &var_context, )?; var_context = new_var_context; unify_types_default_error_message( sess, &((Borrowing::Consumed, case_name.1.clone()), case_typ.clone()), &payload_type, &HashMap::new(), top_level_context, )?; Some((Box::new(new_payload), payload_span.clone())) } _ => { sess.span_rustspec_err(case_name.1.clone(), "incorrect payload"); return Err(()); } }; Ok(( Expression::EnumInject(enum_ty.clone(), case_name.clone(), new_payload), ( (Borrowing::Consumed, span.clone()), ( BaseTyp::Named(enum_name.clone(), enum_args.clone()), span.clone(), ), ), var_context, )) } Expression::InlineConditional(cond, e_t, e_f) => { let (new_cond, t_cond, var_context) = typecheck_expression(sess, cond, top_level_context, &var_context)?; unify_types_default_error_message( sess, &t_cond, &( (Borrowing::Consumed, (t_cond.0).1), (BaseTyp::Bool, (t_cond.1).1), ), &HashMap::new(), top_level_context, )?; let (new_e_t, t_e_t, var_context_true_branch) = typecheck_expression(sess, e_t, top_level_context, &var_context)?; let (new_e_f, t_e_f, var_context_false_branch) = typecheck_expression(sess, e_f, top_level_context, &var_context)?; let final_var_context = var_context .clone() .intersection(var_context_true_branch) .intersection(var_context_false_branch); unify_types_default_error_message( sess, &t_e_t, &t_e_f, &HashMap::new(), top_level_context, )?; Ok(( Expression::InlineConditional( Box::new((new_cond, cond.1.clone())), Box::new((new_e_t, e_t.1.clone())), Box::new((new_e_f, e_f.1.clone())), ), t_e_t, final_var_context, )) } Expression::Binary((op, op_span), e1, e2, _) => { let (new_e1, t1, var_context) = typecheck_expression(sess, e1, top_level_context, var_context)?; let (new_e2, t2, var_context) = typecheck_expression(sess, e2, top_level_context, &var_context)?; match op { BinOpKind::Shl | BinOpKind::Shr => match &(t2.1).0 { BaseTyp::UInt32 | BaseTyp::Usize => { if is_numeric(&t1, top_level_context) { Ok(( Expression::Binary( (op.clone(), op_span.clone()), Box::new((new_e1, e1.1.clone())), Box::new((new_e2, e2.1.clone())), Some(t1.clone()), ), t1, var_context, )) } else { sess.span_rustspec_err( e1.1.clone(), format!( "you can only shift integers, but found type {}{}", (t1.0).0, (t1.1).0 ) .as_str(), ); Err(()) } } _ => { sess.span_rustspec_err( e2.1.clone(), format!( "the shifting amount has to be an u32 or an usize, found type {}{}", (t2.0).0, (t2.1).0 ) .as_str(), ); Err(()) } }, _ => { if unify_types(sess, &t1, &t2, &HashMap::new(), top_level_context)?.is_none() { sess.span_rustspec_err( *span, format!( "wrong types of binary operators, left is {}{} while right is {}{}", (t1.0).0, (t1.1).0, (t2.0).0, (t2.1).0 ) .as_str(), ); Err(()) } else { if is_numeric(&t1, top_level_context) || (match op { BinOpKind::Eq | BinOpKind::Ne => true, _ => false, }) { Ok(( Expression::Binary( (op.clone(), op_span.clone()), Box::new((new_e1, e1.1.clone())), Box::new((new_e2, e2.1.clone())), Some(t1.clone()), ), match op { BinOpKind::Eq | BinOpKind::Lt | BinOpKind::Le | BinOpKind::Ne | BinOpKind::Ge | BinOpKind::Gt => { 
((Borrowing::Consumed, (t1.0).1), (BaseTyp::Bool, (t1.1).1)) } _ => t1, }, var_context, )) } else { if is_bool(&t1, top_level_context) && (match op { BinOpKind::And | BinOpKind::Or => true, _ => false, }) { Ok(( Expression::Binary( (op.clone(), op_span.clone()), Box::new((new_e1, e1.1.clone())), Box::new((new_e2, e2.1.clone())), Some(t1.clone()), ), ((Borrowing::Consumed, (t1.0).1), (BaseTyp::Bool, (t1.1).1)), var_context, )) } else { sess.span_rustspec_err( span.clone(), format!( "operation not available for type {}{}", (t1.0).0, (t1.1).0 ) .as_str(), ); Err(()) } } } } } } Expression::Unary(op, e1, _) => { let (new_e1, e1_typ, new_var_context) = typecheck_expression(sess, e1, top_level_context, var_context)?; Ok(( Expression::Unary( op.clone(), Box::new((new_e1, e1.1.clone())), Some(e1_typ.clone()), ), e1_typ, new_var_context, )) } Expression::Lit(lit) => match lit { Literal::Unit => Ok(( e.clone(), ( (Borrowing::Consumed, span.clone()), (BaseTyp::Unit, span.clone()), ), var_context.clone(), )), Literal::Bool(_) => Ok(( e.clone(), ( (Borrowing::Consumed, span.clone()), (BaseTyp::Bool, span.clone()), ), var_context.clone(), )), Literal::Int128(_) => Ok(( e.clone(), ( (Borrowing::Consumed, span.clone()), (BaseTyp::Int128, span.clone()), ), var_context.clone(), )), Literal::UInt128(_) => Ok(( e.clone(), ( (Borrowing::Consumed, span.clone()), (BaseTyp::UInt128, span.clone()), ), var_context.clone(), )), Literal::Int64(_) => Ok(( e.clone(), ( (Borrowing::Consumed, span.clone()), (BaseTyp::Int64, span.clone()), ), var_context.clone(), )), Literal::UInt64(_) => Ok(( e.clone(), ( (Borrowing::Consumed, span.clone()), (BaseTyp::UInt64, span.clone()), ), var_context.clone(), )), Literal::Int32(_) => Ok(( e.clone(), ( (Borrowing::Consumed, span.clone()), (BaseTyp::Int32, span.clone()), ), var_context.clone(), )), Literal::UInt32(_) => Ok(( e.clone(), ( (Borrowing::Consumed, span.clone()), (BaseTyp::UInt32, span.clone()), ), var_context.clone(), )), Literal::Int16(_) => Ok(( e.clone(), ( (Borrowing::Consumed, span.clone()), (BaseTyp::Int16, span.clone()), ), var_context.clone(), )), Literal::UInt16(_) => Ok(( e.clone(), ( (Borrowing::Consumed, span.clone()), (BaseTyp::UInt16, span.clone()), ), var_context.clone(), )), Literal::Int8(_) => Ok(( e.clone(), ( (Borrowing::Consumed, span.clone()), (BaseTyp::Int8, span.clone()), ), var_context.clone(), )), Literal::UInt8(_) => Ok(( e.clone(), ( (Borrowing::Consumed, span.clone()), (BaseTyp::UInt8, span.clone()), ), var_context.clone(), )), Literal::Usize(_) => Ok(( e.clone(), ( (Borrowing::Consumed, span.clone()), (BaseTyp::Usize, span.clone()), ), var_context.clone(), )), Literal::Isize(_) => Ok(( e.clone(), ( (Borrowing::Consumed, span.clone()), (BaseTyp::Isize, span.clone()), ), var_context.clone(), )), Literal::Str(_) => Ok(( e.clone(), ( (Borrowing::Consumed, span.clone()), (BaseTyp::Str, span.clone()), ), var_context.clone(), )), }, Expression::NewArray(array_type, _, elements) => { match array_type { Some(array_type) => { let (array_len, (cell_type, cell_type_span)) = is_array( sess, &( (Borrowing::Consumed, array_type.1.clone()), ( BaseTyp::Named(array_type.clone(), None), array_type.1.clone(), ), ), top_level_context, &array_type.1, )?; let array_len = match array_len { Some(len) => len, None => { sess.span_rustspec_err( array_type.1.clone(), "type should be an array but is Seq", ); return Err(()); } }; match &array_len.0 { ArraySize::Integer(array_len) => { if elements.len() != *array_len { sess.span_rustspec_err( span.clone(), format!( "array {} 
initializer expected {} elements but got {}", array_type.0, array_len, elements.len() ) .as_str(), ) } } ArraySize::Ident(_) => (), // we trust Rust typechecking for this case, // in order to avoid redoing here a const computation engine }; let mut var_context = var_context.clone(); let new_elements = check_vec( elements .iter() .map(|element| { let (new_element, element_typ, new_var_context) = typecheck_expression( sess, element, top_level_context, &var_context, )?; var_context = new_var_context; match unify_types( sess, &( (Borrowing::Consumed, cell_type_span.clone()), (cell_type.clone(), cell_type_span.clone()), ), &element_typ, &HashMap::new(), top_level_context, )? { None => { sess.span_rustspec_err( element.1.clone(), format!( "expected type {}, got type {}{}", &cell_type, &(element_typ.0).0, &(element_typ.1).0 ) .as_str(), ); Err(()) } Some(_) => { // Here we can drop the unified variables because there should not be any // unified variables (no generic functions involved) Ok((new_element, element.1.clone())) } } }) .collect(), )?; let new_array_typ = ( (Borrowing::Consumed, array_type.1.clone()), ( BaseTyp::Named(array_type.clone(), None), array_type.1.clone(), ), ); Ok(( Expression::NewArray( Some(array_type.clone()), Some(cell_type), new_elements, ), new_array_typ, var_context, )) } None => { let mut cell_type: Option<(BaseTyp, RustspecSpan)> = None; let mut var_context = var_context.clone(); let new_elements = check_vec( elements .iter() .map(|element| { let (new_element, element_typ, new_var_context) = typecheck_expression( sess, element, top_level_context, &var_context, )?; var_context = new_var_context; match &cell_type { Some((cell_type, cell_type_span)) => { match unify_types( sess, &( (Borrowing::Consumed, cell_type_span.clone()), (cell_type.clone(), cell_type_span.clone()), ), &element_typ, &HashMap::new(), top_level_context, )? 
{ None => { sess.span_rustspec_err( element.1.clone(), format!( "expected type {}, got type {}{}", &cell_type, &(element_typ.0).0, &(element_typ.1).0 ) .as_str(), ); Err(()) } Some(_) => { // Here we can drop the unified variables because there should not be any // unified variables (no generic functions involved) Ok((new_element, element.1.clone())) } } } None => match element_typ.0 .0 { Borrowing::Consumed => { cell_type = Some(element_typ.1); Ok(element.clone()) } Borrowing::Borrowed => { sess.span_rustspec_err( (element_typ.0).1, "cannot insert references in a Seq", ); Err(()) } }, } }) .collect(), )?; let cell_type = match cell_type { Some(x) => x, None => { sess.span_rustspec_err( span.clone(), "use Seq::new() to create an empty sequence instead", ); return Err(()); } }; Ok(( Expression::NewArray(None, Some(cell_type.0.clone()), new_elements), ( (Borrowing::Consumed, span.clone()), (BaseTyp::Seq(Box::new(cell_type)), span.clone()), ), var_context, )) } } } Expression::ArrayIndex((x, x_span), e2, _) => { let t1 = match find_typ(&x, var_context, top_level_context) { None => { sess.span_rustspec_err( *x_span, format!("the variable {} is unknown", x).as_str(), ); return Err(()); } Some(t) => t, }; let (new_e2, t2, var_context) = typecheck_expression(sess, e2, top_level_context, &var_context)?; let (_, (cell_t, cell_t_span)) = is_array(sess, &t1, top_level_context, x_span)?; // We ignore t1.0 because we can read from both consumed and borrowed array types if let Borrowing::Borrowed = (t2.0).0 { sess.span_rustspec_err(e2.1, "cannot index array with a borrowed type"); return Err(()); } if is_index(&(t2.1).0, top_level_context) { Ok(( Expression::ArrayIndex( (x.clone(), x_span.clone()), Box::new((new_e2, e2.1.clone())), Some(t1.clone()), ), ( (Borrowing::Consumed, (t1.0).1), (cell_t.clone(), cell_t_span.clone()), ), var_context, )) } else { sess.span_rustspec_err( e2.1, format!( "expected a public integer to index array but got type {}{}", (t2.0).0, (t2.1).0 ) .as_str(), ); Err(()) } } Expression::FuncCall(prefix, name, args) => { let (f_sig, typ_var_ctx) = find_func( sess, &match prefix { None => FnKey::Independent(name.0.clone()), Some((prefix, _)) => FnKey::Impl(prefix.clone(), name.0.clone()), }, top_level_context, &name.1, )?; let mut typ_var_ctx = typ_var_ctx; if let FnValue::ExternalNotInHacspec(sig_str) = f_sig { sess.span_rustspec_err( name.1.clone(), format!( "function {}{} is known but its signature is not in Hacspec: {}", (match prefix { None => String::new(), Some(prefix) => format!("{}::", &prefix.0), }), &name.0, sig_str ) .as_str(), ); return Err(()); }; let sig_args = sig_args(&f_sig); if sig_args.len() != args.len() { sess.span_rustspec_err( *span, format!( "function {} was expecting {} arguments but got {}", &name.0, sig_args.len(), args.len() ) .as_str(), ) } let mut var_context = var_context.clone(); let mut new_args = Vec::new(); for (sig_t, ((arg, arg_span), (arg_borrow, arg_borrow_span))) in sig_args.iter().zip(args) { let (new_arg, arg_t, new_var_context) = typecheck_expression( sess, &(arg.clone(), arg_span.clone()), top_level_context, &var_context, )?; let new_arg_t = match (&(arg_t.0).0, &arg_borrow) { (Borrowing::Borrowed, Borrowing::Borrowed) => { sess.span_rustspec_err( *arg_borrow_span, "double borrowing is forbidden in Hacspec!", ); return Err(()); } (Borrowing::Consumed, Borrowing::Borrowed) => { match arg { Expression::Named(_) => { // If the argument is a variable, then the consumed // variables are actually // not consumed so we don't update the var 
context } _ => { // in the case of a tuple or anything else // you want to register all the moves // that have happened var_context = new_var_context; } } ((Borrowing::Borrowed, (arg_t.0).1.clone()), arg_t.1.clone()) } _ => { var_context = new_var_context; arg_t.clone() } }; new_args.push(( (new_arg, arg_span.clone()), (arg_borrow.clone(), arg_borrow_span.clone()), )); match unify_types(sess, &new_arg_t, sig_t, &typ_var_ctx, top_level_context)? { None => { sess.span_rustspec_err( *arg_span, format!( "expected type {}{} for function argument, got {}{}", (sig_t.0).0, (sig_t.1).0, (arg_t.0).0, (arg_t.1).0 ) .as_str(), ); return Err(()); } Some(new_ctx) => typ_var_ctx = new_ctx, } } let ret_ty = sig_ret(&f_sig); let ret_ty = match bind_variable_type(sess, &(ret_ty.clone(), span.clone()), &typ_var_ctx) { Ok(ret_ty) => ret_ty, Err(_) => { sess.span_rustspec_err( name.1, "A type variable cannot be unified, please provide \ the type parameters for this function", ); return Err(()); } }; Ok(( Expression::FuncCall(prefix.clone(), name.clone(), new_args), ( (Borrowing::Consumed, name.1.clone()), (ret_ty, name.1.clone()), ), var_context, )) } Expression::MethodCall(sel, _, (f, f_span), orig_args) => { let (sel, sel_borrow) = sel.as_ref(); let mut var_context = var_context.clone(); // We omit to take the new var context because it will be retypechecked later, this // is just to determine wich type the method belongs to let (_, sel_typ, _) = typecheck_expression(sess, &sel, top_level_context, &var_context)?; let (f_sig, typ_var_ctx) = find_func( sess, &FnKey::Impl((sel_typ.1).0.clone(), f.clone()), top_level_context, f_span, )?; let mut typ_var_ctx = typ_var_ctx; if let FnValue::ExternalNotInHacspec(sig_str) = f_sig { sess.span_rustspec_err( *f_span, format!( "function {}::{} is known but its signature is not in Hacspec: {}", (sel_typ.1).0, f, sig_str ) .as_str(), ); return Err(()); }; let sig_args = sig_args(&f_sig); // Because self arguments are implictly borrowed in Rust, we have to insert // this implicit borrow logic here let new_sel_borrow = match ( &sel_borrow.0, &(sel_typ.0).0, &(sig_args.first().unwrap().0).0, ) { (Borrowing::Consumed, Borrowing::Consumed, Borrowing::Borrowed) => { (Borrowing::Borrowed, sel_borrow.1.clone()) } _ => sel_borrow.clone(), }; let mut args = Vec::new(); args.push((sel.clone(), new_sel_borrow.clone())); args.extend(orig_args.clone()); let mut new_args = Vec::new(); if sig_args.len() != args.len() { sess.span_rustspec_err( *span, format!( "method {}::{} was expecting {} arguments but got {}", (sel_typ.1).0, f, sig_args.len(), args.len() ) .as_str(), ) } for (sig_t, ((arg, arg_span), (arg_borrow, arg_borrow_span))) in sig_args.iter().zip(args) { let (new_arg, arg_t, new_var_context) = typecheck_expression( sess, &(arg.clone(), arg_span.clone()), top_level_context, &var_context, )?; let new_arg_t = match (&(arg_t.0).0, &arg_borrow) { (Borrowing::Borrowed, Borrowing::Borrowed) => { sess.span_rustspec_err( arg_borrow_span, "double borrowing is forbidden in Hacspec!", ); return Err(()); } (Borrowing::Consumed, Borrowing::Borrowed) => { match arg { Expression::Named(_) => { // If the argument is a variable, then the consumed // variables are actually // not consumed so we don't update the var context } _ => { // in the case of a tuple or anything else // you want to register all the moves // that have happened var_context = new_var_context; } } ((Borrowing::Borrowed, (arg_t.0).1.clone()), arg_t.1.clone()) } _ => { var_context = new_var_context; arg_t.clone() } }; 
new_args.push(( (new_arg, arg_span.clone()), (arg_borrow.clone(), arg_borrow_span.clone()), )); match unify_types(sess, &new_arg_t, sig_t, &typ_var_ctx, top_level_context)? { None => { sess.span_rustspec_err( arg_span, format!( "expected type {}{}, got {}{}", (sig_t.0).0, (sig_t.1).0, (new_arg_t.0).0, (new_arg_t.1).0 ) .as_str(), ); return Err(()); } Some(new_ctx) => typ_var_ctx = new_ctx, } } let new_sel = new_args.first().unwrap().clone(); new_args = new_args[1..].to_vec(); let ret_ty = sig_ret(&f_sig); let ret_ty = bind_variable_type(sess, &(ret_ty.clone(), span.clone()), &typ_var_ctx)?; Ok(( Expression::MethodCall( Box::new(new_sel), Some(sel_typ), (f.clone(), f_span.clone()), new_args, ), ( (Borrowing::Consumed, f_span.clone()), (ret_ty, f_span.clone()), ), var_context, )) } Expression::IntegerCasting(e1, t1, _) => { let (new_e1, e1_typ, var_context) = typecheck_expression(sess, e1, top_level_context, var_context)?; if (e1_typ.0).0 == Borrowing::Borrowed { sess.span_rustspec_err(e1.1.clone(), "cannot cast borrowed expression"); return Err(()); } if !is_castable_integer(&(e1_typ.1).0, top_level_context) { sess.span_rustspec_err( e1.1.clone(), format!( "this expression of type {}{} cannot be casted", (e1_typ.0).0, (e1_typ.1).0 ) .as_str(), ); return Err(()); } if !is_castable_integer(&t1.0, top_level_context) { sess.span_rustspec_err(e1.1.clone(), "impossible to cast to this type"); return Err(()); } if !is_safe_casting(&(e1_typ.1).0, &t1.0) { sess.span_rustspec_warn( span.clone(), format!( "casting from {} to {} is not safe (i.e it can lead to overflow)", &e1_typ.1 .0, &t1.0 ) .as_str(), ); } Ok(( Expression::IntegerCasting( Box::new((new_e1, e1.1.clone())), t1.clone(), Some((e1_typ.1).0.clone()), ), ((Borrowing::Consumed, t1.1.clone()), t1.clone()), var_context, )) } } } fn typecheck_pattern( sess: &Session, (pat, pat_span): &Spanned<Pattern>, (borrowing_typ, typ): &Typ, top_ctx: &TopLevelContext, ) -> TypecheckingResult<VarContext> { match &typ.0 { BaseTyp::Named((name, _), None) => match top_ctx.typ_dict.get(name) { Some((((Borrowing::Consumed, _), (new_ty, _)), DictEntry::Alias)) => { return typecheck_pattern( sess, &(pat.clone(), pat_span.clone()), &(borrowing_typ.clone(), (new_ty.clone(), typ.1.clone())), top_ctx, ) } _ => (), }, _ => (), }; match (pat, &typ.0) { ( Pattern::SingleCaseEnum((pat_enum_name, _), inner_pat), BaseTyp::Named((typ_name, _), None), ) if pat_enum_name == typ_name => match top_ctx.typ_dict.get(typ_name) { Some(( ((Borrowing::Consumed, _), (BaseTyp::Enum(cases, _type_args), cases_span)), DictEntry::Enum, )) => { if cases.len() != 1 { sess.span_rustspec_err( *pat_span, format!( "this pattern is matching the enum {} with multiple cases", pat_enum_name ) .as_str(), ); return Err(()); } let ((case_name, _), case_typ) = cases.into_iter().next().unwrap(); if case_name.string != pat_enum_name.string { sess.span_rustspec_err( *pat_span, format!( "this pattern matches the enum {} with a single case instead of the wrapper struct {}", case_name, pat_enum_name, ) .as_str(), ); return Err(()); } match case_typ { None => { sess.span_rustspec_err( *pat_span, format!( "this pattern is matching the enum {} with one case but no payload", pat_enum_name ) .as_str(), ); return Err(()); } Some((case_typ, _)) => typecheck_pattern( sess, inner_pat, &( borrowing_typ.clone(), // This propagates the borrowing down the enum (case_typ.clone(), cases_span.clone()), ), top_ctx, ), } } _ => { sess.span_rustspec_err( *pat_span, format!( "let-binding pattern expected a {} struct but the type 
is {}", pat_enum_name, typ.0 ) .as_str(), ); Err(()) } }, (Pattern::SingleCaseEnum(name, _), _) => { sess.span_rustspec_err( *pat_span, format!( "let-binding pattern expected a {} struct but the type is {}", name.0, typ.0 ) .as_str(), ); Err(()) } (Pattern::Tuple(pat_args), BaseTyp::Tuple(ref typ_args)) => { if pat_args.len() != typ_args.len() { sess.span_rustspec_err(*pat_span, format!("let-binding tuple pattern has {} variables but {} were expected from the type", pat_args.len(), typ_args.len()).as_str() ) }; let acc_var = pat_args.iter().zip(typ_args.iter()).fold( Ok(HashMap::new()), |acc, (pat_arg, typ_arg)| { let acc_var = acc?; let sub_var_context = typecheck_pattern( sess, pat_arg, &(borrowing_typ.clone(), typ_arg.clone()), top_ctx, )?; Ok(acc_var.union(sub_var_context)) }, )?; Ok(acc_var) } (Pattern::Tuple(_), _) => { sess.span_rustspec_err( *pat_span, format!( "let-binding pattern expected a tuple but the type is {}", typ.0 ) .as_str(), ); Err(()) } (Pattern::WildCard, _) => Ok(HashMap::new()), (Pattern::IdentPat(x), _) => { let (id, name) = match &x { Ident::Local(LocalIdent { id, name }) => (id.clone(), name.clone()), _ => panic!("should not happen"), }; Ok(HashMap::unit( id, ((borrowing_typ.clone(), typ.clone()), name), )) } } } fn var_set_to_tuple(vars: &VarSet, span: &RustspecSpan) -> Statement { Statement::ReturnExp(if vars.0.len() > 0 { Expression::Tuple( vars.0 .iter() .sorted() .map(|i| (Expression::Named(Ident::Local(i.clone())), span.clone())) .collect(), ) } else { Expression::Lit(Literal::Unit) }) } fn dealias_type(ty: BaseTyp, top_level_context: &TopLevelContext) -> BaseTyp { match &ty { BaseTyp::Named((name, _), None) => match top_level_context.typ_dict.get(name) { Some((((Borrowing::Consumed, _), (aliased_ty, _)), DictEntry::Alias)) => { dealias_type(aliased_ty.clone(), top_level_context) } _ => ty, }, _ => ty, } } // This function returns the type in the OK branch of the result return type // if there is a question mark fn typecheck_question_mark( sess: &Session, question_mark: bool, expr_typ: Typ, return_typ: &Spanned<BaseTyp>, expr_span: RustspecSpan, top_level_context: &TopLevelContext, ) -> TypecheckingResult<Typ> { let mut expr_typ = ( expr_typ.0, ( dealias_type(expr_typ.1 .0, top_level_context), expr_typ.1 .1, ), ); let return_typ = &( dealias_type(return_typ.0.clone(), top_level_context), return_typ.1.clone(), ); if question_mark { match expr_typ { ( (Borrowing::Consumed, _), (BaseTyp::Named((TopLevelIdent { string: name, .. }, _), Some(args)), _), ) if name == "Result" && args.len() == 2 => { let ok_typ = &args[0]; let err_typ = &args[1]; match return_typ { ( BaseTyp::Named( ( TopLevelIdent { string: return_name, .. }, _, ), Some(return_args), ), _, ) if return_name == "Result" && return_args.len() == 2 => { let err_typ_ret = &args[1]; match unify_types( sess, &((Borrowing::Consumed, err_typ.1.clone()), err_typ.clone()), &( (Borrowing::Consumed, err_typ_ret.1.clone()), err_typ_ret.clone(), ), &HashMap::new(), top_level_context, )? 
{ Some(_) => { expr_typ = ((Borrowing::Consumed, ok_typ.1.clone()), ok_typ.clone()); } None => { sess.span_rustspec_err( expr_span, format!( "the type returned in case of error by this \ expression is {}, expected {}", err_typ.0, err_typ_ret.0, ) .as_str(), ); return Err(()); } } } _ => { sess.span_rustspec_err( return_typ.1, format!( "expected a result type for this \ return type because of a question mark in the function, got {}", return_typ.0, ) .as_str(), ); return Err(()); } } } _ => { sess.span_rustspec_err( expr_span, format!( "expected a result type for this \ expression ending with a question mark, got {}{}", (expr_typ.0).0, (expr_typ.1).0 ) .as_str(), ); return Err(()); } } } Ok(expr_typ) } fn typecheck_statement( sess: &Session, (s, s_span): Spanned<Statement>, top_level_context: &TopLevelContext, var_context: &VarContext, return_typ: &Spanned<BaseTyp>, ) -> TypecheckingResult<(Statement, Typ, VarContext, VarSet)> { match &s { Statement::LetBinding((pat, pat_span), typ, ref expr, question_mark) => { let (new_expr, expr_typ, new_var_context) = typecheck_expression(sess, expr, top_level_context, var_context)?; let expr_typ = typecheck_question_mark( sess, *question_mark, expr_typ, return_typ, expr.1.clone(), top_level_context, )?; let typ = match typ { None => Some ((expr_typ.clone(), expr.1.clone())), Some((inner_typ, _)) => { if unify_types(sess, inner_typ, &expr_typ, &HashMap::new(), top_level_context)? .is_none() { sess.span_rustspec_err( *pat_span, format!( "wrong type declared for variable: expected {}{}, found {}{}", (inner_typ.0).0, (inner_typ.1).0, (expr_typ.0).0, (expr_typ.1).0 ) .as_str(), ); return Err(()); } typ.clone() } }; let pat_var_context = typecheck_pattern( sess, &(pat.clone(), pat_span.clone()), &expr_typ, top_level_context, )?; Ok(( Statement::LetBinding( (pat.clone(), pat_span.clone()), typ.clone(), (new_expr, expr.1.clone()), *question_mark, ), ((Borrowing::Consumed, s_span), (BaseTyp::Unit, s_span)), new_var_context.clone().union(pat_var_context), VarSet(HashSet::new()), )) } Statement::Reassignment((x, x_span), e, question_mark) => { let (new_e, e_typ, new_var_context) = typecheck_expression(sess, &e, top_level_context, var_context)?; let e_typ = typecheck_question_mark( sess, *question_mark, e_typ, return_typ, e.1.clone(), top_level_context, )?; let x_typ = find_typ(&x, var_context, top_level_context); let x_typ = match x_typ { Some(t) => t, None => { sess.span_rustspec_err(*x_span, "trying to reassign to an inexisting variable"); return Err(()); } }; if unify_types(sess, &e_typ, &x_typ, &HashMap::new(), top_level_context)?.is_none() { sess.span_rustspec_err( e.1, format!( "variable {} has type {}{} but tried to reassign with an expression of type {}{}", x, (x_typ.0).0, (x_typ.1).0, (e_typ.0).0, (e_typ.1).0 ).as_str(), ); return Err(()); }; Ok(( Statement::Reassignment( (x.clone(), x_span.clone()), (new_e, e.1.clone()), *question_mark, ), ((Borrowing::Consumed, s_span), (BaseTyp::Unit, s_span)), add_var(&x, &x_typ, &new_var_context), VarSet(HashSet::unit(match x.clone() { Ident::Local(x) => x, _ => panic!("should not happen"), })), )) } Statement::ArrayUpdate((x, x_span), e1, e2, question_mark, _) => { let (new_e1, e1_t, var_context) = typecheck_expression(sess, &e1, top_level_context, var_context)?; let (new_e2, e2_t, var_context) = typecheck_expression(sess, &e2, top_level_context, &var_context)?; let e2_t = typecheck_question_mark( sess, *question_mark, e2_t, return_typ, e2.1.clone(), top_level_context, )?; if !is_index(&(e1_t.1).0, 
top_level_context) {
                sess.span_rustspec_err(
                    e1.1,
                    format!(
                        "index should have an integer type but instead has {}{}",
                        (e1_t.0).0,
                        (e1_t.1).0,
                    )
                    .as_str(),
                );
                return Err(());
            };
            let x_typ = find_typ(&x, &var_context, top_level_context);
            let x_typ = match x_typ {
                Some(t) => t,
                None => {
                    sess.span_rustspec_err(*x_span, "trying to update a nonexistent array");
                    return Err(());
                }
            };
            let (_, cell_t) = is_array(sess, &x_typ, top_level_context, x_span)?;
            if unify_types(
                sess,
                &e2_t,
                &((Borrowing::Consumed, x_span.clone()), cell_t.clone()),
                &HashMap::new(),
                top_level_context,
            )?
            .is_none()
            {
                sess.span_rustspec_err(
                    e2.1,
                    format!(
                        "array {} has cells of type {} but tried to reassign cell with an expression of type {}{}",
                        x, cell_t.0, (e2_t.0).0, (e2_t.1).0
                    ).as_str(),
                );
                return Err(());
            };
            Ok((
                Statement::ArrayUpdate(
                    (x.clone(), x_span.clone()),
                    (new_e1, e1.1.clone()),
                    (new_e2, e2.1.clone()),
                    *question_mark,
                    Some(x_typ),
                ),
                ((Borrowing::Consumed, s_span), (BaseTyp::Unit, s_span)),
                var_context,
                VarSet(HashSet::unit(match x.clone() {
                    Ident::Local(x) => x,
                    _ => panic!("should not happen"),
                })),
            ))
        }
        Statement::ReturnExp(e) => {
            let (new_e, e_t, var_context) =
                typecheck_expression(sess, &(e.clone(), s_span), top_level_context, var_context)?;
            Ok((
                Statement::ReturnExp(new_e),
                e_t,
                var_context,
                VarSet(HashSet::new()),
            ))
        }
        Statement::Conditional(cond, (b1, b1_span), b2, _) => {
            let original_var_context = var_context;
            let (new_cond, cond_t, var_context) =
                typecheck_expression(sess, &cond, top_level_context, var_context)?;
            unify_types_default_error_message(
                sess,
                &cond_t,
                &(
                    (Borrowing::Consumed, (cond_t.0).1),
                    (BaseTyp::Bool, (cond_t.1).1),
                ),
                &HashMap::new(),
                top_level_context,
            )?;
            let (new_b1, var_context_b1) = typecheck_block(
                sess,
                (b1.clone(), b1_span.clone()),
                top_level_context,
                &var_context,
                return_typ,
            )?;
            let (new_b2, var_context_b2) = match b2 {
                None => (None, var_context.clone()),
                Some((b2, b2_span)) => {
                    let (new_b2, var_context_b2) = typecheck_block(
                        sess,
                        (b2.clone(), b2_span.clone()),
                        top_level_context,
                        &var_context,
                        return_typ,
                    )?;
                    (Some((new_b2, *b2_span)), var_context_b2)
                }
            };
            match &new_b1.return_typ {
                None => panic!("should not happen"),
                Some(((Borrowing::Consumed, _), (BaseTyp::Unit, _))) => (),
                Some(((b_t, _), (t, _))) => {
                    sess.span_rustspec_err(
                        *b1_span,
                        format!("block has return type {}{} but was expecting unit", b_t, t)
                            .as_str(),
                    );
                    return Err(());
                }
            };
            match &new_b2 {
                None => (),
                Some((new_b2, _)) => {
                    match &new_b2.return_typ {
                        None => panic!("should not happen"),
                        Some(((Borrowing::Consumed, _), (BaseTyp::Unit, _))) => (),
                        Some(((b_t, _), (t, _))) => {
                            sess.span_rustspec_err(
                                *b1_span,
                                format!(
                                    "block has return type {}{} but was expecting unit",
                                    b_t, t
                                )
                                .as_str(),
                            );
                            return Err(());
                        }
                    };
                }
            }
            let new_mutated = VarSet(
                match &new_b1.mutated {
                    None => HashSet::new(),
                    Some(m) => m.vars.0.clone(),
                }
                .union(match &new_b2 {
                    None => HashSet::new(),
                    Some((new_b2, _)) => match &new_b2.mutated {
                        None => HashSet::new(),
                        Some(m) => m.vars.0.clone(),
                    },
                }),
            );
            let mut_tuple = var_set_to_tuple(&new_mutated, &s_span);
            Ok((
                Statement::Conditional(
                    (new_cond, cond.1.clone()),
                    (new_b1, *b1_span),
                    new_b2,
                    Some(Box::new(MutatedInfo {
                        vars: new_mutated.clone(),
                        stmt: mut_tuple,
                    })),
                ),
                ((Borrowing::Consumed, s_span), (BaseTyp::Unit, s_span)),
                original_var_context
                    .clone()
                    .intersection(var_context_b1)
                    .intersection(var_context_b2),
                new_mutated,
            ))
        }
        Statement::ForLoop(x, e1, e2, (b, b_span)) => {
            let original_var_context = var_context;
            let (new_e1, t_e1, var_context) = typecheck_expression(sess,
e1, top_level_context, var_context)?;
            let (new_e2, t_e2, var_context) =
                typecheck_expression(sess, e2, top_level_context, &var_context)?;
            match (
                t_e1.0.clone(),
                dealias_type(t_e1.1 .0.clone(), top_level_context),
            ) {
                ((Borrowing::Consumed, _), BaseTyp::Usize) => (),
                _ => {
                    sess.span_rustspec_err(
                        e1.1,
                        format!(
                            "loop range bound should be a usize but has type {}{}",
                            (t_e1.0).0,
                            (t_e1.1).0
                        )
                        .as_str(),
                    );
                    return Err(());
                }
            };
            match (
                t_e2.0.clone(),
                dealias_type(t_e2.1 .0.clone(), top_level_context),
            ) {
                ((Borrowing::Consumed, _), BaseTyp::Usize) => (),
                _ => {
                    sess.span_rustspec_err(
                        e2.1,
                        format!(
                            "loop range bound should be a usize but has type {}{}",
                            (t_e2.0).0,
                            (t_e2.1).0
                        )
                        .as_str(),
                    );
                    return Err(());
                }
            };
            let var_context = match x {
                None => var_context,
                Some((x, x_span)) => add_var(
                    &x,
                    &((Borrowing::Consumed, *x_span), (BaseTyp::Usize, *x_span)),
                    &var_context,
                ),
            };
            let (new_b, var_context) = typecheck_block(
                sess,
                (b.clone(), b_span.clone()),
                top_level_context,
                &var_context,
                return_typ,
            )?;
            let mutated_vars = new_b.mutated.as_ref().unwrap().as_ref().vars.clone();
            // Linear variables cannot be consumed in the body of the loop, so we check that
            let var_diff = original_var_context.clone().difference(var_context.clone());
            for (var_diff_id, (_, var_diff_name)) in var_diff {
                if original_var_context.contains_key(&var_diff_id) {
                    sess.span_rustspec_err(
                        b_span.clone(),
                        format!("loop body consumes linear variable: {}", var_diff_name).as_str(),
                    );
                    return Err(());
                }
            }
            Ok((
                Statement::ForLoop(
                    x.clone(),
                    (new_e1, e1.1.clone()),
                    (new_e2, e2.1.clone()),
                    (new_b, *b_span),
                ),
                ((Borrowing::Consumed, s_span), (BaseTyp::Unit, s_span)),
                original_var_context.clone().intersection(var_context),
                mutated_vars,
            ))
        }
    }
}

fn typecheck_block(
    sess: &Session,
    (b, b_span): Spanned<Block>,
    top_level_context: &TopLevelContext,
    original_var_context: &VarContext,
    function_return_typ: &Spanned<BaseTyp>,
) -> TypecheckingResult<(Block, VarContext)> {
    let mut var_context = original_var_context.clone();
    let mut mutated_vars = VarSet(HashSet::new());
    let mut return_typ = Some((
        (Borrowing::Consumed, DUMMY_SP.into()),
        (BaseTyp::Unit, DUMMY_SP.into()),
    ));
    let mut new_stmts = Vec::new();
    let n_stmts = b.stmts.len();
    for (i, s) in b.stmts.into_iter().enumerate() {
        let s_span = s.1.clone();
        let (new_stmt, stmt_typ, new_var_context, new_mutated_vars) = typecheck_statement(
            sess,
            s,
            top_level_context,
            &var_context,
            function_return_typ,
        )?;
        new_stmts.push((new_stmt, s_span));
        var_context = new_var_context;
        mutated_vars = VarSet(mutated_vars.0.clone().union(new_mutated_vars.0));
        if i + 1 < n_stmts {
            // Statement return types should be unit except for the last one
            match stmt_typ {
                ((Borrowing::Consumed, _), (BaseTyp::Unit, _)) => (),
                _ => {
                    sess.span_rustspec_err(s_span, "statement should have a unit type here");
                    return Err(());
                }
            }
        } else {
            return_typ = Some(stmt_typ)
        }
    }
    // We only keep in the list of mutated vars of this block the ones that
    // were defined at the beginning of the block
    mutated_vars
        .0
        .retain(|mut_var| original_var_context.contains_key(&mut_var.id));
    let mut_tuple = var_set_to_tuple(&mutated_vars, &b_span);
    let contains_question_mark = Some(new_stmts.iter().any(|s| match s {
        (Statement::Reassignment(_, _, true), _) | (Statement::LetBinding(_, _, _, true), _) => {
            true
        }
        (Statement::Conditional(_, then_b, else_b, _), _) => {
            then_b.0.contains_question_mark.unwrap()
                || (match else_b {
                    None => false,
                    Some(else_b) => else_b.0.contains_question_mark.unwrap(),
                })
        }
        (Statement::ForLoop(_, _, _, loop_b), _) =>
loop_b.0.contains_question_mark.unwrap(), _ => false, })); Ok(( Block { stmts: new_stmts, mutated: Some(Box::new(MutatedInfo { vars: mutated_vars, stmt: mut_tuple, })), return_typ, contains_question_mark, }, var_context.intersection(original_var_context.clone()), )) } fn typecheck_item( sess: &Session, item: &DecoratedItem, top_level_context: &TopLevelContext, ) -> TypecheckingResult<DecoratedItem> { let i = &item.item; let i = match &i { Item::NaturalIntegerDecl(typ_ident, secrecy, canvas_size, info) => { let canvas_size_span = canvas_size.1.clone(); let (new_canvas_size, canvas_size_typ, _) = typecheck_expression(sess, canvas_size, top_level_context, &HashMap::new())?; if let None = unify_types( sess, &( (Borrowing::Consumed, canvas_size_span), (BaseTyp::Usize, canvas_size_span), ), &canvas_size_typ, &HashMap::new(), top_level_context, )? { sess.span_rustspec_err( canvas_size_span, format!( "expected type usize, got {}{}", (canvas_size_typ.0).0, (canvas_size_typ.1).0 ) .as_str(), ) }; Ok(Item::NaturalIntegerDecl( typ_ident.clone(), secrecy.clone(), (new_canvas_size, canvas_size_span), info.clone(), )) } Item::AliasDecl(_, _) | Item::ImportedCrate(_) | Item::EnumDecl(_, _) => Ok(i.clone()), Item::FnDecl((f, f_span), sig, (b, b_span)) => { let var_context = HashMap::new(); let var_context = sig .args .iter() .fold(var_context, |var_context, ((x, _x_span), (t, _t_span))| { add_var(&x, t, &var_context) }); let (new_b, _final_var_context) = typecheck_block( sess, (b.clone(), b_span.clone()), top_level_context, &var_context, &sig.ret, )?; let comp_ret_typ = &new_b.return_typ.clone().unwrap(); if let None = unify_types( sess, comp_ret_typ, &((Borrowing::Consumed, DUMMY_SP.into()), sig.ret.clone()), &HashMap::new(), top_level_context, )? { sess.span_rustspec_err( sig.ret.1.clone(), format!( "expected type {}, got {}{}", sig.ret.0, (comp_ret_typ.0).0, (comp_ret_typ.1).0, ) .as_str(), ) } let out = Item::FnDecl( (f.clone(), f_span.clone()), sig.clone(), (new_b, b_span.clone()), ); Ok(out) } Item::ArrayDecl(id, size, cell_t, index_typ) => { let (new_size, size_typ, _) = typecheck_expression(sess, size, top_level_context, &HashMap::new())?; if let None = unify_types( sess, &( (Borrowing::Consumed, size.1.clone()), (BaseTyp::Usize, size.1.clone()), ), &size_typ, &HashMap::new(), top_level_context, )? { sess.span_rustspec_err( size.1.clone(), format!( "expected type usize, got {}{}", (size_typ.0).0, (size_typ.1).0 ) .as_str(), ) } Ok(Item::ArrayDecl( id.clone(), (new_size, size.1.clone()), cell_t.clone(), index_typ.clone(), )) } Item::ConstDecl(id, typ, e) => { let (new_e, new_t, _) = typecheck_expression(sess, e, top_level_context, &HashMap::new())?; if let None = unify_types( sess, &((Borrowing::Consumed, typ.1.clone()), typ.clone()), &new_t, &HashMap::new(), top_level_context, )? { sess.span_rustspec_err( e.1.clone(), format!( "expected type {}, got type {}{}", typ.0, (new_t.0).0, (new_t.1).0 ) .as_str(), ); return Err(()); } Ok(Item::ConstDecl( id.clone(), typ.clone(), (new_e, (e.1).clone()), )) } }; match i { Ok(i) => Ok(DecoratedItem { item: i, tags: item.tags.clone(), }), Err(a) => Err(a), } } pub fn typecheck_program( sess: &Session, p: &Program, top_level_ctx: &mut TopLevelContext, ) -> TypecheckingResult<Program> { Ok(Program { items: check_vec( p.items .iter() .map(|(i, i_span)| { let new_i = typecheck_item(sess, i, &top_level_ctx)?; Ok((new_i, i_span.clone())) }) .collect(), )?, }) }
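// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original typechecker): the conditional
// and for-loop cases above merge the contexts coming out of each branch with
// `intersection`, so a binding survives only if every control-flow path kept
// it. The hypothetical `intersect_contexts` helper below restates that rule
// with std::collections maps instead of the persistent maps used here.
// ---------------------------------------------------------------------------
#[allow(dead_code)]
fn intersect_contexts<K, V>(
    a: &std::collections::HashMap<K, V>,
    b: &std::collections::HashMap<K, V>,
) -> std::collections::HashMap<K, V>
where
    K: Eq + std::hash::Hash + Clone,
    V: Clone,
{
    // Keep a binding only if both branches still know about it; the value is
    // taken from the first branch, mirroring how the contexts are intersected.
    a.iter()
        .filter(|(k, _)| b.contains_key(*k))
        .map(|(k, v)| (k.clone(), v.clone()))
        .collect()
}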
40.965555
124
0.365657
db294cf07ef36c0277a2a4fe11f13c416d75df3e
58,206
mod multiple_keys; use polars_arrow::utils::CustomIterTools; use crate::frame::hash_join::multiple_keys::{ inner_join_multiple_keys, left_join_multiple_keys, outer_join_multiple_keys, }; use crate::frame::select::Selection; use crate::prelude::*; use crate::utils::{set_partition_size, split_ca}; use crate::vector_hasher::{ create_hash_and_keys_threaded_vectorized, prepare_hashed_relation_threaded, this_partition, AsU64, StrHash, }; use crate::{datatypes::PlHashMap, POOL}; use ahash::RandomState; use hashbrown::hash_map::{Entry, RawEntryMut}; use hashbrown::HashMap; use itertools::Itertools; use rayon::prelude::*; use std::collections::HashSet; use std::fmt::Debug; use std::hash::{BuildHasher, Hash, Hasher}; use std::ops::Deref; use unsafe_unwrap::UnsafeUnwrap; #[cfg(feature = "private")] pub use self::multiple_keys::private_left_join_multiple_keys; use crate::frame::groupby::hashing::HASHMAP_INIT_SIZE; /// If Categorical types are created without a global string cache or under /// a different global string cache the mapping will be incorrect. #[cfg(feature = "dtype-categorical")] pub(crate) fn check_categorical_src(l: &Series, r: &Series) -> Result<()> { if let (Ok(l), Ok(r)) = (l.categorical(), r.categorical()) { let l = l.categorical_map.as_ref().unwrap(); let r = r.categorical_map.as_ref().unwrap(); if !l.same_src(&*r) { return Err(PolarsError::ValueError("joins on categorical dtypes can only happen if they are created under the same global string cache".into())); } } Ok(()) } macro_rules! det_hash_prone_order { ($self:expr, $other:expr) => {{ // The shortest relation will be used to create a hash table. let left_first = $self.len() > $other.len(); let a; let b; if left_first { a = $self; b = $other; } else { b = $self; a = $other; } (a, b, !left_first) }}; } #[derive(Clone, Copy, Debug, PartialEq)] pub enum JoinType { Left, Inner, Outer, #[cfg(feature = "asof_join")] AsOf, #[cfg(feature = "cross_join")] Cross, } unsafe fn get_hash_tbl_threaded_join_partitioned<T, H>( h: u64, hash_tables: &[HashMap<T, Vec<u32>, H>], len: u64, ) -> &HashMap<T, Vec<u32>, H> { let mut idx = 0; for i in 0..len { // can only be done for powers of two. // n % 2^i = n & (2^i - 1) if (h + i) & (len - 1) == 0 { idx = i as usize; } } hash_tables.get_unchecked(idx) } unsafe fn get_hash_tbl_threaded_join_mut_partitioned<T, H>( h: u64, hash_tables: &mut [HashMap<T, Vec<u32>, H>], len: u64, ) -> &mut HashMap<T, Vec<u32>, H> { let mut idx = 0; for i in 0..len { // can only be done for powers of two. 
// n % 2^i = n & (2^i - 1) if (h + i) & (len - 1) == 0 { idx = i as usize; } } hash_tables.get_unchecked_mut(idx) } /// Probe the build table and add tuples to the results (inner join) fn probe_inner<T, F>( probe: &[T], hash_tbls: &[PlHashMap<T, Vec<u32>>], results: &mut Vec<(u32, u32)>, local_offset: usize, n_tables: u64, swap_fn: F, ) where T: Send + Hash + Eq + Sync + Copy + AsU64, F: Fn(u32, u32) -> (u32, u32), { assert!(hash_tbls.len().is_power_of_two()); probe.iter().enumerate().for_each(|(idx_a, k)| { let idx_a = (idx_a + local_offset) as u32; // probe table that contains the hashed value let current_probe_table = unsafe { get_hash_tbl_threaded_join_partitioned(k.as_u64(), hash_tbls, n_tables) }; let value = current_probe_table.get(k); if let Some(indexes_b) = value { let tuples = indexes_b.iter().map(|&idx_b| swap_fn(idx_a, idx_b)); results.extend(tuples); } }); } pub(crate) fn create_probe_table<T, IntoSlice>(keys: Vec<IntoSlice>) -> Vec<PlHashMap<T, Vec<u32>>> where T: Send + Hash + Eq + Sync + Copy + AsU64, IntoSlice: AsRef<[T]> + Send + Sync, { let n_partitions = set_partition_size(); // We will create a hashtable in every thread. // We use the hash to partition the keys to the matching hashtable. // Every thread traverses all keys/hashes and ignores the ones that doesn't fall in that partition. POOL.install(|| { (0..n_partitions).into_par_iter().map(|partition_no| { let partition_no = partition_no as u64; let mut hash_tbl: PlHashMap<T, Vec<u32>> = PlHashMap::with_capacity(HASHMAP_INIT_SIZE); let n_partitions = n_partitions as u64; let mut offset = 0; for keys in &keys { let keys = keys.as_ref(); let len = keys.len() as u32; let mut cnt = 0; keys.iter().for_each(|k| { let idx = cnt + offset; cnt += 1; if this_partition(k.as_u64(), partition_no, n_partitions) { let entry = hash_tbl.entry(*k); match entry { Entry::Vacant(entry) => { entry.insert(vec![idx]); } Entry::Occupied(mut entry) => { let v = entry.get_mut(); v.push(idx); } } } }); offset += len; } hash_tbl }) }) .collect() } fn hash_join_tuples_inner<T, IntoSlice>( probe: Vec<IntoSlice>, build: Vec<IntoSlice>, // Because b should be the shorter relation we could need to swap to keep left left and right right. swap: bool, ) -> Vec<(u32, u32)> where IntoSlice: AsRef<[T]> + Send + Sync, T: Send + Hash + Eq + Sync + Copy + AsU64, { // NOTE: see the left join for more elaborate comments // first we hash one relation let hash_tbls = create_probe_table(build); let n_tables = hash_tbls.len() as u64; debug_assert!(n_tables.is_power_of_two()); let offsets = probe .iter() .map(|ph| ph.as_ref().len()) .scan(0, |state, val| { let out = *state; *state += val; Some(out) }) .collect::<Vec<_>>(); // next we probe the other relation // code duplication is because we want to only do the swap check once POOL.install(|| { probe .into_par_iter() .zip(offsets) .map(|(probe, offset)| { let probe = probe.as_ref(); // local reference let hash_tbls = &hash_tbls; let mut results = Vec::with_capacity(probe.len()); let local_offset = offset; // branch is to hoist swap out of the inner loop. 
if swap {
                    probe_inner(
                        probe,
                        hash_tbls,
                        &mut results,
                        local_offset,
                        n_tables,
                        |idx_a, idx_b| (idx_b, idx_a),
                    )
                } else {
                    probe_inner(
                        probe,
                        hash_tbls,
                        &mut results,
                        local_offset,
                        n_tables,
                        |idx_a, idx_b| (idx_a, idx_b),
                    )
                }

                results
            })
            .flatten()
            .collect()
    })
}

fn hash_join_tuples_left<T, IntoSlice>(
    probe: Vec<IntoSlice>,
    build: Vec<IntoSlice>,
) -> Vec<(u32, Option<u32>)>
where
    IntoSlice: AsRef<[T]> + Send + Sync,
    T: Send + Hash + Eq + Sync + Copy + AsU64,
{
    // first we hash one relation
    let hash_tbls = create_probe_table(build);

    // we determine the offset so that we later know which index to store in the join tuples
    let offsets = probe
        .iter()
        .map(|ph| ph.as_ref().len())
        .scan(0, |state, val| {
            let out = *state;
            *state += val;
            Some(out)
        })
        .collect::<Vec<_>>();

    let n_tables = hash_tbls.len() as u64;
    debug_assert!(n_tables.is_power_of_two());

    // next we probe the other relation
    POOL.install(|| {
        probe
            .into_par_iter()
            .zip(offsets)
            // probes_hashes: Vec<u64> processed by this thread
            // offset: offset index
            .map(|(probe, offset)| {
                // local reference
                let hash_tbls = &hash_tbls;
                let probe = probe.as_ref();

                // assume the result tuples equal the number of hashes processed by this thread.
                let mut results = Vec::with_capacity(probe.len());

                probe.iter().enumerate().for_each(|(idx_a, k)| {
                    let idx_a = (idx_a + offset) as u32;
                    // probe table that contains the hashed value
                    let current_probe_table = unsafe {
                        get_hash_tbl_threaded_join_partitioned(k.as_u64(), hash_tbls, n_tables)
                    };

                    // we already hashed, so we don't have to hash again.
                    let value = current_probe_table.get(k);

                    match value {
                        // left and right matches
                        Some(indexes_b) => {
                            results.extend(indexes_b.iter().map(|&idx_b| (idx_a, Some(idx_b))))
                        }
                        // only left values, right = null
                        None => results.push((idx_a, None)),
                    }
                });
                results
            })
            .flatten()
            .collect()
    })
}

/// Probe the build table and add tuples to the results (outer join)
fn probe_outer<T, F, G, H>(
    probe_hashes: &[Vec<(u64, T)>],
    hash_tbls: &mut [PlHashMap<T, Vec<u32>>],
    results: &mut Vec<(Option<u32>, Option<u32>)>,
    n_tables: u64,
    // Function that gets index_a, index_b when there is a match and pushes to result
    swap_fn_match: F,
    // Function that gets index_a when there is no match and pushes to result
    swap_fn_no_match: G,
    // Function that gets index_b from the build table that did not match any in A and pushes to result
    swap_fn_drain: H,
) where
    T: Send + Hash + Eq + Sync + Copy,
    // idx_a, idx_b -> ...
    F: Fn(u32, u32) -> (Option<u32>, Option<u32>),
    // idx_a -> ...
    G: Fn(u32) -> (Option<u32>, Option<u32>),
    // idx_b -> ...
    H: Fn(u32) -> (Option<u32>, Option<u32>),
{
    // needed for the partition shift instead of modulo to make sense
    assert!(n_tables.is_power_of_two());
    let mut idx_a = 0;
    for probe_hashes in probe_hashes {
        for (h, key) in probe_hashes {
            let h = *h;
            // probe table that contains the hashed value
            let current_probe_table =
                unsafe { get_hash_tbl_threaded_join_mut_partitioned(h, hash_tbls, n_tables) };

            let entry = current_probe_table
                .raw_entry_mut()
                .from_key_hashed_nocheck(h, key);

            match entry {
                // match and remove
                RawEntryMut::Occupied(occupied) => {
                    let indexes_b = occupied.remove();
                    results.extend(indexes_b.iter().map(|&idx_b| swap_fn_match(idx_a, idx_b)))
                }
                // no match
                RawEntryMut::Vacant(_) => results.push(swap_fn_no_match(idx_a)),
            }
            idx_a += 1;
        }
    }

    for hash_tbl in hash_tbls {
        hash_tbl.iter().for_each(|(_k, indexes_b)| {
            // remaining joined values from the right table
            results.extend(indexes_b.iter().map(|&idx_b| swap_fn_drain(idx_b)))
        });
    }
}

/// Hash join outer.
Both left and right can have no match so Options fn hash_join_tuples_outer<T, I, J>( a: Vec<I>, b: Vec<J>, swap: bool, ) -> Vec<(Option<u32>, Option<u32>)> where I: Iterator<Item = T> + Send + TrustedLen, J: Iterator<Item = T> + Send + TrustedLen, T: Hash + Eq + Copy + Sync + Send, { // This function is partially multi-threaded. // Parts that are done in parallel: // - creation of the probe tables // - creation of the hashes // during the probe phase values are removed from the tables, that's done single threaded to // keep it lock free. let size = a.iter().map(|a| a.size_hint().0).sum::<usize>() + b.iter().map(|b| b.size_hint().0).sum::<usize>(); let mut results = Vec::with_capacity(size); // prepare hash table let mut hash_tbls = prepare_hashed_relation_threaded(b); let random_state = hash_tbls[0].hasher().clone(); // we pre hash the probing values let (probe_hashes, _) = create_hash_and_keys_threaded_vectorized(a, Some(random_state)); let n_tables = hash_tbls.len() as u64; // probe the hash table. // Note: indexes from b that are not matched will be None, Some(idx_b) // Therefore we remove the matches and the remaining will be joined from the right // branch is because we want to only do the swap check once if swap { probe_outer( &probe_hashes, &mut hash_tbls, &mut results, n_tables, |idx_a, idx_b| (Some(idx_b), Some(idx_a)), |idx_a| (None, Some(idx_a)), |idx_b| (Some(idx_b), None), ) } else { probe_outer( &probe_hashes, &mut hash_tbls, &mut results, n_tables, |idx_a, idx_b| (Some(idx_a), Some(idx_b)), |idx_a| (Some(idx_a), None), |idx_b| (None, Some(idx_b)), ) } results } pub(crate) trait HashJoin<T> { fn hash_join_inner(&self, _other: &ChunkedArray<T>) -> Vec<(u32, u32)> { unimplemented!() } fn hash_join_left(&self, _other: &ChunkedArray<T>) -> Vec<(u32, Option<u32>)> { unimplemented!() } fn hash_join_outer(&self, _other: &ChunkedArray<T>) -> Vec<(Option<u32>, Option<u32>)> { unimplemented!() } } impl HashJoin<Float32Type> for Float32Chunked { fn hash_join_inner(&self, other: &Float32Chunked) -> Vec<(u32, u32)> { let ca = self.bit_repr_small(); let other = other.bit_repr_small(); ca.hash_join_inner(&other) } fn hash_join_left(&self, other: &Float32Chunked) -> Vec<(u32, Option<u32>)> { let ca = self.bit_repr_small(); let other = other.bit_repr_small(); ca.hash_join_left(&other) } fn hash_join_outer(&self, other: &Float32Chunked) -> Vec<(Option<u32>, Option<u32>)> { let ca = self.bit_repr_small(); let other = other.bit_repr_small(); ca.hash_join_outer(&other) } } impl HashJoin<Float64Type> for Float64Chunked { fn hash_join_inner(&self, other: &Float64Chunked) -> Vec<(u32, u32)> { let ca = self.bit_repr_large(); let other = other.bit_repr_large(); ca.hash_join_inner(&other) } fn hash_join_left(&self, other: &Float64Chunked) -> Vec<(u32, Option<u32>)> { let ca = self.bit_repr_large(); let other = other.bit_repr_large(); ca.hash_join_left(&other) } fn hash_join_outer(&self, other: &Float64Chunked) -> Vec<(Option<u32>, Option<u32>)> { let ca = self.bit_repr_large(); let other = other.bit_repr_large(); ca.hash_join_outer(&other) } } impl HashJoin<CategoricalType> for CategoricalChunked { fn hash_join_inner(&self, other: &CategoricalChunked) -> Vec<(u32, u32)> { self.deref().hash_join_inner(&other.cast().unwrap()) } fn hash_join_left(&self, other: &CategoricalChunked) -> Vec<(u32, Option<u32>)> { self.deref().hash_join_left(&other.cast().unwrap()) } fn hash_join_outer(&self, other: &CategoricalChunked) -> Vec<(Option<u32>, Option<u32>)> { self.deref().hash_join_outer(&other.cast().unwrap()) } 
} fn num_group_join_inner<T>(left: &ChunkedArray<T>, right: &ChunkedArray<T>) -> Vec<(u32, u32)> where T: PolarsIntegerType, T::Native: Hash + Eq + Send + AsU64 + Copy, Option<T::Native>: AsU64, { let n_threads = POOL.current_num_threads(); let (a, b, swap) = det_hash_prone_order!(left, right); let splitted_a = split_ca(a, n_threads).unwrap(); let splitted_b = split_ca(b, n_threads).unwrap(); match ( left.null_count(), right.null_count(), left.chunks.len(), right.chunks.len(), ) { (0, 0, 1, 1) => { let keys_a = splitted_a .iter() .map(|ca| ca.cont_slice().unwrap()) .collect::<Vec<_>>(); let keys_b = splitted_b .iter() .map(|ca| ca.cont_slice().unwrap()) .collect::<Vec<_>>(); hash_join_tuples_inner(keys_a, keys_b, swap) } (0, 0, _, _) => { let keys_a = splitted_a .iter() .map(|ca| ca.into_no_null_iter().collect::<Vec<_>>()) .collect::<Vec<_>>(); let keys_b = splitted_b .iter() .map(|ca| ca.into_no_null_iter().collect::<Vec<_>>()) .collect::<Vec<_>>(); hash_join_tuples_inner(keys_a, keys_b, swap) } (_, _, 1, 1) => { let keys_a = splitted_a .iter() .map(|ca| { ca.downcast_iter() .map(|v| v.into_iter().map(|v| v.copied().as_u64())) .flatten() .collect::<Vec<_>>() }) .collect::<Vec<_>>(); let keys_b = splitted_b .iter() .map(|ca| { ca.downcast_iter() .map(|v| v.into_iter().map(|v| v.copied().as_u64())) .flatten() .collect::<Vec<_>>() }) .collect::<Vec<_>>(); hash_join_tuples_inner(keys_a, keys_b, swap) } _ => { let keys_a = splitted_a .iter() .map(|ca| ca.into_iter().map(|v| v.as_u64()).collect::<Vec<_>>()) .collect::<Vec<_>>(); let keys_b = splitted_b .iter() .map(|ca| ca.into_iter().map(|v| v.as_u64()).collect::<Vec<_>>()) .collect::<Vec<_>>(); hash_join_tuples_inner(keys_a, keys_b, swap) } } } fn num_group_join_left<T>( left: &ChunkedArray<T>, right: &ChunkedArray<T>, ) -> Vec<(u32, Option<u32>)> where T: PolarsIntegerType, T::Native: Hash + Eq + Send + AsU64, Option<T::Native>: AsU64, { let n_threads = POOL.current_num_threads(); let splitted_a = split_ca(left, n_threads).unwrap(); let splitted_b = split_ca(right, n_threads).unwrap(); match ( left.null_count(), right.null_count(), left.chunks.len(), right.chunks.len(), ) { (0, 0, 1, 1) => { let keys_a = splitted_a .iter() .map(|ca| ca.cont_slice().unwrap()) .collect::<Vec<_>>(); let keys_b = splitted_b .iter() .map(|ca| ca.cont_slice().unwrap()) .collect::<Vec<_>>(); hash_join_tuples_left(keys_a, keys_b) } (0, 0, _, _) => { let keys_a = splitted_a .iter() .map(|ca| ca.into_no_null_iter().collect_trusted::<Vec<_>>()) .collect::<Vec<_>>(); let keys_b = splitted_b .iter() .map(|ca| ca.into_no_null_iter().collect_trusted::<Vec<_>>()) .collect::<Vec<_>>(); hash_join_tuples_left(keys_a, keys_b) } (_, _, 1, 1) => { let keys_a = splitted_a .iter() .map(|ca| { ca.downcast_iter() .map(|v| v.into_iter().map(|v| v.copied().as_u64())) .flatten() .trust_my_length(ca.len()) .collect_trusted::<Vec<_>>() }) .collect::<Vec<_>>(); let keys_b = splitted_b .iter() .map(|ca| { ca.downcast_iter() .map(|v| v.into_iter().map(|v| v.copied().as_u64())) .flatten() .trust_my_length(ca.len()) .collect_trusted::<Vec<_>>() }) .collect::<Vec<_>>(); hash_join_tuples_left(keys_a, keys_b) } _ => { let keys_a = splitted_a .iter() .map(|ca| { ca.into_iter() .map(|v| v.as_u64()) .collect_trusted::<Vec<_>>() }) .collect::<Vec<_>>(); let keys_b = splitted_b .iter() .map(|ca| { ca.into_iter() .map(|v| v.as_u64()) .collect_trusted::<Vec<_>>() }) .collect::<Vec<_>>(); hash_join_tuples_left(keys_a, keys_b) } } } impl<T> HashJoin<T> for ChunkedArray<T> where T: PolarsIntegerType + 
Sync, T::Native: Eq + Hash + num::NumCast, { fn hash_join_inner(&self, other: &ChunkedArray<T>) -> Vec<(u32, u32)> { match self.dtype() { DataType::UInt64 => { // convince the compiler that we are this type. let ca: &UInt64Chunked = unsafe { &*(self as *const ChunkedArray<T> as *const ChunkedArray<UInt64Type>) }; let other: &UInt64Chunked = unsafe { &*(other as *const ChunkedArray<T> as *const ChunkedArray<UInt64Type>) }; num_group_join_inner(ca, other) } DataType::UInt32 => { // convince the compiler that we are this type. let ca: &UInt32Chunked = unsafe { &*(self as *const ChunkedArray<T> as *const ChunkedArray<UInt32Type>) }; let other: &UInt32Chunked = unsafe { &*(other as *const ChunkedArray<T> as *const ChunkedArray<UInt32Type>) }; num_group_join_inner(ca, other) } DataType::Int64 | DataType::Float64 => { let ca = self.bit_repr_large(); let other = other.bit_repr_large(); num_group_join_inner(&ca, &other) } DataType::Int32 | DataType::Float32 => { let ca = self.bit_repr_small(); let other = other.bit_repr_small(); num_group_join_inner(&ca, &other) } _ => { let ca = self.cast::<UInt32Type>().unwrap(); let other = other.cast::<UInt32Type>().unwrap(); num_group_join_inner(&ca, &other) } } } fn hash_join_left(&self, other: &ChunkedArray<T>) -> Vec<(u32, Option<u32>)> { match self.dtype() { DataType::UInt64 => { // convince the compiler that we are this type. let ca: &UInt64Chunked = unsafe { &*(self as *const ChunkedArray<T> as *const ChunkedArray<UInt64Type>) }; let other: &UInt64Chunked = unsafe { &*(other as *const ChunkedArray<T> as *const ChunkedArray<UInt64Type>) }; num_group_join_left(ca, other) } DataType::UInt32 => { // convince the compiler that we are this type. let ca: &UInt32Chunked = unsafe { &*(self as *const ChunkedArray<T> as *const ChunkedArray<UInt32Type>) }; let other: &UInt32Chunked = unsafe { &*(other as *const ChunkedArray<T> as *const ChunkedArray<UInt32Type>) }; num_group_join_left(ca, other) } DataType::Int64 | DataType::Float64 => { let ca = self.bit_repr_large(); let other = other.bit_repr_large(); num_group_join_left(&ca, &other) } DataType::Int32 | DataType::Float32 => { let ca = self.bit_repr_small(); let other = other.bit_repr_small(); num_group_join_left(&ca, &other) } _ => { let ca = self.cast::<UInt32Type>().unwrap(); let other = other.cast::<UInt32Type>().unwrap(); num_group_join_left(&ca, &other) } } } fn hash_join_outer(&self, other: &ChunkedArray<T>) -> Vec<(Option<u32>, Option<u32>)> { let (a, b, swap) = det_hash_prone_order!(self, other); let n_partitions = set_partition_size(); let splitted_a = split_ca(a, n_partitions).unwrap(); let splitted_b = split_ca(b, n_partitions).unwrap(); match (a.null_count(), b.null_count()) { (0, 0) => { let iters_a = splitted_a .iter() .map(|ca| ca.into_no_null_iter()) .collect_vec(); let iters_b = splitted_b .iter() .map(|ca| ca.into_no_null_iter()) .collect_vec(); hash_join_tuples_outer(iters_a, iters_b, swap) } _ => { let iters_a = splitted_a.iter().map(|ca| ca.into_iter()).collect_vec(); let iters_b = splitted_b.iter().map(|ca| ca.into_iter()).collect_vec(); hash_join_tuples_outer(iters_a, iters_b, swap) } } } } impl HashJoin<BooleanType> for BooleanChunked { fn hash_join_inner(&self, other: &BooleanChunked) -> Vec<(u32, u32)> { let ca = self.cast::<UInt32Type>().unwrap(); let other = other.cast::<UInt32Type>().unwrap(); ca.hash_join_inner(&other) } fn hash_join_left(&self, other: &BooleanChunked) -> Vec<(u32, Option<u32>)> { let ca = self.cast::<UInt32Type>().unwrap(); let other = 
other.cast::<UInt32Type>().unwrap(); ca.hash_join_left(&other) } fn hash_join_outer(&self, other: &BooleanChunked) -> Vec<(Option<u32>, Option<u32>)> { let (a, b, swap) = det_hash_prone_order!(self, other); let n_partitions = set_partition_size(); let splitted_a = split_ca(a, n_partitions).unwrap(); let splitted_b = split_ca(b, n_partitions).unwrap(); match (a.null_count(), b.null_count()) { (0, 0) => { let iters_a = splitted_a .iter() .map(|ca| ca.into_no_null_iter()) .collect_vec(); let iters_b = splitted_b .iter() .map(|ca| ca.into_no_null_iter()) .collect_vec(); hash_join_tuples_outer(iters_a, iters_b, swap) } _ => { let iters_a = splitted_a.iter().map(|ca| ca.into_iter()).collect_vec(); let iters_b = splitted_b.iter().map(|ca| ca.into_iter()).collect_vec(); hash_join_tuples_outer(iters_a, iters_b, swap) } } } } fn prepare_strs<'a>(been_split: &'a [Utf8Chunked], hb: &RandomState) -> Vec<Vec<StrHash<'a>>> { POOL.install(|| { been_split .par_iter() .map(|ca| { ca.into_iter() .map(|opt_s| { let mut state = hb.build_hasher(); opt_s.hash(&mut state); let hash = state.finish(); StrHash::new(opt_s, hash) }) .collect::<Vec<_>>() }) .collect() }) } impl HashJoin<Utf8Type> for Utf8Chunked { fn hash_join_inner(&self, other: &Utf8Chunked) -> Vec<(u32, u32)> { let n_threads = POOL.current_num_threads(); let (a, b, swap) = det_hash_prone_order!(self, other); let hb = RandomState::default(); let splitted_a = split_ca(a, n_threads).unwrap(); let splitted_b = split_ca(b, n_threads).unwrap(); let str_hashes_a = prepare_strs(&splitted_a, &hb); let str_hashes_b = prepare_strs(&splitted_b, &hb); hash_join_tuples_inner(str_hashes_a, str_hashes_b, swap) } fn hash_join_left(&self, other: &Utf8Chunked) -> Vec<(u32, Option<u32>)> { let n_threads = POOL.current_num_threads(); let hb = RandomState::default(); let splitted_a = split_ca(self, n_threads).unwrap(); let splitted_b = split_ca(other, n_threads).unwrap(); let str_hashes_a = prepare_strs(&splitted_a, &hb); let str_hashes_b = prepare_strs(&splitted_b, &hb); hash_join_tuples_left(str_hashes_a, str_hashes_b) } fn hash_join_outer(&self, other: &Utf8Chunked) -> Vec<(Option<u32>, Option<u32>)> { let (a, b, swap) = det_hash_prone_order!(self, other); let n_partitions = set_partition_size(); let splitted_a = split_ca(a, n_partitions).unwrap(); let splitted_b = split_ca(b, n_partitions).unwrap(); match (a.null_count(), b.null_count()) { (0, 0) => { let iters_a = splitted_a .iter() .map(|ca| ca.into_no_null_iter()) .collect_vec(); let iters_b = splitted_b .iter() .map(|ca| ca.into_no_null_iter()) .collect_vec(); hash_join_tuples_outer(iters_a, iters_b, swap) } _ => { let iters_a = splitted_a .iter() .map(|ca| ca.into_iter()) .collect::<Vec<_>>(); let iters_b = splitted_b .iter() .map(|ca| ca.into_iter()) .collect::<Vec<_>>(); hash_join_tuples_outer(iters_a, iters_b, swap) } } } } pub trait ZipOuterJoinColumn { fn zip_outer_join_column( &self, _right_column: &Series, _opt_join_tuples: &[(Option<u32>, Option<u32>)], ) -> Series { unimplemented!() } } impl<T> ZipOuterJoinColumn for ChunkedArray<T> where T: PolarsIntegerType, ChunkedArray<T>: IntoSeries, { fn zip_outer_join_column( &self, right_column: &Series, opt_join_tuples: &[(Option<u32>, Option<u32>)], ) -> Series { let right_ca = self.unpack_series_matching_type(right_column).unwrap(); let left_rand_access = self.take_rand(); let right_rand_access = right_ca.take_rand(); opt_join_tuples .iter() .map(|(opt_left_idx, opt_right_idx)| { if let Some(left_idx) = opt_left_idx { unsafe { 
left_rand_access.get_unchecked(*left_idx as usize) } } else { unsafe { let right_idx = opt_right_idx.unsafe_unwrap(); right_rand_access.get_unchecked(right_idx as usize) } } }) .collect_trusted::<ChunkedArray<T>>() .into_series() } } macro_rules! impl_zip_outer_join { ($chunkedtype:ident) => { impl ZipOuterJoinColumn for $chunkedtype { fn zip_outer_join_column( &self, right_column: &Series, opt_join_tuples: &[(Option<u32>, Option<u32>)], ) -> Series { let right_ca = self.unpack_series_matching_type(right_column).unwrap(); let left_rand_access = self.take_rand(); let right_rand_access = right_ca.take_rand(); opt_join_tuples .iter() .map(|(opt_left_idx, opt_right_idx)| { if let Some(left_idx) = opt_left_idx { unsafe { left_rand_access.get_unchecked(*left_idx as usize) } } else { unsafe { let right_idx = opt_right_idx.unsafe_unwrap(); right_rand_access.get_unchecked(right_idx as usize) } } }) .collect::<$chunkedtype>() .into_series() } } }; } impl_zip_outer_join!(BooleanChunked); impl_zip_outer_join!(Utf8Chunked); impl ZipOuterJoinColumn for Float32Chunked { fn zip_outer_join_column( &self, right_column: &Series, opt_join_tuples: &[(Option<u32>, Option<u32>)], ) -> Series { self.apply_as_ints(|s| { s.zip_outer_join_column( &right_column.bit_repr_small().into_series(), opt_join_tuples, ) }) } } impl ZipOuterJoinColumn for Float64Chunked { fn zip_outer_join_column( &self, right_column: &Series, opt_join_tuples: &[(Option<u32>, Option<u32>)], ) -> Series { self.apply_as_ints(|s| { s.zip_outer_join_column( &right_column.bit_repr_large().into_series(), opt_join_tuples, ) }) } } impl DataFrame { /// Utility method to finish a join. pub(crate) fn finish_join( &self, mut df_left: DataFrame, mut df_right: DataFrame, suffix: Option<String>, ) -> Result<DataFrame> { let mut left_names = HashSet::with_capacity_and_hasher(df_left.width(), RandomState::new()); df_left.columns.iter().for_each(|series| { left_names.insert(series.name()); }); let mut rename_strs = Vec::with_capacity(df_right.width()); df_right.columns.iter().for_each(|series| { if left_names.contains(series.name()) { rename_strs.push(series.name().to_owned()) } }); let suffix = suffix.as_deref().unwrap_or("_right"); for name in rename_strs { df_right.rename(&name, &format!("{}{}", name, suffix))?; } df_left.hstack_mut(&df_right.columns)?; Ok(df_left) } fn create_left_df<B: Sync>(&self, join_tuples: &[(u32, B)], left_join: bool) -> DataFrame { if left_join && join_tuples.len() == self.height() { self.clone() } else { unsafe { self.take_iter_unchecked(join_tuples.iter().map(|(left, _right)| *left as usize)) } } } /// Generic join method. Can be used to join on multiple columns. pub fn join<'a, J, S1: Selection<'a, J>, S2: Selection<'a, J>>( &self, other: &DataFrame, left_on: S1, right_on: S2, how: JoinType, suffix: Option<String>, ) -> Result<DataFrame> { #[cfg(feature = "cross_join")] if let JoinType::Cross = how { return self.cross_join(other); } #[allow(unused_mut)] let mut selected_left = self.select_series(left_on)?; #[allow(unused_mut)] let mut selected_right = other.select_series(right_on)?; if selected_right.len() != selected_left.len() { return Err(PolarsError::ValueError( "the number of columns given as join key should be equal".into(), )); } if selected_left .iter() .zip(&selected_right) .any(|(l, r)| l.dtype() != r.dtype()) { return Err(PolarsError::ValueError("the dtype of the join keys don't match. 
first cast your columns to the correct dtype".into())); } #[cfg(feature = "dtype-categorical")] for (l, r) in selected_left.iter().zip(&selected_right) { check_categorical_src(l, r)? } // Single keys if selected_left.len() == 1 { let s_left = self.column(selected_left[0].name())?; let s_right = other.column(selected_right[0].name())?; return match how { JoinType::Inner => self.inner_join_from_series(other, s_left, s_right, suffix), JoinType::Left => self.left_join_from_series(other, s_left, s_right, suffix), JoinType::Outer => self.outer_join_from_series(other, s_left, s_right, suffix), #[cfg(feature = "asof_join")] JoinType::AsOf => { self.join_asof(other, selected_left[0].name(), selected_right[0].name()) } #[cfg(feature = "cross_join")] JoinType::Cross => { unreachable!() } }; } fn remove_selected(df: &DataFrame, selected: &[Series]) -> DataFrame { let mut new = None; for s in selected { new = match new { None => Some(df.drop(s.name()).unwrap()), Some(new) => Some(new.drop(s.name()).unwrap()), } } new.unwrap() } // hack for a macro impl DataFrame { fn len(&self) -> usize { self.height() } } // multiple keys match how { JoinType::Inner => { let left = DataFrame::new_no_checks(selected_left); let right = DataFrame::new_no_checks(selected_right.clone()); let (left, right, swap) = det_hash_prone_order!(left, right); let join_tuples = inner_join_multiple_keys(&left, &right, swap); let (df_left, df_right) = POOL.join( || self.create_left_df(&join_tuples, false), || unsafe { // remove join columns remove_selected(other, &selected_right).take_iter_unchecked( join_tuples.iter().map(|(_left, right)| *right as usize), ) }, ); self.finish_join(df_left, df_right, suffix) } JoinType::Left => { let left = DataFrame::new_no_checks(selected_left); let right = DataFrame::new_no_checks(selected_right.clone()); let join_tuples = left_join_multiple_keys(&left, &right); let (df_left, df_right) = POOL.join( || self.create_left_df(&join_tuples, true), || unsafe { // remove join columns remove_selected(other, &selected_right).take_opt_iter_unchecked( join_tuples .iter() .map(|(_left, right)| right.map(|i| i as usize)), ) }, ); self.finish_join(df_left, df_right, suffix) } JoinType::Outer => { let left = DataFrame::new_no_checks(selected_left.clone()); let right = DataFrame::new_no_checks(selected_right.clone()); let (left, right, swap) = det_hash_prone_order!(left, right); let opt_join_tuples = outer_join_multiple_keys(&left, &right, swap); // Take the left and right dataframes by join tuples let (mut df_left, df_right) = POOL.join( || unsafe { remove_selected(self, &selected_left).take_opt_iter_unchecked( opt_join_tuples .iter() .map(|(left, _right)| left.map(|i| i as usize)), ) }, || unsafe { remove_selected(other, &selected_right).take_opt_iter_unchecked( opt_join_tuples .iter() .map(|(_left, right)| right.map(|i| i as usize)), ) }, ); for (s_left, s_right) in selected_left.iter().zip(&selected_right) { let mut s = s_left.zip_outer_join_column(s_right, &opt_join_tuples); s.rename(s_left.name()); df_left.hstack_mut(&[s])?; } self.finish_join(df_left, df_right, suffix) } #[cfg(feature = "asof_join")] JoinType::AsOf => Err(PolarsError::ValueError( "asof join not supported for join on multiple keys".into(), )), #[cfg(feature = "cross_join")] JoinType::Cross => { unreachable!() } } } /// Perform an inner join on two DataFrames. 
/// /// # Example /// /// ``` /// use polars_core::prelude::*; /// fn join_dfs(left: &DataFrame, right: &DataFrame) -> Result<DataFrame> { /// left.inner_join(right, "join_column_left", "join_column_right") /// } /// ``` pub fn inner_join( &self, other: &DataFrame, left_on: &str, right_on: &str, ) -> Result<DataFrame> { let s_left = self.column(left_on)?; let s_right = other.column(right_on)?; self.inner_join_from_series(other, s_left, s_right, None) } pub(crate) fn inner_join_from_series( &self, other: &DataFrame, s_left: &Series, s_right: &Series, suffix: Option<String>, ) -> Result<DataFrame> { #[cfg(feature = "dtype-categorical")] check_categorical_src(s_left, s_right)?; let join_tuples = s_left.hash_join_inner(s_right); let (df_left, df_right) = POOL.join( || self.create_left_df(&join_tuples, false), || unsafe { other .drop(s_right.name()) .unwrap() .take_iter_unchecked(join_tuples.iter().map(|(_left, right)| *right as usize)) }, ); self.finish_join(df_left, df_right, suffix) } /// Perform a left join on two DataFrames /// # Example /// /// ``` /// use polars_core::prelude::*; /// fn join_dfs(left: &DataFrame, right: &DataFrame) -> Result<DataFrame> { /// left.left_join(right, "join_column_left", "join_column_right") /// } /// ``` pub fn left_join(&self, other: &DataFrame, left_on: &str, right_on: &str) -> Result<DataFrame> { let s_left = self.column(left_on)?; let s_right = other.column(right_on)?; self.left_join_from_series(other, s_left, s_right, None) } pub(crate) fn left_join_from_series( &self, other: &DataFrame, s_left: &Series, s_right: &Series, suffix: Option<String>, ) -> Result<DataFrame> { #[cfg(feature = "dtype-categorical")] check_categorical_src(s_left, s_right)?; let opt_join_tuples = s_left.hash_join_left(s_right); let (df_left, df_right) = POOL.join( || self.create_left_df(&opt_join_tuples, true), || unsafe { other.drop(s_right.name()).unwrap().take_opt_iter_unchecked( opt_join_tuples .iter() .map(|(_left, right)| right.map(|i| i as usize)), ) }, ); self.finish_join(df_left, df_right, suffix) } /// Perform an outer join on two DataFrames /// # Example /// /// ``` /// use polars_core::prelude::*; /// fn join_dfs(left: &DataFrame, right: &DataFrame) -> Result<DataFrame> { /// left.outer_join(right, "join_column_left", "join_column_right") /// } /// ``` pub fn outer_join( &self, other: &DataFrame, left_on: &str, right_on: &str, ) -> Result<DataFrame> { let s_left = self.column(left_on)?; let s_right = other.column(right_on)?; self.outer_join_from_series(other, s_left, s_right, None) } pub(crate) fn outer_join_from_series( &self, other: &DataFrame, s_left: &Series, s_right: &Series, suffix: Option<String>, ) -> Result<DataFrame> { #[cfg(feature = "dtype-categorical")] check_categorical_src(s_left, s_right)?; // Get the indexes of the joined relations let opt_join_tuples = s_left.hash_join_outer(s_right); // Take the left and right dataframes by join tuples let (mut df_left, df_right) = POOL.join( || unsafe { self.drop(s_left.name()).unwrap().take_opt_iter_unchecked( opt_join_tuples .iter() .map(|(left, _right)| left.map(|i| i as usize)), ) }, || unsafe { other.drop(s_right.name()).unwrap().take_opt_iter_unchecked( opt_join_tuples .iter() .map(|(_left, right)| right.map(|i| i as usize)), ) }, ); let mut s = s_left.zip_outer_join_column(s_right, &opt_join_tuples); s.rename(s_left.name()); df_left.hstack_mut(&[s])?; self.finish_join(df_left, df_right, suffix) } } #[cfg(test)] mod test { use crate::df; use crate::prelude::*; fn create_frames() -> (DataFrame, DataFrame) { let 
s0 = Series::new("days", &[0, 1, 2]); let s1 = Series::new("temp", &[22.1, 19.9, 7.]); let s2 = Series::new("rain", &[0.2, 0.1, 0.3]); let temp = DataFrame::new(vec![s0, s1, s2]).unwrap(); let s0 = Series::new("days", &[1, 2, 3, 1]); let s1 = Series::new("rain", &[0.1, 0.2, 0.3, 0.4]); let rain = DataFrame::new(vec![s0, s1]).unwrap(); (temp, rain) } #[test] #[cfg_attr(miri, ignore)] fn test_inner_join() { let (temp, rain) = create_frames(); for i in 1..8 { std::env::set_var("POLARS_MAX_THREADS", format!("{}", i)); let joined = temp.inner_join(&rain, "days", "days").unwrap(); let join_col_days = Series::new("days", &[1, 2, 1]); let join_col_temp = Series::new("temp", &[19.9, 7., 19.9]); let join_col_rain = Series::new("rain", &[0.1, 0.3, 0.1]); let join_col_rain_right = Series::new("rain_right", [0.1, 0.2, 0.4].as_ref()); let true_df = DataFrame::new(vec![ join_col_days, join_col_temp, join_col_rain, join_col_rain_right, ]) .unwrap(); println!("{}", joined); assert!(joined.frame_equal(&true_df)); } } #[test] #[allow(clippy::float_cmp)] #[cfg_attr(miri, ignore)] fn test_left_join() { for i in 1..8 { std::env::set_var("POLARS_MAX_THREADS", format!("{}", i)); let s0 = Series::new("days", &[0, 1, 2, 3, 4]); let s1 = Series::new("temp", &[22.1, 19.9, 7., 2., 3.]); let temp = DataFrame::new(vec![s0, s1]).unwrap(); let s0 = Series::new("days", &[1, 2]); let s1 = Series::new("rain", &[0.1, 0.2]); let rain = DataFrame::new(vec![s0, s1]).unwrap(); let joined = temp.left_join(&rain, "days", "days").unwrap(); println!("{}", &joined); assert_eq!( (joined.column("rain").unwrap().sum::<f32>().unwrap() * 10.).round(), 3. ); assert_eq!(joined.column("rain").unwrap().null_count(), 3); // test join on utf8 let s0 = Series::new("days", &["mo", "tue", "wed", "thu", "fri"]); let s1 = Series::new("temp", &[22.1, 19.9, 7., 2., 3.]); let temp = DataFrame::new(vec![s0, s1]).unwrap(); let s0 = Series::new("days", &["tue", "wed"]); let s1 = Series::new("rain", &[0.1, 0.2]); let rain = DataFrame::new(vec![s0, s1]).unwrap(); let joined = temp.left_join(&rain, "days", "days").unwrap(); println!("{}", &joined); assert_eq!( (joined.column("rain").unwrap().sum::<f32>().unwrap() * 10.).round(), 3. ); assert_eq!(joined.column("rain").unwrap().null_count(), 3); } } #[test] #[cfg_attr(miri, ignore)] fn test_outer_join() -> Result<()> { let (temp, rain) = create_frames(); let joined = temp.outer_join(&rain, "days", "days")?; println!("{:?}", &joined); assert_eq!(joined.height(), 5); assert_eq!(joined.column("days")?.sum::<i32>(), Some(7)); let df_left = df!( "a"=> ["a", "b", "a", "z"], "b"=>[1, 2, 3, 4], "c"=>[6, 5, 4, 3] )?; let df_right = df!( "a"=> ["b", "c", "b", "a"], "k"=> [0, 3, 9, 6], "c"=> [1, 0, 2, 1] )?; let out = df_left.outer_join(&df_right, "a", "a")?; assert_eq!(out.column("c_right")?.null_count(), 1); Ok(()) } #[test] #[cfg_attr(miri, ignore)] fn test_join_with_nulls() { let dts = &[20, 21, 22, 23, 24, 25, 27, 28]; let vals = &[1.2, 2.4, 4.67, 5.8, 4.4, 3.6, 7.6, 6.5]; let df = DataFrame::new(vec![Series::new("date", dts), Series::new("val", vals)]).unwrap(); let vals2 = &[Some(1.1), None, Some(3.3), None, None]; let df2 = DataFrame::new(vec![ Series::new("date", &dts[3..]), Series::new("val2", vals2), ]) .unwrap(); let joined = df.left_join(&df2, "date", "date").unwrap(); assert_eq!( joined .column("val2") .unwrap() .f64() .unwrap() .get(joined.height() - 1), None ); } fn get_dfs() -> (DataFrame, DataFrame) { let df_a = df! 
{ "a" => &[1, 2, 1, 1], "b" => &["a", "b", "c", "c"], "c" => &[0, 1, 2, 3] } .unwrap(); let df_b = df! { "foo" => &[1, 1, 1], "bar" => &["a", "c", "c"], "ham" => &["let", "var", "const"] } .unwrap(); (df_a, df_b) } #[test] #[cfg_attr(miri, ignore)] fn test_join_multiple_columns() { let (mut df_a, mut df_b) = get_dfs(); // First do a hack with concatenated string dummy column let mut s = df_a .column("a") .unwrap() .cast::<Utf8Type>() .unwrap() .utf8() .unwrap() + df_a.column("b").unwrap().utf8().unwrap(); s.rename("dummy"); df_a.with_column(s).unwrap(); let mut s = df_b .column("foo") .unwrap() .cast::<Utf8Type>() .unwrap() .utf8() .unwrap() + df_b.column("bar").unwrap().utf8().unwrap(); s.rename("dummy"); df_b.with_column(s).unwrap(); let joined = df_a.left_join(&df_b, "dummy", "dummy").unwrap(); let ham_col = joined.column("ham").unwrap(); let ca = ham_col.utf8().unwrap(); let correct_ham = &[ Some("let"), None, Some("var"), Some("const"), Some("var"), Some("const"), ]; assert_eq!(Vec::from(ca), correct_ham); // now check the join with multiple columns let joined = df_a .join(&df_b, &["a", "b"], &["foo", "bar"], JoinType::Left, None) .unwrap(); let ca = joined.column("ham").unwrap().utf8().unwrap(); dbg!(&df_a, &df_b); assert_eq!(Vec::from(ca), correct_ham); let joined_inner_hack = df_a.inner_join(&df_b, "dummy", "dummy").unwrap(); let joined_inner = df_a .join(&df_b, &["a", "b"], &["foo", "bar"], JoinType::Inner, None) .unwrap(); dbg!(&joined_inner_hack, &joined_inner); assert!(joined_inner_hack .column("ham") .unwrap() .series_equal_missing(joined_inner.column("ham").unwrap())); let joined_outer_hack = df_a.outer_join(&df_b, "dummy", "dummy").unwrap(); let joined_outer = df_a .join(&df_b, &["a", "b"], &["foo", "bar"], JoinType::Outer, None) .unwrap(); assert!(joined_outer_hack .column("ham") .unwrap() .series_equal_missing(joined_outer.column("ham").unwrap())); } #[test] #[cfg_attr(miri, ignore)] #[cfg(feature = "dtype-categorical")] fn test_join_categorical() { use crate::toggle_string_cache; let _lock = crate::SINGLE_LOCK.lock(); toggle_string_cache(true); let (mut df_a, mut df_b) = get_dfs(); df_a.may_apply("b", |s| s.cast_with_dtype(&DataType::Categorical)) .unwrap(); df_b.may_apply("bar", |s| s.cast_with_dtype(&DataType::Categorical)) .unwrap(); let out = df_a.join(&df_b, "b", "bar", JoinType::Left, None).unwrap(); assert_eq!(out.shape(), (6, 5)); let correct_ham = &[ Some("let"), None, Some("var"), Some("const"), Some("var"), Some("const"), ]; let ham_col = out.column("ham").unwrap(); let ca = ham_col.utf8().unwrap(); assert_eq!(Vec::from(ca), correct_ham); // Test an error when joining on different string cache let (mut df_a, mut df_b) = get_dfs(); df_a.may_apply("b", |s| s.cast_with_dtype(&DataType::Categorical)) .unwrap(); // create a new cache toggle_string_cache(false); toggle_string_cache(true); df_b.may_apply("bar", |s| s.cast_with_dtype(&DataType::Categorical)) .unwrap(); let out = df_a.join(&df_b, "b", "bar", JoinType::Left, None); assert!(out.is_err()) } #[test] #[cfg_attr(miri, ignore)] fn empty_df_join() { let empty: Vec<String> = vec![]; let left = DataFrame::new(vec![ Series::new("key", &empty), Series::new("lval", &empty), ]) .unwrap(); let right = DataFrame::new(vec![ Series::new("key", &["foo"]), Series::new("rval", &[4]), ]) .unwrap(); let res = left.inner_join(&right, "key", "key"); assert!(res.is_ok()); assert_eq!(res.unwrap().height(), 0); right.left_join(&left, "key", "key").unwrap(); right.inner_join(&left, "key", "key").unwrap(); right.outer_join(&left, 
"key", "key").unwrap(); } #[test] #[cfg_attr(miri, ignore)] fn unit_df_join() -> Result<()> { let df1 = df![ "a" => [1], "b" => [2] ]?; let df2 = df![ "a" => [1, 2, 3, 4], "b" => [Some(1), None, Some(3), Some(4)] ]?; let out = df1.left_join(&df2, "a", "a")?; let expected = df![ "a" => [1], "b" => [2], "b_right" => [1] ]?; assert!(out.frame_equal(&expected)); Ok(()) } #[test] #[cfg_attr(miri, ignore)] fn test_join_err() -> Result<()> { let df1 = df![ "a" => [1, 2], "b" => ["foo", "bar"] ]?; let df2 = df![ "a" => [1, 2, 3, 4], "b" => [true, true, true, false] ]?; // dtypes don't match, error assert!(df1 .join(&df2, vec!["a", "b"], vec!["a", "b"], JoinType::Left, None) .is_err()); // length of join keys don't match error assert!(df1 .join(&df2, vec!["a"], vec!["a", "b"], JoinType::Left, None) .is_err()); Ok(()) } #[test] #[cfg_attr(miri, ignore)] fn test_join_floats() -> Result<()> { let df_a = df! { "a" => &[1.0, 2.0, 1.0, 1.0], "b" => &["a", "b", "c", "c"], "c" => &[0.0, 1.0, 2.0, 3.0] }?; let df_b = df! { "foo" => &[1.0, 2.0, 1.0], "bar" => &[1.0, 1.0, 1.0], "ham" => &["let", "var", "const"] }?; let out = df_a.join( &df_b, vec!["a", "c"], vec!["foo", "bar"], JoinType::Left, None, )?; assert_eq!( Vec::from(out.column("ham")?.utf8()?), &[None, Some("var"), None, None] ); let out = df_a.join( &df_b, vec!["a", "c"], vec!["foo", "bar"], JoinType::Outer, None, )?; assert_eq!( out.dtypes(), &[ DataType::Utf8, DataType::Float64, DataType::Float64, DataType::Utf8 ] ); Ok(()) } }
34.118406
157
0.495516
1446d6d44287350c42ab4b671f292a0dc2fe6287
100
pub mod route_client_param; pub mod route_common; pub mod route_result; pub mod route_server_param;
20
27
0.84
398e2f23ab0355326b0537142ac9c9efc4b6a426
2,194
pub struct Runner { pub input: String, } impl crate::Solution for Runner { fn run_a(&self) -> String { self.run(2020) } fn run_b(&self) -> String { self.run(30_000_000) } } impl Runner { fn run(&self, at: usize) -> String { let start = self.input.matches(',').count() + 2; Memory::new( at, self.input.trim().split(',').map(|c| { c.parse::<usize>() .unwrap_or_else(|_| panic!("Unable to parse input value {}", c)) }), ) .nth(at - start) .unwrap() .to_string() } } #[derive(Debug)] struct Memory { history: Vec<usize>, round: usize, last: usize, } impl Memory { fn new(size: usize, start: impl Iterator<Item = usize>) -> Memory { let mut mem = vec![0; size]; let mut round = 0; let mut last = 0; for v in start { round += 1; mem[v] = round; last = v; } Memory { history: mem, round, last, } } } impl Iterator for Memory { type Item = usize; fn next(&mut self) -> Option<Self::Item> { let mut res = 0; if let Some(last) = self.history.get(self.last) { if *last != 0 { res = self.round - *last; } } self.history[self.last] = self.round; self.last = res; self.round += 1; Some(res) } } #[cfg(test)] mod tests { use super::*; use crate::{read_input, Solution}; fn new() -> Runner { Runner { input: read_input(2020, "15"), } } fn simple() -> Runner { Runner { input: read_input(2020, "15_simple"), } } #[test] fn simple_a() { assert_eq!(simple().run_a(), String::from("436")); } #[test] fn simple_b() { assert_eq!(simple().run_b(), String::from("175594")); } #[test] fn real_a() { assert_eq!(new().run_a(), String::from("319")); } #[test] fn real_b() { assert_eq!(new().run_b(), String::from("2424")); } }
19.765766
84
0.457156
75ede0f02399be4727a90cf7cefcb6c91f116da6
52,498
#[macro_use] extern crate lalrpop_util; use std::borrow::Cow; use std::collections::{btree_map, BTreeMap, HashMap}; use std::convert::TryFrom; use std::fmt::Write; use std::fs::OpenOptions; use std::io::BufRead; use std::io::BufReader; use std::io::Read; use std::io::{BufWriter, Write as OtherWrite}; use std::os::unix::prelude::MetadataExt; use std::path::PathBuf; use std::process::Command; use lazy_static::lazy_static; use log::*; use regex::Regex; use serde::{Deserialize, Serialize}; use structopt::StructOpt; use thiserror::Error; use users::get_current_uid; use xmltree::{Element, XMLNode}; use strum::IntoEnumIterator; use strum_macros::EnumIter; mod utils; #[allow(unused_parens)] mod query; use fstrings::*; use utils::*; use crate::query::{MatchInfo, VMState}; #[derive(Error, Debug)] pub enum Error { #[error("Io error: {0}")] IoError(#[from] std::io::Error), #[error("Var error: {0}")] VarError(#[from] std::env::VarError), #[error("Format error: {0}")] FmtError(#[from] std::fmt::Error), #[error("Config error: {0}")] ConfigError(#[from] config::ConfigError), #[error("Config error: {0}")] BoxError(#[from] Box<dyn std::error::Error + Send>), #[error("Serde error: {0}")] Serderror(#[from] serde_json::Error), #[error("Command error: {0}, stderr: {1}")] CommandError(String, String), #[error("UTF8 error: {0}")] UTF8Error(#[from] std::string::FromUtf8Error), #[error("XML error: {0}")] XMLError(#[from] xmltree::ParseError), #[error("XML write error: {0}")] XMLWriteError(#[from] xmltree::Error), #[error("Regex error: {0}")] Regex(#[from] regex::Error), #[error("No default config file")] ConfigFile, #[error("Not found: {0}")] NotFound(String), #[error("Already exists")] AlreadyExists, #[error("VM currently defined, operation aborted")] CurrentlyDefined, #[error("Parsing PCI spec {0}")] ParsePCISpec(String), #[error("Template {0} doesn't exist")] TemplateDoesntExist(String), #[error("Renaming across parents not supported")] RenameAcrossParentsUnsupported, #[error("Cannot {1} snapshot {0} - has sub snapshots")] HasSubSnapshots(String, &'static str), #[error("No VM defined for {0}")] NoVMDefined(String), #[error("Filter parse error: {0}")] FilterParseError(String), #[error("Under {0}: {1}")] Context(String, Box<Error>), } #[derive(Debug, StructOpt, Clone, Default)] pub struct Fork { /// Full name of the domain pub name: String, /// Enable volatile VM execution - the domain definition will not be saved, and /// the definition will be removed when stopped. 
#[structopt(name = "volatile", short = "v")] pub volatile: bool, /// Store image in the temp pool, implies 'volatile' #[structopt(name = "temp", short = "t")] pub temp: bool, /// Base template used for actual VM execution #[structopt(name = "base-template", short = "b")] pub base_template: Option<String>, /// Start as paused #[structopt(name = "paused", short = "p")] pub paused: bool, /// Force operation (will kill the VM if it exists) #[structopt(name = "force", short = "f")] pub force: bool, #[structopt(flatten)] pub overrides: Overrides, } #[derive(Debug, StructOpt, Clone, Default)] pub struct Exists { /// Full name of the domain pub name: String, } #[derive(Debug, StructOpt, Clone, Default)] pub struct Kill { /// List of the full names of the domains to kill pub names: Vec<String>, /// Use regex matching for the given names #[structopt(name = "regex", short = "E")] pub regex: bool, /// Do not remove, just print the matching names #[structopt(name = "dry_run", short = "n")] pub dry_run: bool, /// Force operation (kill the VM even if it is running) #[structopt(name = "force", short = "f")] pub force: bool, } #[derive(Debug, StructOpt, Clone)] pub struct Start { /// Full name of the domain pub name: String, } #[derive(Debug, StructOpt, Clone)] pub struct Stop { /// Full name of the domain pub name: String, } #[derive(Debug, StructOpt, Clone, Default)] pub struct ShutdownWait { /// Full name of the domain pub name: String, } #[derive(Debug, StructOpt, Clone)] pub struct Rename { /// Full name of the domain pub name: String, /// New full name of the domain pub new_name: String, } #[derive(Debug, StructOpt, Clone, Default)] pub struct Console { /// Full name of the domain name: String, } #[derive(Debug, StructOpt, Clone, Default)] pub struct Overrides { /// Override GB of memory #[structopt(name = "memory", short = "m", long = "mem")] pub memory_gb: Option<u32>, /// Override number of CPUs #[structopt(name = "cpus", short = "c", long = "cpus")] pub nr_cpus: Option<u32>, /// Host devices from VF pools #[structopt(name = "netdevs", long = "netdev")] pub netdevs: Vec<String>, } #[derive(Debug, StructOpt, Clone)] pub struct Spawn { /// Full name of the domain pub full: String, /// Base template XML to use #[structopt(name = "base-template", short = "b")] pub base_template: String, /// Volatile VM #[structopt(name = "volatile", short = "v")] pub volatile: bool, /// Store image in the temp pool, implies 'volatile' #[structopt(name = "temp", short = "t")] pub temp: bool, /// Don't start the VM after creation #[structopt(name = "paused", short = "s")] pub paused: bool, #[structopt(flatten)] pub overrides: Overrides, } #[derive(Debug, StructOpt, Clone)] pub struct Modify { /// Full name of the domain full: String, #[structopt(flatten)] overrides: Overrides, } #[derive(Debug, StructOpt, Clone)] pub struct Undefine { /// List of the full names of the domains to kill pub names: Vec<String>, } #[derive(Debug, StructOpt, Clone)] pub struct UpdateSshParams { #[structopt(name = "quiet", short = "q")] pub quiet: bool, } #[derive(Debug, StructOpt, Clone)] pub struct List { #[structopt(short = "f", long = "fields")] pub fields: Option<String>, #[structopt(short = "n", long = "no-headers")] pub no_headers: bool, #[structopt(name = "filter")] pub filter: Vec<String>, } #[derive(Debug, StructOpt, Clone)] pub enum CommandMode { /// Fork a new VM out of a suspended VM image and optionally spawn it /// if a template definition is provided Fork(Fork), /// Check for an existence of a VM image Exists(Exists), /// Spawn 
an image into a running VM based on template Spawn(Spawn), /// Start console on a VM Console(Console), /// Stop a running VM but don't remove its definition Stop(Stop), /// Clean shutdown and wait for VM to be off ShutdownWait(ShutdownWait), /// Rename a VM that stopped Rename(Rename), /// Start a VM that was shutdown Start(Start), /// Remove a VM and its image file Kill(Kill), /// Remove a VM definition (but not its image file) Undefine(Undefine), /// Modify an existing VM definition Modify(Modify), /// List image files and VMs List(List), /// Update SSH config based on DHCP of client VMs UpdateSsh(UpdateSshParams), } enum UpdateSshDisposition { NotConfigured, Updated, NotNeeded, } #[derive(Debug, StructOpt, Clone)] pub struct CommandArgs { #[structopt(name = "config-file", short = "c")] config: Option<PathBuf>, #[structopt(subcommand)] command: CommandMode, } #[derive(Debug, Deserialize, Clone)] pub struct Config { #[serde(rename = "pool-path")] pub pool_path: PathBuf, #[serde(rename = "tmp-path")] pub tmp_path: PathBuf, #[serde(default)] #[serde(rename = "multi-user")] pub multi_user: bool, #[serde(default)] #[serde(rename = "ssh-config")] pub ssh_config: Option<SSHConfig>, } #[derive(Debug, Deserialize, Clone)] pub struct SSHConfig { #[serde(rename = "identity-file")] pub identity_file: String, #[serde(rename = "pubkey-file")] pub pubkey_file: String, #[serde(rename = "config-file")] pub config_file: PathBuf, } pub struct VMess { config: Config, command: Option<CommandMode>, } #[derive(Debug)] struct Snapshot { rel_path: PathBuf, size_mb: u64, vm_using: Option<String>, sub: BTreeMap<String, Snapshot>, } lazy_static! { static ref PARSE_QCOW2: Regex = Regex::new("^([^%]+)([%]([^.]*))?[.]qcow2?$").unwrap(); } impl Snapshot { fn join(&self, x: &str) -> PathBuf { let name = self.rel_path.file_name().unwrap().to_str().unwrap(); if let Some(cap) = PARSE_QCOW2.captures(&name) { let name = cap.get(1).unwrap().as_str(); let mut v = if let Some(snapshot_path) = cap.get(3) { snapshot_path.as_str().split("%").collect() } else { vec![] }; v.push(x); self.rel_path .with_file_name(format!("{}%{}.qcow2", name, v.join("%"))) } else { panic!(); } } } #[derive(Debug)] struct Image { root: Snapshot, } #[derive(Debug)] struct VM { name: String, attrs: BTreeMap<String, String>, } #[derive(Debug)] pub struct Pool { images: BTreeMap<String, Image>, vms: BTreeMap<String, VM>, } struct GetInfo<'a> { snap: &'a Snapshot, vm: Option<&'a VM>, } impl<'a> GetInfo<'a> { fn image_path(&self) -> &'a PathBuf { &self.snap.rel_path } } impl Pool { fn get_by_name<'a>(&'a self, name: &str) -> Result<GetInfo<'a>, Error> { fn by_snapshot<'a>( pool: &'a Pool, lookup: &str, image: &Image, snapshot: &'a Snapshot, level: usize, name_path: String, ) -> Option<GetInfo<'a>> { if lookup == &name_path { return Some(GetInfo { snap: snapshot, vm: if let Some(name) = &snapshot.vm_using { pool.vms.get(name) } else { None }, }); } for (key, snapshot) in snapshot.sub.iter() { if let Some(i) = by_snapshot( pool, lookup, image, &snapshot, level + 1, format!("{}.{}", name_path, key), ) { return Some(i); } } None } fn by_image<'a>( lookup: &str, pool: &'a Pool, image: &'a Image, name_path: String, ) -> Option<GetInfo<'a>> { by_snapshot(pool, lookup, &image, &image.root, 0, name_path.clone()) } for (key, image) in self.images.iter() { if let Some(i) = by_image(name, self, &image, key.clone()) { return Ok(i); } } Err(Error::NotFound(name.to_owned())) } } impl Snapshot { fn new( root_path: &PathBuf, path: PathBuf, files_to_domains: &HashMap<PathBuf, 
String>, ) -> Result<Self, Error> { let abs_path = root_path.join(&path); Ok(Snapshot { sub: Default::default(), vm_using: files_to_domains.get(&abs_path).map(|x| (*x).to_owned()), size_mb: (std::fs::metadata(&abs_path)?.blocks() * 512) / (1024 * 1024), rel_path: path, }) } } impl VMess { pub fn command(opt: &CommandArgs) -> Result<Self, Error> { let opt = (*opt).clone(); Self::new(opt.config, Some(opt.command)) } pub fn default() -> Result<Self, Error> { Self::new(None, None) } fn new(config_path: Option<PathBuf>, command: Option<CommandMode>) -> Result<Self, Error> { let config_path = if let Some(config) = &config_path { config.clone() } else { if let Ok(path) = std::env::var("VMESS_CONFIG_PATH") { PathBuf::from(path) } else { if let Some(dir) = dirs::config_dir() { dir.join("vmess").join("config.toml") } else { return Err(Error::ConfigFile); } } }; let mut settings = config::Config::default(); settings .merge(config::File::new(config_path.to_str() .ok_or_else(|| Error::ConfigFile)?, config::FileFormat::Toml))? // Add in settings from the environment (with a prefix of APP) // Eg.. `APP_DEBUG=1 ./target/app` would set the `debug` key .merge(config::Environment::with_prefix("VMESS_ENV_CONFIG"))?; let mut config = settings.try_into::<Config>()?; config.pool_path = adjust_path_by_env(config.pool_path); config.tmp_path = PathBuf::try_from( config .tmp_path .into_os_string() .into_string() .unwrap() .replace("$USER", &std::env::var("USER").expect("USER not defined")), ) .unwrap(); Ok(Self { command, config }) } fn get_vm_prefix(&self) -> String { match self.config.multi_user { true => format!("{}-", std::env::var("USER").expect("USER not defined")), false => "".to_owned(), } } pub fn run(&mut self) -> Result<(), Error> { let command = match &self.command { Some(command) => command.clone(), None => return Ok(()), }; match command { CommandMode::List(params) => { self.list(params)?; } CommandMode::Fork(params) => { self.fork(params)?; } CommandMode::Exists(params) => { self.exists(params)?; } CommandMode::Kill(params) => { self.kill(params)?; } CommandMode::Start(params) => { self.start(params)?; } CommandMode::Stop(params) => { self.stop(params)?; } CommandMode::ShutdownWait(params) => { self.shutdown_wait(params)?; } CommandMode::Rename(params) => { self.rename(params)?; } CommandMode::Console(params) => { self.console(params)?; } CommandMode::Spawn(params) => { self.spawn(params)?; } CommandMode::Modify(params) => { self.modify(params)?; } CommandMode::Undefine(params) => { self.undefine(params)?; } CommandMode::UpdateSsh(params) => { self.update_ssh(params)?; } } Ok(()) } pub fn image_full_basename(&self, full_name: impl AsRef<str>) -> PathBuf { let basename = full_name.as_ref().to_owned().replace(".", "%"); self.config.pool_path.join(basename) } pub fn get_config(&self) -> &Config { &self.config } pub fn get_pool(&self) -> Result<Pool, Error> { let mut pool = Pool { images: Default::default(), vms: Default::default(), }; let mut files_to_domains = HashMap::new(); let vmname_prefix = self.get_vm_prefix(); for line in ibash_stdout!("virsh list --all --name")?.lines() { let line = line.trim(); if line.is_empty() { continue; } let vmname = line; let short_vmname = if vmname.starts_with(&vmname_prefix) { &vmname[vmname_prefix.len()..] 
} else { continue; }; match Self::load_extra_domain_info(&mut files_to_domains, short_vmname, vmname, &mut pool) { Ok(_) => {}, Err(_) => { // Assume VM went away during iteration pool.vms.remove(short_vmname); }, } } let pool_path = &self.config.pool_path; for entry in std::fs::read_dir(&self.config.pool_path)? { let entry = entry?; let name = entry.file_name(); let name = name.to_string_lossy(); let path = entry.path(); if !path.is_file() { continue; } if let Some(cap) = PARSE_QCOW2.captures(&name) { let name = cap.get(1).unwrap().as_str(); let v = if let Some(snapshot_path) = cap.get(3) { snapshot_path.as_str().split("%").collect() } else { vec![] }; let image = match pool.images.entry(name.to_owned()) { btree_map::Entry::Vacant(v) => { let path = PathBuf::from(format!("{}.qcow2", name)); v.insert(Image { root: Snapshot::new(&pool_path, path, &files_to_domains)?, }) } btree_map::Entry::Occupied(o) => o.into_mut(), }; let mut node = &mut image.root; let mut r = vec![]; for sub in v.into_iter() { r.push(sub.clone()); let sub_path = if r.len() == 0 { "".to_string() } else { format!("%{}", r.join("%")) }; let image = match node.sub.entry(sub.to_owned()) { btree_map::Entry::Vacant(v) => { let path = PathBuf::from(format!("{}{}.qcow2", name, sub_path)); v.insert(Snapshot::new(&pool_path, path, &files_to_domains)?) } btree_map::Entry::Occupied(o) => o.into_mut(), }; node = image; } } } Ok(pool) } fn list(&mut self, params: List) -> Result<(), Error> { let pool = self.get_pool()?; use indexmap::IndexSet; use prettytable::{format, Cell, Row, Table}; let filter_expr = query::Expr::parse_cmd(&params.filter)?; let mut table = Table::new(); table.set_format(*format::consts::FORMAT_NO_BORDER_LINE_SEPARATOR); #[derive(Serialize, Deserialize, Hash, Eq, PartialEq, Debug, EnumIter)] enum Column { Name, Volatile, State, MemUsage, DiskUsage, } let mut hm = HashMap::new(); for field in Column::iter() { hm.insert(format!("{:?}", field), field); } let mut columns = indexmap::IndexSet::new(); match params.fields { Some(s) => { for s in s.split(",") { match hm.remove(s) { Some(x) => { columns.insert(x); }, None => {} } } } None => { columns.insert(Column::Name); columns.insert(Column::Volatile); columns.insert(Column::State); columns.insert(Column::MemUsage); columns.insert(Column::DiskUsage); } } if !params.no_headers { table.set_titles(Row::new( columns .iter() .map(|x| Cell::new(&ron::ser::to_string(x).expect("serialization"))) .collect(), )); } fn by_snapshot( columns: &IndexSet<Column>, config: &Config, table: &mut Table, pool: &Pool, image: &Image, snapshot: &Snapshot, path: String, filter_expr: &query::Expr, ) { let abs_image = config.pool_path.join(&snapshot.rel_path); let tmp = if let Ok(link) = std::fs::read_link(abs_image) { if link.starts_with(&config.tmp_path) { "Y" } else { "" } } else { "" }; let (vm_state, volatile, mem_size) = if let Some(vm_using) = &snapshot.vm_using { if let Some(vm) = pool.vms.get(vm_using) { let state = vm.attrs.get("State").map(|x| x.as_str()).unwrap_or(""); let vol = if vm.attrs.get("Persistent").map(|x| x.as_str()).unwrap_or("") == "no" { "y" } else { tmp }; let mem_size = if state == "running" { vm.attrs .get("Max memory") .map(|x| { Cow::from(format!( "{:.2} GB", (x.as_str() .split(" ") .nth(0) .unwrap() .parse::<i64>() .unwrap() / 1024) as f32 / 1024.0 )) }) .unwrap_or(Cow::from("")) } else { Cow::from("") }; (state, vol, mem_size) } else { ("", tmp, Cow::from("")) } } else { ("", tmp, Cow::from("")) }; let disk_size = format!("{:.2} GB", snapshot.size_mb as f32 / 
1024.0); let mut row = Row::empty(); for column in columns { let s = match column { Column::Name => &path, Column::Volatile => volatile, Column::State => &vm_state, Column::MemUsage => &mem_size, Column::DiskUsage => &disk_size, }; row.add_cell(Cell::new(s)); } let mi = MatchInfo { vm_state: match vm_state { "running" => Some(VMState::Running), "shut off" => Some(VMState::Stopped), _ => None, }, name: &path, }; if filter_expr.match_info(&mi) { table.add_row(row); } for (key, snapshot) in snapshot.sub.iter() { by_snapshot( &columns, config, table, pool, image, &snapshot, format!("{}.{}", path, key), filter_expr, ); } } fn by_image( columns: &IndexSet<Column>, config: &Config, table: &mut Table, pool: &Pool, image: &Image, path: String, filter_expr: &query::Expr, ) { by_snapshot(&columns, config, table, pool, &image, &image.root, path, filter_expr); } for (key, image) in pool.images.iter() { by_image( &columns, &self.config, &mut table, &pool, &image, key.clone(), &filter_expr, ); } table.print_tty(false); Ok(()) } fn get_template(&self, name: &str) -> Result<Element, Error> { let filename = self .config .pool_path .join(format!("templates/{}.xml", name)); if !filename.exists() { return Err(Error::TemplateDoesntExist(name.to_owned())); } let mut file = std::fs::File::open(filename)?; let mut contents = String::new(); file.read_to_string(&mut contents)?; Ok(Element::parse(contents.as_bytes())?) } fn modify_xml_using_overrides(xml: &mut Element, overrides: &Overrides) -> Result<(), Error> { if let Some(given_memory) = overrides.memory_gb { if let Some(memory) = xml.get_mut_child("memory") { memory .attributes .insert("unit".to_owned(), "KiB".to_owned()); memory.children[0] = XMLNode::Text(format!("{}", given_memory as u64 * 0x100000u64)); } if let Some(memory) = xml.get_mut_child("currentMemory") { memory .attributes .insert("unit".to_owned(), "KiB".to_owned()); memory.children[0] = XMLNode::Text(format!("{}", given_memory as u64 * 0x100000u64)); } } if let Some(nr_cpus) = overrides.nr_cpus { if let Some(vcpu) = xml.get_mut_child("vcpu") { vcpu.children[0] = XMLNode::Text(format!("{}", nr_cpus)); } } if let Some(devices) = xml.get_mut_child("devices") { // Remove existing host devices while let Some(_netdev) = devices.take_child("netdevs") {} for netdev in &overrides.netdevs { if netdev.starts_with("pool:") { let netdev = &netdev[5..]; let mut model = "".to_owned(); let mut network = ""; for part in netdev.split(",") { if part.starts_with("model:") { let model_type = &part[6..]; model = format!("<model type='{model_type}'/>"); } else { network = part; } } let new_elem = format!( r#" <interface type='network'> {model} <source network='{network}' /> </interface> "#, ); let elem = Element::parse(new_elem.as_bytes())?; devices.children.push(XMLNode::Element(elem)); } else if let Some((host, guest)) = netdev.split_once("/") { lazy_static! 
{ static ref BUS_SLOT: Regex = Regex::new(r"^([0-9a-f]+):([0-9a-f]+)[.]([0-9a-f]+)$").unwrap(); } if let (Some(cap_host), Some(cap_guest)) = (BUS_SLOT.captures(&host), BUS_SLOT.captures(&guest)) { let new_elem = format!( r#" <hostdev mode='subsystem' type='pci' managed='yes'> <source> <address domain='0x0000' bus='0x{}' slot='0x{}' function='0x{}'/> </source> <address type='pci' domain='0x0000' bus='0x{}' slot='0x{}' function='0x{}'/> </hostdev>"#, cap_host.get(1).unwrap().as_str(), cap_host.get(2).unwrap().as_str(), cap_host.get(3).unwrap().as_str(), cap_guest.get(1).unwrap().as_str(), cap_guest.get(2).unwrap().as_str(), cap_guest.get(3).unwrap().as_str(), ); let elem = Element::parse(new_elem.as_bytes())?; devices.children.push(XMLNode::Element(elem)); } else { return Err(Error::ParsePCISpec(netdev.to_owned())); } } else { return Err(Error::ParsePCISpec(netdev.to_owned())); } } } Ok(()) } fn spawn(&mut self, params: Spawn) -> Result<(), Error> { let pool = self.get_pool()?; let to_bring_up = pool.get_by_name(&params.full)?; if !to_bring_up.snap.sub.is_empty() { return Err(Error::HasSubSnapshots(params.full.clone(), "")); } info!("Preparing to spawn VM {}", params.full); let mut xml = self.get_template(&params.base_template)?; let to_bring_up_image = self.config.pool_path.join(&to_bring_up.image_path()); let to_bring_up_image_path = to_bring_up_image.display(); if to_bring_up_image.metadata()?.permissions().readonly() { info!("Setting image to read-write"); if to_bring_up_image.metadata()?.uid() != get_current_uid() { ibash_stdout!("sudo -u qemu chmod u+w {to_bring_up_image_path}")?; } else { ibash_stdout!("chmod u+w {to_bring_up_image_path}")?; } } let hash: u64 = calculate_hash(&params.full); let new_mac = format!( "52:52:{:02x}:{:02x}:{:02x}:{:02x}", (hash >> 32) & 0xff, (hash >> 40) & 0xff, (hash >> 48) & 0xff, (hash >> 56) & 0xff, ); if let Some(uuid) = xml.get_mut_child("uuid") { uuid.children[0] = XMLNode::Text(format!("{}", uuid::Uuid::new_v4())); } let vmname_prefix = self.get_vm_prefix(); if let Some(name) = xml.get_mut_child("name") { let vm = params.full.clone(); let prefixed_vm_name = format!("{}{}", vmname_prefix, vm); name.children[0] = XMLNode::Text(prefixed_vm_name); } if let Some(devices) = xml.get_mut_child("devices") { if let Some(interface) = devices.get_mut_child("interface") { if let Some(mac) = interface.get_mut_child("mac") { mac.attributes.insert("address".to_owned(), new_mac); } } if let Some(disk) = devices.get_mut_child("disk") { if let Some(source) = disk.get_mut_child("source") { source .attributes .insert("file".to_owned(), format!("{}", to_bring_up_image_path)); } } } Self::modify_xml_using_overrides(&mut xml, &params.overrides)?; info!("Writing VM definition"); let dir = tempdir::TempDir::new("vmess")?; let file_path = dir.path().join("domain.xml"); let f = std::fs::File::create(&file_path)?; let file_path = file_path.display(); xml.write_with_config( &f, xmltree::EmitterConfig { perform_indent: true, ..Default::default() }, )?; f.sync_all()?; drop(f); let volatile = if params.temp { true } else { params.volatile }; let v = if volatile { info!("Creating volatile VM"); if params.paused { ibash_stdout!("virsh create {file_path} --paused")? } else { ibash_stdout!("virsh create {file_path} ")? } } else { info!("Defining VM"); ibash_stdout!("virsh define {file_path}")? 
}; info!("Result: {}", v.trim()); if !volatile && !params.paused { ibash_stdout!("virsh start {vmname_prefix}{params.full}")?; } dir.close()?; Ok(()) } fn modify(&mut self, params: Modify) -> Result<(), Error> { let pool = self.get_pool()?; let existing = pool.get_by_name(&params.full)?; if let Some(vm) = &existing.vm { let vmname_prefix = self.get_vm_prefix(); let contents = ibash_stdout!("virsh dumpxml {vmname_prefix}{vm.name}")?; let mut xml = Element::parse(contents.as_bytes())?; Self::modify_xml_using_overrides(&mut xml, &params.overrides)?; let dir = tempdir::TempDir::new("vmess")?; let file_path = dir.path().join("domain.xml"); let f = std::fs::File::create(&file_path)?; xml.write_with_config( &f, xmltree::EmitterConfig { perform_indent: true, ..Default::default() }, )?; f.sync_all()?; drop(f); let file_path = file_path.display(); ibash_stdout!("virsh define {file_path}")?; dir.close()?; } Ok(()) } fn undefine(&mut self, params: Undefine) -> Result<(), Error> { let vmname_prefix = self.get_vm_prefix(); for name in &params.names { ibash_stdout!("virsh undefine {vmname_prefix}{name}")?; } Ok(()) } fn fork(&mut self, params: Fork) -> Result<(), Error> { let pool = self.get_pool()?; if let Some(base_template) = &params.base_template { let _xml = self.get_template(&base_template)?; } let new_full_name = params.name.clone(); let mut parts: Vec<_> = params.name.split(".").collect(); let name = parts.pop().expect("name"); let parent_name = parts.join("."); let parent = pool.get_by_name(&parent_name)?; let new_base_name = parent.snap.join(name); let new_adv = self.config.pool_path.join(&new_base_name); // TODO: verify parent is not running if let Ok(existing) = pool.get_by_name(&new_full_name) { if params.force { if let Some(vm) = &existing.vm { info!("Removing VM (state {:?})", existing.snap.sub.get("State")); let vmname_prefix = self.get_vm_prefix(); let r1 = ibash_stdout!("virsh destroy {vmname_prefix}{vm.name}"); let r2 = ibash_stdout!("virsh undefine {vmname_prefix}{vm.name}"); if r1.is_err() && r2.is_err() { r2?; } } std::fs::remove_file(&new_adv)?; } else { return Err(Error::AlreadyExists); } } if params.temp { std::fs::create_dir_all(&self.config.tmp_path)?; } let new = if !params.temp { &self.config.pool_path } else { &self.config.tmp_path }.join(&new_base_name); let _ = std::fs::remove_file(&new); let new_disp = new.display(); if params.temp { let _ = std::fs::remove_file(&new_adv); std::os::unix::fs::symlink(&new, &new_adv).map_err(|e| { Error::Context( format!("symlink {} creation", new_adv.display()), Box::new(e.into()), ) })?; } let pool_path = &self.config.pool_path; let backing = pool_path.join(&parent.image_path()); let backing_basename = backing.file_name().unwrap().to_str().unwrap(); let backing_disp = backing.display(); info!( "Creating new snapshot: {} -> {}", parent_name, new_full_name ); if params.temp { std::fs::create_dir_all(&self.config.tmp_path)?; } if !backing.metadata()?.permissions().readonly() { info!("Setting parent image to read-only"); if backing.metadata()?.uid() != get_current_uid() { ibash_stdout!("sudo -u qemu chmod u-w {backing_disp}")?; } else { ibash_stdout!("chmod u-w {backing_disp}")?; } } let cmd = format!("qemu-img create -f qcow2 {new_disp} -F qcow2 -b {backing_disp}"); let v = ibash_stdout!("{}", cmd)?; info!("qemu-image create result: {:?}", v); // Make sure the backing store pathname is relative. 
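// (A note on the rebase below: `qemu-img rebase -u` is the "unsafe" mode,
// which only rewrites the backing-file reference in the qcow2 header and
// copies no data; pointing it at the basename keeps the overlay usable
// even if the pool directory is later moved or mounted elsewhere.)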
let cmd = format!("qemu-img rebase -F qcow2 -u {new_disp} -b {backing_basename}"); let v = ibash_stdout!("{}", cmd)?; if v != "" { info!("qemu-image rebase result: {:?}", v); } if let Some(template) = params.base_template { self.spawn(Spawn { full: params.name.clone(), base_template: template, temp: params.temp, volatile: params.volatile, paused: params.paused, overrides: params.overrides.clone(), })?; } Ok(()) } fn exists(&mut self, params: Exists) -> Result<(), Error> { let pool = self.get_pool()?; pool.get_by_name(&params.name).map(|_| ()) } fn start(&mut self, params: Start) -> Result<(), Error> { let pool = self.get_pool()?; let existing = pool.get_by_name(&params.name)?; if !existing.snap.sub.is_empty() { return Err(Error::HasSubSnapshots(params.name.clone(), "start")); } let vmname_prefix = self.get_vm_prefix(); if let Some(vm) = &existing.vm { ibash_stdout!("virsh start {vmname_prefix}{vm.name}")?; } else { return Err(Error::NoVMDefined(params.name)); } Ok(()) } fn stop(&mut self, params: Stop) -> Result<(), Error> { let pool = self.get_pool()?; let vmname_prefix = self.get_vm_prefix(); let existing = pool.get_by_name(&params.name)?; if let Some(vm) = &existing.vm { ibash_stdout!("virsh shutdown {vmname_prefix}{vm.name}")?; } else { return Err(Error::NoVMDefined(params.name)); } Ok(()) } fn shutdown_wait(&mut self, params: ShutdownWait) -> Result<(), Error> { let pool = self.get_pool()?; let existing = pool.get_by_name(&params.name)?; if let Some(vm) = &existing.vm { let vmname_prefix = self.get_vm_prefix(); ibash_stdout!("virsh shutdown {vmname_prefix}{vm.name}")?; while let Err(_) = ibash_stdout!( "virsh list --state-shutoff --name | grep -E '^{vmname_prefix}{vm.name}$'" ) { if let Err(_) = ibash_stdout!( "virsh list --name | grep -E '^{vmname_prefix}{vmname}$'", vmname = vm.name ) { // Volatile VMs disappear break; } std::thread::sleep(std::time::Duration::from_millis(1000)); } } else { return Err(Error::NoVMDefined(params.name)); } Ok(()) } fn rename(&mut self, params: Rename) -> Result<(), Error> { let pool = self.get_pool()?; // Check that the VM exists let existing = pool.get_by_name(&params.name)?; if let Some(_) = &existing.vm { // TODO: this can have a workaround return Err(Error::CurrentlyDefined); } let mut parts: Vec<_> = params.name.split(".").collect(); let _name = parts.pop().expect("name"); let existing_parent_name = parts.join("."); // Check that the destination does not exist if let Err(Error::NotFound(_)) = pool.get_by_name(&params.new_name) { let mut parts: Vec<_> = params.new_name.split(".").collect(); let name = parts.pop().expect("name"); let parent_name = parts.join("."); if parent_name != existing_parent_name { return Err(Error::RenameAcrossParentsUnsupported); } let new_base_name = { if parent_name == "" { PathBuf::from(format!("{name}.qcow2")) } else { let parent = pool.get_by_name(&parent_name)?; parent.snap.join(name) } }; let existing = &existing.image_path(); let tmp_image_path = self.config.tmp_path.join(existing); if tmp_image_path.exists() { let new_adv = self.config.tmp_path.join(&new_base_name); let image_path = self.config.tmp_path.join(existing); let new_link_path = self.config.pool_path.join(&new_base_name); let old_link_path = self.config.pool_path.join(existing); std::fs::rename(&image_path, &new_adv).map_err(|e| { Error::Context( format!("rename: {} -> {}", image_path.display(), new_adv.display()), Box::new(e.into()), ) })?; std::fs::remove_file(&old_link_path).map_err(|e| { Error::Context( format!("remove {}", old_link_path.display()), 
Box::new(e.into()), ) })?; std::os::unix::fs::symlink(&new_adv, &new_link_path).map_err(|e| { Error::Context( format!("symlink {} creation", new_adv.display()), Box::new(e.into()), ) })?; } else { let new_adv = self.config.pool_path.join(&new_base_name); let image_path = self.config.pool_path.join(existing); std::fs::rename(&image_path, &new_adv).map_err(|e| { Error::Context( format!("rename: {} -> {}", image_path.display(), new_adv.display()), Box::new(e.into()), ) })?; } } else { return Err(Error::AlreadyExists); } Ok(()) } fn console(&mut self, params: Console) -> Result<(), Error> { let pool = self.get_pool()?; let existing = pool.get_by_name(&params.name)?; if let Some(vm) = &existing.vm { let vmname_prefix = self.get_vm_prefix(); let vm = format!("{vmname_prefix}{}", vm.name); let mut v = Command::new("virsh").arg("console").arg(&vm).spawn()?; let _status = v.wait()?; } else { return Err(Error::NoVMDefined(params.name)); } Ok(()) } pub fn background_console(&self, fullname: &str) -> Result<Command, Error> { let pool = self.get_pool()?; let existing = pool.get_by_name(&fullname)?; if let Some(vm) = &existing.vm { let vmname_prefix = self.get_vm_prefix(); let vm = format!("{vmname_prefix}{}", vm.name); let mut command = Command::new("virsh"); command.arg("console").arg(&vm); return Ok(command); } else { return Err(Error::NoVMDefined(fullname.to_owned())); } } fn kill(&mut self, params: Kill) -> Result<(), Error> { let pool = self.get_pool()?; let check_match = &|s: &str| -> Result<bool, Error> { if params.regex { for name in params.names.iter() { let regex = Regex::new(name)?; if regex.is_match(s) { return Ok(true); } } } else { for name in params.names.iter() { if name == s { return Ok(true); } } } Ok(false) }; struct Closure<'a> { by_snapshot: &'a dyn Fn(&Closure, &Image, &Snapshot, String) -> Result<(), Error>, by_name: &'a dyn Fn(&Closure, &Image, String) -> Result<(), Error>, } let recursive = Closure { by_snapshot: &|closure, image, snapshot, name_path| { for (key, snapshot) in snapshot.sub.iter() { (closure.by_snapshot)( closure, image, &snapshot, format!("{}.{}", name_path, key))?; } let vm = if let Some(name) = &snapshot.vm_using { pool.vms.get(name) } else { None }; if !check_match(name_path.as_str())? 
{ return Ok(()); } if params.dry_run { println!("{}", name_path); return Ok(()); } info!("About to remove VM and image files for {}", name_path); let image_path = &snapshot.rel_path; if let Some(vm) = &vm { if !params.force { return Err(Error::CurrentlyDefined); } info!("Stopping VM for {}{}", name_path, vm.attrs.get("State").map(|s| format!(", state: {s}")) .unwrap_or("".to_owned())); let vmname_prefix = self.get_vm_prefix(); match vm.attrs.get("State").as_ref().map(|x| x.as_str()) { Some("shut off") => { ibash_stdout!("virsh undefine {vmname_prefix}{vm.name}")?; } _ => { ibash_stdout!("virsh destroy {vmname_prefix}{vm.name}")?; } } } info!("Remove image files for {}", name_path); let image_path = self.config.pool_path.join(&image_path); std::fs::remove_file(&image_path)?; let tmp_image_path = self.config.tmp_path.join(&image_path); if tmp_image_path.exists() { std::fs::remove_file(&tmp_image_path)?; } Ok(()) }, by_name: &|closure, image, path| { (closure.by_snapshot)(closure, &image, &image.root, path) }, }; for (key, image) in pool.images.iter() { (recursive.by_name)(&recursive, &image, key.clone())?; } Ok(()) } fn update_ssh(&mut self, params: UpdateSshParams) -> Result<UpdateSshDisposition, Error> { let mut ssh_config = if let Some(ssh_config) = &self.config.ssh_config { ssh_config.clone() } else { return Ok(UpdateSshDisposition::NotConfigured); }; let pool = self.get_pool()?; ssh_config.config_file = adjust_path_by_env(ssh_config.config_file); let mut base = BTreeMap::new(); if ssh_config.config_file.exists() { lazy_static! { static ref HOST: Regex = Regex::new("^Host (.*)$").unwrap(); static ref HOSTNAME: Regex = Regex::new("^Hostname (.*)$").unwrap(); } let mut host: Option<String> = None; let file = std::fs::File::open(&ssh_config.config_file)?; for line in BufReader::new(file).lines() { let line = line?; if let Some(cap) = HOSTNAME.captures(&line) { let hostname = Some(cap.get(1).unwrap().as_str().to_owned()); if let (Some(host), Some(hostname)) = (&host, &hostname) { if pool.get_by_name(host).is_ok() { base.insert(host.clone(), hostname.clone()); } } } else if let Some(cap) = HOST.captures(&line) { host = Some(cap.get(1).unwrap().as_str().to_owned()); } } } let mut config = String::new(); let vmname_prefix = self.get_vm_prefix(); for line in ibash_stdout!("virsh list --name")?.lines() { let vmname = line.trim(); if vmname.is_empty() { continue; } let short_vmname = if vmname.starts_with(&vmname_prefix) { &vmname[vmname_prefix.len()..] 
} else { continue; }; let address = ibash_stdout!( r#"virsh domifaddr {vmname} | grep ipv4 \ | awk '{{print $4}}' | awk -F/ '{{print $1}}' | tail -n 1"# )?; let address = address.trim().to_owned(); if address.len() > 0 { base.insert(short_vmname.to_owned(), address.trim().to_owned()); } } for (host, address) in base.iter() { writeln!( &mut config, r#"Host {} User user Hostname {} IdentityFile {} "#, host, address.trim(), ssh_config.identity_file )?; } if ssh_config.config_file.exists() { let mut file = std::fs::File::open(&ssh_config.config_file)?; let mut contents = String::new(); file.read_to_string(&mut contents)?; if contents == config { if !params.quiet { // No need to rewrite info!("no update needed for {}", ssh_config.config_file.display()); } return Ok(UpdateSshDisposition::NotNeeded); } } let with_tmp = ssh_config .config_file .add_extension(format!("tmp.{}", std::process::id())); let file = OpenOptions::new() .write(true) .truncate(true) .create(true) .open(&with_tmp)?; let mut file = BufWriter::new(file); write!(&mut file, "{}", config)?; drop(file); use std::os::unix::fs::PermissionsExt; let metadata = with_tmp.metadata()?; let mut permissions = metadata.permissions(); permissions.set_mode(0o600); std::fs::set_permissions(&with_tmp, permissions)?; std::fs::rename(with_tmp, &ssh_config.config_file)?; if !params.quiet { info!( "updated {} with {} hosts", ssh_config.config_file.display(), base.len() ); } Ok(UpdateSshDisposition::Updated) } fn load_extra_domain_info( files_to_domains: &mut HashMap<PathBuf, String>, short_vmname: &str, vmname: &str, pool: &mut Pool) -> Result<(), Error> { lazy_static! { static ref SOURCE_FILE: Regex = Regex::new(r"^[\t ]+[^ ]+[\t ]+([^']+)$").unwrap(); static ref DOM_PROP: Regex = Regex::new(r"^([^:]+):[ \t]*([^ \t]+.*)$").unwrap(); } for line in ibash_stdout!("virsh domblklist {vmname}")?.lines() { if let Some(cap) = SOURCE_FILE.captures(&line) { let s = cap.get(1).unwrap().as_str(); files_to_domains.insert(PathBuf::from(s), short_vmname.to_owned()); } } let mut vm = VM { attrs: Default::default(), name: short_vmname.to_owned(), }; for line in ibash_stdout!("virsh dominfo {vmname}")?.lines() { if let Some(cap) = DOM_PROP.captures(&line) { let key = cap.get(1).unwrap().as_str(); let value = cap.get(2).unwrap().as_str(); vm.attrs.insert(key.to_owned(), value.to_owned()); } } pool.vms.insert(short_vmname.to_owned(), vm); Ok(()) } } pub fn command(command: CommandMode) -> Result<(), Error> { let opt = CommandArgs { config: None, command, }; match VMess::command(&opt) { Err(err) => return Err(err), Ok(mut vmess) => { vmess.run()?; } } Ok(()) }
31.304711
104
0.480666
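The vmess source above stores a snapshot tree in flat file names: the dotted name base.child.grandchild maps to base%child%grandchild.qcow2 (see PARSE_QCOW2, Snapshot::join, and image_full_basename). A self-contained sketch of that round-trip, with illustrative names not taken from the file:

/// Dotted snapshot path ("base.a.b") -> on-disk qcow2 file name.
fn to_file_name(full_name: &str) -> String {
    format!("{}.qcow2", full_name.replace('.', "%"))
}

/// On-disk file name -> dotted path, or None for non-qcow2 files.
fn from_file_name(file_name: &str) -> Option<String> {
    let stem = file_name.strip_suffix(".qcow2")?;
    Some(stem.replace('%', "."))
}

fn main() {
    assert_eq!(to_file_name("base.a.b"), "base%a%b.qcow2");
    assert_eq!(from_file_name("base%a%b.qcow2").as_deref(), Some("base.a.b"));
}

Encoding the hierarchy in the name keeps the pool directory flat while still letting get_by_name resolve a dotted path without any extra metadata files.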
ccec4bd1432695c81bc47569036d1e3536fbbfbb
25054
// This implements the dead-code warning pass. It follows middle::reachable // closely. The idea is that all reachable symbols are live, codes called // from live codes are live, and everything else is dead. use crate::hir::Node; use crate::hir::{self, PatKind, TyKind}; use crate::hir::intravisit::{self, Visitor, NestedVisitorMap}; use crate::hir::itemlikevisit::ItemLikeVisitor; use crate::hir::def::Def; use crate::hir::CodegenFnAttrFlags; use crate::hir::def_id::{DefId, LOCAL_CRATE}; use crate::lint; use crate::middle::privacy; use crate::ty::{self, DefIdTree, TyCtxt}; use crate::util::nodemap::FxHashSet; use rustc_data_structures::fx::FxHashMap; use syntax::{ast, source_map}; use syntax::attr; use syntax_pos; // Any local node that may call something in its body block should be // explored. For example, if it's a live Node::Item that is a // function, then we should explore its block to check for codes that // may need to be marked as live. fn should_explore<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, hir_id: hir::HirId) -> bool { match tcx.hir().find_by_hir_id(hir_id) { Some(Node::Item(..)) | Some(Node::ImplItem(..)) | Some(Node::ForeignItem(..)) | Some(Node::TraitItem(..)) => true, _ => false } } struct MarkSymbolVisitor<'a, 'tcx: 'a> { worklist: Vec<hir::HirId>, tcx: TyCtxt<'a, 'tcx, 'tcx>, tables: &'a ty::TypeckTables<'tcx>, live_symbols: FxHashSet<hir::HirId>, repr_has_repr_c: bool, in_pat: bool, inherited_pub_visibility: bool, ignore_variant_stack: Vec<DefId>, // maps from tuple struct constructors to tuple struct items struct_constructors: FxHashMap<hir::HirId, hir::HirId>, } impl<'a, 'tcx> MarkSymbolVisitor<'a, 'tcx> { fn check_def_id(&mut self, def_id: DefId) { if let Some(hir_id) = self.tcx.hir().as_local_hir_id(def_id) { if should_explore(self.tcx, hir_id) || self.struct_constructors.contains_key(&hir_id) { self.worklist.push(hir_id); } self.live_symbols.insert(hir_id); } } fn insert_def_id(&mut self, def_id: DefId) { if let Some(hir_id) = self.tcx.hir().as_local_hir_id(def_id) { debug_assert!(!should_explore(self.tcx, hir_id)); self.live_symbols.insert(hir_id); } } fn handle_definition(&mut self, def: Def) { match def { Def::Const(_) | Def::AssociatedConst(..) | Def::TyAlias(_) => { self.check_def_id(def.def_id()); } _ if self.in_pat => (), Def::PrimTy(..) | Def::SelfTy(..) | Def::SelfCtor(..) | Def::Local(..) | Def::Upvar(..) => {} Def::Variant(variant_id) | Def::VariantCtor(variant_id, ..) => { if let Some(enum_id) = self.tcx.parent(variant_id) { self.check_def_id(enum_id); } if !self.ignore_variant_stack.contains(&variant_id) { self.check_def_id(variant_id); } } _ => { self.check_def_id(def.def_id()); } } } fn lookup_and_handle_method(&mut self, id: hir::HirId) { if let Some(def) = self.tables.type_dependent_defs().get(id) { self.check_def_id(def.def_id()); } else { bug!("no type-dependent def for method"); } } fn handle_field_access(&mut self, lhs: &hir::Expr, hir_id: hir::HirId) { match self.tables.expr_ty_adjusted(lhs).sty { ty::Adt(def, _) => { let index = self.tcx.field_index(hir_id, self.tables); self.insert_def_id(def.non_enum_variant().fields[index].did); } ty::Tuple(..) 
=> {} _ => span_bug!(lhs.span, "named field access on non-ADT"), } } fn handle_field_pattern_match(&mut self, lhs: &hir::Pat, def: Def, pats: &[source_map::Spanned<hir::FieldPat>]) { let variant = match self.tables.node_type(lhs.hir_id).sty { ty::Adt(adt, _) => adt.variant_of_def(def), _ => span_bug!(lhs.span, "non-ADT in struct pattern") }; for pat in pats { if let PatKind::Wild = pat.node.pat.node { continue; } let index = self.tcx.field_index(pat.node.hir_id, self.tables); self.insert_def_id(variant.fields[index].did); } } fn mark_live_symbols(&mut self) { let mut scanned = FxHashSet::default(); while let Some(id) = self.worklist.pop() { if !scanned.insert(id) { continue } // in the case of tuple struct constructors we want to check the item, not the generated // tuple struct constructor function let id = self.struct_constructors.get(&id).cloned().unwrap_or(id); if let Some(node) = self.tcx.hir().find_by_hir_id(id) { self.live_symbols.insert(id); self.visit_node(node); } } } fn visit_node(&mut self, node: Node<'tcx>) { let had_repr_c = self.repr_has_repr_c; self.repr_has_repr_c = false; let had_inherited_pub_visibility = self.inherited_pub_visibility; self.inherited_pub_visibility = false; match node { Node::Item(item) => { match item.node { hir::ItemKind::Struct(..) | hir::ItemKind::Union(..) => { let def_id = self.tcx.hir().local_def_id_from_hir_id(item.hir_id); let def = self.tcx.adt_def(def_id); self.repr_has_repr_c = def.repr.c(); intravisit::walk_item(self, &item); } hir::ItemKind::Enum(..) => { self.inherited_pub_visibility = item.vis.node.is_pub(); intravisit::walk_item(self, &item); } hir::ItemKind::Fn(..) | hir::ItemKind::Ty(..) | hir::ItemKind::Static(..) | hir::ItemKind::Existential(..) | hir::ItemKind::Const(..) => { intravisit::walk_item(self, &item); } _ => () } } Node::TraitItem(trait_item) => { intravisit::walk_trait_item(self, trait_item); } Node::ImplItem(impl_item) => { intravisit::walk_impl_item(self, impl_item); } Node::ForeignItem(foreign_item) => { intravisit::walk_foreign_item(self, &foreign_item); } _ => () } self.repr_has_repr_c = had_repr_c; self.inherited_pub_visibility = had_inherited_pub_visibility; } fn mark_as_used_if_union(&mut self, adt: &ty::AdtDef, fields: &hir::HirVec<hir::Field>) { if adt.is_union() && adt.non_enum_variant().fields.len() > 1 && adt.did.is_local() { for field in fields { let index = self.tcx.field_index(field.hir_id, self.tables); self.insert_def_id(adt.non_enum_variant().fields[index].did); } } } } impl<'a, 'tcx> Visitor<'tcx> for MarkSymbolVisitor<'a, 'tcx> { fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> { NestedVisitorMap::None } fn visit_nested_body(&mut self, body: hir::BodyId) { let old_tables = self.tables; self.tables = self.tcx.body_tables(body); let body = self.tcx.hir().body(body); self.visit_body(body); self.tables = old_tables; } fn visit_variant_data(&mut self, def: &'tcx hir::VariantData, _: ast::Name, _: &hir::Generics, _: hir::HirId, _: syntax_pos::Span) { let has_repr_c = self.repr_has_repr_c; let inherited_pub_visibility = self.inherited_pub_visibility; let live_fields = def.fields().iter().filter(|f| { has_repr_c || inherited_pub_visibility || f.vis.node.is_pub() }); self.live_symbols.extend(live_fields.map(|f| f.hir_id)); intravisit::walk_struct_def(self, def); } fn visit_expr(&mut self, expr: &'tcx hir::Expr) { match expr.node { hir::ExprKind::Path(ref qpath @ hir::QPath::TypeRelative(..)) => { let def = self.tables.qpath_def(qpath, expr.hir_id); self.handle_definition(def); } 
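// Method calls are not resolved through a path, so their target is
// looked up in the type-dependent def table instead.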
hir::ExprKind::MethodCall(..) => { self.lookup_and_handle_method(expr.hir_id); } hir::ExprKind::Field(ref lhs, ..) => { self.handle_field_access(&lhs, expr.hir_id); } hir::ExprKind::Struct(_, ref fields, _) => { if let ty::Adt(ref adt, _) = self.tables.expr_ty(expr).sty { self.mark_as_used_if_union(adt, fields); } } _ => () } intravisit::walk_expr(self, expr); } fn visit_arm(&mut self, arm: &'tcx hir::Arm) { if arm.pats.len() == 1 { let variants = arm.pats[0].necessary_variants(); // Inside the body, ignore constructions of variants // necessary for the pattern to match. Those construction sites // can't be reached unless the variant is constructed elsewhere. let len = self.ignore_variant_stack.len(); self.ignore_variant_stack.extend_from_slice(&variants); intravisit::walk_arm(self, arm); self.ignore_variant_stack.truncate(len); } else { intravisit::walk_arm(self, arm); } } fn visit_pat(&mut self, pat: &'tcx hir::Pat) { match pat.node { PatKind::Struct(hir::QPath::Resolved(_, ref path), ref fields, _) => { self.handle_field_pattern_match(pat, path.def, fields); } PatKind::Path(ref qpath @ hir::QPath::TypeRelative(..)) => { let def = self.tables.qpath_def(qpath, pat.hir_id); self.handle_definition(def); } _ => () } self.in_pat = true; intravisit::walk_pat(self, pat); self.in_pat = false; } fn visit_path(&mut self, path: &'tcx hir::Path, _: hir::HirId) { self.handle_definition(path.def); intravisit::walk_path(self, path); } fn visit_ty(&mut self, ty: &'tcx hir::Ty) { match ty.node { TyKind::Def(item_id, _) => { let item = self.tcx.hir().expect_item(item_id.id); intravisit::walk_item(self, item); } _ => () } intravisit::walk_ty(self, ty); } } fn has_allow_dead_code_or_lang_attr(tcx: TyCtxt<'_, '_, '_>, id: hir::HirId, attrs: &[ast::Attribute]) -> bool { if attr::contains_name(attrs, "lang") { return true; } // Stable attribute for #[lang = "panic_impl"] if attr::contains_name(attrs, "panic_handler") { return true; } // (To be) stable attribute for #[lang = "oom"] if attr::contains_name(attrs, "alloc_error_handler") { return true; } // Don't lint about global allocators if attr::contains_name(attrs, "global_allocator") { return true; } let def_id = tcx.hir().local_def_id_from_hir_id(id); let cg_attrs = tcx.codegen_fn_attrs(def_id); // #[used], #[no_mangle], #[export_name], etc also keeps the item alive // forcefully, e.g., for placing it in a specific section. if cg_attrs.contains_extern_indicator() || cg_attrs.flags.contains(CodegenFnAttrFlags::USED) { return true; } tcx.lint_level_at_node(lint::builtin::DEAD_CODE, id).0 == lint::Allow } // This visitor seeds items that // 1) We want to explicitly consider as live: // * Item annotated with #[allow(dead_code)] // - This is done so that if we want to suppress warnings for a // group of dead functions, we only have to annotate the "root". // For example, if both `f` and `g` are dead and `f` calls `g`, // then annotating `f` with `#[allow(dead_code)]` will suppress // warning for both `f` and `g`. // * Item annotated with #[lang=".."] // - This is because lang items are always callable from elsewhere. 
// or // 2) We are not sure to be live or not // * Implementation of a trait method struct LifeSeeder<'k, 'tcx: 'k> { worklist: Vec<hir::HirId>, krate: &'k hir::Crate, tcx: TyCtxt<'k, 'tcx, 'tcx>, // see `MarkSymbolVisitor::struct_constructors` struct_constructors: FxHashMap<hir::HirId, hir::HirId>, } impl<'v, 'k, 'tcx> ItemLikeVisitor<'v> for LifeSeeder<'k, 'tcx> { fn visit_item(&mut self, item: &hir::Item) { let allow_dead_code = has_allow_dead_code_or_lang_attr(self.tcx, item.hir_id, &item.attrs); if allow_dead_code { self.worklist.push(item.hir_id); } match item.node { hir::ItemKind::Enum(ref enum_def, _) if allow_dead_code => { self.worklist.extend(enum_def.variants.iter() .map(|variant| variant.node.data.hir_id())); } hir::ItemKind::Trait(.., ref trait_item_refs) => { for trait_item_ref in trait_item_refs { let trait_item = self.krate.trait_item(trait_item_ref.id); match trait_item.node { hir::TraitItemKind::Const(_, Some(_)) | hir::TraitItemKind::Method(_, hir::TraitMethod::Provided(_)) => { if has_allow_dead_code_or_lang_attr(self.tcx, trait_item.hir_id, &trait_item.attrs) { self.worklist.push(trait_item.hir_id); } } _ => {} } } } hir::ItemKind::Impl(.., ref opt_trait, _, ref impl_item_refs) => { for impl_item_ref in impl_item_refs { let impl_item = self.krate.impl_item(impl_item_ref.id); if opt_trait.is_some() || has_allow_dead_code_or_lang_attr(self.tcx, impl_item.hir_id, &impl_item.attrs) { self.worklist.push(impl_item_ref.id.hir_id); } } } hir::ItemKind::Struct(ref variant_data, _) => { self.struct_constructors.insert(variant_data.hir_id(), item.hir_id); } _ => () } } fn visit_trait_item(&mut self, _item: &hir::TraitItem) { // ignore: we are handling this in `visit_item` above } fn visit_impl_item(&mut self, _item: &hir::ImplItem) { // ignore: we are handling this in `visit_item` above } } fn create_and_seed_worklist<'a, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, access_levels: &privacy::AccessLevels, krate: &hir::Crate, ) -> (Vec<hir::HirId>, FxHashMap<hir::HirId, hir::HirId>) { let worklist = access_levels.map.iter().filter_map(|(&id, level)| { if level >= &privacy::AccessLevel::Reachable { Some(id) } else { None } }).chain( // Seed entry point tcx.entry_fn(LOCAL_CRATE).map(|(def_id, _)| tcx.hir().as_local_hir_id(def_id).unwrap()) ).collect::<Vec<_>>(); // Seed implemented trait items let mut life_seeder = LifeSeeder { worklist, krate, tcx, struct_constructors: Default::default(), }; krate.visit_all_item_likes(&mut life_seeder); (life_seeder.worklist, life_seeder.struct_constructors) } fn find_live<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, access_levels: &privacy::AccessLevels, krate: &hir::Crate) -> FxHashSet<hir::HirId> { let (worklist, struct_constructors) = create_and_seed_worklist(tcx, access_levels, krate); let mut symbol_visitor = MarkSymbolVisitor { worklist, tcx, tables: &ty::TypeckTables::empty(None), live_symbols: Default::default(), repr_has_repr_c: false, in_pat: false, inherited_pub_visibility: false, ignore_variant_stack: vec![], struct_constructors, }; symbol_visitor.mark_live_symbols(); symbol_visitor.live_symbols } struct DeadVisitor<'a, 'tcx: 'a> { tcx: TyCtxt<'a, 'tcx, 'tcx>, live_symbols: FxHashSet<hir::HirId>, } impl<'a, 'tcx> DeadVisitor<'a, 'tcx> { fn should_warn_about_item(&mut self, item: &hir::Item) -> bool { let should_warn = match item.node { hir::ItemKind::Static(..) | hir::ItemKind::Const(..) | hir::ItemKind::Fn(..) | hir::ItemKind::Ty(..) | hir::ItemKind::Enum(..) | hir::ItemKind::Struct(..) | hir::ItemKind::Union(..) 
=> true, _ => false }; should_warn && !self.symbol_is_live(item.hir_id) } fn should_warn_about_field(&mut self, field: &hir::StructField) -> bool { let field_type = self.tcx.type_of(self.tcx.hir().local_def_id_from_hir_id(field.hir_id)); !field.is_positional() && !self.symbol_is_live(field.hir_id) && !field_type.is_phantom_data() && !has_allow_dead_code_or_lang_attr(self.tcx, field.hir_id, &field.attrs) } fn should_warn_about_variant(&mut self, variant: &hir::VariantKind) -> bool { !self.symbol_is_live(variant.data.hir_id()) && !has_allow_dead_code_or_lang_attr(self.tcx, variant.data.hir_id(), &variant.attrs) } fn should_warn_about_foreign_item(&mut self, fi: &hir::ForeignItem) -> bool { !self.symbol_is_live(fi.hir_id) && !has_allow_dead_code_or_lang_attr(self.tcx, fi.hir_id, &fi.attrs) } // id := HIR id of an item's definition. fn symbol_is_live( &mut self, id: hir::HirId, ) -> bool { if self.live_symbols.contains(&id) { return true; } // If it's a type whose items are live, then it's live, too. // This is done to handle the case where, for example, the static // method of a private type is used, but the type itself is never // called directly. let def_id = self.tcx.hir().local_def_id_from_hir_id(id); let inherent_impls = self.tcx.inherent_impls(def_id); for &impl_did in inherent_impls.iter() { for &item_did in &self.tcx.associated_item_def_ids(impl_did)[..] { if let Some(item_hir_id) = self.tcx.hir().as_local_hir_id(item_did) { if self.live_symbols.contains(&item_hir_id) { return true; } } } } false } fn warn_dead_code(&mut self, id: hir::HirId, span: syntax_pos::Span, name: ast::Name, node_type: &str, participle: &str) { if !name.as_str().starts_with("_") { self.tcx .lint_hir(lint::builtin::DEAD_CODE, id, span, &format!("{} is never {}: `{}`", node_type, participle, name)); } } } impl<'a, 'tcx> Visitor<'tcx> for DeadVisitor<'a, 'tcx> { /// Walk nested items in place so that we don't report dead-code /// on inner functions when the outer function is already getting /// an error. We could do this also by checking the parents, but /// this is how the code is setup and it seems harmless enough. fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> { NestedVisitorMap::All(&self.tcx.hir()) } fn visit_item(&mut self, item: &'tcx hir::Item) { if self.should_warn_about_item(item) { // For items that have a definition with a signature followed by a // block, point only at the signature. let span = match item.node { hir::ItemKind::Fn(..) | hir::ItemKind::Mod(..) | hir::ItemKind::Enum(..) | hir::ItemKind::Struct(..) | hir::ItemKind::Union(..) | hir::ItemKind::Trait(..) | hir::ItemKind::Impl(..) => self.tcx.sess.source_map().def_span(item.span), _ => item.span, }; let participle = match item.node { hir::ItemKind::Struct(..) 
=> "constructed", // Issue #52325 _ => "used" }; self.warn_dead_code( item.hir_id, span, item.ident.name, item.node.descriptive_variant(), participle, ); } else { // Only continue if we didn't warn intravisit::walk_item(self, item); } } fn visit_variant(&mut self, variant: &'tcx hir::Variant, g: &'tcx hir::Generics, id: hir::HirId) { if self.should_warn_about_variant(&variant.node) { self.warn_dead_code(variant.node.data.hir_id(), variant.span, variant.node.ident.name, "variant", "constructed"); } else { intravisit::walk_variant(self, variant, g, id); } } fn visit_foreign_item(&mut self, fi: &'tcx hir::ForeignItem) { if self.should_warn_about_foreign_item(fi) { self.warn_dead_code(fi.hir_id, fi.span, fi.ident.name, fi.node.descriptive_variant(), "used"); } intravisit::walk_foreign_item(self, fi); } fn visit_struct_field(&mut self, field: &'tcx hir::StructField) { if self.should_warn_about_field(&field) { self.warn_dead_code(field.hir_id, field.span, field.ident.name, "field", "used"); } intravisit::walk_struct_field(self, field); } fn visit_impl_item(&mut self, impl_item: &'tcx hir::ImplItem) { match impl_item.node { hir::ImplItemKind::Const(_, body_id) => { if !self.symbol_is_live(impl_item.hir_id) { self.warn_dead_code(impl_item.hir_id, impl_item.span, impl_item.ident.name, "associated const", "used"); } self.visit_nested_body(body_id) } hir::ImplItemKind::Method(_, body_id) => { if !self.symbol_is_live(impl_item.hir_id) { let span = self.tcx.sess.source_map().def_span(impl_item.span); self.warn_dead_code(impl_item.hir_id, span, impl_item.ident.name, "method", "used"); } self.visit_nested_body(body_id) } hir::ImplItemKind::Existential(..) | hir::ImplItemKind::Type(..) => {} } } // Overwrite so that we don't warn the trait item itself. fn visit_trait_item(&mut self, trait_item: &'tcx hir::TraitItem) { match trait_item.node { hir::TraitItemKind::Const(_, Some(body_id)) | hir::TraitItemKind::Method(_, hir::TraitMethod::Provided(body_id)) => { self.visit_nested_body(body_id) } hir::TraitItemKind::Const(_, None) | hir::TraitItemKind::Method(_, hir::TraitMethod::Required(_)) | hir::TraitItemKind::Type(..) => {} } } } pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { let access_levels = &tcx.privacy_access_levels(LOCAL_CRATE); let krate = tcx.hir().krate(); let live_symbols = find_live(tcx, access_levels, krate); let mut visitor = DeadVisitor { tcx, live_symbols, }; intravisit::walk_crate(&mut visitor, krate); }
38.133942
100
0.532849
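The dead-code pass above is a worklist fixed point: seed with everything externally reachable (plus #[allow(dead_code)] items and lang items), pop an item, mark it live, and push whatever its body references; anything unmarked afterwards is reported. Reduced to a plain graph, the mark phase looks roughly like the sketch below (the u32 ids and explicit edge map are illustrative stand-ins for HirId and the HIR walk, not the pass's actual representation):

use std::collections::{HashMap, HashSet};

/// Marks every node reachable from `seeds`; `edges` maps an item to the
/// items its body refers to.
fn mark_live(seeds: &[u32], edges: &HashMap<u32, Vec<u32>>) -> HashSet<u32> {
    let mut live = HashSet::new();
    let mut worklist: Vec<u32> = seeds.to_vec();
    while let Some(id) = worklist.pop() {
        // `insert` returns false if the node was already marked, which is
        // what guarantees termination on cyclic reference graphs.
        if !live.insert(id) {
            continue;
        }
        if let Some(succs) = edges.get(&id) {
            worklist.extend(succs.iter().copied());
        }
    }
    live
}

In the real pass the "edges" are discovered lazily by MarkSymbolVisitor walking each item's HIR, and DeadVisitor then warns on symbols absent from the resulting live set.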
bb3fa09960e0369fd1150bad3038abfc9ff2a6eb
27979
use crate::arena::{Arena, Handle};

use thiserror::Error;

#[derive(Debug, PartialEq)]
#[cfg_attr(feature = "serialize", derive(serde::Serialize))]
#[cfg_attr(feature = "deserialize", derive(serde::Deserialize))]
pub enum TypeResolution {
    Handle(Handle<crate::Type>),
    Value(crate::TypeInner),
}

impl TypeResolution {
    pub fn handle(&self) -> Option<Handle<crate::Type>> {
        match *self {
            Self::Handle(handle) => Some(handle),
            Self::Value(_) => None,
        }
    }

    pub fn inner_with<'a>(&'a self, arena: &'a Arena<crate::Type>) -> &'a crate::TypeInner {
        match *self {
            Self::Handle(handle) => &arena[handle].inner,
            Self::Value(ref inner) => inner,
        }
    }
}

// Clone is only implemented for numeric variants of `TypeInner`.
impl Clone for TypeResolution {
    fn clone(&self) -> Self {
        use crate::TypeInner as Ti;
        match *self {
            Self::Handle(handle) => Self::Handle(handle),
            Self::Value(ref v) => Self::Value(match *v {
                Ti::Scalar { kind, width } => Ti::Scalar { kind, width },
                Ti::Vector { size, kind, width } => Ti::Vector { size, kind, width },
                Ti::Matrix {
                    rows,
                    columns,
                    width,
                } => Ti::Matrix {
                    rows,
                    columns,
                    width,
                },
                Ti::Pointer { base, class } => Ti::Pointer { base, class },
                Ti::ValuePointer {
                    size,
                    kind,
                    width,
                    class,
                } => Ti::ValuePointer {
                    size,
                    kind,
                    width,
                    class,
                },
                _ => unreachable!("Unexpected clone type: {:?}", v),
            }),
        }
    }
}

impl crate::ConstantInner {
    pub fn resolve_type(&self) -> TypeResolution {
        match *self {
            Self::Scalar { width, ref value } => TypeResolution::Value(crate::TypeInner::Scalar {
                kind: value.scalar_kind(),
                width,
            }),
            Self::Composite { ty, components: _ } => TypeResolution::Handle(ty),
        }
    }
}

#[derive(Clone, Debug, Error, PartialEq)]
pub enum ResolveError {
    #[error("Index {index} is out of bounds for expression {expr:?}")]
    OutOfBoundsIndex {
        expr: Handle<crate::Expression>,
        index: u32,
    },
    #[error("Invalid access into expression {expr:?}, indexed: {indexed}")]
    InvalidAccess {
        expr: Handle<crate::Expression>,
        indexed: bool,
    },
    #[error("Invalid sub-access into type {ty:?}, indexed: {indexed}")]
    InvalidSubAccess {
        ty: Handle<crate::Type>,
        indexed: bool,
    },
    #[error("Invalid scalar {0:?}")]
    InvalidScalar(Handle<crate::Expression>),
    #[error("Invalid vector {0:?}")]
    InvalidVector(Handle<crate::Expression>),
    #[error("Invalid pointer {0:?}")]
    InvalidPointer(Handle<crate::Expression>),
    #[error("Invalid image {0:?}")]
    InvalidImage(Handle<crate::Expression>),
    #[error("Function {name} not defined")]
    FunctionNotDefined { name: String },
    #[error("Function without return type")]
    FunctionReturnsVoid,
    #[error("Type is not found in the given immutable arena")]
    TypeNotFound,
    #[error("Incompatible operands: {0}")]
    IncompatibleOperands(String),
}

pub struct ResolveContext<'a> {
    pub constants: &'a Arena<crate::Constant>,
    pub types: &'a Arena<crate::Type>,
    pub global_vars: &'a Arena<crate::GlobalVariable>,
    pub local_vars: &'a Arena<crate::LocalVariable>,
    pub functions: &'a Arena<crate::Function>,
    pub arguments: &'a [crate::FunctionArgument],
}

impl<'a> ResolveContext<'a> {
    pub fn resolve(
        &self,
        expr: &crate::Expression,
        past: impl Fn(Handle<crate::Expression>) -> &'a TypeResolution,
    ) -> Result<TypeResolution, ResolveError> {
        use crate::TypeInner as Ti;
        let types = self.types;
        Ok(match *expr {
            crate::Expression::Access { base, .. } => match *past(base).inner_with(types) {
                // Arrays and matrices can only be indexed dynamically behind a
                // pointer, but that's a validation error, not a type error, so
                // go ahead provide a type here.
                Ti::Array { base, .. } => TypeResolution::Handle(base),
                Ti::Matrix { rows, width, .. } => TypeResolution::Value(Ti::Vector {
                    size: rows,
                    kind: crate::ScalarKind::Float,
                    width,
                }),
                Ti::Vector {
                    size: _,
                    kind,
                    width,
                } => TypeResolution::Value(Ti::Scalar { kind, width }),
                Ti::ValuePointer {
                    size: Some(_),
                    kind,
                    width,
                    class,
                } => TypeResolution::Value(Ti::ValuePointer {
                    size: None,
                    kind,
                    width,
                    class,
                }),
                Ti::Pointer { base, class } => {
                    TypeResolution::Value(match types[base].inner {
                        Ti::Array { base, .. } => Ti::Pointer { base, class },
                        Ti::Vector {
                            size: _,
                            kind,
                            width,
                        } => Ti::ValuePointer {
                            size: None,
                            kind,
                            width,
                            class,
                        },
                        // Matrices are only dynamically indexed behind a pointer
                        Ti::Matrix {
                            columns: _,
                            rows,
                            width,
                        } => Ti::ValuePointer {
                            kind: crate::ScalarKind::Float,
                            size: Some(rows),
                            width,
                            class,
                        },
                        ref other => {
                            log::error!("Access sub-type {:?}", other);
                            return Err(ResolveError::InvalidSubAccess {
                                ty: base,
                                indexed: false,
                            });
                        }
                    })
                }
                ref other => {
                    log::error!("Access type {:?}", other);
                    return Err(ResolveError::InvalidAccess {
                        expr: base,
                        indexed: false,
                    });
                }
            },
            crate::Expression::AccessIndex { base, index } => match *past(base).inner_with(types) {
                Ti::Vector { size, kind, width } => {
                    if index >= size as u32 {
                        return Err(ResolveError::OutOfBoundsIndex { expr: base, index });
                    }
                    TypeResolution::Value(Ti::Scalar { kind, width })
                }
                Ti::Matrix {
                    columns,
                    rows,
                    width,
                } => {
                    if index >= columns as u32 {
                        return Err(ResolveError::OutOfBoundsIndex { expr: base, index });
                    }
                    TypeResolution::Value(crate::TypeInner::Vector {
                        size: rows,
                        kind: crate::ScalarKind::Float,
                        width,
                    })
                }
                Ti::Array { base, .. } => TypeResolution::Handle(base),
                Ti::Struct { ref members, .. } => {
                    let member = members
                        .get(index as usize)
                        .ok_or(ResolveError::OutOfBoundsIndex { expr: base, index })?;
                    TypeResolution::Handle(member.ty)
                }
                Ti::ValuePointer {
                    size: Some(size),
                    kind,
                    width,
                    class,
                } => {
                    if index >= size as u32 {
                        return Err(ResolveError::OutOfBoundsIndex { expr: base, index });
                    }
                    TypeResolution::Value(Ti::ValuePointer {
                        size: None,
                        kind,
                        width,
                        class,
                    })
                }
                Ti::Pointer {
                    base: ty_base,
                    class,
                } => TypeResolution::Value(match types[ty_base].inner {
                    Ti::Array { base, .. } => Ti::Pointer { base, class },
                    Ti::Vector { size, kind, width } => {
                        if index >= size as u32 {
                            return Err(ResolveError::OutOfBoundsIndex { expr: base, index });
                        }
                        Ti::ValuePointer {
                            size: None,
                            kind,
                            width,
                            class,
                        }
                    }
                    Ti::Matrix {
                        rows,
                        columns,
                        width,
                    } => {
                        if index >= columns as u32 {
                            return Err(ResolveError::OutOfBoundsIndex { expr: base, index });
                        }
                        Ti::ValuePointer {
                            size: Some(rows),
                            kind: crate::ScalarKind::Float,
                            width,
                            class,
                        }
                    }
                    Ti::Struct { ref members, .. } => {
                        let member = members
                            .get(index as usize)
                            .ok_or(ResolveError::OutOfBoundsIndex { expr: base, index })?;
                        Ti::Pointer {
                            base: member.ty,
                            class,
                        }
                    }
                    ref other => {
                        log::error!("Access index sub-type {:?}", other);
                        return Err(ResolveError::InvalidSubAccess {
                            ty: ty_base,
                            indexed: true,
                        });
                    }
                }),
                ref other => {
                    log::error!("Access index type {:?}", other);
                    return Err(ResolveError::InvalidAccess {
                        expr: base,
                        indexed: true,
                    });
                }
            },
            crate::Expression::Constant(h) => match self.constants[h].inner {
                crate::ConstantInner::Scalar { width, ref value } => {
                    TypeResolution::Value(Ti::Scalar {
                        kind: value.scalar_kind(),
                        width,
                    })
                }
                crate::ConstantInner::Composite { ty, components: _ } => {
                    TypeResolution::Handle(ty)
                }
            },
            crate::Expression::Splat { size, value } => match *past(value).inner_with(types) {
                Ti::Scalar { kind, width } => {
                    TypeResolution::Value(Ti::Vector { size, kind, width })
                }
                ref other => {
                    log::error!("Scalar type {:?}", other);
                    return Err(ResolveError::InvalidScalar(value));
                }
            },
            crate::Expression::Swizzle {
                size,
                vector,
                pattern: _,
            } => match *past(vector).inner_with(types) {
                Ti::Vector {
                    size: _,
                    kind,
                    width,
                } => TypeResolution::Value(Ti::Vector { size, kind, width }),
                ref other => {
                    log::error!("Vector type {:?}", other);
                    return Err(ResolveError::InvalidVector(vector));
                }
            },
            crate::Expression::Compose { ty, .. } => TypeResolution::Handle(ty),
            crate::Expression::FunctionArgument(index) => {
                TypeResolution::Handle(self.arguments[index as usize].ty)
            }
            crate::Expression::GlobalVariable(h) => {
                let var = &self.global_vars[h];
                if var.class == crate::StorageClass::Handle {
                    TypeResolution::Handle(var.ty)
                } else {
                    TypeResolution::Value(Ti::Pointer {
                        base: var.ty,
                        class: var.class,
                    })
                }
            }
            crate::Expression::LocalVariable(h) => {
                let var = &self.local_vars[h];
                TypeResolution::Value(Ti::Pointer {
                    base: var.ty,
                    class: crate::StorageClass::Function,
                })
            }
            crate::Expression::Load { pointer } => match *past(pointer).inner_with(types) {
                Ti::Pointer { base, class: _ } => TypeResolution::Handle(base),
                Ti::ValuePointer {
                    size,
                    kind,
                    width,
                    class: _,
                } => TypeResolution::Value(match size {
                    Some(size) => Ti::Vector { size, kind, width },
                    None => Ti::Scalar { kind, width },
                }),
                ref other => {
                    log::error!("Pointer type {:?}", other);
                    return Err(ResolveError::InvalidPointer(pointer));
                }
            },
            crate::Expression::ImageSample { image, .. }
            | crate::Expression::ImageLoad { image, .. } => match *past(image).inner_with(types) {
                Ti::Image { class, .. } => TypeResolution::Value(match class {
                    crate::ImageClass::Depth { multi: _ } => Ti::Scalar {
                        kind: crate::ScalarKind::Float,
                        width: 4,
                    },
                    crate::ImageClass::Sampled { kind, multi: _ } => Ti::Vector {
                        kind,
                        width: 4,
                        size: crate::VectorSize::Quad,
                    },
                    crate::ImageClass::Storage(format) => Ti::Vector {
                        kind: format.into(),
                        width: 4,
                        size: crate::VectorSize::Quad,
                    },
                }),
                ref other => {
                    log::error!("Image type {:?}", other);
                    return Err(ResolveError::InvalidImage(image));
                }
            },
            crate::Expression::ImageQuery { image, query } => TypeResolution::Value(match query {
                crate::ImageQuery::Size { level: _ } => match *past(image).inner_with(types) {
                    Ti::Image { dim, .. } => match dim {
                        crate::ImageDimension::D1 => Ti::Scalar {
                            kind: crate::ScalarKind::Sint,
                            width: 4,
                        },
                        crate::ImageDimension::D2 | crate::ImageDimension::Cube => Ti::Vector {
                            size: crate::VectorSize::Bi,
                            kind: crate::ScalarKind::Sint,
                            width: 4,
                        },
                        crate::ImageDimension::D3 => Ti::Vector {
                            size: crate::VectorSize::Tri,
                            kind: crate::ScalarKind::Sint,
                            width: 4,
                        },
                    },
                    ref other => {
                        log::error!("Image type {:?}", other);
                        return Err(ResolveError::InvalidImage(image));
                    }
                },
                crate::ImageQuery::NumLevels
                | crate::ImageQuery::NumLayers
                | crate::ImageQuery::NumSamples => Ti::Scalar {
                    kind: crate::ScalarKind::Sint,
                    width: 4,
                },
            }),
            crate::Expression::Unary { expr, .. } => past(expr).clone(),
            crate::Expression::Binary { op, left, right } => match op {
                crate::BinaryOperator::Add
                | crate::BinaryOperator::Subtract
                | crate::BinaryOperator::Divide
                | crate::BinaryOperator::Modulo => past(left).clone(),
                crate::BinaryOperator::Multiply => {
                    let (res_left, res_right) = (past(left), past(right));
                    match (res_left.inner_with(types), res_right.inner_with(types)) {
                        (
                            &Ti::Matrix {
                                columns: _,
                                rows,
                                width,
                            },
                            &Ti::Matrix { columns, .. },
                        ) => TypeResolution::Value(Ti::Matrix {
                            columns,
                            rows,
                            width,
                        }),
                        (
                            &Ti::Matrix {
                                columns: _,
                                rows,
                                width,
                            },
                            &Ti::Vector { .. },
                        ) => TypeResolution::Value(Ti::Vector {
                            size: rows,
                            kind: crate::ScalarKind::Float,
                            width,
                        }),
                        (
                            &Ti::Vector { .. },
                            &Ti::Matrix {
                                columns,
                                rows: _,
                                width,
                            },
                        ) => TypeResolution::Value(Ti::Vector {
                            size: columns,
                            kind: crate::ScalarKind::Float,
                            width,
                        }),
                        (&Ti::Scalar { .. }, _) => res_right.clone(),
                        (_, &Ti::Scalar { .. }) => res_left.clone(),
                        (&Ti::Vector { .. }, &Ti::Vector { .. }) => res_left.clone(),
                        (tl, tr) => {
                            return Err(ResolveError::IncompatibleOperands(format!(
                                "{:?} * {:?}",
                                tl, tr
                            )))
                        }
                    }
                }
                crate::BinaryOperator::Equal
                | crate::BinaryOperator::NotEqual
                | crate::BinaryOperator::Less
                | crate::BinaryOperator::LessEqual
                | crate::BinaryOperator::Greater
                | crate::BinaryOperator::GreaterEqual
                | crate::BinaryOperator::LogicalAnd
                | crate::BinaryOperator::LogicalOr => {
                    let kind = crate::ScalarKind::Bool;
                    let width = crate::BOOL_WIDTH;
                    let inner = match *past(left).inner_with(types) {
                        Ti::Scalar { .. } => Ti::Scalar { kind, width },
                        Ti::Vector { size, .. } => Ti::Vector { size, kind, width },
                        ref other => {
                            return Err(ResolveError::IncompatibleOperands(format!(
                                "{:?}({:?}, _)",
                                op, other
                            )))
                        }
                    };
                    TypeResolution::Value(inner)
                }
                crate::BinaryOperator::And
                | crate::BinaryOperator::ExclusiveOr
                | crate::BinaryOperator::InclusiveOr
                | crate::BinaryOperator::ShiftLeft
                | crate::BinaryOperator::ShiftRight => past(left).clone(),
            },
            crate::Expression::Select { accept, .. } => past(accept).clone(),
            crate::Expression::Derivative { axis: _, expr } => past(expr).clone(),
            crate::Expression::Relational { .. } => TypeResolution::Value(Ti::Scalar {
                kind: crate::ScalarKind::Bool,
                width: crate::BOOL_WIDTH,
            }),
            crate::Expression::Math {
                fun,
                arg,
                arg1,
                arg2: _,
            } => {
                use crate::MathFunction as Mf;
                let res_arg = past(arg);
                match fun {
                    // comparison
                    Mf::Abs |
                    Mf::Min |
                    Mf::Max |
                    Mf::Clamp |
                    // trigonometry
                    Mf::Cos |
                    Mf::Cosh |
                    Mf::Sin |
                    Mf::Sinh |
                    Mf::Tan |
                    Mf::Tanh |
                    Mf::Acos |
                    Mf::Asin |
                    Mf::Atan |
                    Mf::Atan2 |
                    // decomposition
                    Mf::Ceil |
                    Mf::Floor |
                    Mf::Round |
                    Mf::Fract |
                    Mf::Trunc |
                    Mf::Modf |
                    Mf::Frexp |
                    Mf::Ldexp |
                    // exponent
                    Mf::Exp |
                    Mf::Exp2 |
                    Mf::Log |
                    Mf::Log2 |
                    Mf::Pow => res_arg.clone(),
                    // geometry
                    Mf::Dot => match *res_arg.inner_with(types) {
                        Ti::Vector {
                            kind,
                            size: _,
                            width,
                        } => TypeResolution::Value(Ti::Scalar { kind, width }),
                        ref other => return Err(ResolveError::IncompatibleOperands(
                            format!("{:?}({:?}, _)", fun, other)
                        )),
                    },
                    Mf::Outer => {
                        let arg1 = arg1.ok_or_else(|| ResolveError::IncompatibleOperands(
                            format!("{:?}(_, None)", fun)
                        ))?;
                        match (res_arg.inner_with(types), past(arg1).inner_with(types)) {
                            (
                                &Ti::Vector { kind: _, size: columns, width },
                                &Ti::Vector { size: rows, .. },
                            ) => TypeResolution::Value(Ti::Matrix { columns, rows, width }),
                            (left, right) => return Err(ResolveError::IncompatibleOperands(
                                format!("{:?}({:?}, {:?})", fun, left, right)
                            )),
                        }
                    },
                    Mf::Cross => res_arg.clone(),
                    Mf::Distance |
                    Mf::Length => match *res_arg.inner_with(types) {
                        Ti::Scalar { width, kind } |
                        Ti::Vector { width, kind, size: _ } =>
                            TypeResolution::Value(Ti::Scalar { kind, width }),
                        ref other => return Err(ResolveError::IncompatibleOperands(
                            format!("{:?}({:?})", fun, other)
                        )),
                    },
                    Mf::Normalize |
                    Mf::FaceForward |
                    Mf::Reflect |
                    Mf::Refract => res_arg.clone(),
                    // computational
                    Mf::Sign |
                    Mf::Fma |
                    Mf::Mix |
                    Mf::Step |
                    Mf::SmoothStep |
                    Mf::Sqrt |
                    Mf::InverseSqrt => res_arg.clone(),
                    Mf::Transpose => match *res_arg.inner_with(types) {
                        Ti::Matrix {
                            columns,
                            rows,
                            width,
                        } => TypeResolution::Value(Ti::Matrix {
                            columns: rows,
                            rows: columns,
                            width,
                        }),
                        ref other => return Err(ResolveError::IncompatibleOperands(
                            format!("{:?}({:?})", fun, other)
                        )),
                    },
                    Mf::Inverse => match *res_arg.inner_with(types) {
                        Ti::Matrix {
                            columns,
                            rows,
                            width,
                        } if columns == rows => TypeResolution::Value(Ti::Matrix {
                            columns,
                            rows,
                            width,
                        }),
                        ref other => return Err(ResolveError::IncompatibleOperands(
                            format!("{:?}({:?})", fun, other)
                        )),
                    },
                    Mf::Determinant => match *res_arg.inner_with(types) {
                        Ti::Matrix { width, .. } => TypeResolution::Value(Ti::Scalar {
                            kind: crate::ScalarKind::Float,
                            width,
                        }),
                        ref other => return Err(ResolveError::IncompatibleOperands(
                            format!("{:?}({:?})", fun, other)
                        )),
                    },
                    // bits
                    Mf::CountOneBits |
                    Mf::ReverseBits => res_arg.clone(),
                }
            }
            crate::Expression::As {
                expr,
                kind,
                convert,
            } => match *past(expr).inner_with(types) {
                Ti::Scalar { kind: _, width } => TypeResolution::Value(Ti::Scalar {
                    kind,
                    width: convert.unwrap_or(width),
                }),
                Ti::Vector {
                    kind: _,
                    size,
                    width,
                } => TypeResolution::Value(Ti::Vector {
                    kind,
                    size,
                    width: convert.unwrap_or(width),
                }),
                ref other => {
                    return Err(ResolveError::IncompatibleOperands(format!(
                        "{:?} as {:?}",
                        other, kind
                    )))
                }
            },
            crate::Expression::Call(function) => {
                let result = self.functions[function]
                    .result
                    .as_ref()
                    .ok_or(ResolveError::FunctionReturnsVoid)?;
                TypeResolution::Handle(result.ty)
            }
            crate::Expression::ArrayLength(_) => TypeResolution::Value(Ti::Scalar {
                kind: crate::ScalarKind::Uint,
                width: 4,
            }),
        })
    }
}

#[test]
fn test_error_size() {
    use std::mem::size_of;
    assert_eq!(size_of::<ResolveError>(), 32);
}
40.964861
166
0.372744
22ba550c5f8ea18b8edf35169068696cd522b356
615
use aorura_emu::{Pty, Server};
use docopt::Docopt;
use failure::*;
use serde::*;
use std::env;
use std::path::PathBuf;

const USAGE: &'static str = "
Usage: aorura-emu <path>
       aorura-emu --help

Emulates AORURA LED device over a PTY symlinked to given path.
";

#[derive(Debug, Deserialize)]
struct Args {
    arg_path: PathBuf,
}

fn main() -> Fallible<()> {
    env_logger::init();

    let args: Args = Docopt::new(USAGE)?
        .argv(env::args())
        .deserialize()
        .unwrap_or_else(|e| e.exit());

    let mut pty = Pty::open(args.arg_path)?;
    Server::new().run(&mut pty.master, true)
}
19.21875
62
0.617886
5d3cc50557cde8445fe6b3c14c411275b23e5590
6,260
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use ast::{MetaItem, Expr};
use ast;
use codemap::Span;
use ext::base::{ExtCtxt, Annotatable};
use ext::build::AstBuilder;
use ext::deriving::generic::*;
use ext::deriving::generic::ty::*;
use parse::token::InternedString;
use ptr::P;

pub fn expand_deriving_from_primitive(cx: &mut ExtCtxt,
                                      span: Span,
                                      mitem: &MetaItem,
                                      item: &Annotatable,
                                      push: &mut FnMut(Annotatable)) {
    let inline = cx.meta_word(span, InternedString::new("inline"));
    let attrs = vec!(cx.attribute(span, inline));
    let trait_def = TraitDef {
        span: span,
        attributes: Vec::new(),
        path: path_std!(cx, core::num::FromPrimitive),
        additional_bounds: Vec::new(),
        generics: LifetimeBounds::empty(),
        is_unsafe: false,
        methods: vec!(
            MethodDef {
                name: "from_i64",
                generics: LifetimeBounds::empty(),
                explicit_self: None,
                args: vec!(Literal(path_local!(i64))),
                ret_ty: Literal(Path::new_(pathvec_std!(cx, core::option::Option),
                                           None,
                                           vec!(Box::new(Self_)),
                                           true)),
                // #[inline] liable to cause code-bloat
                attributes: attrs.clone(),
                is_unsafe: false,
                combine_substructure: combine_substructure(Box::new(|c, s, sub| {
                    cs_from("i64", c, s, sub)
                })),
            },
            MethodDef {
                name: "from_u64",
                generics: LifetimeBounds::empty(),
                explicit_self: None,
                args: vec!(Literal(path_local!(u64))),
                ret_ty: Literal(Path::new_(pathvec_std!(cx, core::option::Option),
                                           None,
                                           vec!(Box::new(Self_)),
                                           true)),
                // #[inline] liable to cause code-bloat
                attributes: attrs,
                is_unsafe: false,
                combine_substructure: combine_substructure(Box::new(|c, s, sub| {
                    cs_from("u64", c, s, sub)
                })),
            }
        ),
        associated_types: Vec::new(),
    };

    trait_def.expand(cx, mitem, item, push)
}

fn cs_from(name: &str, cx: &mut ExtCtxt, trait_span: Span, substr: &Substructure) -> P<Expr> {
    let n = match (substr.nonself_args.len(), substr.nonself_args.get(0)) {
        (1, Some(o_f)) => o_f,
        _ => cx.span_bug(trait_span,
                         "incorrect number of arguments in `derive(FromPrimitive)`")
    };

    match *substr.fields {
        StaticStruct(..) => {
            cx.span_err(trait_span, "`FromPrimitive` cannot be derived for structs");
            return cx.expr_fail(trait_span, InternedString::new(""));
        }
        StaticEnum(enum_def, _) => {
            if enum_def.variants.is_empty() {
                cx.span_err(trait_span,
                            "`FromPrimitive` cannot be derived for enums with no variants");
                return cx.expr_fail(trait_span, InternedString::new(""));
            }

            let mut arms = Vec::new();

            for variant in &enum_def.variants {
                match variant.node.kind {
                    ast::TupleVariantKind(ref args) => {
                        if !args.is_empty() {
                            cx.span_err(trait_span,
                                        "`FromPrimitive` cannot be derived for \
                                         enum variants with arguments");
                            return cx.expr_fail(trait_span, InternedString::new(""));
                        }
                        let span = variant.span;

                        // expr for `$n == $variant as $name`
                        let path = cx.path(span, vec![substr.type_ident, variant.node.name]);
                        let variant = cx.expr_path(path);
                        let ty = cx.ty_ident(span, cx.ident_of(name));
                        let cast = cx.expr_cast(span, variant.clone(), ty);
                        let guard = cx.expr_binary(span, ast::BiEq, n.clone(), cast);

                        // expr for `Some($variant)`
                        let body = cx.expr_some(span, variant);

                        // arm for `_ if $guard => $body`
                        let arm = ast::Arm {
                            attrs: vec!(),
                            pats: vec!(cx.pat_wild(span)),
                            guard: Some(guard),
                            body: body,
                        };

                        arms.push(arm);
                    }
                    ast::StructVariantKind(_) => {
                        cx.span_err(trait_span,
                                    "`FromPrimitive` cannot be derived for enums \
                                     with struct variants");
                        return cx.expr_fail(trait_span, InternedString::new(""));
                    }
                }
            }

            // arm for `_ => None`
            let arm = ast::Arm {
                attrs: vec!(),
                pats: vec!(cx.pat_wild(trait_span)),
                guard: None,
                body: cx.expr_none(trait_span),
            };
            arms.push(arm);

            cx.expr_match(trait_span, n.clone(), arms)
        }
        _ => cx.span_bug(trait_span, "expected StaticEnum in derive(FromPrimitive)")
    }
}
41.184211
96
0.4623
c1e1fe38ff7bc78bacc29c594a80eb544b478a40
299
pub mod components;
mod engine;
mod font;
mod handler;
mod render_settings;
mod shaders;
mod texture;

pub use components::*;
pub(crate) use engine::*;
pub use font::*;
pub use handler::*;
pub use render_settings::*;
pub(crate) use shaders::*;
pub(crate) use texture::*;
pub use texture::TextureKey;
18.6875
28
0.722408
269fc66a83c42b7f7dddd0a71ce09a41c90a4e4b
34
fn main() {
    println!("test")
}
11.333333
20
0.5
b90f2a7945eccf3001c59d18f4de56f7349114d3
5,677
// Copyright (c) SimpleStaking and Tezedge Contributors
// SPDX-License-Identifier: MIT

use failure::Fail;
use std::cmp::Ordering as Ord;
use std::collections::HashMap;
use std::sync::atomic::{AtomicI32, AtomicU64, Ordering};
use std::sync::{Arc, Condvar, Mutex, PoisonError};

use crate::persistent::database::RocksDbKeyValueSchema;
use crate::persistent::{DBError, KeyValueSchema, KeyValueStoreWithSchema};

/// Provides system-wide unique sequence generators backed by a permanent RocksDB storage.
/// This struct can be safely shared by multiple threads.
/// Because the sequence number is stored in an eventually consistent key-value store, it is not
/// safe to create multiple instances of this struct.
/// One exception is when only a single and unique generator is created in a call to the
/// `Sequences::generator()` function.
#[derive(Clone)]
pub struct Sequences {
    /// Persistent storage
    db: Arc<SequencerDatabase>,
    /// Represents how many sequence numbers will be pre-allocated in a single batch.
    seq_batch_size: u16,
    /// Map of all loaded generators
    generators: Arc<Mutex<HashMap<String, Arc<SequenceGenerator>>>>,
}

pub type SequenceNumber = u64;
pub type SequencerDatabase = dyn KeyValueStoreWithSchema<Sequences> + Sync + Send;

impl Sequences {
    pub fn new(db: Arc<SequencerDatabase>, seq_batch_size: u16) -> Self {
        assert_ne!(seq_batch_size, 0, "Batch size must be a positive number");

        Self {
            db,
            seq_batch_size,
            generators: Arc::new(Mutex::new(HashMap::new())),
        }
    }

    /// Retrieve a sequence generator by its unique name. If the generator does not exist, it is created.
    pub fn generator(&self, name: &str) -> Arc<SequenceGenerator> {
        let mut generators = self.generators.lock().unwrap();
        match generators.get(name) {
            Some(generator) => generator.clone(),
            None => {
                let generator = Arc::new(SequenceGenerator::new(
                    name.to_owned(),
                    self.seq_batch_size,
                    self.db.clone(),
                ));
                generators.insert(name.into(), generator.clone());
                generator
            }
        }
    }
}

impl KeyValueSchema for Sequences {
    type Key = String;
    type Value = SequenceNumber;
}

impl RocksDbKeyValueSchema for Sequences {
    fn name() -> &'static str {
        "sequence"
    }
}

pub struct SequenceGenerator {
    /// Database
    db: Arc<SequencerDatabase>,
    /// Current value of the sequence
    seq_cur: AtomicU64,
    /// This value represents an offset from the base
    seq_available: AtomicI32,
    /// Represents how many sequence numbers will be pre-allocated in a single batch.
    seq_batch_size: u16,
    /// Unique identifier of the sequence
    seq_name: String,
    /// Guarding write access to the database
    guard: (Mutex<()>, Condvar),
}

impl SequenceGenerator {
    fn new(seq_name: String, seq_batch_size: u16, db: Arc<SequencerDatabase>) -> Self {
        Self {
            seq_cur: AtomicU64::new(db.get(&seq_name).unwrap_or_default().unwrap_or(0)),
            seq_available: AtomicI32::new(0),
            guard: (Mutex::new(()), Condvar::new()),
            db,
            seq_name,
            seq_batch_size,
        }
    }

    /// Get the next unique sequence number. The value returned by this function is positive
    /// and always increasing.
    pub fn next(&self) -> Result<SequenceNumber, SequenceError> {
        let seq = loop {
            let available = self.seq_available.fetch_add(-1, Ordering::SeqCst);

            match available.cmp(&0) {
                Ord::Greater => {
                    // no need to allocate new sequence numbers yet
                    let seq = self.seq_cur.fetch_add(1, Ordering::SeqCst);
                    break seq;
                }
                Ord::Equal => {
                    // the last pre-allocated sequence number was handed out, we have to perform a new allocation
                    let seq = self.seq_cur.fetch_add(1, Ordering::SeqCst);

                    // obtain mutex lock to ensure exclusive access to the database
                    let _allocated = self.guard.0.lock()?;

                    // pre-allocate sequence numbers
                    let seq_prev = self.db.get(&self.seq_name)?.unwrap_or(0);
                    let seq_new = seq_prev + u64::from(self.seq_batch_size);
                    self.db.put(&self.seq_name, &seq_new)?;

                    // reset available counter
                    self.seq_available
                        .store(i32::from(self.seq_batch_size) - 1, Ordering::SeqCst);

                    // notify waiting threads
                    self.guard.1.notify_all();

                    break seq;
                }
                Ord::Less => {
                    // wait until seq_available is a positive number again
                    let _lock = self.guard.1.wait_while(self.guard.0.lock()?, |_| {
                        self.seq_available.load(Ordering::SeqCst) <= 0
                    })?;
                }
            }
        };

        Ok(seq)
    }
}

#[derive(Debug, Fail)]
pub enum SequenceError {
    #[fail(display = "Persistent storage error: {}", error)]
    PersistentStorageError { error: DBError },
    #[fail(display = "Thread synchronization error")]
    SynchronizationError,
}

impl From<DBError> for SequenceError {
    fn from(error: DBError) -> Self {
        SequenceError::PersistentStorageError { error }
    }
}

impl<T> From<PoisonError<T>> for SequenceError {
    fn from(_: PoisonError<T>) -> Self {
        SequenceError::SynchronizationError
    }
}
35.04321
117
0.597675
e9e939480d86ffc6ad658ff761e02e1fed005eaf
40
pub struct Config {

}

impl Config {

}
5.714286
19
0.625
ab55d9cc04a663feeb665bac9c3fa525c4674f51
46,524
//! See `CompletionContext` structure. use std::iter; use base_db::SourceDatabaseExt; use hir::{HasAttrs, Local, Name, ScopeDef, Semantics, SemanticsScope, Type, TypeInfo}; use ide_db::{ active_parameter::ActiveParameter, base_db::{FilePosition, SourceDatabase}, helpers::FamousDefs, RootDatabase, }; use syntax::{ algo::find_node_at_offset, ast::{self, HasName, NameOrNameRef}, match_ast, AstNode, NodeOrToken, SyntaxKind::{self, *}, SyntaxNode, SyntaxToken, TextRange, TextSize, T, }; use text_edit::Indel; use crate::{ patterns::{ determine_location, determine_prev_sibling, for_is_prev2, inside_impl_trait_block, is_in_loop_body, previous_token, ImmediateLocation, ImmediatePrevSibling, }, CompletionConfig, }; const COMPLETION_MARKER: &str = "intellijRulezz"; #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub(crate) enum PatternRefutability { Refutable, Irrefutable, } pub(crate) enum Visible { Yes, Editable, No, } #[derive(Copy, Clone, Debug)] pub(super) enum PathKind { Expr, Type, Attr, Mac, Pat, Vis { has_in_token: bool }, Use, } #[derive(Debug)] pub(crate) struct PathCompletionContext { /// If this is a call with () already there has_call_parens: bool, /// A single-indent path, like `foo`. `::foo` should not be considered a trivial path. pub(super) is_trivial_path: bool, /// If not a trivial path, the prefix (qualifier). pub(super) qualifier: Option<ast::Path>, #[allow(dead_code)] /// If not a trivial path, the suffix (parent). pub(super) parent: Option<ast::Path>, /// Whether the qualifier comes from a use tree parent or not pub(super) use_tree_parent: bool, pub(super) kind: Option<PathKind>, /// Whether the path segment has type args or not. pub(super) has_type_args: bool, /// `true` if we are a statement or a last expr in the block. pub(super) can_be_stmt: bool, pub(super) in_loop_body: bool, } #[derive(Debug)] pub(super) struct PatternContext { pub(super) refutability: PatternRefutability, pub(super) param_ctx: Option<(ast::ParamList, ast::Param, ParamKind)>, pub(super) has_type_ascription: bool, } #[derive(Debug)] pub(super) enum LifetimeContext { LifetimeParam(Option<ast::LifetimeParam>), Lifetime, LabelRef, LabelDef, } #[derive(Clone, Debug, PartialEq, Eq)] pub(crate) enum ParamKind { Function(ast::Fn), Closure(ast::ClosureExpr), } /// `CompletionContext` is created early during completion to figure out, where /// exactly is the cursor, syntax-wise. #[derive(Debug)] pub(crate) struct CompletionContext<'a> { pub(super) sema: Semantics<'a, RootDatabase>, pub(super) scope: SemanticsScope<'a>, pub(super) db: &'a RootDatabase, pub(super) config: &'a CompletionConfig, pub(super) position: FilePosition, /// The token before the cursor, in the original file. pub(super) original_token: SyntaxToken, /// The token before the cursor, in the macro-expanded file. pub(super) token: SyntaxToken, /// The crate of the current file. pub(super) krate: Option<hir::Crate>, /// The module of the `scope`. pub(super) module: Option<hir::Module>, pub(super) expected_name: Option<NameOrNameRef>, pub(super) expected_type: Option<Type>, /// The parent function of the cursor position if it exists. pub(super) function_def: Option<ast::Fn>, pub(super) attr: Option<ast::Attr>, /// The parent impl of the cursor position if it exists. pub(super) impl_def: Option<ast::Impl>, /// The NameLike under the cursor in the original file if it exists. 
pub(super) name_syntax: Option<ast::NameLike>, pub(super) incomplete_let: bool, pub(super) completion_location: Option<ImmediateLocation>, pub(super) prev_sibling: Option<ImmediatePrevSibling>, pub(super) fake_attribute_under_caret: Option<ast::Attr>, pub(super) previous_token: Option<SyntaxToken>, pub(super) lifetime_ctx: Option<LifetimeContext>, pub(super) pattern_ctx: Option<PatternContext>, pub(super) path_context: Option<PathCompletionContext>, pub(super) locals: Vec<(Name, Local)>, no_completion_required: bool, } impl<'a> CompletionContext<'a> { /// Checks whether completions in that particular case don't make much sense. /// Examples: /// - `fn $0` -- we expect function name, it's unlikely that "hint" will be helpful. /// Exception for this case is `impl Trait for Foo`, where we would like to hint trait method names. /// - `for _ i$0` -- obviously, it'll be "in" keyword. pub(crate) fn no_completion_required(&self) -> bool { self.no_completion_required } /// The range of the identifier that is being completed. pub(crate) fn source_range(&self) -> TextRange { // check kind of macro-expanded token, but use range of original token let kind = self.token.kind(); if kind == IDENT || kind == LIFETIME_IDENT || kind == UNDERSCORE || kind.is_keyword() { self.original_token.text_range() } else if kind == CHAR { // assume we are completing a lifetime but the user has only typed the ' cov_mark::hit!(completes_if_lifetime_without_idents); TextRange::at(self.original_token.text_range().start(), TextSize::from(1)) } else { TextRange::empty(self.position.offset) } } pub(crate) fn previous_token_is(&self, kind: SyntaxKind) -> bool { self.previous_token.as_ref().map_or(false, |tok| tok.kind() == kind) } pub(crate) fn famous_defs(&self) -> FamousDefs { FamousDefs(&self.sema, self.krate) } pub(crate) fn dot_receiver(&self) -> Option<&ast::Expr> { match &self.completion_location { Some( ImmediateLocation::MethodCall { receiver, .. } | ImmediateLocation::FieldAccess { receiver, .. }, ) => receiver.as_ref(), _ => None, } } pub(crate) fn has_dot_receiver(&self) -> bool { matches!( &self.completion_location, Some(ImmediateLocation::FieldAccess { receiver, .. } | ImmediateLocation::MethodCall { receiver,.. 
}) if receiver.is_some() ) } pub(crate) fn expects_assoc_item(&self) -> bool { matches!(self.completion_location, Some(ImmediateLocation::Trait | ImmediateLocation::Impl)) } pub(crate) fn expects_variant(&self) -> bool { matches!(self.completion_location, Some(ImmediateLocation::Variant)) } pub(crate) fn expects_non_trait_assoc_item(&self) -> bool { matches!(self.completion_location, Some(ImmediateLocation::Impl)) } pub(crate) fn expects_item(&self) -> bool { matches!(self.completion_location, Some(ImmediateLocation::ItemList)) } pub(crate) fn expects_generic_arg(&self) -> bool { matches!(self.completion_location, Some(ImmediateLocation::GenericArgList(_))) } pub(crate) fn has_block_expr_parent(&self) -> bool { matches!(self.completion_location, Some(ImmediateLocation::StmtList)) } pub(crate) fn expects_ident_pat_or_ref_expr(&self) -> bool { matches!( self.completion_location, Some(ImmediateLocation::IdentPat | ImmediateLocation::RefExpr) ) } pub(crate) fn expect_field(&self) -> bool { matches!( self.completion_location, Some(ImmediateLocation::RecordField | ImmediateLocation::TupleField) ) } pub(crate) fn has_impl_or_trait_prev_sibling(&self) -> bool { matches!( self.prev_sibling, Some(ImmediatePrevSibling::ImplDefType | ImmediatePrevSibling::TraitDefName) ) } pub(crate) fn has_impl_prev_sibling(&self) -> bool { matches!(self.prev_sibling, Some(ImmediatePrevSibling::ImplDefType)) } pub(crate) fn has_visibility_prev_sibling(&self) -> bool { matches!(self.prev_sibling, Some(ImmediatePrevSibling::Visibility)) } pub(crate) fn after_if(&self) -> bool { matches!(self.prev_sibling, Some(ImmediatePrevSibling::IfExpr)) } pub(crate) fn is_path_disallowed(&self) -> bool { self.previous_token_is(T![unsafe]) || matches!( self.prev_sibling, Some(ImmediatePrevSibling::Attribute | ImmediatePrevSibling::Visibility) ) || matches!( self.completion_location, Some( ImmediateLocation::ModDeclaration(_) | ImmediateLocation::RecordPat(_) | ImmediateLocation::RecordExpr(_) | ImmediateLocation::Rename ) ) } pub(crate) fn expects_expression(&self) -> bool { matches!(self.path_context, Some(PathCompletionContext { kind: Some(PathKind::Expr), .. })) } pub(crate) fn expects_type(&self) -> bool { matches!(self.path_context, Some(PathCompletionContext { kind: Some(PathKind::Type), .. })) } pub(crate) fn path_is_call(&self) -> bool { self.path_context.as_ref().map_or(false, |it| it.has_call_parens) } pub(crate) fn is_trivial_path(&self) -> bool { matches!(self.path_context, Some(PathCompletionContext { is_trivial_path: true, .. })) } pub(crate) fn is_non_trivial_path(&self) -> bool { matches!(self.path_context, Some(PathCompletionContext { is_trivial_path: false, .. })) } pub(crate) fn path_qual(&self) -> Option<&ast::Path> { self.path_context.as_ref().and_then(|it| it.qualifier.as_ref()) } pub(crate) fn path_kind(&self) -> Option<PathKind> { self.path_context.as_ref().and_then(|it| it.kind) } /// Checks if an item is visible and not `doc(hidden)` at the completion site. pub(crate) fn is_visible<I>(&self, item: &I) -> Visible where I: hir::HasVisibility + hir::HasAttrs + hir::HasCrate + Copy, { self.is_visible_impl(&item.visibility(self.db), &item.attrs(self.db), item.krate(self.db)) } pub(crate) fn is_scope_def_hidden(&self, scope_def: ScopeDef) -> bool { if let (Some(attrs), Some(krate)) = (scope_def.attrs(self.db), scope_def.krate(self.db)) { return self.is_doc_hidden(&attrs, krate); } false } /// Check if an item is `#[doc(hidden)]`. 
pub(crate) fn is_item_hidden(&self, item: &hir::ItemInNs) -> bool { let attrs = item.attrs(self.db); let krate = item.krate(self.db); match (attrs, krate) { (Some(attrs), Some(krate)) => self.is_doc_hidden(&attrs, krate), _ => false, } } pub(crate) fn is_immediately_after_macro_bang(&self) -> bool { self.token.kind() == BANG && self.token.parent().map_or(false, |it| it.kind() == MACRO_CALL) } /// Whether the given trait is an operator trait or not. pub(crate) fn is_ops_trait(&self, trait_: hir::Trait) -> bool { match trait_.attrs(self.db).lang() { Some(lang) => OP_TRAIT_LANG_NAMES.contains(&lang.as_str()), None => false, } } /// A version of [`SemanticsScope::process_all_names`] that filters out `#[doc(hidden)]` items. pub(crate) fn process_all_names(&self, f: &mut dyn FnMut(Name, ScopeDef)) { let _p = profile::span("CompletionContext::process_all_names"); self.scope.process_all_names(&mut |name, def| { if self.is_scope_def_hidden(def) { return; } f(name, def); }) } fn is_visible_impl( &self, vis: &hir::Visibility, attrs: &hir::Attrs, defining_crate: hir::Crate, ) -> Visible { let module = match self.module { Some(it) => it, None => return Visible::No, }; if !vis.is_visible_from(self.db, module.into()) { // If the definition location is editable, also show private items let root_file = defining_crate.root_file(self.db); let source_root_id = self.db.file_source_root(root_file); let is_editable = !self.db.source_root(source_root_id).is_library; return if is_editable { Visible::Editable } else { Visible::No }; } if self.is_doc_hidden(attrs, defining_crate) { Visible::No } else { Visible::Yes } } fn is_doc_hidden(&self, attrs: &hir::Attrs, defining_crate: hir::Crate) -> bool { let krate = match self.krate { Some(it) => it, None => return true, }; if krate != defining_crate && attrs.has_doc_hidden() { // `doc(hidden)` items are only completed within the defining crate. return true; } false } } // CompletionContext construction impl<'a> CompletionContext<'a> { pub(super) fn new( db: &'a RootDatabase, position @ FilePosition { file_id, offset }: FilePosition, config: &'a CompletionConfig, ) -> Option<CompletionContext<'a>> { let _p = profile::span("CompletionContext::new"); let sema = Semantics::new(db); let original_file = sema.parse(file_id); // Insert a fake ident to get a valid parse tree. We will use this file // to determine context, though the original_file will be used for // actual completion. 
let file_with_fake_ident = { let parse = db.parse(file_id); let edit = Indel::insert(offset, COMPLETION_MARKER.to_string()); parse.reparse(&edit).tree() }; let fake_ident_token = file_with_fake_ident.syntax().token_at_offset(offset).right_biased()?; let original_token = original_file.syntax().token_at_offset(offset).left_biased()?; let token = sema.descend_into_macros_single(original_token.clone()); let scope = sema.scope_at_offset(&token.parent()?, offset); let krate = scope.krate(); let module = scope.module(); let mut locals = vec![]; scope.process_all_names(&mut |name, scope| { if let ScopeDef::Local(local) = scope { locals.push((name, local)); } }); let mut ctx = CompletionContext { sema, scope, db, config, position, original_token, token, krate, module, expected_name: None, expected_type: None, function_def: None, attr: None, impl_def: None, name_syntax: None, lifetime_ctx: None, pattern_ctx: None, completion_location: None, prev_sibling: None, fake_attribute_under_caret: None, previous_token: None, path_context: None, locals, incomplete_let: false, no_completion_required: false, }; ctx.expand_and_fill( original_file.syntax().clone(), file_with_fake_ident.syntax().clone(), offset, fake_ident_token, ); Some(ctx) } /// Do the attribute expansion at the current cursor position for both original file and fake file /// as long as possible. As soon as one of the two expansions fail we stop to stay in sync. fn expand_and_fill( &mut self, mut original_file: SyntaxNode, mut speculative_file: SyntaxNode, mut offset: TextSize, mut fake_ident_token: SyntaxToken, ) { let _p = profile::span("CompletionContext::expand_and_fill"); 'expansion: loop { let parent_item = |item: &ast::Item| item.syntax().ancestors().skip(1).find_map(ast::Item::cast); let ancestor_items = iter::successors( Option::zip( find_node_at_offset::<ast::Item>(&original_file, offset), find_node_at_offset::<ast::Item>(&speculative_file, offset), ), |(a, b)| parent_item(a).zip(parent_item(b)), ); for (actual_item, item_with_fake_ident) in ancestor_items { match ( self.sema.expand_attr_macro(&actual_item), self.sema.speculative_expand_attr_macro( &actual_item, &item_with_fake_ident, fake_ident_token.clone(), ), ) { // maybe parent items have attributes (None, None) => (), // successful expansions (Some(actual_expansion), Some((fake_expansion, fake_mapped_token))) => { let new_offset = fake_mapped_token.text_range().start(); if new_offset > actual_expansion.text_range().end() { break 'expansion; } original_file = actual_expansion; speculative_file = fake_expansion; fake_ident_token = fake_mapped_token; offset = new_offset; continue 'expansion; } // exactly one expansion failed, inconsistent state so stop expanding completely _ => break 'expansion, } } // Expand fn-like macro calls if let (Some(actual_macro_call), Some(macro_call_with_fake_ident)) = ( find_node_at_offset::<ast::MacroCall>(&original_file, offset), find_node_at_offset::<ast::MacroCall>(&speculative_file, offset), ) { let mac_call_path0 = actual_macro_call.path().as_ref().map(|s| s.syntax().text()); let mac_call_path1 = macro_call_with_fake_ident.path().as_ref().map(|s| s.syntax().text()); if mac_call_path0 != mac_call_path1 { break; } let speculative_args = match macro_call_with_fake_ident.token_tree() { Some(tt) => tt, None => break, }; match ( self.sema.expand(&actual_macro_call), self.sema.speculative_expand( &actual_macro_call, &speculative_args, fake_ident_token.clone(), ), ) { // successful expansions (Some(actual_expansion), Some((fake_expansion, 
fake_mapped_token))) => { let new_offset = fake_mapped_token.text_range().start(); if new_offset > actual_expansion.text_range().end() { break; } original_file = actual_expansion; speculative_file = fake_expansion; fake_ident_token = fake_mapped_token; offset = new_offset; continue; } _ => break, } } break; } self.fill(&original_file, speculative_file, offset); } fn expected_type_and_name(&self) -> (Option<Type>, Option<NameOrNameRef>) { let mut node = match self.token.parent() { Some(it) => it, None => return (None, None), }; loop { break match_ast! { match node { ast::LetStmt(it) => { cov_mark::hit!(expected_type_let_with_leading_char); cov_mark::hit!(expected_type_let_without_leading_char); let ty = it.pat() .and_then(|pat| self.sema.type_of_pat(&pat)) .or_else(|| it.initializer().and_then(|it| self.sema.type_of_expr(&it))) .map(TypeInfo::original); let name = match it.pat() { Some(ast::Pat::IdentPat(ident)) => ident.name().map(NameOrNameRef::Name), Some(_) | None => None, }; (ty, name) }, ast::ArgList(_) => { cov_mark::hit!(expected_type_fn_param); ActiveParameter::at_token( &self.sema, self.token.clone(), ).map(|ap| { let name = ap.ident().map(NameOrNameRef::Name); let ty = if has_ref(&self.token) { cov_mark::hit!(expected_type_fn_param_ref); ap.ty.remove_ref() } else { Some(ap.ty) }; (ty, name) }) .unwrap_or((None, None)) }, ast::RecordExprFieldList(it) => { // wouldn't try {} be nice... (|| { if self.token.kind() == T![..] || self.token.prev_token().map(|t| t.kind()) == Some(T![..]) { cov_mark::hit!(expected_type_struct_func_update); let record_expr = it.syntax().parent().and_then(ast::RecordExpr::cast)?; let ty = self.sema.type_of_expr(&record_expr.into())?; Some(( Some(ty.original), None )) } else { cov_mark::hit!(expected_type_struct_field_without_leading_char); let expr_field = self.token.prev_sibling_or_token()? 
.into_node() .and_then(ast::RecordExprField::cast)?; let (_, _, ty) = self.sema.resolve_record_field(&expr_field)?; Some(( Some(ty), expr_field.field_name().map(NameOrNameRef::NameRef), )) } })().unwrap_or((None, None)) }, ast::RecordExprField(it) => { if let Some(expr) = it.expr() { cov_mark::hit!(expected_type_struct_field_with_leading_char); ( self.sema.type_of_expr(&expr).map(TypeInfo::original), it.field_name().map(NameOrNameRef::NameRef), ) } else { cov_mark::hit!(expected_type_struct_field_followed_by_comma); let ty = self.sema.resolve_record_field(&it) .map(|(_, _, ty)| ty); ( ty, it.field_name().map(NameOrNameRef::NameRef), ) } }, ast::MatchExpr(it) => { cov_mark::hit!(expected_type_match_arm_without_leading_char); let ty = it.expr().and_then(|e| self.sema.type_of_expr(&e)).map(TypeInfo::original); (ty, None) }, ast::IfExpr(it) => { cov_mark::hit!(expected_type_if_let_without_leading_char); let ty = it.condition() .and_then(|cond| cond.expr()) .and_then(|e| self.sema.type_of_expr(&e)) .map(TypeInfo::original); (ty, None) }, ast::IdentPat(it) => { cov_mark::hit!(expected_type_if_let_with_leading_char); cov_mark::hit!(expected_type_match_arm_with_leading_char); let ty = self.sema.type_of_pat(&ast::Pat::from(it)).map(TypeInfo::original); (ty, None) }, ast::Fn(it) => { cov_mark::hit!(expected_type_fn_ret_with_leading_char); cov_mark::hit!(expected_type_fn_ret_without_leading_char); let def = self.sema.to_def(&it); (def.map(|def| def.ret_type(self.db)), None) }, ast::ClosureExpr(it) => { let ty = self.sema.type_of_expr(&it.into()); ty.and_then(|ty| ty.original.as_callable(self.db)) .map(|c| (Some(c.return_type()), None)) .unwrap_or((None, None)) }, ast::ParamList(_) => (None, None), ast::Stmt(_) => (None, None), ast::Item(_) => (None, None), _ => { match node.parent() { Some(n) => { node = n; continue; }, None => (None, None), } }, } }; } } fn fill( &mut self, original_file: &SyntaxNode, file_with_fake_ident: SyntaxNode, offset: TextSize, ) { let fake_ident_token = file_with_fake_ident.token_at_offset(offset).right_biased().unwrap(); let syntax_element = NodeOrToken::Token(fake_ident_token); self.previous_token = previous_token(syntax_element.clone()); self.no_completion_required = { let inside_impl_trait_block = inside_impl_trait_block(syntax_element.clone()); let fn_is_prev = self.previous_token_is(T![fn]); let for_is_prev2 = for_is_prev2(syntax_element.clone()); (fn_is_prev && !inside_impl_trait_block) || for_is_prev2 }; self.attr = self .sema .token_ancestors_with_macros(self.token.clone()) .take_while(|it| it.kind() != SOURCE_FILE && it.kind() != MODULE) .find_map(ast::Attr::cast); self.fake_attribute_under_caret = syntax_element.ancestors().find_map(ast::Attr::cast); self.incomplete_let = syntax_element.ancestors().take(6).find_map(ast::LetStmt::cast).map_or(false, |it| { it.syntax().text_range().end() == syntax_element.text_range().end() }); let (expected_type, expected_name) = self.expected_type_and_name(); self.expected_type = expected_type; self.expected_name = expected_name; let name_like = match find_node_at_offset(&file_with_fake_ident, offset) { Some(it) => it, None => return, }; self.completion_location = determine_location(&self.sema, original_file, offset, &name_like); self.prev_sibling = determine_prev_sibling(&name_like); self.name_syntax = find_node_at_offset(original_file, name_like.syntax().text_range().start()); self.impl_def = self .sema .token_ancestors_with_macros(self.token.clone()) .take_while(|it| it.kind() != SOURCE_FILE && it.kind() != MODULE) 
.find_map(ast::Impl::cast); self.function_def = self .sema .token_ancestors_with_macros(self.token.clone()) .take_while(|it| it.kind() != SOURCE_FILE && it.kind() != MODULE) .find_map(ast::Fn::cast); match name_like { ast::NameLike::Lifetime(lifetime) => { self.lifetime_ctx = Self::classify_lifetime(&self.sema, original_file, lifetime, offset); } ast::NameLike::NameRef(name_ref) => { if let Some((path_ctx, pat_ctx)) = Self::classify_name_ref(&self.sema, original_file, name_ref) { self.path_context = Some(path_ctx); self.pattern_ctx = pat_ctx; } } ast::NameLike::Name(name) => { self.pattern_ctx = Self::classify_name(&self.sema, original_file, name); } } } fn classify_lifetime( sema: &Semantics<RootDatabase>, original_file: &SyntaxNode, lifetime: ast::Lifetime, offset: TextSize, ) -> Option<LifetimeContext> { let parent = lifetime.syntax().parent()?; if parent.kind() == ERROR { return None; } Some(match_ast! { match parent { ast::LifetimeParam(_) => LifetimeContext::LifetimeParam(sema.find_node_at_offset_with_macros(original_file, offset)), ast::BreakExpr(_) => LifetimeContext::LabelRef, ast::ContinueExpr(_) => LifetimeContext::LabelRef, ast::Label(_) => LifetimeContext::LabelDef, _ => LifetimeContext::Lifetime, } }) } fn classify_name( _sema: &Semantics<RootDatabase>, original_file: &SyntaxNode, name: ast::Name, ) -> Option<PatternContext> { let bind_pat = name.syntax().parent().and_then(ast::IdentPat::cast)?; let is_name_in_field_pat = bind_pat .syntax() .parent() .and_then(ast::RecordPatField::cast) .map_or(false, |pat_field| pat_field.name_ref().is_none()); if is_name_in_field_pat { return None; } if !bind_pat.is_simple_ident() { return None; } Some(pattern_context_for(original_file, bind_pat.into())) } fn classify_name_ref( _sema: &Semantics<RootDatabase>, original_file: &SyntaxNode, name_ref: ast::NameRef, ) -> Option<(PathCompletionContext, Option<PatternContext>)> { let parent = name_ref.syntax().parent()?; let segment = ast::PathSegment::cast(parent)?; let path = segment.parent_path(); let mut path_ctx = PathCompletionContext { has_call_parens: false, is_trivial_path: false, qualifier: None, parent: None, has_type_args: false, can_be_stmt: false, in_loop_body: false, use_tree_parent: false, kind: None, }; let mut pat_ctx = None; path_ctx.in_loop_body = is_in_loop_body(name_ref.syntax()); path_ctx.kind = path.syntax().ancestors().find_map(|it| { match_ast! 
{ match it { ast::PathType(_) => Some(PathKind::Type), ast::PathExpr(it) => { path_ctx.has_call_parens = it.syntax().parent().map_or(false, |it| ast::CallExpr::can_cast(it.kind())); Some(PathKind::Expr) }, ast::TupleStructPat(it) => { path_ctx.has_call_parens = true; pat_ctx = Some(pattern_context_for(original_file, it.into())); Some(PathKind::Pat) }, ast::RecordPat(it) => { pat_ctx = Some(pattern_context_for(original_file, it.into())); Some(PathKind::Pat) }, ast::PathPat(it) => { pat_ctx = Some(pattern_context_for(original_file, it.into())); Some(PathKind::Pat) }, ast::MacroCall(it) => it.excl_token().and(Some(PathKind::Mac)), ast::Meta(_) => Some(PathKind::Attr), ast::Visibility(it) => Some(PathKind::Vis { has_in_token: it.in_token().is_some() }), ast::UseTree(_) => Some(PathKind::Use), _ => None, } } }); path_ctx.has_type_args = segment.generic_arg_list().is_some(); if let Some((path, use_tree_parent)) = path_or_use_tree_qualifier(&path) { path_ctx.use_tree_parent = use_tree_parent; path_ctx.qualifier = path .segment() .and_then(|it| find_node_in_file(original_file, &it)) .map(|it| it.parent_path()); return Some((path_ctx, pat_ctx)); } if let Some(segment) = path.segment() { if segment.coloncolon_token().is_some() { return Some((path_ctx, pat_ctx)); } } path_ctx.is_trivial_path = true; // Find either enclosing expr statement (thing with `;`) or a // block. If block, check that we are the last expr. path_ctx.can_be_stmt = name_ref .syntax() .ancestors() .find_map(|node| { if let Some(stmt) = ast::ExprStmt::cast(node.clone()) { return Some(stmt.syntax().text_range() == name_ref.syntax().text_range()); } if let Some(stmt_list) = ast::StmtList::cast(node) { return Some( stmt_list.tail_expr().map(|e| e.syntax().text_range()) == Some(name_ref.syntax().text_range()), ); } None }) .unwrap_or(false); Some((path_ctx, pat_ctx)) } } fn pattern_context_for(original_file: &SyntaxNode, pat: ast::Pat) -> PatternContext { let mut is_param = None; let (refutability, has_type_ascription) = pat .syntax() .ancestors() .skip_while(|it| ast::Pat::can_cast(it.kind())) .next() .map_or((PatternRefutability::Irrefutable, false), |node| { let refutability = match_ast! { match node { ast::LetStmt(let_) => return (PatternRefutability::Irrefutable, let_.ty().is_some()), ast::Param(param) => { let has_type_ascription = param.ty().is_some(); is_param = (|| { let fake_param_list = param.syntax().parent().and_then(ast::ParamList::cast)?; let param_list = find_node_in_file_compensated(original_file, &fake_param_list)?; let param_list_owner = param_list.syntax().parent()?; let kind = match_ast! 
{ match param_list_owner { ast::ClosureExpr(closure) => ParamKind::Closure(closure), ast::Fn(fn_) => ParamKind::Function(fn_), _ => return None, } }; Some((param_list, param, kind)) })(); return (PatternRefutability::Irrefutable, has_type_ascription) }, ast::MatchArm(_) => PatternRefutability::Refutable, ast::Condition(_) => PatternRefutability::Refutable, ast::ForExpr(_) => PatternRefutability::Irrefutable, _ => PatternRefutability::Irrefutable, } }; (refutability, false) }); PatternContext { refutability, param_ctx: is_param, has_type_ascription } } fn find_node_in_file<N: AstNode>(syntax: &SyntaxNode, node: &N) -> Option<N> { let syntax_range = syntax.text_range(); let range = node.syntax().text_range(); let intersection = range.intersect(syntax_range)?; syntax.covering_element(intersection).ancestors().find_map(N::cast) } /// Compensates for the offset introduced by the fake ident /// This is wrong if `node` comes before the insertion point! Use `find_node_in_file` instead. fn find_node_in_file_compensated<N: AstNode>(syntax: &SyntaxNode, node: &N) -> Option<N> { let syntax_range = syntax.text_range(); let range = node.syntax().text_range(); let end = range.end().checked_sub(TextSize::try_from(COMPLETION_MARKER.len()).ok()?)?; if end < range.start() { return None; } let range = TextRange::new(range.start(), end); // our inserted ident could cause `range` to be go outside of the original syntax, so cap it let intersection = range.intersect(syntax_range)?; syntax.covering_element(intersection).ancestors().find_map(N::cast) } fn path_or_use_tree_qualifier(path: &ast::Path) -> Option<(ast::Path, bool)> { if let Some(qual) = path.qualifier() { return Some((qual, false)); } let use_tree_list = path.syntax().ancestors().find_map(ast::UseTreeList::cast)?; let use_tree = use_tree_list.syntax().parent().and_then(ast::UseTree::cast)?; use_tree.path().zip(Some(true)) } fn has_ref(token: &SyntaxToken) -> bool { let mut token = token.clone(); for skip in [IDENT, WHITESPACE, T![mut]] { if token.kind() == skip { token = match token.prev_token() { Some(it) => it, None => return false, } } } token.kind() == T![&] } const OP_TRAIT_LANG_NAMES: &[&str] = &[ "add_assign", "add", "bitand_assign", "bitand", "bitor_assign", "bitor", "bitxor_assign", "bitxor", "deref_mut", "deref", "div_assign", "div", "eq", "fn_mut", "fn_once", "fn", "index_mut", "index", "mul_assign", "mul", "neg", "not", "partial_ord", "rem_assign", "rem", "shl_assign", "shl", "shr_assign", "shr", "sub", ]; #[cfg(test)] mod tests { use expect_test::{expect, Expect}; use hir::HirDisplay; use crate::tests::{position, TEST_CONFIG}; use super::CompletionContext; fn check_expected_type_and_name(ra_fixture: &str, expect: Expect) { let (db, pos) = position(ra_fixture); let config = TEST_CONFIG; let completion_context = CompletionContext::new(&db, pos, &config).unwrap(); let ty = completion_context .expected_type .map(|t| t.display_test(&db).to_string()) .unwrap_or("?".to_owned()); let name = completion_context .expected_name .map_or_else(|| "?".to_owned(), |name| name.to_string()); expect.assert_eq(&format!("ty: {}, name: {}", ty, name)); } #[test] fn expected_type_let_without_leading_char() { cov_mark::check!(expected_type_let_without_leading_char); check_expected_type_and_name( r#" fn foo() { let x: u32 = $0; } "#, expect![[r#"ty: u32, name: x"#]], ); } #[test] fn expected_type_let_with_leading_char() { cov_mark::check!(expected_type_let_with_leading_char); check_expected_type_and_name( r#" fn foo() { let x: u32 = c$0; } "#, expect![[r#"ty: u32, 
name: x"#]], ); } #[test] fn expected_type_let_pat() { check_expected_type_and_name( r#" fn foo() { let x$0 = 0u32; } "#, expect![[r#"ty: u32, name: ?"#]], ); check_expected_type_and_name( r#" fn foo() { let $0 = 0u32; } "#, expect![[r#"ty: u32, name: ?"#]], ); } #[test] fn expected_type_fn_param() { cov_mark::check!(expected_type_fn_param); check_expected_type_and_name( r#" fn foo() { bar($0); } fn bar(x: u32) {} "#, expect![[r#"ty: u32, name: x"#]], ); check_expected_type_and_name( r#" fn foo() { bar(c$0); } fn bar(x: u32) {} "#, expect![[r#"ty: u32, name: x"#]], ); } #[test] fn expected_type_fn_param_ref() { cov_mark::check!(expected_type_fn_param_ref); check_expected_type_and_name( r#" fn foo() { bar(&$0); } fn bar(x: &u32) {} "#, expect![[r#"ty: u32, name: x"#]], ); check_expected_type_and_name( r#" fn foo() { bar(&mut $0); } fn bar(x: &mut u32) {} "#, expect![[r#"ty: u32, name: x"#]], ); check_expected_type_and_name( r#" fn foo() { bar(& c$0); } fn bar(x: &u32) {} "#, expect![[r#"ty: u32, name: x"#]], ); check_expected_type_and_name( r#" fn foo() { bar(&mut c$0); } fn bar(x: &mut u32) {} "#, expect![[r#"ty: u32, name: x"#]], ); check_expected_type_and_name( r#" fn foo() { bar(&c$0); } fn bar(x: &u32) {} "#, expect![[r#"ty: u32, name: x"#]], ); } #[test] fn expected_type_struct_field_without_leading_char() { cov_mark::check!(expected_type_struct_field_without_leading_char); check_expected_type_and_name( r#" struct Foo { a: u32 } fn foo() { Foo { a: $0 }; } "#, expect![[r#"ty: u32, name: a"#]], ) } #[test] fn expected_type_struct_field_followed_by_comma() { cov_mark::check!(expected_type_struct_field_followed_by_comma); check_expected_type_and_name( r#" struct Foo { a: u32 } fn foo() { Foo { a: $0, }; } "#, expect![[r#"ty: u32, name: a"#]], ) } #[test] fn expected_type_generic_struct_field() { check_expected_type_and_name( r#" struct Foo<T> { a: T } fn foo() -> Foo<u32> { Foo { a: $0 } } "#, expect![[r#"ty: u32, name: a"#]], ) } #[test] fn expected_type_struct_field_with_leading_char() { cov_mark::check!(expected_type_struct_field_with_leading_char); check_expected_type_and_name( r#" struct Foo { a: u32 } fn foo() { Foo { a: c$0 }; } "#, expect![[r#"ty: u32, name: a"#]], ); } #[test] fn expected_type_match_arm_without_leading_char() { cov_mark::check!(expected_type_match_arm_without_leading_char); check_expected_type_and_name( r#" enum E { X } fn foo() { match E::X { $0 } } "#, expect![[r#"ty: E, name: ?"#]], ); } #[test] fn expected_type_match_arm_with_leading_char() { cov_mark::check!(expected_type_match_arm_with_leading_char); check_expected_type_and_name( r#" enum E { X } fn foo() { match E::X { c$0 } } "#, expect![[r#"ty: E, name: ?"#]], ); } #[test] fn expected_type_if_let_without_leading_char() { cov_mark::check!(expected_type_if_let_without_leading_char); check_expected_type_and_name( r#" enum Foo { Bar, Baz, Quux } fn foo() { let f = Foo::Quux; if let $0 = f { } } "#, expect![[r#"ty: Foo, name: ?"#]], ) } #[test] fn expected_type_if_let_with_leading_char() { cov_mark::check!(expected_type_if_let_with_leading_char); check_expected_type_and_name( r#" enum Foo { Bar, Baz, Quux } fn foo() { let f = Foo::Quux; if let c$0 = f { } } "#, expect![[r#"ty: Foo, name: ?"#]], ) } #[test] fn expected_type_fn_ret_without_leading_char() { cov_mark::check!(expected_type_fn_ret_without_leading_char); check_expected_type_and_name( r#" fn foo() -> u32 { $0 } "#, expect![[r#"ty: u32, name: ?"#]], ) } #[test] fn expected_type_fn_ret_with_leading_char() { 
cov_mark::check!(expected_type_fn_ret_with_leading_char); check_expected_type_and_name( r#" fn foo() -> u32 { c$0 } "#, expect![[r#"ty: u32, name: ?"#]], ) } #[test] fn expected_type_fn_ret_fn_ref_fully_typed() { check_expected_type_and_name( r#" fn foo() -> u32 { foo$0 } "#, expect![[r#"ty: u32, name: ?"#]], ) } #[test] fn expected_type_closure_param_return() { // FIXME: make this work with `|| $0` check_expected_type_and_name( r#" //- minicore: fn fn foo() { bar(|| a$0); } fn bar(f: impl FnOnce() -> u32) {} "#, expect![[r#"ty: u32, name: ?"#]], ); } #[test] fn expected_type_generic_function() { check_expected_type_and_name( r#" fn foo() { bar::<u32>($0); } fn bar<T>(t: T) {} "#, expect![[r#"ty: u32, name: t"#]], ); } #[test] fn expected_type_generic_method() { check_expected_type_and_name( r#" fn foo() { S(1u32).bar($0); } struct S<T>(T); impl<T> S<T> { fn bar(self, t: T) {} } "#, expect![[r#"ty: u32, name: t"#]], ); } #[test] fn expected_type_functional_update() { cov_mark::check!(expected_type_struct_func_update); check_expected_type_and_name( r#" struct Foo { field: u32 } fn foo() { Foo { ..$0 } } "#, expect![[r#"ty: Foo, name: ?"#]], ); } #[test] fn expected_type_param_pat() { check_expected_type_and_name( r#" struct Foo { field: u32 } fn foo(a$0: Foo) {} "#, expect![[r#"ty: Foo, name: ?"#]], ); check_expected_type_and_name( r#" struct Foo { field: u32 } fn foo($0: Foo) {} "#, // FIXME make this work, currently fails due to pattern recovery eating the `:` expect![[r#"ty: ?, name: ?"#]], ); } }
34.133529
133
0.529232
e5ba9ce6b95a285cd4accb41829f45a03e5bace4
3,456
use mimir::enums;
use mimir::enums::ODPINativeTypeNum::{Bytes, Double};
use mimir::flags;
use mimir::Result;
use mimir::{Context, Pool};
use CREDS;

fn pool_res(ctxt: &Context) -> Result<()> {
    let mut ccp = ctxt.init_common_create_params()?;
    ccp.set_encoding("UTF-8")?;
    ccp.set_nchar_encoding("UTF-8")?;

    let pool = Pool::create(
        ctxt,
        Some(&CREDS[0]),
        Some(&CREDS[1]),
        Some("//oic.cbsnae86d3iv.us-east-2.rds.amazonaws.com/ORCL"),
        Some(ccp),
        None,
    )?;

    let ei = pool.get_encoding_info()?;
    assert_eq!(ei.encoding(), "UTF-8");
    assert_eq!(ei.nchar_encoding(), "UTF-8");
    assert_eq!(ei.max_bytes_per_char(), 4);
    assert_eq!(ei.max_bytes_per_nchar(), 4);

    let mut get_mode = pool.get_get_mode()?;
    assert_eq!(get_mode, enums::ODPIPoolGetMode::NoWait);
    pool.set_get_mode(enums::ODPIPoolGetMode::ForceGet)?;
    get_mode = pool.get_get_mode()?;
    assert_eq!(get_mode, enums::ODPIPoolGetMode::ForceGet);

    let mut max_lifetime_session = pool.get_max_lifetime_session()?;
    assert_eq!(max_lifetime_session, 0);
    pool.set_max_lifetime_session(3600)?;
    max_lifetime_session = pool.get_max_lifetime_session()?;
    assert_eq!(max_lifetime_session, 3600);

    let mut stmt_cache_size = pool.get_stmt_cache_size()?;
    assert_eq!(stmt_cache_size, 20);
    pool.set_stmt_cache_size(100)?;
    stmt_cache_size = pool.get_stmt_cache_size()?;
    assert_eq!(stmt_cache_size, 100);

    let mut timeout = pool.get_timeout()?;
    assert_eq!(timeout, 0);
    pool.set_timeout(3600)?;
    timeout = pool.get_timeout()?;
    assert_eq!(timeout, 3600);

    {
        let conn = pool.acquire_connection(None, None, None)?;
        let version_info = conn.get_server_version()?;
        assert_eq!(version_info.version(), "12.1.0.2.0");
        assert_eq!(version_info.version_num(), 1_201_000_200);
        assert_eq!(
            version_info.release(),
            "Oracle Database 12c Standard Edition Release 12.1.0.2.0 - \
             64bit Production"
        );

        {
            let stmt = conn.prepare_stmt(
                Some("select * from username where username = 'jozias'"),
                None,
                false,
            )?;

            stmt.execute(flags::DPI_MODE_EXEC_DEFAULT)?;
            stmt.fetch()?;
            let (id_type, id_data) = stmt.get_query_value(1)?;
            let (username_type, username_data) = stmt.get_query_value(2)?;

            assert_eq!(id_type, Double);
            assert!((id_data.get_double() - 1.0) < ::std::f64::EPSILON);
            assert_eq!(username_type, Bytes);
            assert_eq!(username_data.get_string(), "jozias");

            let busy_count = pool.get_busy_count()?;
            assert_eq!(busy_count, 1);
            let open_count = pool.get_open_count()?;
            assert_eq!(open_count, 1);

            let pl_sql = conn.prepare_stmt(
                Some(
                    r"
BEGIN
    EXECUTE IMMEDIATE 'DROP TABLE person';
EXCEPTION
    WHEN OTHERS THEN
        IF SQLCODE != -942 THEN
            RAISE;
        END IF;
END;
",
                ),
                None,
                false,
            )?;

            pl_sql.execute(flags::DPI_MODE_EXEC_DEFAULT)?;
        }

        conn.close(flags::DPI_MODE_CONN_CLOSE_DEFAULT, None)?;
    }

    pool.close(flags::DPI_MODE_POOL_CLOSE_DEFAULT)?;

    Ok(())
}

#[test]
fn pool() {
    check_with_ctxt!(pool_res)
}
29.538462
74
0.59809
d76535f5b492cb5242960e21aabfeedc8315ae4c
3,110
// Copyright 2020 Netwarps Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.

use libp2prs_core::identity::Keypair;
use libp2prs_runtime::{
    net::{TcpListener, TcpStream},
    task,
};
use log::{info, LevelFilter};

use futures::{AsyncReadExt, AsyncWriteExt};
use libp2prs_plaintext::PlainTextConfig;

fn main() {
    env_logger::builder().filter_level(LevelFilter::Info).init();
    if std::env::args().nth(1) == Some("server".to_string()) {
        info!("Starting server ......");
        server();
    } else {
        info!("Starting client ......");
        client();
    }
}

fn server() {
    let key = Keypair::generate_secp256k1();
    let config = PlainTextConfig::new(key);

    task::block_on(async move {
        let listener = TcpListener::bind("127.0.0.1:1337").await.unwrap();

        while let Ok((socket, _)) = listener.accept().await {
            let config = config.clone();
            task::spawn(async move {
                let (mut handle, _) = config.handshake(socket).await.unwrap();
                info!("session started!");

                let mut buf = [0u8; 100];

                while let Ok(n) = handle.read(&mut buf).await {
                    buf[11] = b"!"[0];
                    if handle.write_all(&buf[..n + 1]).await.is_err() {
                        break;
                    }
                }

                info!("session closed!");
                let _ = handle.close().await;
            });
        }
    });
}

fn client() {
    let key = Keypair::generate_secp256k1();
    let config = PlainTextConfig::new(key);
    let data = b"hello world";

    task::block_on(async move {
        let stream = TcpStream::connect("127.0.0.1:1337").await.unwrap();

        let (mut handle, _) = config.handshake(stream).await.unwrap();
        match handle.write_all(data.as_ref()).await {
            Ok(_) => info!("send all"),
            Err(e) => info!("err: {:?}", e),
        }

        let mut buf = [0; 100];
        let n = handle.read(&mut buf).await.unwrap();
        info!("receive: {:?}", &buf[..n]);
    });
}
33.804348
78
0.605145
4899c645e045701a7aadbc067cd838e22b8c9012
6,099
use digest::generic_array::typenum::Unsigned; use digest::{BlockInput, Digest, FixedOutput, Reset, Update}; use hmac::{Hmac, Mac, NewMac}; use wasm_bindgen::prelude::*; trait DynHmac { /// Update MAC state with the given data. fn update(&mut self, data: &[u8]); /// Obtain the result of a [`Mac`] computation as a [`Output`] and reset /// [`Mac`] instance. fn finalize_reset(&mut self) -> Box<[u8]>; /// Obtain the result of a [`Mac`] computation as a [`Output`] and consume /// [`Mac`] instance. fn box_finalize(self: Box<Self>) -> Box<[u8]>; /// Reset [`Mac`] instance. fn reset(&mut self); /// Get output size of the [`Mac`]. fn output_size(&self) -> usize; /// Clone [`Mac`] state into a boxed trait object fn box_clone(&self) -> Box<dyn DynHmac>; /// Check if tag/code value is correct for the processed input. fn box_verify(self: Box<Self>, tag: &[u8]) -> bool; } impl<D: Mac + 'static> DynHmac for D { fn update(&mut self, data: &[u8]) { Mac::update(self, data); } fn finalize_reset(&mut self) -> Box<[u8]> { let res = self .clone() .finalize() .into_bytes() .to_vec() .into_boxed_slice(); self.reset(); res } fn box_finalize(self: Box<Self>) -> Box<[u8]> { self.finalize().into_bytes().to_vec().into_boxed_slice() } fn reset(&mut self) { Mac::reset(self); } fn output_size(&self) -> usize { <Self as Mac>::OutputSize::to_usize() } fn box_clone(&self) -> Box<dyn DynHmac> { Box::new(self.clone()) } fn box_verify(self: Box<Self>, tag: &[u8]) -> bool { match self.verify(tag) { Ok(_) => true, _ => false, } } } #[wasm_bindgen] pub struct FixedHmac { inner: Box<dyn DynHmac>, } #[wasm_bindgen] impl FixedHmac { #[wasm_bindgen] pub fn update(&mut self, data: &[u8]) { self.inner.update(data); } #[wasm_bindgen(js_name = finalizeReset)] pub fn finalize_reset(&mut self) -> Box<[u8]> { self.inner.finalize_reset() } #[wasm_bindgen] pub fn finalize(self) -> Box<[u8]> { self.inner.box_finalize() } #[wasm_bindgen] pub fn reset(&mut self) { self.inner.reset(); } #[wasm_bindgen(js_name = outputSize)] pub fn output_size(&self) -> usize { self.inner.output_size() } #[wasm_bindgen] pub fn clone(&mut self) -> FixedHmac { FixedHmac { inner: self.inner.box_clone(), } } #[wasm_bindgen] pub fn verify(self, tag: &[u8]) -> bool { self.inner.box_verify(tag) } } #[wasm_bindgen(js_name = createHmac)] pub fn create_hmac(algorithm: &str, key: &str) -> Result<FixedHmac, JsValue> { let algorithm = algorithm.replace("-", "").to_ascii_uppercase(); let algorithm = algorithm.as_str(); let hasher: Option<Box<dyn DynHmac>> = match algorithm { "BLAKE2B512" => get_some::<blake2::Blake2b>(key), "BLAKE2S256" => get_some::<blake2::Blake2s>(key), "FSB160" => get_some::<fsb::Fsb160>(key), "FSB224" => get_some::<fsb::Fsb224>(key), "FSB256" => get_some::<fsb::Fsb256>(key), "FSB384" => get_some::<fsb::Fsb384>(key), "FSB512" => get_some::<fsb::Fsb512>(key), "GOST94CRYPTOPRO" => get_some::<gost94::Gost94CryptoPro>(key), "GOST94TEST" => get_some::<gost94::Gost94Test>(key), "GOST94S2015" => get_some::<gost94::Gost94s2015>(key), "GROESTL224" => get_some::<groestl::Groestl224>(key), "GROESTL256" => get_some::<groestl::Groestl256>(key), "GROESTL384" => get_some::<groestl::Groestl384>(key), "GROESTL512" => get_some::<groestl::Groestl512>(key), "MD2" => get_some::<md2::Md2>(key), "MD4" => get_some::<md4::Md4>(key), "MD5" => get_some::<md5::Md5>(key), "RIPEMD160" => get_some::<ripemd160::Ripemd160>(key), "RIPEMD256" => get_some::<ripemd256::Ripemd256>(key), "RIPEMD320" => get_some::<ripemd320::Ripemd320>(key), "SHA1" => get_some::<sha1::Sha1>(key), "SHA224" => 
get_some::<sha2::Sha224>(key), "SHA256" => get_some::<sha2::Sha256>(key), "SHA384" => get_some::<sha2::Sha384>(key), "SHA512" => get_some::<sha2::Sha512>(key), "SHA512224" => get_some::<sha2::Sha512Trunc224>(key), "SHA512256" => get_some::<sha2::Sha512Trunc256>(key), "KECCAK224" => get_some::<sha3::Keccak224>(key), "KECCAK256" => get_some::<sha3::Keccak256>(key), "KECCAK256FULL" => get_some::<sha3::Keccak256Full>(key), "KECCAK384" => get_some::<sha3::Keccak384>(key), "KECCAK512" => get_some::<sha3::Keccak512>(key), "SHA3224" => get_some::<sha3::Sha3_224>(key), "SHA3256" => get_some::<sha3::Sha3_256>(key), "SHA3384" => get_some::<sha3::Sha3_384>(key), "SHA3512" => get_some::<sha3::Sha3_512>(key), "SHABAL192" => get_some::<shabal::Shabal192>(key), "SHABAL224" => get_some::<shabal::Shabal224>(key), "SHABAL256" => get_some::<shabal::Shabal256>(key), "SHABAL384" => get_some::<shabal::Shabal384>(key), "SHABAL512" => get_some::<shabal::Shabal512>(key), "SM3" => get_some::<sm3::Sm3>(key), "STREEBOG256" => get_some::<streebog::Streebog256>(key), "STREEBOG512" => get_some::<streebog::Streebog512>(key), "TIGER" => get_some::<tiger::Tiger>(key), "WHIRLPOOL" => get_some::<whirlpool::Whirlpool>(key), _ => None, }; if let Some(h) = hasher { Ok(FixedHmac { inner: h }) } else { let err_msg = format!("unsupported hash algorithm: {}", algorithm); Err(JsValue::from_str(&err_msg)) } } fn get_some< D: Digest + Update + BlockInput + FixedOutput + Reset + Default + Clone + 'static, >( key: &str, ) -> Option<Box<dyn DynHmac>> { Some(Box::new( Hmac::<D>::new_from_slice(key.as_bytes()).expect("HMAC can take key of any size"), )) }
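// A short sketch of driving the dispatcher from the Rust side (in a wasm
// build these calls normally come from JS). The function name is a
// placeholder; "SHA-256" is normalized to "SHA256" by the replace/uppercase
// step in `create_hmac` above.
#[allow(dead_code)]
fn hmac_roundtrip() -> Result<(), JsValue> {
    let mut mac = create_hmac("SHA-256", "my-secret-key")?;
    mac.update(b"some message");
    let tag = mac.finalize_reset(); // 32 bytes for SHA-256
    assert_eq!(tag.len(), mac.output_size());
    // finalize_reset cleared the state, so feeding the same input again
    // must produce a tag that verifies.
    mac.update(b"some message");
    assert!(mac.clone().verify(&tag));
    Ok(())
}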
34.072626
90
0.569929
892f68a4874148dde11caea35583790093e6b24f
71,636
//! Domain specific language for the Lazy api. #[cfg(feature = "dtype-categorical")] pub mod cat; #[cfg(feature = "dtype-categorical")] pub use cat::*; #[cfg(feature = "temporal")] mod dt; mod eval; mod expr; mod from; pub(crate) mod function_expr; #[cfg(feature = "compile")] mod functions; #[cfg(feature = "list")] mod list; mod options; #[cfg(feature = "strings")] pub mod string; #[cfg(feature = "dtype-struct")] mod struct_; use polars_time::series::SeriesOpsTime; use crate::prelude::*; use crate::utils::has_expr; #[cfg(feature = "is_in")] use crate::utils::has_root_literal_expr; use polars_arrow::prelude::QuantileInterpolOptions; use polars_core::export::arrow::{array::BooleanArray, bitmap::MutableBitmap}; use polars_core::prelude::*; use std::fmt::Debug; use std::{ ops::{Add, Div, Mul, Rem, Sub}, sync::Arc, }; // reexport the lazy method pub use crate::frame::IntoLazy; pub use crate::logical_plan::lit; pub use expr::*; pub use functions::*; pub use options::*; use crate::dsl::function_expr::FunctionExpr; use polars_arrow::array::default_arrays::FromData; #[cfg(feature = "diff")] use polars_core::series::ops::NullBehavior; use polars_core::series::IsSorted; use polars_core::utils::{get_supertype, NoNull}; use polars_ops::prelude::SeriesOps; pub fn binary_expr(l: Expr, op: Operator, r: Expr) -> Expr { Expr::BinaryExpr { left: Box::new(l), op, right: Box::new(r), } } /// Intermediate state of `when(..).then(..).otherwise(..)` expr. #[derive(Clone)] pub struct When { predicate: Expr, } /// Intermediate state of `when(..).then(..).otherwise(..)` expr. #[derive(Clone)] pub struct WhenThen { predicate: Expr, then: Expr, } /// Intermediate state of chain when then exprs. /// /// ```text /// when(..).then(..) /// when(..).then(..) /// when(..).then(..) /// .otherwise(..)` /// ``` #[derive(Clone)] #[must_use] pub struct WhenThenThen { predicates: Vec<Expr>, thens: Vec<Expr>, } impl When { pub fn then<E: Into<Expr>>(self, expr: E) -> WhenThen { WhenThen { predicate: self.predicate, then: expr.into(), } } } impl WhenThen { pub fn when<E: Into<Expr>>(self, predicate: E) -> WhenThenThen { WhenThenThen { predicates: vec![self.predicate, predicate.into()], thens: vec![self.then], } } pub fn otherwise<E: Into<Expr>>(self, expr: E) -> Expr { Expr::Ternary { predicate: Box::new(self.predicate), truthy: Box::new(self.then), falsy: Box::new(expr.into()), } } } impl WhenThenThen { pub fn then(mut self, expr: Expr) -> Self { self.thens.push(expr); self } pub fn when(mut self, predicate: Expr) -> Self { self.predicates.push(predicate); self } pub fn otherwise(self, expr: Expr) -> Expr { // we iterate the preds/ exprs last in first out // and nest them. 
// // // this expr: // when((col('x') == 'a')).then(1) // .when(col('x') == 'a').then(2) // .when(col('x') == 'b').then(3) // .otherwise(4) // // needs to become: // when((col('x') == 'a')).then(1) - // .otherwise( | // when(col('x') == 'a').then(2) - | // .otherwise( | | // pl.when(col('x') == 'b').then(3) | | // .otherwise(4) | inner | outer // ) | | // ) _| _| // // by iterating lifo we first create // `inner` and then assign that to `otherwise`, // which will be used in the next layer `outer` // let pred_iter = self.predicates.into_iter().rev(); let mut then_iter = self.thens.into_iter().rev(); let mut otherwise = expr; for e in pred_iter { otherwise = Expr::Ternary { predicate: Box::new(e), truthy: Box::new( then_iter .next() .expect("expr expected, did you call when().then().otherwise?"), ), falsy: Box::new(otherwise), } } if then_iter.next().is_some() { panic!( "this expr is not properly constructed. \ Every `when` should have an accompanied `then` call." ) } otherwise } } /// Start a when-then-otherwise expression pub fn when<E: Into<Expr>>(predicate: E) -> When { When { predicate: predicate.into(), } } pub fn ternary_expr(predicate: Expr, truthy: Expr, falsy: Expr) -> Expr { Expr::Ternary { predicate: Box::new(predicate), truthy: Box::new(truthy), falsy: Box::new(falsy), } } impl Expr { /// Modify the Options passed to the `Function` node. pub(crate) fn with_function_options<F>(self, func: F) -> Expr where F: Fn(FunctionOptions) -> FunctionOptions, { match self { Self::AnonymousFunction { input, function, output_type, mut options, } => { options = func(options); Self::AnonymousFunction { input, function, output_type, options, } } Self::Function { input, function, mut options, } => { options = func(options); Self::Function { input, function, options, } } _ => { panic!("implementation error") } } } /// Overwrite the function name used for formatting /// this is not intended to be used #[cfg(feature = "private")] #[doc(hidden)] pub fn with_fmt(self, name: &'static str) -> Expr { self.with_function_options(|mut options| { options.fmt_str = name; options }) } /// Compare `Expr` with other `Expr` on equality pub fn eq<E: Into<Expr>>(self, other: E) -> Expr { binary_expr(self, Operator::Eq, other.into()) } /// Compare `Expr` with other `Expr` on non-equality pub fn neq<E: Into<Expr>>(self, other: E) -> Expr { binary_expr(self, Operator::NotEq, other.into()) } /// Check if `Expr` < `Expr` pub fn lt<E: Into<Expr>>(self, other: E) -> Expr { binary_expr(self, Operator::Lt, other.into()) } /// Check if `Expr` > `Expr` pub fn gt<E: Into<Expr>>(self, other: E) -> Expr { binary_expr(self, Operator::Gt, other.into()) } /// Check if `Expr` >= `Expr` pub fn gt_eq<E: Into<Expr>>(self, other: E) -> Expr { binary_expr(self, Operator::GtEq, other.into()) } /// Check if `Expr` <= `Expr` pub fn lt_eq<E: Into<Expr>>(self, other: E) -> Expr { binary_expr(self, Operator::LtEq, other.into()) } /// Negate `Expr` #[allow(clippy::should_implement_trait)] pub fn not(self) -> Expr { Expr::Not(Box::new(self)) } /// Rename Column. pub fn alias(self, name: &str) -> Expr { Expr::Alias(Box::new(self), Arc::from(name)) } /// Run is_null operation on `Expr`. #[allow(clippy::wrong_self_convention)] pub fn is_null(self) -> Self { Expr::IsNull(Box::new(self)) } /// Run is_not_null operation on `Expr`. 
#[allow(clippy::wrong_self_convention)] pub fn is_not_null(self) -> Self { Expr::IsNotNull(Box::new(self)) } /// Drop null values pub fn drop_nulls(self) -> Self { self.apply(|s| Ok(s.drop_nulls()), GetOutput::same_type()) } /// Drop NaN values pub fn drop_nans(self) -> Self { self.apply( |s| match s.dtype() { DataType::Float32 => { let ca = s.f32()?; let mask = ca.is_not_nan().fill_null(FillNullStrategy::One)?; ca.filter(&mask).map(|ca| ca.into_series()) } DataType::Float64 => { let ca = s.f64()?; let mask = ca.is_not_nan().fill_null(FillNullStrategy::One)?; ca.filter(&mask).map(|ca| ca.into_series()) } _ => Ok(s), }, GetOutput::same_type(), ) } /// Reduce groups to minimal value. pub fn min(self) -> Self { AggExpr::Min(Box::new(self)).into() } /// Reduce groups to maximum value. pub fn max(self) -> Self { AggExpr::Max(Box::new(self)).into() } /// Reduce groups to the mean value. pub fn mean(self) -> Self { AggExpr::Mean(Box::new(self)).into() } /// Reduce groups to the median value. pub fn median(self) -> Self { AggExpr::Median(Box::new(self)).into() } /// Reduce groups to the sum of all the values. pub fn sum(self) -> Self { AggExpr::Sum(Box::new(self)).into() } /// Get the number of unique values in the groups. pub fn n_unique(self) -> Self { AggExpr::NUnique(Box::new(self)).into() } /// Get the first value in the group. pub fn first(self) -> Self { AggExpr::First(Box::new(self)).into() } /// Get the last value in the group. pub fn last(self) -> Self { AggExpr::Last(Box::new(self)).into() } /// Aggregate the group to a Series pub fn list(self) -> Self { AggExpr::List(Box::new(self)).into() } /// Compute the quantile per group. pub fn quantile(self, quantile: f64, interpol: QuantileInterpolOptions) -> Self { AggExpr::Quantile { expr: Box::new(self), quantile, interpol, } .into() } /// Get the group indexes of the group by operation. pub fn agg_groups(self) -> Self { AggExpr::AggGroups(Box::new(self)).into() } /// Alias for explode pub fn flatten(self) -> Self { self.explode() } /// Explode the utf8/ list column pub fn explode(self) -> Self { let has_filter = has_expr(&self, |e| matches!(e, Expr::Filter { .. })); // if we explode right after a window function we don't self join, but just flatten // the expression if let Expr::Window { function, partition_by, order_by, mut options, } = self { if has_filter { panic!("A Filter of a window function is not allowed in combination with explode/flatten.\ The resulting column may not fit the DataFrame/ or the groups ") } options.explode = true; Expr::Explode(Box::new(Expr::Window { function, partition_by, order_by, options, })) } else { Expr::Explode(Box::new(self)) } } /// Slice the Series. /// `offset` may be negative. pub fn slice<E: Into<Expr>, F: Into<Expr>>(self, offset: E, length: F) -> Self { Expr::Slice { input: Box::new(self), offset: Box::new(offset.into()), length: Box::new(length.into()), } } /// Append expressions. This is done by adding the chunks of `other` to this [`Series`]. 
pub fn append<E: Into<Expr>>(self, other: E, upcast: bool) -> Self { let output_type = if upcast { GetOutput::super_type() } else { GetOutput::same_type() }; apply_binary( self, other.into(), move |mut a, mut b| { if upcast { let dtype = get_supertype(a.dtype(), b.dtype())?; a = a.cast(&dtype)?; b = b.cast(&dtype)?; } a.append(&b)?; Ok(a) }, output_type, ) } /// Get the first `n` elements of the Expr result pub fn head(self, length: Option<usize>) -> Self { self.slice(lit(0), lit(length.unwrap_or(10) as u64)) } /// Get the last `n` elements of the Expr result pub fn tail(self, length: Option<usize>) -> Self { let len = length.unwrap_or(10); self.slice(lit(-(len as i64)), lit(len as u64)) } /// Get unique values of this expression. pub fn unique(self) -> Self { self.apply(|s: Series| s.unique(), GetOutput::same_type()) .with_fmt("unique") } /// Get unique values of this expression, while maintaining order. /// This requires more work than [`Expr::unique`]. pub fn unique_stable(self) -> Self { self.apply(|s: Series| s.unique_stable(), GetOutput::same_type()) .with_fmt("unique_stable") } /// Get the first index of unique values of this expression. pub fn arg_unique(self) -> Self { self.apply( |s: Series| s.arg_unique().map(|ca| ca.into_series()), GetOutput::from_type(IDX_DTYPE), ) .with_fmt("arg_unique") } /// Get the index value that has the minimum value pub fn arg_min(self) -> Self { let options = FunctionOptions { collect_groups: ApplyOptions::ApplyGroups, input_wildcard_expansion: false, auto_explode: true, fmt_str: "arg_min", }; self.function_with_options( move |s: Series| Ok(Series::new(s.name(), &[s.arg_min().map(|idx| idx as u32)])), GetOutput::from_type(IDX_DTYPE), options, ) } /// Get the index value that has the maximum value pub fn arg_max(self) -> Self { let options = FunctionOptions { collect_groups: ApplyOptions::ApplyGroups, input_wildcard_expansion: false, auto_explode: true, fmt_str: "arg_max", }; self.function_with_options( move |s: Series| Ok(Series::new(s.name(), &[s.arg_max().map(|idx| idx as u32)])), GetOutput::from_type(IDX_DTYPE), options, ) } /// Get the index values that would sort this expression. pub fn arg_sort(self, reverse: bool) -> Self { assert!( !has_expr(&self, |e| matches!(e, Expr::Wildcard)), "wildcard not supported in argsort expr" ); let options = FunctionOptions { collect_groups: ApplyOptions::ApplyGroups, input_wildcard_expansion: true, auto_explode: false, fmt_str: "arg_sort", }; self.function_with_options( move |s: Series| { Ok(s.argsort(SortOptions { descending: reverse, ..Default::default() }) .into_series()) }, GetOutput::from_type(IDX_DTYPE), options, ) } /// Cast expression to another data type. /// Throws an error if conversion had overflows pub fn strict_cast(self, data_type: DataType) -> Self { Expr::Cast { expr: Box::new(self), data_type, strict: true, } } /// Cast expression to another data type. pub fn cast(self, data_type: DataType) -> Self { Expr::Cast { expr: Box::new(self), data_type, strict: false, } } /// Take the values by idx. pub fn take<E: Into<Expr>>(self, idx: E) -> Self { Expr::Take { expr: Box::new(self), idx: Box::new(idx.into()), } } /// Sort in increasing order. See [the eager implementation](Series::sort). pub fn sort(self, reverse: bool) -> Self { Expr::Sort { expr: Box::new(self), options: SortOptions { descending: reverse, ..Default::default() }, } } /// Sort with given options. 
pub fn sort_with(self, options: SortOptions) -> Self { Expr::Sort { expr: Box::new(self), options, } } /// Reverse column pub fn reverse(self) -> Self { Expr::Reverse(Box::new(self)) } /// Apply a function/closure once the logical plan get executed. /// /// This function is very similar to [`Expr::apply`], but differs in how it handles aggregations. /// /// * `map` should be used for operations that are independent of groups, e.g. `multiply * 2`, or `raise to the power` /// * `apply` should be used for operations that work on a group of data. e.g. `sum`, `count`, etc. /// /// It is the responsibility of the caller that the schema is correct by giving /// the correct output_type. If None given the output type of the input expr is used. pub fn map<F>(self, function: F, output_type: GetOutput) -> Self where F: Fn(Series) -> Result<Series> + 'static + Send + Sync, { let f = move |s: &mut [Series]| function(std::mem::take(&mut s[0])); Expr::AnonymousFunction { input: vec![self], function: NoEq::new(Arc::new(f)), output_type, options: FunctionOptions { collect_groups: ApplyOptions::ApplyFlat, input_wildcard_expansion: false, auto_explode: false, fmt_str: "map", }, } } fn map_private(self, function_expr: FunctionExpr, fmt_str: &'static str) -> Self { Expr::Function { input: vec![self], function: function_expr, options: FunctionOptions { collect_groups: ApplyOptions::ApplyFlat, input_wildcard_expansion: false, auto_explode: false, fmt_str, }, } } /// Apply a function/closure once the logical plan get executed with many arguments /// /// See the [`Expr::map`] function for the differences between [`map`](Expr::map) and [`apply`](Expr::apply). pub fn map_many<F>(self, function: F, arguments: &[Expr], output_type: GetOutput) -> Self where F: Fn(&mut [Series]) -> Result<Series> + 'static + Send + Sync, { let mut input = vec![self]; input.extend_from_slice(arguments); Expr::AnonymousFunction { input, function: NoEq::new(Arc::new(function)), output_type, options: FunctionOptions { collect_groups: ApplyOptions::ApplyFlat, input_wildcard_expansion: false, auto_explode: false, fmt_str: "", }, } } /// Apply a function/closure once the logical plan get executed. /// /// This function is very similar to [apply](Expr::apply), but differs in how it handles aggregations. /// /// * `map` should be used for operations that are independent of groups, e.g. `multiply * 2`, or `raise to the power` /// * `apply` should be used for operations that work on a group of data. e.g. `sum`, `count`, etc. /// * `map_list` should be used when the function expects a list aggregated series. pub fn map_list<F>(self, function: F, output_type: GetOutput) -> Self where F: Fn(Series) -> Result<Series> + 'static + Send + Sync, { let f = move |s: &mut [Series]| function(std::mem::take(&mut s[0])); Expr::AnonymousFunction { input: vec![self], function: NoEq::new(Arc::new(f)), output_type, options: FunctionOptions { collect_groups: ApplyOptions::ApplyList, input_wildcard_expansion: false, auto_explode: false, fmt_str: "map_list", }, } } /// A function that cannot be expressed with `map` or `apply` and requires extra settings. pub fn function_with_options<F>( self, function: F, output_type: GetOutput, options: FunctionOptions, ) -> Self where F: Fn(Series) -> Result<Series> + 'static + Send + Sync, { let f = move |s: &mut [Series]| function(std::mem::take(&mut s[0])); Expr::AnonymousFunction { input: vec![self], function: NoEq::new(Arc::new(f)), output_type, options, } } /// Apply a function/closure over the groups. 
This should only be used in a groupby aggregation. /// /// It is the responsibility of the caller that the schema is correct by giving /// the correct output_type. If None given the output type of the input expr is used. /// /// This difference with [map](Self::map) is that `apply` will create a separate `Series` per group. /// /// * `map` should be used for operations that are independent of groups, e.g. `multiply * 2`, or `raise to the power` /// * `apply` should be used for operations that work on a group of data. e.g. `sum`, `count`, etc. pub fn apply<F>(self, function: F, output_type: GetOutput) -> Self where F: Fn(Series) -> Result<Series> + 'static + Send + Sync, { let f = move |s: &mut [Series]| function(std::mem::take(&mut s[0])); Expr::AnonymousFunction { input: vec![self], function: NoEq::new(Arc::new(f)), output_type, options: FunctionOptions { collect_groups: ApplyOptions::ApplyGroups, input_wildcard_expansion: false, auto_explode: false, fmt_str: "", }, } } fn apply_private(self, function_expr: FunctionExpr, fmt_str: &'static str) -> Self { Expr::Function { input: vec![self], function: function_expr, options: FunctionOptions { collect_groups: ApplyOptions::ApplyGroups, input_wildcard_expansion: false, auto_explode: false, fmt_str, }, } } /// Apply a function/closure over the groups with many arguments. This should only be used in a groupby aggregation. /// /// See the [`Expr::apply`] function for the differences between [`map`](Expr::map) and [`apply`](Expr::apply). pub fn apply_many<F>(self, function: F, arguments: &[Expr], output_type: GetOutput) -> Self where F: Fn(&mut [Series]) -> Result<Series> + 'static + Send + Sync, { let mut input = vec![self]; input.extend_from_slice(arguments); Expr::AnonymousFunction { input, function: NoEq::new(Arc::new(function)), output_type, options: FunctionOptions { collect_groups: ApplyOptions::ApplyGroups, input_wildcard_expansion: false, auto_explode: true, fmt_str: "", }, } } pub fn apply_many_private( self, function_expr: FunctionExpr, arguments: &[Expr], fmt_str: &'static str, ) -> Self { let mut input = Vec::with_capacity(arguments.len() + 1); input.push(self); input.extend_from_slice(arguments); Expr::Function { input, function: function_expr, options: FunctionOptions { collect_groups: ApplyOptions::ApplyGroups, input_wildcard_expansion: false, auto_explode: true, fmt_str, }, } } pub fn map_many_private( self, function_expr: FunctionExpr, arguments: &[Expr], fmt_str: &'static str, ) -> Self { let mut input = Vec::with_capacity(arguments.len() + 1); input.push(self); input.extend_from_slice(arguments); Expr::Function { input, function: function_expr, options: FunctionOptions { collect_groups: ApplyOptions::ApplyFlat, input_wildcard_expansion: false, auto_explode: true, fmt_str, }, } } /// Get mask of finite values if dtype is Float #[allow(clippy::wrong_self_convention)] pub fn is_finite(self) -> Self { self.map( |s: Series| s.is_finite().map(|ca| ca.into_series()), GetOutput::from_type(DataType::Boolean), ) .with_fmt("is_finite") } /// Get mask of infinite values if dtype is Float #[allow(clippy::wrong_self_convention)] pub fn is_infinite(self) -> Self { self.map( |s: Series| s.is_infinite().map(|ca| ca.into_series()), GetOutput::from_type(DataType::Boolean), ) .with_fmt("is_infinite") } /// Get mask of NaN values if dtype is Float #[allow(clippy::wrong_self_convention)] pub fn is_nan(self) -> Self { self.map( |s: Series| s.is_nan().map(|ca| ca.into_series()), GetOutput::from_type(DataType::Boolean), ) .with_fmt("is_nan") } /// Get 
inverse mask of NaN values if dtype is Float #[allow(clippy::wrong_self_convention)] pub fn is_not_nan(self) -> Self { self.map( |s: Series| s.is_not_nan().map(|ca| ca.into_series()), GetOutput::from_type(DataType::Boolean), ) .with_fmt("is_not_nan") } /// Shift the values in the array by some period. See [the eager implementation](polars_core::series::SeriesTrait::shift). pub fn shift(self, periods: i64) -> Self { Expr::Shift { input: Box::new(self), periods, } } pub fn shift_and_fill_impl(self, periods: i64, fill_value: Expr) -> Self { // Note: // The order of the then | otherwise is important if periods > 0 { when(self.clone().apply( move |s: Series| { let len = s.len(); let mut bits = MutableBitmap::with_capacity(s.len()); bits.extend_constant(periods as usize, false); bits.extend_constant(len.saturating_sub(periods as usize), true); let mask = BooleanArray::from_data_default(bits.into(), None); let ca: BooleanChunked = mask.into(); Ok(ca.into_series()) }, GetOutput::from_type(DataType::Boolean), )) .then(self.shift(periods)) .otherwise(fill_value) } else { when(self.clone().apply( move |s: Series| { let length = s.len() as i64; // periods is negative, so subtraction. let tipping_point = std::cmp::max(length + periods, 0); let mut bits = MutableBitmap::with_capacity(s.len()); bits.extend_constant(tipping_point as usize, true); bits.extend_constant(-periods as usize, false); let mask = BooleanArray::from_data_default(bits.into(), None); let ca: BooleanChunked = mask.into(); Ok(ca.into_series()) }, GetOutput::from_type(DataType::Boolean), )) .then(self.shift(periods)) .otherwise(fill_value) } } /// Shift the values in the array by some period and fill the resulting empty values. pub fn shift_and_fill<E: Into<Expr>>(self, periods: i64, fill_value: E) -> Self { self.shift_and_fill_impl(periods, fill_value.into()) } /// Get an array with the cumulative sum computed at every element #[cfg_attr(docsrs, doc(cfg(feature = "cum_agg")))] pub fn cumsum(self, reverse: bool) -> Self { self.apply( move |s: Series| Ok(s.cumsum(reverse)), GetOutput::same_type(), ) .with_fmt("cumsum") } /// Get an array with the cumulative product computed at every element #[cfg_attr(docsrs, doc(cfg(feature = "cum_agg")))] pub fn cumprod(self, reverse: bool) -> Self { self.apply( move |s: Series| Ok(s.cumprod(reverse)), GetOutput::map_dtype(|dt| { use DataType::*; match dt { Float32 => Float32, Float64 => Float64, _ => Int64, } }), ) .with_fmt("cumprod") } /// Get an array with the cumulative min computed at every element #[cfg_attr(docsrs, doc(cfg(feature = "cum_agg")))] pub fn cummin(self, reverse: bool) -> Self { self.apply( move |s: Series| Ok(s.cummin(reverse)), GetOutput::same_type(), ) .with_fmt("cummin") } /// Get an array with the cumulative max computed at every element #[cfg_attr(docsrs, doc(cfg(feature = "cum_agg")))] pub fn cummax(self, reverse: bool) -> Self { self.apply( move |s: Series| Ok(s.cummax(reverse)), GetOutput::map_dtype(|dt| { use DataType::*; match dt { Float32 => Float32, Float64 => Float64, _ => Int64, } }), ) .with_fmt("cummax") } /// Get the product aggregation of an expression #[cfg_attr(docsrs, doc(cfg(feature = "product")))] pub fn product(self) -> Self { let options = FunctionOptions { collect_groups: ApplyOptions::ApplyGroups, input_wildcard_expansion: false, auto_explode: true, fmt_str: "product", }; self.function_with_options( move |s: Series| Ok(s.product()), GetOutput::map_dtype(|dt| { use DataType::*; match dt { Float32 => Float32, Float64 => Float64, _ => Int64, } }), options, ) 
} /// Fill missing value with next non-null. pub fn backward_fill(self, limit: FillNullLimit) -> Self { self.apply( move |s: Series| s.fill_null(FillNullStrategy::Backward(limit)), GetOutput::same_type(), ) .with_fmt("backward_fill") } /// Fill missing value with previous non-null. pub fn forward_fill(self, limit: FillNullLimit) -> Self { self.apply( move |s: Series| s.fill_null(FillNullStrategy::Forward(limit)), GetOutput::same_type(), ) .with_fmt("forward_fill") } /// Round underlying floating point array to given decimal numbers. #[cfg(feature = "round_series")] #[cfg_attr(docsrs, doc(cfg(feature = "round_series")))] pub fn round(self, decimals: u32) -> Self { self.map(move |s: Series| s.round(decimals), GetOutput::same_type()) .with_fmt("round") } /// Floor underlying floating point array to the lowest integers smaller or equal to the float value. #[cfg(feature = "round_series")] #[cfg_attr(docsrs, doc(cfg(feature = "round_series")))] pub fn floor(self) -> Self { self.map(move |s: Series| s.floor(), GetOutput::same_type()) .with_fmt("floor") } /// Ceil underlying floating point array to the highest integers smaller or equal to the float value. #[cfg(feature = "round_series")] #[cfg_attr(docsrs, doc(cfg(feature = "round_series")))] pub fn ceil(self) -> Self { self.map(move |s: Series| s.ceil(), GetOutput::same_type()) .with_fmt("ceil") } /// Clip underlying values to a set boundary. #[cfg(feature = "round_series")] #[cfg_attr(docsrs, doc(cfg(feature = "round_series")))] pub fn clip(self, min: f64, max: f64) -> Self { self.map(move |s: Series| s.clip(min, max), GetOutput::same_type()) .with_fmt("clip") } /// Convert all values to their absolute/positive value. #[cfg(feature = "abs")] #[cfg_attr(docsrs, doc(cfg(feature = "abs")))] pub fn abs(self) -> Self { self.map(move |s: Series| s.abs(), GetOutput::same_type()) .with_fmt("abs") } /// Apply window function over a subgroup. /// This is similar to a groupby + aggregation + self join. /// Or similar to [window functions in Postgres](https://www.postgresql.org/docs/9.1/tutorial-window.html). /// /// # Example /// /// ``` rust /// #[macro_use] extern crate polars_core; /// use polars_core::prelude::*; /// use polars_lazy::prelude::*; /// /// fn example() -> Result<()> { /// let df = df! { /// "groups" => &[1, 1, 2, 2, 1, 2, 3, 3, 1], /// "values" => &[1, 2, 3, 4, 5, 6, 7, 8, 8] /// }?; /// /// let out = df /// .lazy() /// .select(&[ /// col("groups"), /// sum("values").over([col("groups")]), /// ]) /// .collect()?; /// dbg!(&out); /// Ok(()) /// } /// /// ``` /// /// Outputs: /// /// ``` text /// ╭────────┬────────╮ /// │ groups ┆ values │ /// │ --- ┆ --- │ /// │ i32 ┆ i32 │ /// ╞════════╪════════╡ /// │ 1 ┆ 16 │ /// ├╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤ /// │ 1 ┆ 16 │ /// ├╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤ /// │ 2 ┆ 13 │ /// ├╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤ /// │ 2 ┆ 13 │ /// ├╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤ /// │ ... ┆ ... 
│
/// ├╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤
/// │ 1      ┆ 16     │
/// ├╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤
/// │ 2      ┆ 13     │
/// ├╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤
/// │ 3      ┆ 15     │
/// ├╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤
/// │ 3      ┆ 15     │
/// ├╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤
/// │ 1      ┆ 16     │
/// ╰────────┴────────╯
/// ```
pub fn over<E: AsRef<[IE]>, IE: Into<Expr> + Clone>(self, partition_by: E) -> Self {
    let partition_by = partition_by
        .as_ref()
        .iter()
        .map(|e| e.clone().into())
        .collect();
    Expr::Window {
        function: Box::new(self),
        partition_by,
        order_by: None,
        options: WindowOptions { explode: false },
    }
}

fn fill_null_impl(self, fill_value: Expr) -> Self {
    self.map_many(
        |s| {
            let a = &s[0];
            let b = &s[1];
            // Fast path: nothing to fill.
            if a.null_count() == 0 {
                Ok(a.clone())
            } else {
                let st = get_supertype(a.dtype(), b.dtype())?;
                let a = a.cast(&st)?;
                let b = b.cast(&st)?;
                let mask = a.is_not_null();
                a.zip_with_same_type(&mask, &b)
            }
        },
        &[fill_value],
        GetOutput::super_type(),
    )
    .with_fmt("fill_null")
}

/// Replace the null values by a value.
pub fn fill_null<E: Into<Expr>>(self, fill_value: E) -> Self {
    self.fill_null_impl(fill_value.into())
}

/// Replace the floating point `NaN` values by a value.
pub fn fill_nan<E: Into<Expr>>(self, fill_value: E) -> Self {
    when(self.clone().is_nan())
        .then(fill_value.into())
        .otherwise(self)
}

/// Count the values of the Series
/// or
/// Get counts of the group by operation.
pub fn count(self) -> Self {
    AggExpr::Count(Box::new(self)).into()
}

/// Standard deviation of the values of the Series
pub fn std(self) -> Self {
    AggExpr::Std(Box::new(self)).into()
}

/// Variance of the values of the Series
pub fn var(self) -> Self {
    AggExpr::Var(Box::new(self)).into()
}

/// Get a mask of duplicated values
#[allow(clippy::wrong_self_convention)]
pub fn is_duplicated(self) -> Self {
    Expr::Duplicated(Box::new(self))
}

/// Get a mask of unique values
#[allow(clippy::wrong_self_convention)]
pub fn is_unique(self) -> Self {
    Expr::IsUnique(Box::new(self))
}

/// and operation
pub fn and<E: Into<Expr>>(self, expr: E) -> Self {
    binary_expr(self, Operator::And, expr.into())
}

/// xor operation
pub fn xor<E: Into<Expr>>(self, expr: E) -> Self {
    binary_expr(self, Operator::Xor, expr.into())
}

/// or operation
pub fn or<E: Into<Expr>>(self, expr: E) -> Self {
    binary_expr(self, Operator::Or, expr.into())
}

/// Raise expression to the power `exponent`
pub fn pow<E: Into<Expr>>(self, exponent: E) -> Self {
    Expr::Function {
        input: vec![self, exponent.into()],
        function: FunctionExpr::Pow,
        options: FunctionOptions {
            collect_groups: ApplyOptions::ApplyFlat,
            input_wildcard_expansion: false,
            auto_explode: false,
            fmt_str: "pow",
        },
    }
}

/// Filter a single column
/// Should be used in aggregation context. If you want to filter on a DataFrame level, use
/// [LazyFrame::filter](LazyFrame::filter)
pub fn filter<E: Into<Expr>>(self, predicate: E) -> Self {
    if has_expr(&self, |e| matches!(e, Expr::Wildcard)) {
        panic!("filter '*' not allowed, use LazyFrame::filter")
    };
    Expr::Filter {
        input: Box::new(self),
        by: Box::new(predicate.into()),
    }
}

/// Check if the values of the left expression are in the lists of the right expr.
#[allow(clippy::wrong_self_convention)] #[cfg(feature = "is_in")] #[cfg_attr(docsrs, doc(cfg(feature = "is_in")))] pub fn is_in<E: Into<Expr>>(self, other: E) -> Self { let other = other.into(); let has_literal = has_root_literal_expr(&other); if has_literal { if let Expr::Literal(LiteralValue::Series(s)) = &other { // nothing is in an empty list return all False if s.is_empty() { return Expr::Literal(LiteralValue::Boolean(false)); } } } let arguments = &[other]; // we don't have to apply on groups, so this is faster if has_literal { self.map_many_private(FunctionExpr::IsIn, arguments, "is_in_map") } else { self.apply_many_private(FunctionExpr::IsIn, arguments, "is_in_apply") } } /// Sort this column by the ordering of another column. /// Can also be used in a groupby context to sort the groups. pub fn sort_by<E: AsRef<[IE]>, IE: Into<Expr> + Clone, R: AsRef<[bool]>>( self, by: E, reverse: R, ) -> Expr { let by = by.as_ref().iter().map(|e| e.clone().into()).collect(); let reverse = reverse.as_ref().to_vec(); Expr::SortBy { expr: Box::new(self), by, reverse, } } #[cfg(feature = "repeat_by")] fn repeat_by_impl(self, by: Expr) -> Expr { let function = |s: &mut [Series]| { let by = &s[1]; let s = &s[0]; let by = by.cast(&IDX_DTYPE)?; Ok(s.repeat_by(by.idx()?).into_series()) }; self.apply_many( function, &[by], GetOutput::map_dtype(|dt| DataType::List(dt.clone().into())), ) .with_fmt("repeat_by") } #[cfg(feature = "repeat_by")] #[cfg_attr(docsrs, doc(cfg(feature = "repeat_by")))] /// Repeat the column `n` times, where `n` is determined by the values in `by`. /// This yields an `Expr` of dtype `List` pub fn repeat_by<E: Into<Expr>>(self, by: E) -> Expr { self.repeat_by_impl(by.into()) } #[cfg(feature = "is_first")] #[cfg_attr(docsrs, doc(cfg(feature = "is_first")))] #[allow(clippy::wrong_self_convention)] /// Get a mask of the first unique value. pub fn is_first(self) -> Expr { self.apply( |s| s.is_first().map(|ca| ca.into_series()), GetOutput::from_type(DataType::Boolean), ) .with_fmt("is_first") } #[cfg(feature = "dot_product")] fn dot_impl(self, other: Expr) -> Expr { let function = |s: &mut [Series]| Ok((&s[0] * &s[1]).sum_as_series()); self.apply_many(function, &[other], GetOutput::same_type()) .with_fmt("dot") } #[cfg(feature = "dot_product")] #[cfg_attr(docsrs, doc(cfg(feature = "dot_product")))] pub fn dot<E: Into<Expr>>(self, other: E) -> Expr { self.dot_impl(other.into()) } #[cfg(feature = "mode")] #[cfg_attr(docsrs, doc(cfg(feature = "mode")))] /// Compute the mode(s) of this column. This is the most occurring value. pub fn mode(self) -> Expr { self.apply( |s| s.mode().map(|ca| ca.into_series()), GetOutput::same_type(), ) .with_fmt("mode") } /// Keep the original root name /// /// ``` /// use polars_core::prelude::*; /// use polars_lazy::prelude::*; /// /// fn example(df: LazyFrame) -> LazyFrame { /// df.select([ /// // even thought the alias yields a different column name, /// // `keep_name` will make sure that the original column name is used /// col("*").alias("foo").keep_name() /// ]) /// } /// ``` pub fn keep_name(self) -> Expr { Expr::KeepName(Box::new(self)) } /// Define an alias by mapping a function over the original root column name. pub fn map_alias<F>(self, function: F) -> Expr where F: Fn(&str) -> String + 'static + Send + Sync, { let function = NoEq::new(Arc::new(function) as Arc<dyn RenameAliasFn>); Expr::RenameAlias { expr: Box::new(self), function, } } /// Add a suffix to the root column name. 
pub fn suffix(self, suffix: &str) -> Expr { let suffix = suffix.to_string(); self.map_alias(move |name| format!("{}{}", name, suffix)) } /// Add a prefix to the root column name. pub fn prefix(self, prefix: &str) -> Expr { let prefix = prefix.to_string(); self.map_alias(move |name| format!("{}{}", prefix, name)) } /// Exclude a column from a wildcard/regex selection. /// /// You may also use regexes in the exclude as long as they start with `^` and end with `$`/ /// /// # Example /// /// ```rust /// use polars_core::prelude::*; /// use polars_lazy::prelude::*; /// /// // Select all columns except foo. /// fn example(df: DataFrame) -> LazyFrame { /// df.lazy() /// .select(&[ /// col("*").exclude(&["foo"]) /// ]) /// } /// ``` pub fn exclude(self, columns: impl IntoVec<String>) -> Expr { let v = columns .into_vec() .into_iter() .map(|s| Excluded::Name(Arc::from(s))) .collect(); Expr::Exclude(Box::new(self), v) } pub fn exclude_dtype<D: AsRef<[DataType]>>(self, dtypes: D) -> Expr { let v = dtypes .as_ref() .iter() .map(|dt| Excluded::Dtype(dt.clone())) .collect(); Expr::Exclude(Box::new(self), v) } // Interpolate None values #[cfg(feature = "interpolate")] #[cfg_attr(docsrs, doc(cfg(feature = "interpolate")))] pub fn interpolate(self) -> Expr { self.apply(|s| Ok(s.interpolate()), GetOutput::same_type()) .with_fmt("interpolate") } fn finish_rolling( self, options: RollingOptions, expr_name: &'static str, expr_name_by: &'static str, rolling_fn: Arc<dyn (Fn(&Series, RollingOptionsImpl) -> Result<Series>) + Send + Sync>, ) -> Expr { if let Some(ref by) = options.by { self.apply_many( move |s| { let mut by = s[1].clone(); by = by.rechunk(); let s = &s[0]; if options.weights.is_some() { return Err(PolarsError::ComputeError( "weights not supported in 'rolling by' expression".into(), )); } if matches!(by.dtype(), DataType::Datetime(_, _)) { by = by.cast(&DataType::Datetime(TimeUnit::Microseconds, None))?; } let by = by.datetime().unwrap(); let by_values = by.cont_slice().map_err(|_| { PolarsError::ComputeError( "'by' column should not have null values in 'rolling by'".into(), ) })?; let tu = by.time_unit(); let options = RollingOptionsImpl { window_size: options.window_size, min_periods: options.min_periods, weights: None, center: options.center, by: Some(by_values), tu: Some(tu), closed_window: options.closed_window, }; rolling_fn(s, options) }, &[col(by)], GetOutput::same_type(), ) .with_fmt(expr_name_by) } else { if !options.window_size.parsed_int { panic!("if dynamic windows are used in a rolling aggregation, the 'by' argument must be set") } self.apply( move |s| rolling_fn(&s, options.clone().into()), GetOutput::same_type(), ) .with_fmt(expr_name) } } /// Apply a rolling min See: /// [ChunkedArray::rolling_min] #[cfg_attr(docsrs, doc(cfg(feature = "rolling_window")))] #[cfg(feature = "rolling_window")] pub fn rolling_min(self, options: RollingOptions) -> Expr { self.finish_rolling( options, "rolling_min", "rolling_min_by", Arc::new(|s, options| s.rolling_min(options)), ) } /// Apply a rolling max See: /// [ChunkedArray::rolling_max] #[cfg_attr(docsrs, doc(cfg(feature = "rolling_window")))] #[cfg(feature = "rolling_window")] pub fn rolling_max(self, options: RollingOptions) -> Expr { self.finish_rolling( options, "rolling_max", "rolling_max_by", Arc::new(|s, options| s.rolling_max(options)), ) } /// Apply a rolling mean See: /// [ChunkedArray::rolling_mean] #[cfg_attr(docsrs, doc(cfg(feature = "rolling_window")))] #[cfg(feature = "rolling_window")] pub fn rolling_mean(self, options: 
RollingOptions) -> Expr { self.finish_rolling( options, "rolling_mean", "rolling_mean_by", Arc::new(|s, options| s.rolling_mean(options)), ) } /// Apply a rolling sum See: /// [ChunkedArray::rolling_sum] #[cfg_attr(docsrs, doc(cfg(feature = "rolling_window")))] #[cfg(feature = "rolling_window")] pub fn rolling_sum(self, options: RollingOptions) -> Expr { self.finish_rolling( options, "rolling_sum", "rolling_sum_by", Arc::new(|s, options| s.rolling_sum(options)), ) } /// Apply a rolling median See: /// [`ChunkedArray::rolling_median`] #[cfg_attr(docsrs, doc(cfg(feature = "rolling_window")))] #[cfg(feature = "rolling_window")] pub fn rolling_median(self, options: RollingOptions) -> Expr { self.finish_rolling( options, "rolling_median", "rolling_median_by", Arc::new(|s, options| s.rolling_median(options)), ) } /// Apply a rolling quantile See: /// [`ChunkedArray::rolling_quantile`] #[cfg_attr(docsrs, doc(cfg(feature = "rolling_window")))] #[cfg(feature = "rolling_window")] pub fn rolling_quantile( self, quantile: f64, interpolation: QuantileInterpolOptions, options: RollingOptions, ) -> Expr { self.finish_rolling( options, "rolling_quantile", "rolling_quantile_by", Arc::new(move |s, options| s.rolling_quantile(quantile, interpolation, options)), ) } /// Apply a rolling variance #[cfg_attr(docsrs, doc(cfg(feature = "rolling_window")))] #[cfg(feature = "rolling_window")] pub fn rolling_var(self, options: RollingOptions) -> Expr { self.finish_rolling( options, "rolling_var", "rolling_var_by", Arc::new(|s, options| s.rolling_var(options)), ) } /// Apply a rolling std-dev #[cfg_attr(docsrs, doc(cfg(feature = "rolling_window")))] #[cfg(feature = "rolling_window")] pub fn rolling_std(self, options: RollingOptions) -> Expr { self.finish_rolling( options, "rolling_std", "rolling_std_by", Arc::new(|s, options| s.rolling_std(options)), ) } #[cfg_attr(docsrs, doc(cfg(feature = "rolling_window")))] #[cfg(feature = "rolling_window")] /// Apply a custom function over a rolling/ moving window of the array. /// This has quite some dynamic dispatch, so prefer rolling_min, max, mean, sum over this. pub fn rolling_apply( self, f: Arc<dyn Fn(&Series) -> Series + Send + Sync>, output_type: GetOutput, options: RollingOptionsFixedWindow, ) -> Expr { self.apply( move |s| s.rolling_apply(f.as_ref(), options.clone()), output_type, ) .with_fmt("rolling_apply") } #[cfg_attr(docsrs, doc(cfg(feature = "rolling_window")))] #[cfg(feature = "rolling_window")] /// Apply a custom function over a rolling/ moving window of the array. /// Prefer this over rolling_apply in case of floating point numbers as this is faster. /// This has quite some dynamic dispatch, so prefer rolling_min, max, mean, sum over this. pub fn rolling_apply_float<F>(self, window_size: usize, f: F) -> Expr where F: 'static + Fn(&Float64Chunked) -> Option<f64> + Send + Sync + Copy, { self.apply( move |s| { let out = match s.dtype() { DataType::Float64 => s .f64() .unwrap() .rolling_apply_float(window_size, f) .map(|ca| ca.into_series()), _ => s .cast(&DataType::Float64)? 
.f64() .unwrap() .rolling_apply_float(window_size, f) .map(|ca| ca.into_series()), }?; if let DataType::Float32 = s.dtype() { out.cast(&DataType::Float32) } else { Ok(out) } }, GetOutput::map_field(|field| match field.data_type() { DataType::Float64 => field.clone(), DataType::Float32 => Field::new(field.name(), DataType::Float32), _ => Field::new(field.name(), DataType::Float64), }), ) .with_fmt("rolling_apply_float") } #[cfg(feature = "rank")] #[cfg_attr(docsrs, doc(cfg(feature = "rank")))] pub fn rank(self, options: RankOptions) -> Expr { self.apply( move |s| Ok(s.rank(options)), GetOutput::map_field(move |fld| match options.method { RankMethod::Average => Field::new(fld.name(), DataType::Float32), _ => Field::new(fld.name(), IDX_DTYPE), }), ) .with_fmt("rank") } #[cfg(feature = "diff")] #[cfg_attr(docsrs, doc(cfg(feature = "diff")))] pub fn diff(self, n: usize, null_behavior: NullBehavior) -> Expr { self.apply( move |s| Ok(s.diff(n, null_behavior)), GetOutput::same_type(), ) .with_fmt("diff") } #[cfg(feature = "pct_change")] #[cfg_attr(docsrs, doc(cfg(feature = "pct_change")))] pub fn pct_change(self, n: usize) -> Expr { use DataType::*; self.apply( move |s| s.pct_change(n), GetOutput::map_dtype(|dt| match dt { Float64 | Float32 => dt.clone(), _ => Float64, }), ) .with_fmt("pct_change") } #[cfg(feature = "moment")] #[cfg_attr(docsrs, doc(cfg(feature = "moment")))] pub fn skew(self, bias: bool) -> Expr { self.apply( move |s| s.skew(bias).map(|opt_v| Series::new(s.name(), &[opt_v])), GetOutput::from_type(DataType::Float64), ) .with_function_options(|mut options| { options.fmt_str = "skew"; options.auto_explode = true; options }) } #[cfg(feature = "moment")] #[cfg_attr(docsrs, doc(cfg(feature = "moment")))] pub fn kurtosis(self, fisher: bool, bias: bool) -> Expr { self.apply( move |s| { s.kurtosis(fisher, bias) .map(|opt_v| Series::new(s.name(), &[opt_v])) }, GetOutput::from_type(DataType::Float64), ) .with_function_options(|mut options| { options.fmt_str = "kurtosis"; options.auto_explode = true; options }) } /// Get maximal value that could be hold by this dtype. pub fn upper_bound(self) -> Expr { self.map( |s| { let name = s.name(); use DataType::*; let s = match s.dtype().to_physical() { #[cfg(feature = "dtype-i8")] Int8 => Series::new(name, &[i8::MAX]), #[cfg(feature = "dtype-i16")] Int16 => Series::new(name, &[i16::MAX]), Int32 => Series::new(name, &[i32::MAX]), Int64 => Series::new(name, &[i64::MAX]), #[cfg(feature = "dtype-u8")] UInt8 => Series::new(name, &[u8::MAX]), #[cfg(feature = "dtype-u16")] UInt16 => Series::new(name, &[u16::MAX]), UInt32 => Series::new(name, &[u32::MAX]), UInt64 => Series::new(name, &[u64::MAX]), Float32 => Series::new(name, &[f32::INFINITY]), Float64 => Series::new(name, &[f64::INFINITY]), dt => { return Err(PolarsError::ComputeError( format!("cannot determine upper bound of dtype {}", dt).into(), )) } }; Ok(s) }, GetOutput::same_type(), ) .with_fmt("upper_bound") } /// Get minimal value that could be hold by this dtype. 
pub fn lower_bound(self) -> Expr {
    self.map(
        |s| {
            let name = s.name();
            use DataType::*;
            let s = match s.dtype().to_physical() {
                #[cfg(feature = "dtype-i8")]
                Int8 => Series::new(name, &[i8::MIN]),
                #[cfg(feature = "dtype-i16")]
                Int16 => Series::new(name, &[i16::MIN]),
                Int32 => Series::new(name, &[i32::MIN]),
                Int64 => Series::new(name, &[i64::MIN]),
                #[cfg(feature = "dtype-u8")]
                UInt8 => Series::new(name, &[u8::MIN]),
                #[cfg(feature = "dtype-u16")]
                UInt16 => Series::new(name, &[u16::MIN]),
                UInt32 => Series::new(name, &[u32::MIN]),
                UInt64 => Series::new(name, &[u64::MIN]),
                Float32 => Series::new(name, &[f32::NEG_INFINITY]),
                Float64 => Series::new(name, &[f64::NEG_INFINITY]),
                dt => {
                    return Err(PolarsError::ComputeError(
                        format!("cannot determine lower bound of dtype {}", dt).into(),
                    ))
                }
            };
            Ok(s)
        },
        GetOutput::same_type(),
    )
    .with_fmt("lower_bound")
}

pub fn reshape(self, dims: &[i64]) -> Self {
    let dims = dims.to_vec();
    let output_type = if dims.len() == 1 {
        GetOutput::map_field(|fld| {
            Field::new(
                fld.name(),
                fld.data_type()
                    .inner_dtype()
                    .unwrap_or_else(|| fld.data_type())
                    .clone(),
            )
        })
    } else {
        GetOutput::map_field(|fld| {
            let dtype = fld
                .data_type()
                .inner_dtype()
                .unwrap_or_else(|| fld.data_type())
                .clone();
            Field::new(fld.name(), DataType::List(Box::new(dtype)))
        })
    };
    self.apply(move |s| s.reshape(&dims), output_type)
        .with_fmt("reshape")
}

/// Cumulatively count values from 0 to len.
pub fn cumcount(self, reverse: bool) -> Self {
    self.apply(
        move |s| {
            if reverse {
                let ca: NoNull<UInt32Chunked> = (0u32..s.len() as u32).rev().collect();
                let mut ca = ca.into_inner();
                ca.rename(s.name());
                Ok(ca.into_series())
            } else {
                let ca: NoNull<UInt32Chunked> = (0u32..s.len() as u32).collect();
                let mut ca = ca.into_inner();
                ca.rename(s.name());
                Ok(ca.into_series())
            }
        },
        GetOutput::from_type(IDX_DTYPE),
    )
    .with_fmt("cumcount")
}

#[cfg(feature = "random")]
pub fn shuffle(self, seed: u64) -> Self {
    self.apply(move |s| Ok(s.shuffle(seed)), GetOutput::same_type())
        .with_fmt("shuffle")
}

#[cfg(feature = "random")]
pub fn sample_frac(
    self,
    frac: f64,
    with_replacement: bool,
    shuffle: bool,
    seed: Option<u64>,
) -> Self {
    self.apply(
        move |s| s.sample_frac(frac, with_replacement, shuffle, seed),
        GetOutput::same_type(),
    )
    .with_fmt("sample_frac")
}

#[cfg(feature = "ewma")]
pub fn ewm_mean(self, options: EWMOptions) -> Self {
    use DataType::*;
    self.apply(
        move |s| s.ewm_mean(options),
        GetOutput::map_dtype(|dt| match dt {
            Float64 | Float32 => dt.clone(),
            _ => Float64,
        }),
    )
    .with_fmt("ewm_mean")
}

#[cfg(feature = "ewma")]
pub fn ewm_std(self, options: EWMOptions) -> Self {
    use DataType::*;
    self.apply(
        move |s| s.ewm_std(options),
        GetOutput::map_dtype(|dt| match dt {
            Float64 | Float32 => dt.clone(),
            _ => Float64,
        }),
    )
    .with_fmt("ewm_std")
}

#[cfg(feature = "ewma")]
pub fn ewm_var(self, options: EWMOptions) -> Self {
    use DataType::*;
    self.apply(
        move |s| s.ewm_var(options),
        GetOutput::map_dtype(|dt| match dt {
            Float64 | Float32 => dt.clone(),
            _ => Float64,
        }),
    )
    .with_fmt("ewm_var")
}

/// Check if any boolean value is `true`
pub fn any(self) -> Self {
    self.apply(
        move |s| {
            let boolean = s.bool()?;
            if boolean.any() {
                Ok(Series::new(s.name(), [true]))
            } else {
                Ok(Series::new(s.name(), [false]))
            }
        },
        GetOutput::from_type(DataType::Boolean),
    )
    .with_function_options(|mut opt| {
        opt.fmt_str = "any";
        opt.auto_explode = true;
        opt
    })
}

/// Check if all boolean values are `true`
pub fn all(self) -> Self {
    self.apply(
        move |s| {
            let boolean = s.bool()?;
            if boolean.all() {
                Ok(Series::new(s.name(), [true]))
            } else {
                Ok(Series::new(s.name(), [false]))
            }
        },
        GetOutput::from_type(DataType::Boolean),
    )
    .with_function_options(|mut opt| {
        opt.fmt_str = "all";
        opt.auto_explode = true;
        opt
    })
}

/// This is useful if an `apply` function needs a floating point type.
/// Because this cast is done on a `map` level, it will be faster.
pub fn to_float(self) -> Self {
    self.map(
        |s| match s.dtype() {
            DataType::Float32 | DataType::Float64 => Ok(s),
            _ => s.cast(&DataType::Float64),
        },
        GetOutput::map_dtype(|dt| {
            if matches!(dt, DataType::Float32) {
                DataType::Float32
            } else {
                DataType::Float64
            }
        }),
    )
}

#[cfg(feature = "dtype-struct")]
#[cfg_attr(docsrs, doc(cfg(feature = "dtype-struct")))]
/// Count all unique values and create a struct mapping value to count
/// Note that it is better to turn multithreaded off in the aggregation context
pub fn value_counts(self, multithreaded: bool) -> Self {
    self.apply(
        move |s| {
            s.value_counts(multithreaded)
                .map(|df| df.into_struct(s.name()).into_series())
        },
        GetOutput::map_field(|fld| {
            Field::new(
                fld.name(),
                DataType::Struct(vec![fld.clone(), Field::new("counts", IDX_DTYPE)]),
            )
        }),
    )
    .with_fmt("value_counts")
}

#[cfg(feature = "unique_counts")]
#[cfg_attr(docsrs, doc(cfg(feature = "unique_counts")))]
/// Returns a count of the unique values in the order of appearance.
/// This method differs from [`Expr::value_counts`] in that it does not return the
/// values, only the counts, and it might be faster.
pub fn unique_counts(self) -> Self {
    self.apply(
        |s| Ok(s.unique_counts().into_series()),
        GetOutput::from_type(IDX_DTYPE),
    )
    .with_fmt("unique_counts")
}

#[cfg(feature = "log")]
#[cfg_attr(docsrs, doc(cfg(feature = "log")))]
/// Compute the logarithm to a given base
pub fn log(self, base: f64) -> Self {
    self.map(
        move |s| Ok(s.log(base)),
        GetOutput::map_dtype(|dt| {
            if matches!(dt, DataType::Float32) {
                DataType::Float32
            } else {
                DataType::Float64
            }
        }),
    )
    .with_fmt("log")
}

#[cfg(feature = "log")]
#[cfg_attr(docsrs, doc(cfg(feature = "log")))]
/// Calculate the exponential of all elements in the input array
pub fn exp(self) -> Self {
    self.map(
        move |s| Ok(s.exp()),
        GetOutput::map_dtype(|dt| {
            if matches!(dt, DataType::Float32) {
                DataType::Float32
            } else {
                DataType::Float64
            }
        }),
    )
    .with_fmt("exp")
}

#[cfg(feature = "log")]
#[cfg_attr(docsrs, doc(cfg(feature = "log")))]
/// Compute the entropy as `-sum(pk * log(pk))`,
/// where `pk` are discrete probabilities.
pub fn entropy(self, base: f64, normalize: bool) -> Self {
    self.apply(
        move |s| Ok(Series::new(s.name(), [s.entropy(base, normalize)])),
        GetOutput::map_dtype(|dt| {
            if matches!(dt, DataType::Float32) {
                DataType::Float32
            } else {
                DataType::Float64
            }
        }),
    )
    .with_function_options(|mut options| {
        options.fmt_str = "entropy";
        options.auto_explode = true;
        options
    })
}

/// Get the null count of the column/group
pub fn null_count(self) -> Expr {
    self.apply_private(FunctionExpr::NullCount, "null_count")
        .with_function_options(|mut options| {
            options.auto_explode = true;
            options
        })
}

/// Set this `Series` as `sorted` so that downstream code can use
/// fast paths for sorted arrays.
/// # Warning
/// This can lead to incorrect results if this `Series` is not sorted!!
/// Use with care!
pub fn set_sorted(self, sorted: IsSorted) -> Expr {
        self.apply(
            move |mut s| {
                match sorted {
                    IsSorted::Not => {}
                    IsSorted::Ascending => s.set_sorted(false),
                    IsSorted::Descending => s.set_sorted(true),
                }
                Ok(s)
            },
            GetOutput::same_type(),
        )
    }

    #[cfg(feature = "row_hash")]
    /// Compute the hash of every element
    pub fn hash(self, seed: usize) -> Expr {
        self.map_private(FunctionExpr::Hash(seed), "hash")
    }

    #[cfg(feature = "strings")]
    pub fn str(self) -> string::StringNameSpace {
        string::StringNameSpace(self)
    }

    #[cfg(feature = "temporal")]
    pub fn dt(self) -> dt::DateLikeNameSpace {
        dt::DateLikeNameSpace(self)
    }

    #[cfg(feature = "list")]
    pub fn arr(self) -> list::ListNameSpace {
        list::ListNameSpace(self)
    }

    #[cfg(feature = "dtype-categorical")]
    pub fn cat(self) -> cat::CategoricalNameSpace {
        cat::CategoricalNameSpace(self)
    }

    #[cfg(feature = "dtype-struct")]
    pub fn struct_(self) -> struct_::StructNameSpace {
        struct_::StructNameSpace(self)
    }
}

// Arithmetic ops
impl Add for Expr {
    type Output = Expr;

    fn add(self, rhs: Self) -> Self::Output {
        binary_expr(self, Operator::Plus, rhs)
    }
}

impl Sub for Expr {
    type Output = Expr;

    fn sub(self, rhs: Self) -> Self::Output {
        binary_expr(self, Operator::Minus, rhs)
    }
}

impl Div for Expr {
    type Output = Expr;

    fn div(self, rhs: Self) -> Self::Output {
        binary_expr(self, Operator::Divide, rhs)
    }
}

impl Mul for Expr {
    type Output = Expr;

    fn mul(self, rhs: Self) -> Self::Output {
        binary_expr(self, Operator::Multiply, rhs)
    }
}

impl Rem for Expr {
    type Output = Expr;

    fn rem(self, rhs: Self) -> Self::Output {
        binary_expr(self, Operator::Modulus, rhs)
    }
}

/// Apply a function/closure over multiple columns once the logical plan gets executed.
///
/// This function is very similar to [`apply_multiple`], but differs in how it handles aggregations.
///
/// * `map_multiple` should be used for operations that are independent of groups, e.g. `multiply * 2`, or `raise to the power`
/// * `apply_multiple` should be used for operations that work on a group of data. e.g. `sum`, `count`, etc.
///
/// It is the responsibility of the caller that the schema is correct by giving
/// the correct output_type. If `None` is given, the output type of the input expr is used.
pub fn map_multiple<F, E>(function: F, expr: E, output_type: GetOutput) -> Expr
where
    F: Fn(&mut [Series]) -> Result<Series> + 'static + Send + Sync,
    E: AsRef<[Expr]>,
{
    let input = expr.as_ref().to_vec();

    Expr::AnonymousFunction {
        input,
        function: NoEq::new(Arc::new(function)),
        output_type,
        options: FunctionOptions {
            collect_groups: ApplyOptions::ApplyFlat,
            input_wildcard_expansion: false,
            auto_explode: false,
            fmt_str: "",
        },
    }
}

/// Apply a function/closure over multiple columns once the logical plan gets executed.
///
/// This function is very similar to [`apply_multiple`], but differs in how it handles aggregations.
///
/// * `map_multiple` should be used for operations that are independent of groups, e.g. `multiply * 2`, or `raise to the power`
/// * `apply_multiple` should be used for operations that work on a group of data. e.g. `sum`, `count`, etc.
/// * `map_list_multiple` should be used when the function expects a list aggregated series.
pub fn map_list_multiple<F, E>(function: F, expr: E, output_type: GetOutput) -> Expr
where
    F: Fn(&mut [Series]) -> Result<Series> + 'static + Send + Sync,
    E: AsRef<[Expr]>,
{
    let input = expr.as_ref().to_vec();

    Expr::AnonymousFunction {
        input,
        function: NoEq::new(Arc::new(function)),
        output_type,
        options: FunctionOptions {
            collect_groups: ApplyOptions::ApplyList,
            input_wildcard_expansion: false,
            auto_explode: true,
            fmt_str: "",
        },
    }
}

/// Apply a function/closure over the groups of multiple columns. This should only be used in a groupby aggregation.
///
/// It is the responsibility of the caller that the schema is correct by giving
/// the correct output_type. If `None` is given, the output type of the input expr is used.
///
/// The difference with [`map_multiple`] is that [`apply_multiple`] will create a separate [`Series`] per group.
///
/// * `map_multiple` should be used for operations that are independent of groups, e.g. `multiply * 2`, or `raise to the power`
/// * `apply_multiple` should be used for operations that work on a group of data. e.g. `sum`, `count`, etc.
pub fn apply_multiple<F, E>(function: F, expr: E, output_type: GetOutput) -> Expr
where
    F: Fn(&mut [Series]) -> Result<Series> + 'static + Send + Sync,
    E: AsRef<[Expr]>,
{
    let input = expr.as_ref().to_vec();

    Expr::AnonymousFunction {
        input,
        function: NoEq::new(Arc::new(function)),
        output_type,
        options: FunctionOptions {
            collect_groups: ApplyOptions::ApplyGroups,
            input_wildcard_expansion: false,
            auto_explode: true,
            fmt_str: "",
        },
    }
}

/// Count expression
pub fn count() -> Expr {
    Expr::Count
}

/// First column in DataFrame
pub fn first() -> Expr {
    Expr::Nth(0)
}

/// Last column in DataFrame
pub fn last() -> Expr {
    Expr::Nth(-1)
}
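// A minimal usage sketch of the helpers above (not part of the original
// source). It assumes the crate's dsl `col` constructor is in scope; the
// column names "a" and "b" are made up. `map_multiple` runs the closure
// element-wise (group-independent), while `apply_multiple` runs it once per
// group and is meant for aggregations.
#[allow(dead_code)]
fn multi_column_examples() -> (Expr, Expr) {
    // Element-wise sum of two columns; safe under any grouping.
    let mapped = map_multiple(
        |columns: &mut [Series]| Ok(&columns[0] + &columns[1]),
        [col("a"), col("b")],
        GetOutput::same_type(),
    );
    // Per-group aggregation: reduce each group to a single float.
    let applied = apply_multiple(
        |columns: &mut [Series]| {
            let total = columns[0].sum::<f64>().unwrap_or(0.0);
            Ok(Series::new(columns[0].name(), [total]))
        },
        [col("a")],
        GetOutput::from_type(DataType::Float64),
    );
    (mapped, applied)
}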
32.385172
126
0.509674
ff7b76af2303ef94322617c0eca0e7e3f91de61c
32767
use super::*; #[test] fn record_definition_test() { assert_eq!( record_definition("PetCat", &[&"name", &"is_cute",]), "-record(pet_cat, {name, is_cute}).\n".to_string() ); // Reserved words are escaped in record names and fields assert_eq!( record_definition("div", &[&"receive", &"catch", &"unreserved"]), "-record(\'div\', {\'receive\', \'catch\', unreserved}).\n".to_string() ); } macro_rules! assert_erl { ($src:expr, $erl:expr $(,)?) => { println!("\n\n\n{}\n", $src); let mut ast = crate::grammar::ModuleParser::new() .parse($src) .expect("syntax error"); ast.name = vec!["the_app".to_string()]; let ast = crate::typ::infer_module(&mut 0, ast, &std::collections::HashMap::new(), &mut vec![]) .expect("should successfully infer"); let output = module(&ast); assert_eq!(($src, output), ($src, $erl.to_string())); }; } #[test] fn variable_rewrite() { // https://github.com/gleam-lang/gleam/issues/333 assert_erl!( r#" fn go(a) { case a { 99 -> { let a = a 1 } _ -> a } } "#, r#"-module(the_app). -compile(no_auto_import). go(A) -> case A of 99 -> A@1 = A, 1; _ -> A end. "#, ); // https://github.com/gleam-lang/gleam/issues/772 assert_erl!( "fn main(board) { fn(board) { board } board }", r#"-module(the_app). -compile(no_auto_import). main(Board) -> fun(Board@1) -> Board@1 end, Board. "#, ); // https://github.com/gleam-lang/gleam/issues/762 assert_erl!( r#" fn main(x) { fn(x) { x }(x) } "#, r#"-module(the_app). -compile(no_auto_import). main(X) -> (fun(X@1) -> X@1 end)(X). "#, ); assert_erl!( r#" fn main(x) { x |> fn(x) { x } } "#, r#"-module(the_app). -compile(no_auto_import). main(X) -> (fun(X@1) -> X@1 end)(X). "#, ); // https://github.com/gleam-lang/gleam/issues/788 assert_erl!( r#"fn go() { let _r = 1 let _r = 2 Nil }"#, r#"-module(the_app). -compile(no_auto_import). go() -> _ = 1, _ = 2, nil. "#, ); } #[test] fn integration_test() { assert_erl!( r#"fn go() { let x = tuple(100000000000000000, tuple(2000000000, 3000000000000, 40000000000), 50000, 6000000000) x }"#, r#"-module(the_app). -compile(no_auto_import). go() -> X = {100000000000000000, {2000000000, 3000000000000, 40000000000}, 50000, 6000000000}, X. "#, ); assert_erl!( r#"fn go() { let y = 1 let y = 2 y }"#, r#"-module(the_app). -compile(no_auto_import). go() -> Y = 1, Y@1 = 2, Y@1. "#, ); // hex, octal, and binary literals assert_erl!( r#"fn go() { let fifteen = 0xF let nine = 0o11 let ten = 0b1010 fifteen }"#, r#"-module(the_app). -compile(no_auto_import). go() -> Fifteen = 16#F, Nine = 8#11, Ten = 2#1010, Fifteen. "#, ); assert_erl!( r#"fn go() { assert y = 1 assert y = 2 y }"#, r#"-module(the_app). -compile(no_auto_import). go() -> Y = 1, Y@1 = 2, Y@1. "#, ); assert_erl!( r#"pub fn t() { True }"#, r#"-module(the_app). -compile(no_auto_import). -export([t/0]). t() -> true. "#, ); assert_erl!( r#"pub type Money { Pound(Int) } fn pound(x) { Pound(x) }"#, r#"-module(the_app). -compile(no_auto_import). pound(X) -> {pound, X}. "#, ); assert_erl!( r#"fn loop() { loop() }"#, r#"-module(the_app). -compile(no_auto_import). loop() -> loop(). "#, ); assert_erl!( r#"pub external fn run() -> Int = "Elixir.MyApp" "run""#, r#"-module(the_app). -compile(no_auto_import). -export([run/0]). run() -> 'Elixir.MyApp':run(). "#, ); assert_erl!( r#"fn inc(x) { x + 1 } pub fn go() { 1 |> inc |> inc |> inc }"#, r#"-module(the_app). -compile(no_auto_import). -export([go/0]). inc(X) -> X + 1. go() -> inc(inc(inc(1))). "#, ); assert_erl!( r#"fn add(x, y) { x + y } pub fn go() { 1 |> add(_, 1) |> add(2, _) |> add(_, 3) }"#, r#"-module(the_app). -compile(no_auto_import). 
-export([go/0]). add(X, Y) -> X + Y. go() -> add(add(2, add(1, 1)), 3). "#, ); assert_erl!( r#"fn and(x, y) { x && y } fn or(x, y) { x || y } fn modulo(x, y) { x % y } "#, r#"-module(the_app). -compile(no_auto_import). 'and'(X, Y) -> X andalso Y. 'or'(X, Y) -> X orelse Y. modulo(X, Y) -> X rem Y. "#, ); assert_erl!( r#"fn second(list) { case list { [x, y] -> y z -> 1 } } fn tail(list) { case list { [x, ..xs] -> xs z -> list } } "#, r#"-module(the_app). -compile(no_auto_import). second(List) -> case List of [X, Y] -> Y; Z -> 1 end. tail(List) -> case List of [X | Xs] -> Xs; Z -> List end. "#, ); assert_erl!( "fn tail(list) { case list { [x, ..] -> x } }", r#"-module(the_app). -compile(no_auto_import). tail(List) -> case List of [X | _] -> X end. "#, ); assert_erl!( r#"fn x() { let x = 1 let x = x + 1 x }"#, r#"-module(the_app). -compile(no_auto_import). x() -> X = 1, X@1 = X + 1, X@1. "#, ); assert_erl!( r#"pub external fn receive() -> Int = "try" "and" pub fn catch(x) { receive() }"#, r#"-module(the_app). -compile(no_auto_import). -export(['receive'/0, 'catch'/1]). 'receive'() -> 'try':'and'(). 'catch'(X) -> 'try':'and'(). "#, ); // Translation of Float-specific BinOp into variable-type Erlang term comparison. assert_erl!( r#"fn x() { 1. <. 2.3 }"#, r#"-module(the_app). -compile(no_auto_import). x() -> 1.0 < 2.3. "#, ); // Custom type creation assert_erl!( r#"type Pair(x, y) { Pair(x: x, y: y) } fn x() { Pair(1, 2) Pair(3., 4.) }"#, r#"-module(the_app). -compile(no_auto_import). x() -> {pair, 1, 2}, {pair, 3.0, 4.0}. "#, ); assert_erl!( r#"type Null { Null } fn x() { Null }"#, r#"-module(the_app). -compile(no_auto_import). x() -> null. "#, ); assert_erl!( r#"type Point { Point(x: Int, y: Int) } fn y() { fn() { Point }()(4, 6) }"#, r#"-module(the_app). -compile(no_auto_import). y() -> ((fun() -> fun(A, B) -> {point, A, B} end end)())(4, 6). "#, ); assert_erl!( r#"type Point { Point(x: Int, y: Int) } fn x() { Point(x: 4, y: 6) Point(y: 1, x: 9) }"#, r#"-module(the_app). -compile(no_auto_import). x() -> {point, 4, 6}, {point, 9, 1}. "#, ); assert_erl!( r#"type Point { Point(x: Int, y: Int) } fn x(y) { let Point(a, b) = y a }"#, r#"-module(the_app). -compile(no_auto_import). x(Y) -> {point, A, B} = Y, A. "#, ); // Private external function calls are simply inlined assert_erl!( r#"external fn go(x: Int, y: Int) -> Int = "m" "f" fn x() { go(x: 1, y: 2) go(y: 3, x: 4) }"#, r#"-module(the_app). -compile(no_auto_import). x() -> m:f(1, 2), m:f(4, 3). "#, ); // Public external function calls are inlined but the wrapper function is // also printed in the erlang output and exported assert_erl!( r#"pub external fn go(x: Int, y: Int) -> Int = "m" "f" fn x() { go(x: 1, y: 2) go(y: 3, x: 4) }"#, r#"-module(the_app). -compile(no_auto_import). -export([go/2]). go(A, B) -> m:f(A, B). x() -> m:f(1, 2), m:f(4, 3). "#, ); // Private external function references are inlined assert_erl!( r#"external fn go(x: Int, y: Int) -> Int = "m" "f" fn x() { go }"#, r#"-module(the_app). -compile(no_auto_import). x() -> fun m:f/2. "#, ); assert_erl!( r#"fn go(x xx, y yy) { xx } fn x() { go(x: 1, y: 2) go(y: 3, x: 4) }"#, r#"-module(the_app). -compile(no_auto_import). go(Xx, Yy) -> Xx. x() -> go(1, 2), go(4, 3). "#, ); // https://github.com/gleam-lang/gleam/issues/289 assert_erl!( r#" type User { User(id: Int, name: String, age: Int) } fn create_user(user_id) { User(age: 22, id: user_id, name: "") } "#, r#"-module(the_app). -compile(no_auto_import). create_user(UserId) -> {user, UserId, <<""/utf8>>, 22}. 
"#, ); assert_erl!( r#"fn run() { case 1, 2 { a, b -> a } }"#, r#"-module(the_app). -compile(no_auto_import). run() -> case {1, 2} of {A, B} -> A end. "#, ); assert_erl!( r#"type X { X(x: Int, y: Float) } fn x() { X(x: 1, y: 2.) X(y: 3., x: 4) }"#, r#"-module(the_app). -compile(no_auto_import). x() -> {x, 1, 2.0}, {x, 4, 3.0}. "#, ); assert_erl!( r#" fn go(a) { let a = a + 1 a } "#, r#"-module(the_app). -compile(no_auto_import). go(A) -> A@1 = A + 1, A@1. "#, ); assert_erl!( r#" fn go(a) { let a = 1 a } "#, r#"-module(the_app). -compile(no_auto_import). go(A) -> A@1 = 1, A@1. "#, ); // https://github.com/gleam-lang/gleam/issues/358 assert_erl!( r#" pub fn factory(f, i) { f(i) } pub type Box { Box(i: Int) } pub fn main() { factory(Box, 0) } "#, r#"-module(the_app). -compile(no_auto_import). -export([factory/2, main/0]). factory(F, I) -> F(I). main() -> factory(fun(A) -> {box, A} end, 0). "#, ); // https://github.com/gleam-lang/gleam/issues/384 assert_erl!( r#" pub fn main(args) { case args { _ -> { let a = 1 a } } let a = 2 a } "#, r#"-module(the_app). -compile(no_auto_import). -export([main/1]). main(Args) -> case Args of _ -> A = 1, A end, A@1 = 2, A@1. "#, ); } #[test] fn bit_string_discard() { // https://github.com/gleam-lang/gleam/issues/704 assert_erl!( r#" pub fn bitstring_discard(x: String) -> Bool { case x { <<_:utf8, rest:binary>> -> True _ -> False } } "#, r#"-module(the_app). -compile(no_auto_import). -export([bitstring_discard/1]). bitstring_discard(X) -> case X of <<_/utf8, Rest/binary>> -> true; _ -> false end. "#, ); assert_erl!( r#" pub fn bitstring_discard(x: String) -> Bool { case x { <<_discardme:utf8, rest:binary>> -> True _ -> False } } "#, r#"-module(the_app). -compile(no_auto_import). -export([bitstring_discard/1]). bitstring_discard(X) -> case X of <<_/utf8, Rest/binary>> -> true; _ -> false end. "#, ); // Clause guards assert_erl!( r#" pub fn main(args) { case args { x if x == args -> 1 _ -> 0 } } "#, r#"-module(the_app). -compile(no_auto_import). -export([main/1]). main(Args) -> case Args of X when X =:= Args -> 1; _ -> 0 end. "#, ); assert_erl!( r#" pub fn main(args) { case args { x if {x != x} == {args == args} -> 1 _ -> 0 } } "#, r#"-module(the_app). -compile(no_auto_import). -export([main/1]). main(Args) -> case Args of X when (X =/= X) =:= (Args =:= Args) -> 1; _ -> 0 end. "#, ); assert_erl!( r#" pub fn main(args) { case args { x if x && x || x == x && x -> 1 _ -> 0 } } "#, r#"-module(the_app). -compile(no_auto_import). -export([main/1]). main(Args) -> case Args of X when (X andalso X) orelse ((X =:= X) andalso X) -> 1; _ -> 0 end. "#, ); assert_erl!( r#" pub fn main() { case 1, 0 { x, y if x > y -> 1 _, _ -> 0 } } "#, r#"-module(the_app). -compile(no_auto_import). -export([main/0]). main() -> case {1, 0} of {X, Y} when X > Y -> 1; {_, _} -> 0 end. "#, ); assert_erl!( r#" pub fn main() { case 1, 0 { x, y if x >= y -> 1 _, _ -> 0 } } "#, r#"-module(the_app). -compile(no_auto_import). -export([main/0]). main() -> case {1, 0} of {X, Y} when X >= Y -> 1; {_, _} -> 0 end. "#, ); assert_erl!( r#" pub fn main() { case 1, 0 { x, y if x < y -> 1 _, _ -> 0 } } "#, r#"-module(the_app). -compile(no_auto_import). -export([main/0]). main() -> case {1, 0} of {X, Y} when X < Y -> 1; {_, _} -> 0 end. "#, ); assert_erl!( r#" pub fn main() { case 1, 0 { x, y if x <= y -> 1 _, _ -> 0 } } "#, r#"-module(the_app). -compile(no_auto_import). -export([main/0]). main() -> case {1, 0} of {X, Y} when X =< Y -> 1; {_, _} -> 0 end. 
"#, ); assert_erl!( r#" pub fn main() { case 1.0, 0.1 { x, y if x >. y -> 1 _, _ -> 0 } } "#, r#"-module(the_app). -compile(no_auto_import). -export([main/0]). main() -> case {1.0, 0.1} of {X, Y} when X > Y -> 1; {_, _} -> 0 end. "#, ); assert_erl!( r#" pub fn main() { case 1.0, 0.1 { x, y if x >=. y -> 1 _, _ -> 0 } } "#, r#"-module(the_app). -compile(no_auto_import). -export([main/0]). main() -> case {1.0, 0.1} of {X, Y} when X >= Y -> 1; {_, _} -> 0 end. "#, ); assert_erl!( r#" pub fn main() { let x = 0.123 case x { 99.9854 -> 1 _ -> 0 } } "#, r#"-module(the_app). -compile(no_auto_import). -export([main/0]). main() -> X = 0.123, case X of 99.9854 -> 1; _ -> 0 end. "#, ); assert_erl!( r#" pub fn main() { let x = 0.123 case x { _ if x == 3.14 -> 1 } } "#, r#"-module(the_app). -compile(no_auto_import). -export([main/0]). main() -> X = 0.123, case X of _ when X =:= 3.14 -> 1 end. "#, ); assert_erl!( r#" pub fn main() { let x = 0.123 case x { _ if 0.123 <. x -> 1 } } "#, r#"-module(the_app). -compile(no_auto_import). -export([main/0]). main() -> X = 0.123, case X of _ when 0.123 < X -> 1 end. "#, ); assert_erl!( r#" pub fn main(x) { case x { _ if x == [1, 2, 3] -> 1 } } "#, r#"-module(the_app). -compile(no_auto_import). -export([main/1]). main(X) -> case X of _ when X =:= [1, 2, 3] -> 1 end. "#, ); assert_erl!( r#" pub fn main() { let x = 0 case x { 0 -> 1 _ -> 0 } } "#, r#"-module(the_app). -compile(no_auto_import). -export([main/0]). main() -> X = 0, case X of 0 -> 1; _ -> 0 end. "#, ); // Tuple literals in guards assert_erl!( r#" pub fn main() { let x = tuple(1, 2, 3) case x { _ if x == tuple(1, 2, 3) -> 1 _ -> 0 } } "#, r#"-module(the_app). -compile(no_auto_import). -export([main/0]). main() -> X = {1, 2, 3}, case X of _ when X =:= {1, 2, 3} -> 1; _ -> 0 end. "#, ); assert_erl!( r#" pub fn main() { let x = tuple(1, 2, 3) case x { _ if x == tuple(1, 2, 3) -> 1 _ if x == tuple(2, 3, 4) -> 2 _ -> 0 } } "#, r#"-module(the_app). -compile(no_auto_import). -export([main/0]). main() -> X = {1, 2, 3}, case X of _ when X =:= {1, 2, 3} -> 1; _ when X =:= {2, 3, 4} -> 2; _ -> 0 end. "#, ); // Int literals in guards assert_erl!( r#" pub fn main() { let x = 0 case x { _ if x == 0 -> 1 } } "#, r#"-module(the_app). -compile(no_auto_import). -export([main/0]). main() -> X = 0, case X of _ when X =:= 0 -> 1 end. "#, ); assert_erl!( r#" pub fn main() { let x = 0 case x { _ if 0 < x -> 1 } } "#, r#"-module(the_app). -compile(no_auto_import). -export([main/0]). main() -> X = 0, case X of _ when 0 < X -> 1 end. "#, ); // String literals in guards assert_erl!( r#" pub fn main() { case "test" { x if x == "test" -> 1 } } "#, r#"-module(the_app). -compile(no_auto_import). -export([main/0]). main() -> case <<"test"/utf8>> of X when X =:= <<"test"/utf8>> -> 1 end. "#, ); // Record literals in guards assert_erl!( r#" type Test { Test(x: Int, y: Float) } pub fn main() { let x = Test(1, 3.0) case x { _ if x == Test(1, 1.0) -> 1 _ if x == Test(y: 2.0, x: 2) -> 2 _ if x != Test(2, 3.0) -> 2 _ -> 0 } } "#, r#"-module(the_app). -compile(no_auto_import). -export([main/0]). main() -> X = {test, 1, 3.0}, case X of _ when X =:= {test, 1, 1.0} -> 1; _ when X =:= {test, 2, 2.0} -> 2; _ when X =/= {test, 2, 3.0} -> 2; _ -> 0 end. "#, ); // Float vars in guards assert_erl!( r#" pub fn main() { case 0.1, 1.0 { x, y if x <. y -> 1 _, _ -> 0 } } "#, r#"-module(the_app). -compile(no_auto_import). -export([main/0]). main() -> case {0.1, 1.0} of {X, Y} when X < Y -> 1; {_, _} -> 0 end. 
"#, ); assert_erl!( r#" pub fn main() { case 0.1, 1.0 { x, y if x <=. y -> 1 _, _ -> 0 } } "#, r#"-module(the_app). -compile(no_auto_import). -export([main/0]). main() -> case {0.1, 1.0} of {X, Y} when X =< Y -> 1; {_, _} -> 0 end. "#, ); assert_erl!( r#" pub fn main(args) { case args { [x] | [x, _] if x -> 1 _ -> 0 } } "#, r#"-module(the_app). -compile(no_auto_import). -export([main/1]). main(Args) -> case Args of [X] when X -> 1; [X, _] when X -> 1; _ -> 0 end. "#, ); assert_erl!( r#" pub fn main() { todo } "#, r#"-module(the_app). -compile(no_auto_import). -export([main/0]). main() -> erlang:error({gleam_error, todo}). "#, ); assert_erl!( r#" pub fn main() { todo("testing") } "#, r#"-module(the_app). -compile(no_auto_import). -export([main/0]). main() -> erlang:error({gleam_error, todo, "testing"}). "#, ); // We can use record accessors for types with only one constructor assert_erl!( r#" pub type Person { Person(name: String, age: Int) } pub fn get_age(person: Person) { person.age } pub fn get_name(person: Person) { person.name } "#, r#"-module(the_app). -compile(no_auto_import). -export([get_age/1, get_name/1]). get_age(Person) -> erlang:element(3, Person). get_name(Person) -> erlang:element(2, Person). "#, ); // Test binding to a record field with the spread operator assert_erl!( r#" type Triple { Triple(a: Int, b: Int, c: Int) } fn main() { let triple = Triple(1,2,3) let Triple(the_a, ..) = triple the_a } "#, r#"-module(the_app). -compile(no_auto_import). main() -> Triple = {triple, 1, 2, 3}, {triple, TheA, _, _} = Triple, TheA. "#, ); // Test binding to a record field with the spread operator and a labelled argument assert_erl!( r#" type Triple { Triple(a: Int, b: Int, c: Int) } fn main() { let triple = Triple(1,2,3) let Triple(b: the_b, ..) = triple the_b } "#, r#"-module(the_app). -compile(no_auto_import). main() -> Triple = {triple, 1, 2, 3}, {triple, _, TheB, _} = Triple, TheB. "#, ); // Test binding to a record field with the spread operator with both a labelled argument and a positional argument assert_erl!( r#" type Triple { Triple(a: Int, b: Int, c: Int) } fn main() { let triple = Triple(1,2,3) let Triple(the_a, c: the_c, ..) = triple the_c } "#, r#"-module(the_app). -compile(no_auto_import). main() -> Triple = {triple, 1, 2, 3}, {triple, TheA, _, TheC} = Triple, TheC. "#, ); // Test binding to a record field with the spread operator in a match assert_erl!( r#" type Triple { Triple(a: Int, b: Int, c: Int) } fn main() { let triple = Triple(1,2,3) case triple { Triple(b: the_b, ..) -> the_b } } "#, r#"-module(the_app). -compile(no_auto_import). main() -> Triple = {triple, 1, 2, 3}, case Triple of {triple, _, TheB, _} -> TheB end. "#, ); // a |> b assert_erl!( r#" pub fn apply(f: fn(a) -> b, a: a) { a |> f } "#, r#"-module(the_app). -compile(no_auto_import). -export([apply/2]). apply(F, A) -> F(A). "#, ); // a |> b(c) assert_erl!( r#" pub fn apply(f: fn(a, Int) -> b, a: a) { a |> f(1) } "#, r#"-module(the_app). -compile(no_auto_import). -export([apply/2]). apply(F, A) -> F(A, 1). "#, ); // Parentheses are added for binop subexpressions assert_erl!( r#" fn main() { let a = 2 * {3 + 1} / 2 let b = 5 + 3 / 3 * 2 - 6 * 4 b } "#, r#"-module(the_app). -compile(no_auto_import). main() -> A = (2 * (3 + 1)) div 2, B = (5 + ((3 div 3) * 2)) - (6 * 4), B. "#, ); // try assert_erl!( r#" fn main() { try a = Ok(1) try b = Ok(2) Ok(a + b) } "#, r#"-module(the_app). -compile(no_auto_import). 
main() -> case {ok, 1} of {error, GleamTryError} -> {error, GleamTryError}; {ok, A} -> case {ok, 2} of {error, GleamTryError@1} -> {error, GleamTryError@1}; {ok, B} -> {ok, A + B} end end. "#, ); // Parentheses are added when calling functions returned by record access assert_erl!( r#" type FnBox { FnBox(f: fn(Int) -> Int) } fn main() { let b = FnBox(f: fn(x) { x }) b.f(5) } "#, r#"-module(the_app). -compile(no_auto_import). main() -> B = {fn_box, fun(X) -> X end}, (erlang:element(2, B))(5). "#, ); // Parentheses are added when calling functions returned by tuple access assert_erl!( r#" fn main() { let t = tuple(fn(x) { x }) t.0(5) } "#, r#"-module(the_app). -compile(no_auto_import). main() -> T = {fun(X) -> X end}, (erlang:element(1, T))(5). "#, ); // BitStrings assert_erl!( r#"fn main() { let a = 1 let simple = <<1, a>> let complex = <<4:int-unsigned-big, 5.0:little-float, 6:native-int-signed>> let <<7:2, 8:size(3), b:binary-size(4)>> = <<1>> let <<c:unit(1), d:binary-size(2)-unit(2)>> = <<1>> simple } "#, r#"-module(the_app). -compile(no_auto_import). main() -> A = 1, Simple = <<1, A>>, Complex = <<4/integer-unsigned-big, 5.0/little-float, 6/native-integer-signed>>, <<7:2, 8:3, B:4/binary>> = <<1>>, <<C/unit:1, D:2/binary-unit:2>> = <<1>>, Simple. "#, ); assert_erl!( r#"fn x() { 2 } fn main() { let a = 1 let b = <<a:unit(2)-size(a * 2), a:size(3 + x())-unit(1)>> b } "#, r#"-module(the_app). -compile(no_auto_import). x() -> 2. main() -> A = 1, B = <<A:(A * 2)/unit:2, A:(3 + x())/unit:1>>, B. "#, ); assert_erl!( r#"fn main() { let a = 1 let <<b, 1>> = <<1, a>> b } "#, r#"-module(the_app). -compile(no_auto_import). main() -> A = 1, <<B, 1>> = <<1, A>>, B. "#, ); assert_erl!( r#"fn main() { let a = <<"test":utf8>> let <<b:utf8_codepoint, "st":utf8>> = a b } "#, r#"-module(the_app). -compile(no_auto_import). main() -> A = <<"test"/utf8>>, <<B/utf8, "st"/utf8>> = A, B. "#, ); assert_erl!( r#"fn x() { 1 } fn main() { let a = <<x():int>> a } "#, r#"-module(the_app). -compile(no_auto_import). x() -> 1. main() -> A = <<(x())/integer>>, A. "#, ); assert_erl!( r#" pub const string_value = "constant value" pub fn main(arg) { case arg { _ if arg == string_value -> 1 _ -> 0 } } "#, r#"-module(the_app). -compile(no_auto_import). -export([main/1]). main(Arg) -> case Arg of _ when Arg =:= <<"constant value"/utf8>> -> 1; _ -> 0 end. "#, ); assert_erl!( r#" pub const bits = <<1, "ok":utf8, 3, 4:50>> pub fn main(arg) { case arg { _ if arg == bits -> 1 _ -> 0 } } "#, r#"-module(the_app). -compile(no_auto_import). -export([main/1]). main(Arg) -> case Arg of _ when Arg =:= <<1, "ok"/utf8, 3, 4:50>> -> 1; _ -> 0 end. "#, ); assert_erl!( r#" pub const constant = tuple(1, 2.0) pub fn main(arg) { case arg { _ if arg == constant -> 1 _ -> 0 } } "#, r#"-module(the_app). -compile(no_auto_import). -export([main/1]). main(Arg) -> case Arg of _ when Arg =:= {1, 2.0} -> 1; _ -> 0 end. "#, ); assert_erl!( r#" pub const float_value = 3.14 pub fn main(arg) { case arg { _ if arg >. float_value -> 1 _ -> 0 } } "#, r#"-module(the_app). -compile(no_auto_import). -export([main/1]). main(Arg) -> case Arg of _ when Arg > 3.14 -> 1; _ -> 0 end. "#, ); assert_erl!( r#" pub const string_value = "constant value" pub const float_value = 3.14 pub const int_value = 42 pub const tuple_value = tuple(1, 2.0, "3") pub const list_value = [1, 2, 3] pub fn main(arg) { let _ = list_value case arg { tuple(w, x, y, z) if w == tuple_value && x == string_value && y >. float_value && z == int_value -> 1 _ -> 0 } } "#, r#"-module(the_app). 
-compile(no_auto_import). -export([main/1]). main(Arg) -> _ = [1, 2, 3], case Arg of {W, X, Y, Z} when (((W =:= {1, 2.0, <<"3"/utf8>>}) andalso (X =:= <<"constant value"/utf8>>)) andalso (Y > 3.14)) andalso (Z =:= 42) -> 1; _ -> 0 end. "#, ); assert_erl!( r#" pub const list = [1, 2, 3] pub fn main(arg) { case arg { _ if arg == list -> 1 _ -> 0 } } "#, r#"-module(the_app). -compile(no_auto_import). -export([main/1]). main(Arg) -> case Arg of _ when Arg =:= [1, 2, 3] -> 1; _ -> 0 end. "#, ); // reassigning name in alternative patterns assert_erl!( r#" pub fn test() { let duplicate_name = 1 case 1 { 1 | 2 -> { let duplicate_name = duplicate_name + 1 duplicate_name } } }"#, r#"-module(the_app). -compile(no_auto_import). -export([test/0]). test() -> DuplicateName = 1, case 1 of 1 -> DuplicateName@1 = DuplicateName + 1, DuplicateName@1; 2 -> DuplicateName@1 = DuplicateName + 1, DuplicateName@1 end. "#, ); // Alternative patterns with a clause containing vars assert_erl!( r#" pub fn test() { case Ok(1) { Ok(duplicate_name) | Error(duplicate_name) -> duplicate_name } }"#, r#"-module(the_app). -compile(no_auto_import). -export([test/0]). test() -> case {ok, 1} of {ok, DuplicateName} -> DuplicateName; {error, DuplicateName} -> DuplicateName end. "#, ); // Alternative patterns with a guard clause containing vars assert_erl!( r#" pub fn test() { let duplicate_name = 1 case 1 { 1 | 2 if duplicate_name == 1 -> duplicate_name } }"#, r#"-module(the_app). -compile(no_auto_import). -export([test/0]). test() -> DuplicateName = 1, case 1 of 1 when DuplicateName =:= 1 -> DuplicateName; 2 when DuplicateName =:= 1 -> DuplicateName end. "#, ); assert_erl!( r#" pub const constant = Ok(1) pub fn main(arg) { let _ = constant case arg { _ if arg == constant -> 1 _ -> 0 } } "#, r#"-module(the_app). -compile(no_auto_import). -export([main/1]). main(Arg) -> _ = {ok, 1}, case Arg of _ when Arg =:= {ok, 1} -> 1; _ -> 0 end. "#, ); // Record updates assert_erl!( r#" pub type Person { Person(name: String, age: Int) } fn main() { let p = Person("Quinn", 27) let new_p = Person(..p, age: 28) new_p } "#, r#"-module(the_app). -compile(no_auto_import). main() -> P = {person, <<"Quinn"/utf8>>, 27}, NewP = erlang:setelement(3, P, 28), NewP. "#, ); // Record updates with field accesses assert_erl!( r#" pub type Person { Person(name: String, age: Int) } fn main() { let p = Person("Quinn", 27) let new_p = Person(..p, age: p.age + 1) new_p } "#, r#"-module(the_app). -compile(no_auto_import). main() -> P = {person, <<"Quinn"/utf8>>, 27}, NewP = erlang:setelement(3, P, erlang:element(3, P) + 1), NewP. "#, ); // Record updates with multiple fields assert_erl!( r#" pub type Person { Person(name: String, age: Int) } fn main() { let p = Person("Quinn", 27) let new_p = Person(..p, age: 28, name: "Riley") new_p } "#, r#"-module(the_app). -compile(no_auto_import). main() -> P = {person, <<"Quinn"/utf8>>, 27}, NewP = erlang:setelement(2, erlang:setelement(3, P, 28), <<"Riley"/utf8>>), NewP. "#, ); // Numbers with underscores assert_erl!( r#" fn main() { 100_000 100_000.00101 } "#, r#"-module(the_app). -compile(no_auto_import). main() -> 100000, 100000.00101. "#, ); assert_erl!( r#" const i = 100_000 const f = 100_000.00101 fn main() { i f } "#, r#"-module(the_app). -compile(no_auto_import). main() -> 100000, 100000.00101. "#, ); assert_erl!( r#" fn main() { assert 100_000 = 1 assert 100_000.00101 = 1. 1 } "#, r#"-module(the_app). -compile(no_auto_import). main() -> 100000 = 1, 100000.00101 = 1.0, 1. 
"#, ); } // https://github.com/gleam-lang/gleam/issues/777 #[test] fn block_assignment() { assert_erl!( r#" fn main() { let x = { 1 2 } x } "#, r#"-module(the_app). -compile(no_auto_import). main() -> X = begin 1, 2 end, X. "#, ); } #[test] fn recursive_type() { // TODO: we should be able to generalise `id` and we should be // able to handle recursive types. Either of these type features // would make this module type check OK. assert_erl!( r#" fn id(x) { x } fn main() { id(id) } "#, r#"-module(the_app). -compile(no_auto_import). id(X) -> X. main() -> id(fun id/1). "#, ); } #[test] fn tuple_access_in_guard() { assert_erl!( r#" fn main() { let key = 10 let x = [tuple(10, 2), tuple(1, 2)] case x { [first, ..rest] if first.0 == key -> "ok" _ -> "ko" } } "#, r#"-module(the_app). -compile(no_auto_import). main() -> Key = 10, X = [{10, 2}, {1, 2}], case X of [First | Rest] when erlang:element(1, First) =:= Key -> <<"ok"/utf8>>; _ -> <<"ko"/utf8>> end. "#, ); } #[test] fn record_constants() { assert_erl!( "pub type Test { A } const test = A pub fn a() { A }", "-module(the_app). -compile(no_auto_import). -export([a/0]). a() -> a. " ); }
15.419765
143
0.452345
f7239ebcca44e1abbbfcc98478e1d03967ec90a8
17881
use super::nodes::{ IPciNode, PciNodeType, PciRoot, PcieBarInfo, PcieIrqMode, PcieIrqModeCaps, SharedLegacyIrqHandler, }; use super::{ config::PciConfig, constants::*, pci_init_args::PciIrqSwizzleLut, pio::pci_bdf_raw_addr, MappedEcamRegion, PciAddrSpace, PciEcamRegion, }; use crate::dev::Interrupt; use crate::object::*; use crate::vm::{kernel_allocate_physical, CachePolicy, MMUFlags, PhysAddr, VirtAddr}; use crate::ZxResult; use alloc::sync::{Arc, Weak}; use alloc::{collections::BTreeMap, vec::Vec}; use core::cmp::min; use core::marker::{Send, Sync}; use lazy_static::*; use region_alloc::RegionAllocator; use spin::Mutex; /// PCIE Bus Driver. pub struct PCIeBusDriver { pub(crate) mmio_lo: Arc<Mutex<RegionAllocator>>, pub(crate) mmio_hi: Arc<Mutex<RegionAllocator>>, pub(crate) pio_region: Arc<Mutex<RegionAllocator>>, address_provider: Option<Arc<dyn PCIeAddressProvider>>, roots: BTreeMap<usize, Arc<PciRoot>>, state: PCIeBusDriverState, bus_topology: Mutex<()>, configs: Mutex<Vec<Arc<PciConfig>>>, legacy_irq_list: Mutex<Vec<Arc<SharedLegacyIrqHandler>>>, } #[derive(PartialEq, Debug)] enum PCIeBusDriverState { NotStarted, StartingScanning, StartingRunningQuirks, StartingResourceAllocation, Operational, } lazy_static! { static ref _INSTANCE: Mutex<PCIeBusDriver> = Mutex::new(PCIeBusDriver::new()); } impl PCIeBusDriver { /// Add bus region. pub fn add_bus_region(base: u64, size: u64, aspace: PciAddrSpace) -> ZxResult { _INSTANCE.lock().add_bus_region_inner(base, size, aspace) } /// Subtract bus region. pub fn sub_bus_region(base: u64, size: u64, aspace: PciAddrSpace) -> ZxResult { _INSTANCE.lock().sub_bus_region_inner(base, size, aspace) } /// A PcieAddressProvider translates a BDF address to an address that the /// system can use to access ECAMs. pub fn set_address_translation_provider(provider: Arc<dyn PCIeAddressProvider>) -> ZxResult { _INSTANCE .lock() .set_address_translation_provider_inner(provider) } /// Add a root bus to the driver and attempt to scan it for devices. pub fn add_root(bus_id: usize, lut: PciIrqSwizzleLut) -> ZxResult { let mut bus = _INSTANCE.lock(); let root = PciRoot::new(bus_id, lut, &bus); bus.add_root_inner(root) } /// Start the bus driver. pub fn start_bus_driver() -> ZxResult { _INSTANCE.lock().start_bus_driver_inner() } /// Get the "Nth" device. 
pub fn get_nth_device(n: usize) -> ZxResult<(PcieDeviceInfo, Arc<PcieDeviceKObject>)> { let device_node = _INSTANCE .lock() .get_nth_device_inner(n) .ok_or(ZxError::OUT_OF_RANGE)?; let device = device_node.device(); let info = PcieDeviceInfo { vendor_id: device.vendor_id, device_id: device.device_id, base_class: device.class_id, sub_class: device.subclass_id, program_interface: device.prog_if, revision_id: device.rev_id, bus_id: device.bus_id as u8, dev_id: device.dev_id as u8, func_id: device.func_id as u8, _padding1: 0, }; let object = PcieDeviceKObject::new(device_node.clone()); Ok((info, object)) } } impl PCIeBusDriver { fn new() -> Self { PCIeBusDriver { mmio_lo: Default::default(), mmio_hi: Default::default(), pio_region: Default::default(), address_provider: None, roots: BTreeMap::new(), state: PCIeBusDriverState::NotStarted, bus_topology: Mutex::default(), legacy_irq_list: Mutex::new(Vec::new()), configs: Mutex::new(Vec::new()), } } fn add_bus_region_inner(&mut self, base: u64, size: u64, aspace: PciAddrSpace) -> ZxResult { self.add_or_sub_bus_region(base, size, aspace, true) } fn sub_bus_region_inner(&mut self, base: u64, size: u64, aspace: PciAddrSpace) -> ZxResult { self.add_or_sub_bus_region(base, size, aspace, false) } fn set_address_translation_provider_inner( &mut self, provider: Arc<dyn PCIeAddressProvider>, ) -> ZxResult { if self.is_started(false) { return Err(ZxError::BAD_STATE); } self.address_provider = Some(provider); Ok(()) } fn add_root_inner(&mut self, root: Arc<PciRoot>) -> ZxResult { if self.is_started(false) { return Err(ZxError::BAD_STATE); } if self.roots.contains_key(&root.managed_bus_id()) { return Err(ZxError::ALREADY_EXISTS); } self.bus_topology.lock(); self.roots.insert(root.managed_bus_id(), root); Ok(()) } fn add_or_sub_bus_region( &mut self, base: u64, size: u64, aspace: PciAddrSpace, is_add: bool, ) -> ZxResult { if self.is_started(true) { return Err(ZxError::BAD_STATE); } if size == 0 { return Err(ZxError::INVALID_ARGS); } if aspace == PciAddrSpace::MMIO { let u32_max: u64 = u32::MAX as u64; let end = base + size; if base <= u32_max { let lo_size = min(u32_max + 1 - base, size); self.mmio_lo .lock() .add_or_subtract(base as usize, lo_size as usize, is_add); } if end > u32_max + 1 { let hi_size = min(end - (u32_max + 1), size); self.mmio_hi .lock() .add_or_subtract((end - hi_size) as usize, end as usize, is_add); } } else if aspace == PciAddrSpace::PIO { let end = base + size - 1; if ((base | end) & !PCIE_PIO_ADDR_SPACE_MASK) != 0 { return Err(ZxError::INVALID_ARGS); } self.pio_region .lock() .add_or_subtract(base as usize, size as usize, is_add); } Ok(()) } fn start_bus_driver_inner(&mut self) -> ZxResult { self.transfer_state( PCIeBusDriverState::NotStarted, PCIeBusDriverState::StartingScanning, )?; self.foreach_root( |root, _c| { root.base_upstream.scan_downstream(self); true }, (), ); self.transfer_state( PCIeBusDriverState::StartingScanning, PCIeBusDriverState::StartingRunningQuirks, )?; warn!("pci: skip quirks"); self.transfer_state( PCIeBusDriverState::StartingRunningQuirks, PCIeBusDriverState::StartingResourceAllocation, )?; self.foreach_root( |root, _| { root.base_upstream.allocate_downstream_bars(); true }, (), ); self.transfer_state( PCIeBusDriverState::StartingResourceAllocation, PCIeBusDriverState::Operational, )?; Ok(()) } fn foreach_root<T, C>(&self, callback: T, context: C) -> C where T: Fn(Arc<PciRoot>, &mut C) -> bool, { let mut bus_top_guard = self.bus_topology.lock(); let mut context = context; for (_key, root) in self.roots.iter() 
{ drop(bus_top_guard); if !callback(root.clone(), &mut context) { return context; } bus_top_guard = self.bus_topology.lock(); } drop(bus_top_guard); context } #[allow(dead_code)] fn foreach_device<T, C>(&self, callback: &T, context: C) -> C where T: Fn(Arc<dyn IPciNode>, &mut C, usize) -> bool, { self.foreach_root( |root, ctx| { self.foreach_downstream(root, 0 /*level*/, callback, &mut (ctx.0)) }, (context, &self), ) .0 } #[allow(dead_code)] fn foreach_downstream<T, C>( &self, upstream: Arc<dyn IPciNode>, level: usize, callback: &T, context: &mut C, ) -> bool where T: Fn(Arc<dyn IPciNode>, &mut C, usize) -> bool, { if level > 256 || upstream.as_upstream().is_none() { return true; } let upstream = upstream.as_upstream().unwrap(); for i in 0..PCI_MAX_FUNCTIONS_PER_BUS { let device = upstream.get_downstream(i); if let Some(dev) = device { if !callback(dev.clone(), context, level) { return false; } if let PciNodeType::Bridge = dev.node_type() { if !self.foreach_downstream(dev, level + 1, callback, context) { return false; } } } } true } fn transfer_state( &mut self, expected: PCIeBusDriverState, target: PCIeBusDriverState, ) -> ZxResult { trace!("transfer state from {:#x?} to {:#x?}", expected, target); if self.state != expected { return Err(ZxError::BAD_STATE); } self.state = target; Ok(()) } fn is_started(&self, _allow_quirks_phase: bool) -> bool { !matches!(self.state, PCIeBusDriverState::NotStarted) } /// Get a device's config. pub fn get_config( &self, bus_id: usize, dev_id: usize, func_id: usize, ) -> Option<(Arc<PciConfig>, PhysAddr)> { self.address_provider.as_ref()?; let (paddr, vaddr) = self .address_provider .clone() .unwrap() .translate(bus_id as u8, dev_id as u8, func_id as u8) .ok()?; let mut config = self.configs.lock(); let cfg = config.iter().find(|x| x.base == vaddr); if let Some(x) = cfg { return Some((x.clone(), paddr)); } let cfg = self .address_provider .clone() .unwrap() .create_config(vaddr as u64); config.push(cfg.clone()); Some((cfg, paddr)) } /// Link a device to an upstream node. pub fn link_device_to_upstream(&self, down: Arc<dyn IPciNode>, up: Weak<dyn IPciNode>) { let _guard = self.bus_topology.lock(); let dev = down.device(); dev.set_upstream(up.clone()); let up = up.upgrade().unwrap().as_upstream().unwrap(); up.set_downstream( dev.dev_id() * PCI_MAX_FUNCTIONS_PER_DEVICE + dev.func_id(), Some(down.clone()), ); } /// Find the legacy IRQ handler. pub fn find_legacy_irq_handler(&self, irq_id: usize) -> ZxResult<Arc<SharedLegacyIrqHandler>> { let mut list = self.legacy_irq_list.lock(); for i in list.iter() { if irq_id == i.irq_id { return Ok(i.clone()); } } SharedLegacyIrqHandler::create(irq_id) .map(|x| { list.push(x.clone()); x }) .ok_or(ZxError::NO_RESOURCES) } fn get_nth_device_inner(&self, n: usize) -> Option<Arc<dyn IPciNode>> { self.foreach_device( &|device, context: &mut (usize, Option<Arc<_>>), _level| { if context.0 == 0 { context.1 = Some(device); false } else { context.0 -= 1; true } }, (n, None), ) .1 } } /// PcieAddressProvider is an interface that implements translation from a BDF to /// a PCI ECAM address. pub trait PCIeAddressProvider: Send + Sync { /// Creates a config that corresponds to the type of the PcieAddressProvider. fn create_config(&self, addr: u64) -> Arc<PciConfig>; /// Accepts a PCI BDF triple and returns ZX_OK if it is able to translate it /// into an ECAM address. fn translate(&self, bus_id: u8, dev_id: u8, func_id: u8) -> ZxResult<(PhysAddr, VirtAddr)>; } /// Systems that have memory mapped Config Spaces. 
#[derive(Default)] pub struct MmioPcieAddressProvider { ecam_regions: Mutex<BTreeMap<u8, MappedEcamRegion>>, } impl MmioPcieAddressProvider { /// Add a ECAM region. pub fn add_ecam(&self, ecam: PciEcamRegion) -> ZxResult { if ecam.bus_start > ecam.bus_end { return Err(ZxError::INVALID_ARGS); } let bus_count = (ecam.bus_end - ecam.bus_start) as usize + 1; if ecam.size != bus_count * PCIE_ECAM_BYTES_PER_BUS { return Err(ZxError::INVALID_ARGS); } let mut inner = self.ecam_regions.lock(); if let Some((_key, value)) = inner.range(..=ecam.bus_start).last() { // if intersect... if ecam.bus_end <= value.ecam.bus_start || value.ecam.bus_end <= ecam.bus_start || bus_count == 0 || value.ecam.bus_start == value.ecam.bus_end + 1 { return Err(ZxError::BAD_STATE); } } let vaddr = kernel_allocate_physical( ecam.size, ecam.phys_base as PhysAddr, MMUFlags::READ | MMUFlags::WRITE, CachePolicy::UncachedDevice, )?; inner.insert( ecam.bus_start, MappedEcamRegion { ecam, vaddr: vaddr as u64, }, ); Ok(()) } } impl PCIeAddressProvider for MmioPcieAddressProvider { fn create_config(&self, addr: u64) -> Arc<PciConfig> { Arc::new(PciConfig { addr_space: PciAddrSpace::MMIO, base: addr as usize, }) } fn translate( &self, bus_id: u8, device_id: u8, function_id: u8, ) -> ZxResult<(PhysAddr, VirtAddr)> { let regions = self.ecam_regions.lock(); let target = regions.range(..=bus_id).last().ok_or(ZxError::NOT_FOUND)?; if bus_id < target.1.ecam.bus_start || bus_id > target.1.ecam.bus_end { return Err(ZxError::NOT_FOUND); } let bus_id = bus_id - target.1.ecam.bus_start; let offset = (bus_id as usize) << 20 | (device_id as usize) << 15 | (function_id as usize) << 12; let phys = target.1.ecam.phys_base as usize + offset; let vaddr = target.1.vaddr as usize + offset; Ok((phys, vaddr)) } } /// Systems that have PIO mapped Config Spaces. #[derive(Default)] pub struct PioPcieAddressProvider; impl PCIeAddressProvider for PioPcieAddressProvider { fn create_config(&self, addr: u64) -> Arc<PciConfig> { Arc::new(PciConfig { addr_space: PciAddrSpace::PIO, base: addr as usize, }) } fn translate( &self, bus_id: u8, device_id: u8, function_id: u8, ) -> ZxResult<(PhysAddr, VirtAddr)> { let virt = pci_bdf_raw_addr(bus_id, device_id, function_id, 0); Ok((0, virt as VirtAddr)) } } /// Info returned to dev manager for PCIe devices when probing. #[allow(missing_docs)] #[repr(C)] #[derive(Clone, Debug)] pub struct PcieDeviceInfo { pub vendor_id: u16, pub device_id: u16, pub base_class: u8, pub sub_class: u8, pub program_interface: u8, pub revision_id: u8, pub bus_id: u8, pub dev_id: u8, pub func_id: u8, _padding1: u8, } /// PCIE Device Entity. pub struct PcieDeviceKObject { base: KObjectBase, device: Arc<dyn IPciNode>, irqs_avail_cnt: u32, // WARNING irqs_maskable: bool, // WARNING } impl_kobject!(PcieDeviceKObject); impl PcieDeviceKObject { /// Create a new PcieDeviceKObject. pub fn new(device: Arc<dyn IPciNode>) -> Arc<PcieDeviceKObject> { Arc::new(PcieDeviceKObject { base: KObjectBase::new(), device, irqs_avail_cnt: 10, // WARNING irqs_maskable: true, // WARNING }) } /// Get PcieBarInfo. pub fn get_bar(&self, bar_num: u32) -> ZxResult<PcieBarInfo> { let device = self.device.device(); device.get_bar(bar_num as usize).ok_or(ZxError::NOT_FOUND) } /// Map the interrupt to the IRQ. pub fn map_interrupt(&self, irq: i32) -> ZxResult<Arc<Interrupt>> { if irq < 0 || irq as u32 >= self.irqs_avail_cnt { return Err(ZxError::INVALID_ARGS); } Interrupt::new_pci(self.device.clone(), irq as u32, self.irqs_maskable) } /// Enable MMIO. 
pub fn enable_mmio(&self) -> ZxResult {
        self.device.device().enable_mmio(true)
    }

    /// Enable PIO.
    pub fn enable_pio(&self) -> ZxResult {
        self.device.device().enable_pio(true)
    }

    /// Enable bus mastering.
    pub fn enable_master(&self, enable: bool) -> ZxResult {
        self.device.device().enable_master(enable)
    }

    /// Check whether `mode` is one of the IRQ modes supported by this PCI device.
    pub fn get_irq_mode_capabilities(&self, mode: PcieIrqMode) -> ZxResult<PcieIrqModeCaps> {
        self.device.device().get_irq_mode_capabilities(mode)
    }

    /// Set IRQ mode.
    pub fn set_irq_mode(&self, mode: PcieIrqMode, requested_irqs: usize) -> ZxResult {
        self.device.device().set_irq_mode(mode, requested_irqs)
    }

    /// Read the device's config.
    pub fn config_read(&self, offset: usize, width: usize) -> ZxResult<u32> {
        self.device.device().config_read(offset, width)
    }

    /// Write the device's config.
    pub fn config_write(&self, offset: usize, width: usize, val: u32) -> ZxResult {
        self.device.device().config_write(offset, width, val)
    }
}
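// A hypothetical bring-up sequence for the driver above (a sketch, not from
// the original source). The MMIO window, ECAM base and bus ids are made-up
// values, and it assumes `PciEcamRegion` can be built with a struct literal
// here.
#[allow(dead_code)]
fn pcie_bring_up(lut: PciIrqSwizzleLut) -> ZxResult {
    // 1. Tell the allocator which address space BARs may be carved from.
    PCIeBusDriver::add_bus_region(0xf000_0000, 0x0800_0000, PciAddrSpace::MMIO)?;
    // 2. Install an ECAM-backed BDF -> config-space translator.
    let provider = Arc::new(MmioPcieAddressProvider::default());
    provider.add_ecam(PciEcamRegion {
        phys_base: 0xe000_0000,
        size: PCIE_ECAM_BYTES_PER_BUS, // one bus: bus_start == bus_end
        bus_start: 0,
        bus_end: 0,
    })?;
    PCIeBusDriver::set_address_translation_provider(provider)?;
    // 3. Add the root complex, then scan it and allocate downstream BARs.
    PCIeBusDriver::add_root(0, lut)?;
    PCIeBusDriver::start_bus_driver()
}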
32.102334
99
0.567809
878c567e3c27cfbada46e8ae8d455e85b88f26c8
947
use std::fmt;

use rpc_json_client;
use serde_json;

#[derive(Debug)]
pub enum Error {
    JsonRpc(rpc_json_client::Error),
    Json(serde_json::error::Error),
}

impl From<rpc_json_client::Error> for Error {
    fn from(e: rpc_json_client::Error) -> Error {
        Error::JsonRpc(e)
    }
}

impl From<serde_json::error::Error> for Error {
    fn from(e: serde_json::error::Error) -> Error {
        Error::Json(e)
    }
}

impl fmt::Display for Error {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            Error::JsonRpc(ref e) => write!(f, "JSON-RPC error: {}", e),
            Error::Json(ref e) => write!(f, "JSON error: {}", e),
        }
    }
}

impl std::error::Error for Error {
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        match *self {
            Error::JsonRpc(ref e) => Some(e),
            Error::Json(ref e) => Some(e),
        }
    }
}
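// A minimal sketch (not part of the original module) of why the `From` impls
// above exist: they let `?` lift either underlying error into this crate's
// `Error` without explicit `map_err` calls. `parse_reply` is a hypothetical
// caller.
#[allow(dead_code)]
fn parse_reply(raw: &str) -> Result<serde_json::Value, Error> {
    // serde_json::error::Error -> Error happens implicitly via From.
    let value: serde_json::Value = serde_json::from_str(raw)?;
    Ok(value)
}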
22.547619
72
0.555438
4a51ab4fc0420265328063c1a92bc3e82e058879
26331
/******************************************************************************* * Copyright (c) 2017 Association Cénotélie (cenotelie.fr) * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation, either version 3 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General * Public License along with this program. * If not, see <http://www.gnu.org/licenses/>. ******************************************************************************/ //! Module for LR(k) parsers use std::usize; use super::super::ast::Ast; use super::super::ast::TableElemRef; use super::super::ast::TableType; use super::super::errors::ParseErrorUnexpectedToken; use super::super::lexers::Lexer; use super::super::lexers::TokenKernel; use super::super::lexers::DEFAULT_CONTEXT; use super::super::symbols::SemanticBody; use super::super::symbols::SemanticElement; use super::super::symbols::SemanticElementTrait; use super::subtree::SubTree; use super::*; /// Represents the LR(k) parsing table and productions pub struct LRkAutomaton { /// The number of columns in the LR table columns_count: usize, /// The number of states in the LR table states_count: usize, /// Map of symbol ID to column index in the LR table columns_map: LRColumnMap, /// The contexts information contexts: Vec<LRContexts>, /// The LR table table: Vec<u16>, /// The table of LR productions productions: Vec<LRProduction> } impl LRkAutomaton { /// Initializes a new automaton from the given binary data pub fn new(data: &[u8]) -> LRkAutomaton { let columns_count = read_u16(data, 0) as usize; let states_count = read_u16(data, 2) as usize; let productions_count = read_u16(data, 4) as usize; let columns_map = LRColumnMap::new(data, 6, columns_count); let mut contexts = Vec::<LRContexts>::with_capacity(states_count); let mut index = 6 + columns_count * 2; for _i in 0..states_count { let mut context = LRContexts::new(); let count = read_u16(data, index); index += 2; for _j in 0..count { context.add(read_u16(data, index), read_u16(data, index + 2)); index += 4 } contexts.push(context); } let table = read_table_u16(data, index, states_count * columns_count * 2); index += states_count * columns_count * 4; let mut productions = Vec::<LRProduction>::with_capacity(productions_count); for _i in 0..productions_count { let production = LRProduction::new(data, &mut index); productions.push(production); } LRkAutomaton { columns_count, states_count, columns_map, contexts, table, productions } } /// Gets the number of states in this automaton pub fn get_states_count(&self) -> usize { self.states_count } /// Gets the contexts opened by the specified state pub fn get_contexts(&self, state: u32) -> &LRContexts { &self.contexts[state as usize] } /// Gets the LR(k) action for the given state and sid pub fn get_action(&self, state: u32, identifier: u32) -> LRAction { let column = self.columns_map.get(identifier) as usize; LRAction { table: &self.table, offset: (state as usize * self.columns_count + column) * 2 } } /// Gets the i-th production pub fn get_production(&self, index: usize) -> &LRProduction { &self.productions[index] } /// Gets the expected terminals for 
the specified state pub fn get_expected(&self, state: u32, terminals: &'static [Symbol]) -> LRExpected { let mut expected = LRExpected::new(); let mut offset = self.columns_count * state as usize * 2; for terminal in terminals.iter() { let action = self.table[offset]; if action == LR_ACTION_CODE_SHIFT { expected.shifts.push(*terminal); } else if action == LR_ACTION_CODE_REDUCE { expected.reductions.push(*terminal); } offset += 2; } expected } } const ESTIMATION_BIAS: usize = 5; /// The data about a reduction struct LRkAstReduction { /// The length of the reduction length: usize, /// The sub-tree build-up cache cache: SubTree, /// The number of items popped from the stack pop_count: usize } impl LRkAstReduction { /// Turns this reduction data into a subtree pub fn into_subtree(self) -> SubTree { self.cache } } /// Represents the builder of Parse Trees for LR(k) parsers struct LRkAstBuilder<'l> { /// Lexer associated to this parser lexer: &'l mut dyn Lexer<'l>, /// The stack of semantic objects stack: Vec<SubTree>, /// The AST being built result: Ast<'l>, /// The reduction handle represented as the indices of the sub-trees in the cache handle: Vec<usize>, /// The data of the current reduction reduction: Option<LRkAstReduction> } impl<'l> SemanticBody for LRkAstBuilder<'l> { fn get_element_at(&self, index: usize) -> SemanticElement { match self.reduction { None => panic!("Not in a reduction"), Some(ref data) => { let label = data.cache.get_label_at(self.handle[index]); match label.get_type() { TableType::Token => { SemanticElement::Token(self.lexer.get_output().get_token(label.get_index())) } TableType::Variable => { SemanticElement::Variable(self.result.get_variables()[label.get_index()]) } TableType::Virtual => { SemanticElement::Virtual(self.result.get_virtuals()[label.get_index()]) } TableType::None => SemanticElement::Terminal(self.lexer.get_terminals()[0]) } } } } fn length(&self) -> usize { self.handle.len() } } impl<'l> LRkAstBuilder<'l> { /// Initializes the builder with the given stack size pub fn new(lexer: &'l mut dyn Lexer<'l>, result: Ast<'l>) -> LRkAstBuilder<'l> { LRkAstBuilder { lexer, stack: Vec::<SubTree>::new(), result, handle: Vec::<usize>::new(), reduction: None } } /// Gets the grammar variables for this AST pub fn get_variables(&self) -> &'static [Symbol] { self.result.get_variables() } /// Push a token onto the stack pub fn push_token(&mut self, index: usize) { let mut single = SubTree::new(1); single.push(TableElemRef::new(TableType::Token, index), TREE_ACTION_NONE); self.stack.push(single); } /// Prepares for the forthcoming reduction operations pub fn reduction_prepare(&mut self, variable_index: usize, length: usize, action: TreeAction) { let mut estimation = ESTIMATION_BIAS; for i in 0..length { estimation += self.stack[self.stack.len() - length + i].get_size(); } let mut cache = SubTree::new(estimation); cache.setup_root( TableElemRef::new(TableType::Variable, variable_index), action ); self.reduction = Some(LRkAstReduction { length, cache, pop_count: 0 }); } /// During a reduction, insert the given sub-tree fn reduction_add_sub( reduction: &mut LRkAstReduction, handle: &mut Vec<usize>, sub: &SubTree, action: TreeAction ) { if sub.get_action_at(0) == TREE_ACTION_REPLACE_BY_CHILDREN { let children_count = sub.get_children_count_at(0); // copy the children to the cache let mut cache_index = sub.copy_children_to(&mut reduction.cache); // setup the handle let mut sub_index = 1; for _i in 0..children_count { let size = sub.get_children_count_at(sub_index) + 1; 
handle.push(cache_index);
                cache_index += size;
                sub_index += size;
            }
        } else if action == TREE_ACTION_DROP {
            // do nothing
        } else {
            // copy the complete sub-tree to the cache
            let cache_index = sub.copy_to(&mut reduction.cache);
            handle.push(cache_index);
            if action != TREE_ACTION_NONE {
                reduction.cache.set_action_at(cache_index, action);
            }
        }
    }

    /// During a reduction, pops the top symbol from the stack and gives it a tree action
    pub fn reduction_pop(&mut self, action: TreeAction) {
        match self.reduction {
            None => panic!("Not in a reduction"),
            Some(ref mut reduction) => {
                let sub = &self.stack[self.stack.len() - reduction.length + reduction.pop_count];
                LRkAstBuilder::reduction_add_sub(reduction, &mut self.handle, sub, action);
                reduction.pop_count += 1;
            }
        }
    }

    /// During a reduction, inserts a virtual symbol
    pub fn reduction_add_virtual(&mut self, index: usize, action: TreeAction) {
        if action != TREE_ACTION_DROP {
            match self.reduction {
                None => panic!("Not in a reduction"),
                Some(ref mut reduction) => {
                    let cache_index = reduction
                        .cache
                        .push(TableElemRef::new(TableType::Virtual, index), action);
                    self.handle.push(cache_index);
                }
            }
        }
    }

    /// Finalizes the reduction operation
    pub fn reduce(&mut self) {
        let stack_size = self.stack.len();
        match self.reduction {
            None => panic!("Not in a reduction"),
            Some(ref mut reduction) => {
                if reduction.cache.get_action_at(0) == TREE_ACTION_REPLACE_BY_CHILDREN {
                    reduction.cache.set_children_count_at(0, self.handle.len());
                } else {
                    LRkAstBuilder::reduce_tree(reduction, &self.handle, &mut self.result);
                }
                // Put it on the stack
                self.stack.truncate(stack_size - reduction.length);
            }
        }
        let result = ::std::mem::replace(&mut self.reduction, None)
            .unwrap()
            .into_subtree();
        self.handle.clear();
        self.stack.push(result);
    }

    /// Applies the promotion tree actions to the cache and commits to the final AST
    pub fn reduce_tree(reduction: &mut LRkAstReduction, handle: &[usize], result: &mut Ast) {
        // apply the epsilon replace, if any
        if reduction.cache.get_action_at(0) == TREE_ACTION_REPLACE_BY_EPSILON {
            reduction
                .cache
                .set_label_at(0, TableElemRef::new(TableType::None, 0));
            reduction.cache.set_action_at(0, TREE_ACTION_NONE);
        }
        // promotion data
        let mut promotion = false;
        let mut insertion = 1;
        for item in handle.iter() {
            let item = *item;
            match reduction.cache.get_action_at(item) {
                TREE_ACTION_PROMOTE => {
                    if promotion {
                        // This is not the first promotion
                        // Commit the previously promoted node's children
                        reduction.cache.set_children_count_at(0, insertion - 1);
                        reduction.cache.commit_children_of(0, result);
                        // Re-put the previously promoted node in the cache
                        reduction.cache.move_node(0, 1);
                        insertion = 2;
                    }
                    promotion = true;
                    // Save the new promoted node
                    reduction.cache.move_node(item, 0);
                    // Repack the children on the left if any
                    let nb = reduction.cache.get_children_count_at(0);
                    reduction.cache.move_range(item + 1, insertion, nb);
                    insertion += nb;
                }
                _ => {
                    // Commit the children if any
                    reduction.cache.commit_children_of(item, result);
                    // Repack the sub-root on the left
                    if insertion != item {
                        reduction.cache.move_node(item, insertion);
                    }
                    insertion += 1;
                }
            }
        }
        // finalize the sub-tree data
        reduction.cache.set_children_count_at(0, insertion - 1);
    }

    /// Commits the tree's root
    pub fn commit_root(&mut self) {
        let length = self.stack.len();
        if length > 1 {
            let head = &mut self.stack[length - 2];
            head.commit(&mut self.result);
        }
    }
}

/// The head of an LR(k) parser
#[derive(Copy, Clone)]
struct LRkHead {
    /// The automaton's state
    state: u32,
    /// The symbol identifier
    identifier: u32
}
struct LRkParserData<'a> { /// The parser's automaton automaton: LRkAutomaton, /// The parser's stack stack: Vec<LRkHead>, /// The grammar variables variables: &'static [Symbol], /// The semantic actions actions: &'a mut dyn FnMut(usize, Symbol, &dyn SemanticBody) } impl<'a> ContextProvider for LRkParserData<'a> { /// Gets the priority of the specified context required by the specified terminal /// The priority is an unsigned integer. The lesser the value the higher the priority. /// The absence of value represents the unavailability of the required context. fn get_context_priority( &self, token_count: usize, context: u16, terminal_id: u32 ) -> Option<usize> { // the default context is always active if context == DEFAULT_CONTEXT { return Some(usize::MAX); } if token_count == 0 { // this is the first token, does it open the context? let contexts = self.automaton.get_contexts(0); return if contexts.opens(terminal_id, context) { Some(0) } else { None }; } // retrieve the action for this terminal let state = self.stack[self.stack.len() - 1].state; let mut action = self.automaton.get_action(state, terminal_id); // if the terminal is unexpected, do not validate if action.get_code() == LR_ACTION_CODE_NONE { return None; } // does the context opens with the terminal? if action.get_code() == LR_ACTION_CODE_SHIFT && self .automaton .get_contexts(state) .opens(terminal_id, context) { return Some(0); } let production = if action.get_code() == LR_ACTION_CODE_REDUCE { Some(self.automaton.get_production(action.get_data() as usize)) } else { None }; // look into the stack for the opening of the context let mut i = self.stack.len() - 2; loop { let state = self.stack[i].state; let id = self.stack[i + 1].identifier; if self.automaton.get_contexts(state).opens(id, context) { // the context opens here // but is it closed by the reduction (if any)? match production { None => return Some(self.stack.len() - 1 - i), Some(data) => { if i < self.stack.len() - 1 - data.reduction_length { return Some(self.stack.len() - 1 - i); } } } } if i == 0 { break; } i -= 1; } // at this point, the requested context is not yet open or is closed by a reduction // now, if the action is something else than a reduction (shift, accept or error), // the context can never be produced // for the context to open, a new state must be pushed onto the stack // this means that the provided terminal must trigger a chain of at least one reduction if action.get_code() != LR_ACTION_CODE_REDUCE { return None; } // there is at least one reduction, simulate let mut my_stack = self.stack.clone(); while action.get_code() == LR_ACTION_CODE_REDUCE { // execute the reduction let production = self.automaton.get_production(action.get_data() as usize); let variable = self.variables[production.head]; let length = my_stack.len(); my_stack.truncate(length - production.reduction_length); // this must be a shift action = self .automaton .get_action(my_stack[my_stack.len() - 1].state, variable.id); my_stack.push(LRkHead { state: u32::from(action.get_data()), identifier: variable.id }); // now, get the new action for the terminal action = self .automaton .get_action(u32::from(action.get_data()), terminal_id); } // is this a shift action that opens the context? 
if action.get_code() == LR_ACTION_CODE_SHIFT && self .automaton .get_contexts(my_stack[my_stack.len() - 1].state) .opens(terminal_id, context) { Some(0) } else { None } } } impl<'a> LRkParserData<'a> { /// Checks whether the specified terminal is indeed expected for a reduction /// This check is required because in the case of a base LALR graph, /// some terminals expected for reduction in the automaton are coming from other paths. fn check_is_expected(&self, terminal: Symbol) -> bool { // copy the stack to use for the simulation let mut my_stack = self.stack.clone(); let mut action = self .automaton .get_action(my_stack[my_stack.len() - 1].state, terminal.id); while action.get_code() != LR_ACTION_CODE_NONE { if action.get_code() == LR_ACTION_CODE_SHIFT { // yep, the terminal was expected return true; } if action.get_code() == LR_ACTION_CODE_REDUCE { // execute the reduction let production = self.automaton.get_production(action.get_data() as usize); let variable = self.variables[production.head]; let length = my_stack.len(); my_stack.truncate(length - production.reduction_length); // this must be a shift action = self .automaton .get_action(my_stack[my_stack.len() - 1].state, variable.id); my_stack.push(LRkHead { state: u32::from(action.get_data()), identifier: variable.id }); // now, get the new action for the terminal action = self .automaton .get_action(u32::from(action.get_data()), terminal.id); } } // nope, that was a pathological case in a LALR graph false } /// Parses on the specified token kernel fn parse_on_token(&mut self, kernel: TokenKernel, builder: &mut LRkAstBuilder) -> LRActionCode { let stack = &mut self.stack; loop { let head = stack[stack.len() - 1]; let action = self.automaton.get_action(head.state, kernel.terminal_id); if action.get_code() == LR_ACTION_CODE_SHIFT { stack.push(LRkHead { state: u32::from(action.get_data()), identifier: kernel.terminal_id }); builder.push_token(kernel.index as usize); return action.get_code(); } if action.get_code() != LR_ACTION_CODE_REDUCE { return action.get_code(); } // now reduce let production = self.automaton.get_production(action.get_data() as usize); let variable = LRkParserData::reduce(production, builder, &mut self.actions); let length = stack.len(); stack.truncate(length - production.reduction_length); let action = self.automaton.get_action( stack[stack.len() - 1].state, builder.get_variables()[production.head].id ); stack.push(LRkHead { state: u32::from(action.get_data()), identifier: variable.id }); } } /// Executes the given LR reduction fn reduce( production: &LRProduction, builder: &mut LRkAstBuilder, actions: &mut dyn FnMut(usize, Symbol, &dyn SemanticBody) ) -> Symbol { let variable = builder.get_variables()[production.head]; builder.reduction_prepare( production.head, production.reduction_length, production.head_action ); let mut i = 0; while i < production.bytecode.len() { let op_code = production.bytecode[i]; i += 1; match get_op_code_base(op_code) { LR_OP_CODE_BASE_SEMANTIC_ACTION => { let index = production.bytecode[i] as usize; i += 1; actions(index, variable, builder); } LR_OP_CODE_BASE_ADD_VIRTUAL => { let index = production.bytecode[i] as usize; i += 1; builder.reduction_add_virtual(index, get_op_code_tree_action(op_code)); } _ => { builder.reduction_pop(get_op_code_tree_action(op_code)); } } } builder.reduce(); variable } } /// Represents a base for all LR(k) parsers pub struct LRkParser<'l, 'a: 'l> { /// The parser's data data: LRkParserData<'a>, /// The AST builder builder: LRkAstBuilder<'l> } impl<'l, 'a: 
'l> LRkParser<'l, 'a> {
    /// Initializes a new instance of the parser
    pub fn new(
        lexer: &'l mut dyn Lexer<'l>,
        automaton: LRkAutomaton,
        ast: Ast<'l>,
        actions: &'a mut dyn FnMut(usize, Symbol, &dyn SemanticBody)
    ) -> LRkParser<'l, 'a> {
        let mut stack = Vec::<LRkHead>::new();
        stack.push(LRkHead {
            state: 0,
            identifier: 0
        });
        LRkParser {
            data: LRkParserData {
                automaton,
                stack,
                variables: ast.get_variables(),
                actions
            },
            builder: LRkAstBuilder::new(lexer, ast)
        }
    }

    /// Gets the next token in the kernel
    fn get_next_token(&mut self) -> Option<TokenKernel> {
        let data = &self.data;
        self.builder.lexer.get_next_token(data)
    }

    /// Builds the unexpected token error
    fn build_error(&self, kernel: TokenKernel) -> ParseErrorUnexpectedToken {
        let token = self
            .builder
            .lexer
            .get_output()
            .get_token(kernel.index as usize);
        let expected_on_head = self.data.automaton.get_expected(
            self.data.stack[self.data.stack.len() - 1].state,
            self.builder.lexer.get_terminals()
        );
        let mut my_expected = Vec::<Symbol>::new();
        for x in expected_on_head.shifts.iter() {
            my_expected.push(*x);
        }
        for x in expected_on_head.reductions.iter() {
            if self.data.check_is_expected(*x) {
                my_expected.push(*x);
            }
        }
        ParseErrorUnexpectedToken::new(
            token.get_position().unwrap(),
            token.get_span().unwrap().length,
            token.get_value().unwrap(),
            token.get_symbol(),
            my_expected
        )
    }
}

impl<'l, 'a> Parser for LRkParser<'l, 'a> {
    fn parse(&mut self) {
        let mut kernel_maybe = self.get_next_token();
        loop {
            match kernel_maybe {
                None => {
                    self.builder.commit_root();
                    return;
                }
                Some(kernel) => {
                    let action = self.data.parse_on_token(kernel, &mut self.builder);
                    match action {
                        LR_ACTION_CODE_ACCEPT => {
                            self.builder.commit_root();
                            return;
                        }
                        LR_ACTION_CODE_SHIFT => {
                            kernel_maybe = self.get_next_token();
                        }
                        _ => {
                            // this is an error
                            let error = self.build_error(kernel);
                            let errors = self.builder.lexer.get_errors();
                            errors.push_error_unexpected_token(error);
                            // TODO: try to recover here
                            return;
                        }
                    }
                }
            }
        }
    }
}
36.981742
100
0.541377
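The parser record above answers "is this terminal really expected?" by cloning the LR stack and replaying reductions until the terminal can be shifted or no action applies. The sketch below isolates that simulation loop; the `Action` enum and the `action` lookup table are hypothetical stand-ins for illustration, not hime_redist's actual tables or API.

// Minimal sketch of LR reduce-simulation on a cloned stack,
// mirroring the shape of `check_is_expected` above.
#[derive(Clone, Copy)]
enum Action {
    Shift(u32),         // push this state
    Reduce(usize, u16), // pop this many states, then goto on the variable
    None,
}

// Hypothetical (state, symbol) -> Action table for a toy grammar.
fn action(state: u32, symbol: u16) -> Action {
    match (state, symbol) {
        (0, 1) => Action::Shift(1),
        (1, 2) => Action::Reduce(1, 10), // reduce A -> t1 on lookahead t2
        (0, 10) => Action::Shift(2),     // goto on variable A
        (2, 2) => Action::Shift(3),
        _ => Action::None,
    }
}

/// Returns true if `terminal` can eventually be shifted from `stack`.
/// Reductions are simulated on a clone so the real stack is untouched.
fn is_expected(stack: &[u32], terminal: u16) -> bool {
    let mut stack = stack.to_vec();
    loop {
        match action(*stack.last().unwrap(), terminal) {
            Action::Shift(_) => return true,
            Action::Reduce(len, var) => {
                stack.truncate(stack.len() - len);
                // the goto on the reduced variable must be a shift
                match action(*stack.last().unwrap(), var) {
                    Action::Shift(s) => stack.push(s),
                    _ => return false,
                }
            }
            Action::None => return false,
        }
    }
}

fn main() {
    assert!(is_expected(&[0, 1], 2)); // t2 becomes shiftable after reducing A
    assert!(!is_expected(&[0], 2));
}

Cloning before simulating is the same trick the record uses (`let mut my_stack = self.stack.clone();`): speculative reductions must not disturb the parser's real state.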
16c7014ec1fb700bc38ed1abc1b47aaf41604dfc
4,174
use crate::{build_solidity, no_errors}; use parity_scale_codec::{Decode, Encode}; use solang::{file_resolver::FileResolver, Target}; use std::ffi::OsStr; #[test] fn emit() { let mut runtime = build_solidity( r##" contract a { event foo(bool) anonymous; function emit_event() public { emit foo(true); } }"##, ); runtime.constructor(0, Vec::new()); runtime.function("emit_event", Vec::new()); assert_eq!(runtime.events.len(), 1); let event = &runtime.events[0]; assert_eq!(event.topics.len(), 0); assert_eq!(event.data, (0u8, true).encode()); #[derive(Debug, PartialEq, Encode, Decode)] struct Foo(u8, bool, u32); let mut runtime = build_solidity( r##" contract a { event foo(bool,uint32,int64 indexed); event bar(uint32,uint64,string indexed); function emit_event() public { emit foo(true, 102, 1); emit bar(0xdeadcafe, 102, "foobar"); } }"##, ); runtime.constructor(0, Vec::new()); runtime.function("emit_event", Vec::new()); assert_eq!(runtime.events.len(), 2); let event = &runtime.events[0]; assert_eq!(event.topics.len(), 1); let mut t = [0u8; 32]; t[0] = 1; assert_eq!(event.topics[0], t); assert_eq!(event.data, Foo(0, true, 102).encode()); let event = &runtime.events[1]; assert_eq!(event.topics.len(), 1); assert_eq!( event.topics[0].to_vec(), hex::decode("38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e").unwrap() ); assert_eq!(event.data, (1u8, 0xdeadcafeu32, 102u64).encode()); } #[test] fn event_imported() { let mut cache = FileResolver::new(); cache.set_file_contents( "a.sol", r#" import "b.sol"; contract foo { function emit_event() public { emit bar(102, true); } } "# .to_string(), ); cache.set_file_contents( "b.sol", r#" event bar (uint32 indexed f1, bool x); "# .to_string(), ); let ns = solang::parse_and_resolve(OsStr::new("a.sol"), &mut cache, Target::default_substrate()); no_errors(ns.diagnostics); let mut cache = FileResolver::new(); cache.set_file_contents( "a.sol", r#" import "b.sol"; contract foo { function emit_event() public { emit baz.bar(102, true); } } "# .to_string(), ); cache.set_file_contents( "b.sol", r#" contract baz { event bar (uint32 indexed f1, bool x); } "# .to_string(), ); let ns = solang::parse_and_resolve(OsStr::new("a.sol"), &mut cache, Target::default_substrate()); no_errors(ns.diagnostics); let mut cache = FileResolver::new(); cache.set_file_contents( "a.sol", r#" import "b.sol" as X; contract foo { function emit_event() public { emit X.baz.bar(102, true); } } "# .to_string(), ); cache.set_file_contents( "b.sol", r#" contract baz { event bar (uint32 indexed f1, bool x); } "# .to_string(), ); let ns = solang::parse_and_resolve(OsStr::new("a.sol"), &mut cache, Target::default_substrate()); no_errors(ns.diagnostics); let mut cache = FileResolver::new(); cache.set_file_contents( "a.sol", r#" import "b.sol" as X; contract foo { function emit_event() public { emit X.bar(102, true); } } "# .to_string(), ); cache.set_file_contents( "b.sol", r#" event bar (uint32 indexed f1, bool x); "# .to_string(), ); let ns = solang::parse_and_resolve(OsStr::new("a.sol"), &mut cache, Target::default_substrate()); no_errors(ns.diagnostics); }
22.684783
96
0.517729
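The assertions in the test record above compare emitted event data against SCALE encodings produced by the parity-scale-codec crate (with its derive feature enabled). A minimal round-trip sketch of that codec, showing the byte layout the test implicitly relies on:

// SCALE round-trip: u8 and bool encode as one byte each,
// u32 as fixed-width little-endian.
use parity_scale_codec::{Decode, Encode};

#[derive(Debug, PartialEq, Encode, Decode)]
struct Foo(u8, bool, u32);

fn main() {
    let foo = Foo(0, true, 102);
    let bytes = foo.encode();
    assert_eq!(bytes, vec![0u8, 1, 102, 0, 0, 0]);
    assert_eq!(Foo::decode(&mut &bytes[..]).unwrap(), foo);
}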
f55a5309cd2b3a0f17351148d10a09264f69ff30
477
pub mod connection; mod empty_mutation; mod empty_subscription; mod r#enum; mod list; mod maybe_undefined; mod merged_object; mod optional; mod query_root; mod upload; pub use empty_mutation::EmptyMutation; pub use empty_subscription::EmptySubscription; pub use maybe_undefined::MaybeUndefined; pub use merged_object::{MergedObject, MergedObjectSubscriptionTail, MergedObjectTail}; pub use query_root::QueryRoot; pub use r#enum::{EnumItem, EnumType}; pub use upload::Upload;
23.85
86
0.81761
ac51ba79ea6d145a2cbbdd1cd957eff226a791dc
4,720
//! Commonly used graphics related types.
use wgpu::{Backend, Backends, Color, DeviceType, PowerPreference, PresentMode};

/// Represents a color.
// Must match https://docs.rs/wgpu/0.12.0/wgpu/struct.Color.html.
pub type NSTDGLColor = Color;

/// Represents a graphics backend.
#[repr(C)]
#[allow(non_camel_case_types)]
#[derive(Clone, Copy, Debug, Hash)]
pub enum NSTDGLBackend {
    /// An unknown graphics backend.
    NSTD_GL_BACKEND_UNKNOWN,
    /// Vulkan.
    NSTD_GL_BACKEND_VULKAN,
    /// Metal.
    NSTD_GL_BACKEND_METAL,
    /// Direct3D 12.
    NSTD_GL_BACKEND_DX12,
    /// Direct3D 11.
    NSTD_GL_BACKEND_DX11,
    /// OpenGL.
    NSTD_GL_BACKEND_GL,
    /// Web based GPU.
    NSTD_GL_BACKEND_WEBGPU,
}
impl Default for NSTDGLBackend {
    #[inline]
    fn default() -> Self {
        Self::NSTD_GL_BACKEND_UNKNOWN
    }
}
impl Into<Backends> for NSTDGLBackend {
    #[inline]
    fn into(self) -> Backends {
        match self {
            Self::NSTD_GL_BACKEND_UNKNOWN => Backends::all(),
            Self::NSTD_GL_BACKEND_VULKAN => Backends::VULKAN,
            Self::NSTD_GL_BACKEND_METAL => Backends::METAL,
            Self::NSTD_GL_BACKEND_DX11 => Backends::DX11,
            Self::NSTD_GL_BACKEND_DX12 => Backends::DX12,
            Self::NSTD_GL_BACKEND_GL => Backends::GL,
            Self::NSTD_GL_BACKEND_WEBGPU => Backends::BROWSER_WEBGPU,
        }
    }
}
impl From<Backend> for NSTDGLBackend {
    #[inline]
    fn from(backend: Backend) -> Self {
        match backend {
            Backend::Empty => Self::NSTD_GL_BACKEND_UNKNOWN,
            Backend::Vulkan => Self::NSTD_GL_BACKEND_VULKAN,
            Backend::Metal => Self::NSTD_GL_BACKEND_METAL,
            Backend::Dx12 => Self::NSTD_GL_BACKEND_DX12,
            Backend::Dx11 => Self::NSTD_GL_BACKEND_DX11,
            Backend::Gl => Self::NSTD_GL_BACKEND_GL,
            Backend::BrowserWebGpu => Self::NSTD_GL_BACKEND_WEBGPU,
        }
    }
}

/// Represents a state's presentation mode.
#[repr(C)]
#[allow(non_camel_case_types)]
#[derive(Clone, Copy, Debug, Hash)]
pub enum NSTDGLPresentationMode {
    /// The presentation engine will request drawing immediately.
    NSTD_GL_PRESENTATION_MODE_IMMEDIATE,
    /// Waits for the vertical blanking period, but frames are submitted immediately.
    NSTD_GL_PRESENTATION_MODE_MAILBOX,
    /// Waits for the vertical blanking period, and frames are
    /// submitted with the monitor's refresh rate.
    NSTD_GL_PRESENTATION_MODE_FIFO,
}
impl Into<PresentMode> for NSTDGLPresentationMode {
    #[inline]
    fn into(self) -> PresentMode {
        match self {
            Self::NSTD_GL_PRESENTATION_MODE_IMMEDIATE => PresentMode::Immediate,
            Self::NSTD_GL_PRESENTATION_MODE_MAILBOX => PresentMode::Mailbox,
            Self::NSTD_GL_PRESENTATION_MODE_FIFO => PresentMode::Fifo,
        }
    }
}

/// Represents a power preference.
#[repr(C)]
#[allow(non_camel_case_types)]
#[derive(Clone, Copy, Debug, Hash)]
pub enum NSTDGLPowerPreference {
    /// Use the default power preference.
    NSTD_GL_POWER_PREFERENCE_DEFAULT,
    /// Use low GPU power.
    NSTD_GL_POWER_PREFERENCE_LOW,
    /// Use high GPU power.
    NSTD_GL_POWER_PREFERENCE_HIGH,
}
impl Into<PowerPreference> for NSTDGLPowerPreference {
    #[inline]
    fn into(self) -> PowerPreference {
        match self {
            Self::NSTD_GL_POWER_PREFERENCE_DEFAULT => PowerPreference::default(),
            Self::NSTD_GL_POWER_PREFERENCE_LOW => PowerPreference::LowPower,
            Self::NSTD_GL_POWER_PREFERENCE_HIGH => PowerPreference::HighPerformance,
        }
    }
}

/// Represents a device type.
#[repr(C)]
#[allow(non_camel_case_types)]
#[derive(Clone, Copy, Debug, Hash)]
pub enum NSTDGLDeviceType {
    /// An unknown device type.
    NSTD_GL_DEVICE_TYPE_UNKNOWN,
    /// An integrated GPU.
    NSTD_GL_DEVICE_TYPE_INTEGRATED_GPU,
    /// A physical GPU.
    NSTD_GL_DEVICE_TYPE_DISCRETE_GPU,
    /// A virtual/hosted GPU.
    NSTD_GL_DEVICE_TYPE_VIRTUAL_GPU,
    /// CPU/Software rendering.
NSTD_GL_DEVICE_TYPE_CPU, } impl Default for NSTDGLDeviceType { #[inline] fn default() -> Self { Self::NSTD_GL_DEVICE_TYPE_UNKNOWN } } impl From<DeviceType> for NSTDGLDeviceType { #[inline] fn from(device_type: DeviceType) -> Self { match device_type { DeviceType::Other => Self::NSTD_GL_DEVICE_TYPE_UNKNOWN, DeviceType::IntegratedGpu => Self::NSTD_GL_DEVICE_TYPE_INTEGRATED_GPU, DeviceType::DiscreteGpu => Self::NSTD_GL_DEVICE_TYPE_DISCRETE_GPU, DeviceType::VirtualGpu => Self::NSTD_GL_DEVICE_TYPE_VIRTUAL_GPU, DeviceType::Cpu => Self::NSTD_GL_DEVICE_TYPE_CPU, } } }
32.777778
85
0.672034
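The record above is a common FFI pattern: a `#[repr(C)]` enum mirrors a third-party library's enum, and `From`/`Into` impls translate between the two. Below is a compact, self-contained sketch of the pattern with hypothetical types (`LibMode` stands in for something like wgpu's `PresentMode`); implementing `From` instead of `Into` yields the `Into` direction for free, which is why clippy generally prefers it over the record's direct `Into` impls.

// Sketch of the repr(C) FFI enum <-> library enum mapping pattern.
#[derive(Clone, Copy, Debug, PartialEq)]
enum LibMode {
    Immediate,
    Mailbox,
    Fifo,
}

#[repr(C)]
#[allow(non_camel_case_types)]
#[derive(Clone, Copy, Debug)]
enum NSTDMode {
    NSTD_MODE_IMMEDIATE,
    NSTD_MODE_MAILBOX,
    NSTD_MODE_FIFO,
}

impl From<NSTDMode> for LibMode {
    fn from(mode: NSTDMode) -> Self {
        match mode {
            NSTDMode::NSTD_MODE_IMMEDIATE => LibMode::Immediate,
            NSTDMode::NSTD_MODE_MAILBOX => LibMode::Mailbox,
            NSTDMode::NSTD_MODE_FIFO => LibMode::Fifo,
        }
    }
}

fn main() {
    // `into()` works because `From<NSTDMode> for LibMode` exists.
    let mode: LibMode = NSTDMode::NSTD_MODE_MAILBOX.into();
    assert_eq!(mode, LibMode::Mailbox);
}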
16ac6ed786b973b91425a7f4a08e49a697dab461
7,050
use std::env;
use std::fs;
use std::time;

fn test_for_longest(line: &str, start: usize, end: usize, mut longest: String, longest_len: usize) -> String {
    //iterate through line, char by char
    //x is the index of char
    for x in start + 1..end {
        //check if xth char is a space
        //this is functionally the same as `string[x]` in other languages
        //this is safer though because Rust strings are utf8 and can
        //be one or two chars in length
        if line.chars().nth(x) == Some(' ') {
            //if x is a space and the len of the substring is greater
            //than the prior longest, check the substring for a longer word
            //check to the left
            if x - start > longest_len {
                longest = test_for_longest(line, start, x - 1, longest, longest_len);
            }
            //check to the right
            if end - x + 1 > longest_len {
                longest = test_for_longest(line, x + 1, end, longest, longest_len);
            }
            //If x is a space, return `longest`
            //Note explicit `return`
            //Don't grok the need for this, but it is needed.
            //irc says it's needed because of the missing `else`, but
            //the docs don't mention this so ¯\_(ツ)_/¯
            return longest
        }
    }
    //return the longest
    //due to Rust's treatment of all strings as UTF8
    //This will `skip()` to the first char in the substring
    //we `take()` all char from `start` to the end of the word
    //then `collect()` it back into `std::string::String`
    //We use `collect()` because `line` is a `&str` and we want
    //to return a `String` so it needs to be transformed via iteration
    //I tried the &str[start..end] method but that panics for $REASONS
    //A shame because I believe that would speed this up
    line.chars().skip(start).take(end - start + 1).collect()
}

fn striding_longest(file: String) -> String {
    let mut longest_len: usize = 0;
    let mut longest = String::new();
    for words in file.lines() {
        let line_len = words.len();
        if line_len > longest_len {
            let mut start: usize = 0;
            let mut pos = longest_len + 1;
            while pos < line_len {
                if words.chars().nth(pos) == Some(' ') {
                    longest = test_for_longest(&words, start, pos - 1, longest, longest_len);
                    longest_len = longest.len();
                    start = pos + 1;
                    pos = start + longest_len + 1;
                } else {
                    pos = pos + 1;
                }
            }
            if line_len - 1 - start > longest_len {
                longest = test_for_longest(&words, start, pos - 1, longest, longest_len);
                longest_len = longest.len();
            }
        }
    }
    longest
}

fn get_longest(words: std::str::SplitWhitespace) -> std::string::String {
    //allocate two mutable variables to store the longest word and its length
    let mut longest_word = String::new();
    let mut longest_len: usize = 0;
    //iterate through the `str::SplitWhitespace` word by word
    for word in words {
        //if the word is longer than the prior longest word, set it as the longest
        if word.len() > longest_len {
            longest_len = word.len();
            longest_word = word.to_string();
        }
    }
    //return the longest word found
    longest_word
}

pub fn single() {
    //get $1 from sys
    let filename = env::args().nth(1).unwrap();
    //read argv as a string
    let file: std::string::String = fs::read_to_string(filename).expect("");
    //use the string method `split_whitespace()` to split each word in string into an iterator
    let words = file.split_whitespace();

    let start = time::Instant::now();
    let result = get_longest(words);
    let elapsed = start.elapsed();
    let seconds = ((elapsed.as_secs() as f64) + (elapsed.subsec_nanos() as f64 / 1_000_000_000.0)) * 1000.0;
    println!("Simple: Found {} in {} milliseconds.", result, seconds);

    //Tried to find an elegant way to get around the below but due to ownership
    //and `std::str::SplitWhitespace` not being well documented to allocate an
    //empty variable of that type, I settled for just reusing the prior `file` and
    //`filename` variables and reassigning them to the same thing...
    //There are two issues. First, `filename` is owned by `fs::read_to_string`
    //and I can't take ownership of `filename` here. Second, `file` is consumed by
    //`split_whitespace()` and I wasn't able to `clone()` it because `words` lifetime ends
    //before `file`'s, which is why I went hunting for the means of allocating an empty
    //`std::str::SplitWhitespace`.
    //Long live the jankiness!
    let filename = env::args().nth(1).unwrap();
    let file: std::string::String = fs::read_to_string(filename).expect("");

    let start = time::Instant::now();
    //My pythonic-esque solution
    //Create a Vec of the words, then sort words by length.
    //The answer will be the 0th word in the Vec.
    //May be able to use `filter()` or another method to
    //increase speed but this was just a fun solution so I didn't bother
    let mut words: Vec<_> = file.split_whitespace().collect();
    words.sort_by(|a, b| b.len().cmp(&a.len()));
    let elapsed = start.elapsed();
    let seconds = ((elapsed.as_secs() as f64) + (elapsed.subsec_nanos() as f64 / 1_000_000_000.0)) * 1000.0;
    println!("'Pythonic': Found {} in {} milliseconds.", words[0], seconds);

    let filename = env::args().nth(1).unwrap();
    let file: std::string::String = fs::read_to_string(filename).expect("");

    let start = time::Instant::now();
    //Very fast; the other approaches don't compare.
    //Key each word by `word.len()` and take the word with the max key,
    //then unwrap it and profit!
    let result = file.split_whitespace().max_by_key(|word| word.len()).unwrap();
    let elapsed = start.elapsed();
    let seconds = ((elapsed.as_secs() as f64) + (elapsed.subsec_nanos() as f64 / 1_000_000_000.0)) * 1000.0;
    println!("HashTable Lookup: Found {} in {} milliseconds.", result, seconds);

    //See lines 71 through 81 above for why I am re-allocating `filename` and `file`
    let filename = env::args().nth(1).unwrap();
    let file: String = fs::read_to_string(filename).expect("Bummer, man!");
    //The above was a `BufReader` but by loading to heap instead of streaming
    //this became a full 1000ms faster. irc said heap would be quicker, guess
    //they were right.
    let start = time::Instant::now();
    let result = striding_longest(file);
    let elapsed = start.elapsed();
    let seconds = ((elapsed.as_secs() as f64) + (elapsed.subsec_nanos() as f64 / 1_000_000_000.0)) * 1000.0;
    println!("Striding: Found {} in {} milliseconds.", result, seconds);
}
46.078431
108
0.596312
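Despite its "HashTable Lookup" label, the fastest variant in the record above is a single linear scan: `max_by_key` walks the iterator once, keeping the word whose length is largest so far. Isolated as a function (note that on ties `max_by_key` keeps the last maximal element, so "foxes" wins below):

fn longest(text: &str) -> Option<&str> {
    // one pass, no allocation: keep the word with the max length
    text.split_whitespace().max_by_key(|word| word.len())
}

fn main() {
    assert_eq!(longest("the quick brown foxes"), Some("foxes"));
    assert_eq!(longest(""), None);
}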
d6636176e1d08183af9b8e1c83c4f900e21c8c91
1,815
/* Copyright 2013 10gen Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ use mongo::client::*; use mongo::coll::*; use bson::formattable::*; use bson::encode::*; /** * Helper fn for tests; fills a collection with a given number of docs. */ pub fn fill_coll(db : ~str, coll : ~str, client : @Client, n : uint) -> (Collection, ~[~str], ~[BsonDocument]) { let coll = Collection::new(db, coll, client); // clear out collection to start from scratch coll.remove(None, None, None, None); // create and insert batch let mut ins_strs = ~[]; let mut ins_docs = ~[]; let mut i = 0; for n.times { let ins_str = fmt!("{ \"_id\":%d, \"a\":%d, \"b\":\"ins %d\", \"loc\":{ \"x\":%d, \"y\":%d }, \"insert no\":%d }", i, i/2, i, -i, i+4, i); let ins_doc = match ins_str.to_bson_t() { Embedded(bson) => *bson, _ => fail!("what happened"), }; ins_strs.push(ins_str); ins_docs.push(ins_doc); i += 1; } coll.insert_batch(ins_strs.clone(), None, None, None); (coll, ins_strs, ins_docs) }
33
75
0.544353
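The helper in the record above is pre-1.0 Rust (`~str`, `@Client`, `fmt!`, `for n.times`). As a point of comparison only, here is a sketch of the same document-string generation loop in current Rust, with the MongoDB/BSON parts left out; the function name is invented for illustration.

// Generates the same JSON-ish insert strings as the 2013 helper above.
fn make_insert_strings(n: usize) -> Vec<String> {
    (0..n as i64)
        .map(|i| {
            format!(
                "{{ \"_id\":{}, \"a\":{}, \"b\":\"ins {}\", \
                 \"loc\":{{ \"x\":{}, \"y\":{} }}, \"insert no\":{} }}",
                i, i / 2, i, -i, i + 4, i
            )
        })
        .collect()
}

fn main() {
    let docs = make_insert_strings(2);
    assert_eq!(docs.len(), 2);
    assert!(docs[1].contains("\"_id\":1"));
}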
db57137bda8860db148a4e763838fcd2a557ac96
273,553
#![doc = "generated by AutoRust 0.1.0"] #![allow(unused_mut)] #![allow(unused_variables)] #![allow(unused_imports)] use super::{models, models::*, API_VERSION}; pub mod autoscale_settings { use super::{models, models::*, API_VERSION}; pub async fn list_by_resource_group( operation_config: &crate::OperationConfig, resource_group_name: &str, subscription_id: &str, ) -> std::result::Result<AutoscaleSettingResourceCollection, list_by_resource_group::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.Insights/autoscalesettings", operation_config.base_path(), subscription_id, resource_group_name ); let mut url = url::Url::parse(url_str).map_err(list_by_resource_group::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_by_resource_group::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(list_by_resource_group::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_by_resource_group::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: AutoscaleSettingResourceCollection = serde_json::from_slice(rsp_body) .map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body) .map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?; Err(list_by_resource_group::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod list_by_resource_group { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn get( operation_config: &crate::OperationConfig, resource_group_name: &str, autoscale_setting_name: &str, subscription_id: &str, ) -> std::result::Result<AutoscaleSettingResource, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.Insights/autoscalesettings/{}", operation_config.base_path(), subscription_id, resource_group_name, autoscale_setting_name ); let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?; let mut req_builder = 
http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: AutoscaleSettingResource = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Err(get::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod get { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn create_or_update( operation_config: &crate::OperationConfig, resource_group_name: &str, autoscale_setting_name: &str, parameters: &AutoscaleSettingResource, subscription_id: &str, ) -> std::result::Result<create_or_update::Response, create_or_update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.Insights/autoscalesettings/{}", operation_config.base_path(), subscription_id, resource_group_name, autoscale_setting_name ); let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(create_or_update::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(parameters).map_err(create_or_update::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await 
.map_err(create_or_update::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: AutoscaleSettingResource = serde_json::from_slice(rsp_body) .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(create_or_update::Response::Ok200(rsp_value)) } http::StatusCode::CREATED => { let rsp_body = rsp.body(); let rsp_value: AutoscaleSettingResource = serde_json::from_slice(rsp_body) .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(create_or_update::Response::Created201(rsp_value)) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body) .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?; Err(create_or_update::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod create_or_update { use super::{models, models::*, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200(AutoscaleSettingResource), Created201(AutoscaleSettingResource), } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, autoscale_setting_name: &str, autoscale_setting_resource: &AutoscaleSettingResourcePatch, ) -> std::result::Result<AutoscaleSettingResource, update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.Insights/autoscalesettings/{}", operation_config.base_path(), subscription_id, resource_group_name, autoscale_setting_name ); let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PATCH); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(update::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(autoscale_setting_resource).map_err(update::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: AutoscaleSettingResource = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } 
status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Err(update::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod update { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn delete( operation_config: &crate::OperationConfig, resource_group_name: &str, autoscale_setting_name: &str, subscription_id: &str, ) -> std::result::Result<delete::Response, delete::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.Insights/autoscalesettings/{}", operation_config.base_path(), subscription_id, resource_group_name, autoscale_setting_name ); let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(delete::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(delete::Response::Ok200), http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?; Err(delete::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod delete { use super::{models, models::*, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] 
GetTokenError(azure_core::Error), } } pub async fn list_by_subscription( operation_config: &crate::OperationConfig, subscription_id: &str, ) -> std::result::Result<AutoscaleSettingResourceCollection, list_by_subscription::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/providers/Microsoft.Insights/autoscalesettings", operation_config.base_path(), subscription_id ); let mut url = url::Url::parse(url_str).map_err(list_by_subscription::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_by_subscription::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list_by_subscription::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_by_subscription::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: AutoscaleSettingResourceCollection = serde_json::from_slice(rsp_body) .map_err(|source| list_by_subscription::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body) .map_err(|source| list_by_subscription::Error::DeserializeError(source, rsp_body.clone()))?; Err(list_by_subscription::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod list_by_subscription { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod operations { use super::{models, models::*, API_VERSION}; pub async fn list(operation_config: &crate::OperationConfig) -> std::result::Result<OperationListResult, list::Error> { let http_client = operation_config.http_client(); let url_str = &format!("{}/providers/Microsoft.Insights/operations", operation_config.base_path(),); let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } 
url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: OperationListResult = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(list::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod list { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod alert_rule_incidents { use super::{models, models::*, API_VERSION}; pub async fn get( operation_config: &crate::OperationConfig, resource_group_name: &str, rule_name: &str, incident_name: &str, subscription_id: &str, ) -> std::result::Result<Incident, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourcegroups/{}/providers/microsoft.insights/alertrules/{}/incidents/{}", operation_config.base_path(), subscription_id, resource_group_name, rule_name, incident_name ); let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: Incident = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Err(get::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod get { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: 
http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list_by_alert_rule( operation_config: &crate::OperationConfig, resource_group_name: &str, rule_name: &str, subscription_id: &str, ) -> std::result::Result<IncidentListResult, list_by_alert_rule::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourcegroups/{}/providers/microsoft.insights/alertrules/{}/incidents", operation_config.base_path(), subscription_id, resource_group_name, rule_name ); let mut url = url::Url::parse(url_str).map_err(list_by_alert_rule::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_by_alert_rule::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list_by_alert_rule::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_by_alert_rule::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: IncidentListResult = serde_json::from_slice(rsp_body) .map_err(|source| list_by_alert_rule::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(list_by_alert_rule::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod list_by_alert_rule { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod alert_rules { use super::{models, models::*, API_VERSION}; pub async fn get( operation_config: &crate::OperationConfig, resource_group_name: &str, rule_name: &str, subscription_id: &str, ) -> std::result::Result<AlertRuleResource, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.Insights/alertrules/{}", 
operation_config.base_path(), subscription_id, resource_group_name, rule_name ); let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: AlertRuleResource = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Err(get::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod get { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn create_or_update( operation_config: &crate::OperationConfig, resource_group_name: &str, rule_name: &str, parameters: &AlertRuleResource, subscription_id: &str, ) -> std::result::Result<create_or_update::Response, create_or_update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.Insights/alertrules/{}", operation_config.base_path(), subscription_id, resource_group_name, rule_name ); let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(create_or_update::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(parameters).map_err(create_or_update::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = 
req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(create_or_update::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: AlertRuleResource = serde_json::from_slice(rsp_body) .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(create_or_update::Response::Ok200(rsp_value)) } http::StatusCode::CREATED => { let rsp_body = rsp.body(); let rsp_value: AlertRuleResource = serde_json::from_slice(rsp_body) .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(create_or_update::Response::Created201(rsp_value)) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body) .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?; Err(create_or_update::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod create_or_update { use super::{models, models::*, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200(AlertRuleResource), Created201(AlertRuleResource), } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, rule_name: &str, alert_rules_resource: &AlertRuleResourcePatch, ) -> std::result::Result<update::Response, update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.Insights/alertrules/{}", operation_config.base_path(), subscription_id, resource_group_name, rule_name ); let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PATCH); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(update::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(alert_rules_resource).map_err(update::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: AlertRuleResource = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, 
rsp_body.clone()))?; Ok(update::Response::Ok200(rsp_value)) } http::StatusCode::CREATED => { let rsp_body = rsp.body(); let rsp_value: AlertRuleResource = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(update::Response::Created201(rsp_value)) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Err(update::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod update { use super::{models, models::*, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200(AlertRuleResource), Created201(AlertRuleResource), } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn delete( operation_config: &crate::OperationConfig, resource_group_name: &str, rule_name: &str, subscription_id: &str, ) -> std::result::Result<delete::Response, delete::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.Insights/alertrules/{}", operation_config.base_path(), subscription_id, resource_group_name, rule_name ); let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(delete::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), http::StatusCode::OK => Ok(delete::Response::Ok200), status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?; Err(delete::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod delete { use super::{models, models::*, API_VERSION}; #[derive(Debug)] pub enum Response { NoContent204, Ok200, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: 
{0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list_by_resource_group( operation_config: &crate::OperationConfig, resource_group_name: &str, subscription_id: &str, ) -> std::result::Result<AlertRuleResourceCollection, list_by_resource_group::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.Insights/alertrules", operation_config.base_path(), subscription_id, resource_group_name ); let mut url = url::Url::parse(url_str).map_err(list_by_resource_group::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_by_resource_group::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(list_by_resource_group::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_by_resource_group::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: AlertRuleResourceCollection = serde_json::from_slice(rsp_body) .map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body) .map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?; Err(list_by_resource_group::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod list_by_resource_group { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list_by_subscription( operation_config: &crate::OperationConfig, subscription_id: &str, ) -> std::result::Result<AlertRuleResourceCollection, list_by_subscription::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/providers/Microsoft.Insights/alertrules", operation_config.base_path(), subscription_id ); let mut url = 
url::Url::parse(url_str).map_err(list_by_subscription::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_by_subscription::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list_by_subscription::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_by_subscription::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: AlertRuleResourceCollection = serde_json::from_slice(rsp_body) .map_err(|source| list_by_subscription::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body) .map_err(|source| list_by_subscription::Error::DeserializeError(source, rsp_body.clone()))?; Err(list_by_subscription::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod list_by_subscription { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod log_profiles { use super::{models, models::*, API_VERSION}; pub async fn get( operation_config: &crate::OperationConfig, log_profile_name: &str, subscription_id: &str, ) -> std::result::Result<LogProfileResource, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/providers/Microsoft.Insights/logprofiles/{}", operation_config.base_path(), subscription_id, log_profile_name ); let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?; 
match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: LogProfileResource = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Err(get::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod get { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn create_or_update( operation_config: &crate::OperationConfig, log_profile_name: &str, parameters: &LogProfileResource, subscription_id: &str, ) -> std::result::Result<LogProfileResource, create_or_update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/providers/Microsoft.Insights/logprofiles/{}", operation_config.base_path(), subscription_id, log_profile_name ); let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(create_or_update::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(parameters).map_err(create_or_update::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(create_or_update::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: LogProfileResource = serde_json::from_slice(rsp_body) .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(create_or_update::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod create_or_update { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] 
ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn update( operation_config: &crate::OperationConfig, subscription_id: &str, log_profile_name: &str, log_profiles_resource: &LogProfileResourcePatch, ) -> std::result::Result<LogProfileResource, update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/providers/Microsoft.Insights/logprofiles/{}", operation_config.base_path(), subscription_id, log_profile_name ); let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PATCH); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(update::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(log_profiles_resource).map_err(update::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: LogProfileResource = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Err(update::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod update { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn delete( operation_config: &crate::OperationConfig, log_profile_name: &str, subscription_id: &str, ) -> std::result::Result<(), delete::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/providers/Microsoft.Insights/logprofiles/{}", operation_config.base_path(), subscription_id, log_profile_name ); let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let 
Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(delete::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(()), status_code => { let rsp_body = rsp.body(); Err(delete::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod delete { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list( operation_config: &crate::OperationConfig, subscription_id: &str, ) -> std::result::Result<LogProfileCollection, list::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/providers/Microsoft.Insights/logprofiles", operation_config.base_path(), subscription_id ); let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: LogProfileCollection = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(list::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod list { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] 
BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod diagnostic_settings { use super::{models, models::*, API_VERSION}; pub async fn get( operation_config: &crate::OperationConfig, resource_uri: &str, name: &str, ) -> std::result::Result<DiagnosticSettingsResource, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/{}/providers/Microsoft.Insights/diagnosticSettings/{}", operation_config.base_path(), resource_uri, name ); let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: DiagnosticSettingsResource = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Err(get::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod get { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn create_or_update( operation_config: &crate::OperationConfig, resource_uri: &str, parameters: &DiagnosticSettingsResource, name: &str, ) -> std::result::Result<DiagnosticSettingsResource, create_or_update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/{}/providers/Microsoft.Insights/diagnosticSettings/{}", operation_config.base_path(), resource_uri, name ); let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let 
Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(create_or_update::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(parameters).map_err(create_or_update::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(create_or_update::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: DiagnosticSettingsResource = serde_json::from_slice(rsp_body) .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body) .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?; Err(create_or_update::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod create_or_update { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn delete( operation_config: &crate::OperationConfig, resource_uri: &str, name: &str, ) -> std::result::Result<delete::Response, delete::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/{}/providers/Microsoft.Insights/diagnosticSettings/{}", operation_config.base_path(), resource_uri, name ); let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(delete::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(delete::Response::Ok200), http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let rsp_body = rsp.body(); let rsp_value: 
ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?; Err(delete::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod delete { use super::{models, models::*, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list( operation_config: &crate::OperationConfig, resource_uri: &str, ) -> std::result::Result<DiagnosticSettingsResourceCollection, list::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/{}/providers/Microsoft.Insights/diagnosticSettings", operation_config.base_path(), resource_uri ); let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: DiagnosticSettingsResourceCollection = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?; Err(list::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod list { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod diagnostic_settings_category { use super::{models, 
models::*, API_VERSION}; pub async fn get( operation_config: &crate::OperationConfig, resource_uri: &str, name: &str, ) -> std::result::Result<DiagnosticSettingsCategoryResource, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/{}/providers/Microsoft.Insights/diagnosticSettingsCategories/{}", operation_config.base_path(), resource_uri, name ); let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: DiagnosticSettingsCategoryResource = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Err(get::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod get { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list( operation_config: &crate::OperationConfig, resource_uri: &str, ) -> std::result::Result<DiagnosticSettingsCategoryResourceCollection, list::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/{}/providers/Microsoft.Insights/diagnosticSettingsCategories", operation_config.base_path(), resource_uri ); let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = 
req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: DiagnosticSettingsCategoryResourceCollection = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?; Err(list::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod list { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod action_groups { use super::{models, models::*, API_VERSION}; pub async fn get( operation_config: &crate::OperationConfig, resource_group_name: &str, action_group_name: &str, subscription_id: &str, ) -> std::result::Result<ActionGroupResource, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/microsoft.insights/actionGroups/{}", operation_config.base_path(), subscription_id, resource_group_name, action_group_name ); let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: ActionGroupResource = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Err(get::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod get { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: 
http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn create_or_update( operation_config: &crate::OperationConfig, resource_group_name: &str, action_group_name: &str, action_group: &ActionGroupResource, subscription_id: &str, ) -> std::result::Result<create_or_update::Response, create_or_update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/microsoft.insights/actionGroups/{}", operation_config.base_path(), subscription_id, resource_group_name, action_group_name ); let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(create_or_update::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(action_group).map_err(create_or_update::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(create_or_update::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: ActionGroupResource = serde_json::from_slice(rsp_body) .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(create_or_update::Response::Ok200(rsp_value)) } http::StatusCode::CREATED => { let rsp_body = rsp.body(); let rsp_value: ActionGroupResource = serde_json::from_slice(rsp_body) .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(create_or_update::Response::Created201(rsp_value)) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body) .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?; Err(create_or_update::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod create_or_update { use super::{models, models::*, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200(ActionGroupResource), Created201(ActionGroupResource), } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to 
serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, action_group_name: &str, action_group_patch: &ActionGroupPatchBody, ) -> std::result::Result<ActionGroupResource, update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/microsoft.insights/actionGroups/{}", operation_config.base_path(), subscription_id, resource_group_name, action_group_name ); let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PATCH); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(update::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(action_group_patch).map_err(update::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: ActionGroupResource = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Err(update::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod update { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn delete( operation_config: &crate::OperationConfig, resource_group_name: &str, action_group_name: &str, subscription_id: &str, ) -> std::result::Result<delete::Response, delete::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/microsoft.insights/actionGroups/{}", operation_config.base_path(), subscription_id, resource_group_name, action_group_name ); let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?; let mut req_builder = 
http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(delete::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(delete::Response::Ok200), http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?; Err(delete::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod delete { use super::{models, models::*, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list_by_subscription_id( operation_config: &crate::OperationConfig, subscription_id: &str, ) -> std::result::Result<ActionGroupList, list_by_subscription_id::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/providers/microsoft.insights/actionGroups", operation_config.base_path(), subscription_id ); let mut url = url::Url::parse(url_str).map_err(list_by_subscription_id::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_by_subscription_id::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(list_by_subscription_id::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_by_subscription_id::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: ActionGroupList = serde_json::from_slice(rsp_body) .map_err(|source| list_by_subscription_id::Error::DeserializeError(source, rsp_body.clone()))?; 
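// A 200 body has been parsed into `ActionGroupList` at this point. A minimal usage
// sketch for a caller (hypothetical: `config` is a crate::OperationConfig the caller
// builds, and the subscription id is a placeholder; neither is defined in this file):
//     let groups = action_groups::list_by_subscription_id(&config, "SUBSCRIPTION_ID").await?;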
Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body) .map_err(|source| list_by_subscription_id::Error::DeserializeError(source, rsp_body.clone()))?; Err(list_by_subscription_id::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod list_by_subscription_id { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list_by_resource_group( operation_config: &crate::OperationConfig, resource_group_name: &str, subscription_id: &str, ) -> std::result::Result<ActionGroupList, list_by_resource_group::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/microsoft.insights/actionGroups", operation_config.base_path(), subscription_id, resource_group_name ); let mut url = url::Url::parse(url_str).map_err(list_by_resource_group::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_by_resource_group::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(list_by_resource_group::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_by_resource_group::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: ActionGroupList = serde_json::from_slice(rsp_body) .map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body) .map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?; Err(list_by_resource_group::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod list_by_resource_group { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: 
{0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn enable_receiver( operation_config: &crate::OperationConfig, resource_group_name: &str, action_group_name: &str, enable_request: &EnableRequest, subscription_id: &str, ) -> std::result::Result<(), enable_receiver::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/microsoft.insights/actionGroups/{}/subscribe", operation_config.base_path(), subscription_id, resource_group_name, action_group_name ); let mut url = url::Url::parse(url_str).map_err(enable_receiver::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(enable_receiver::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(enable_request).map_err(enable_receiver::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(enable_receiver::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(enable_receiver::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(()), http::StatusCode::CONFLICT => Err(enable_receiver::Error::Conflict409 {}), status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body) .map_err(|source| enable_receiver::Error::DeserializeError(source, rsp_body.clone()))?; Err(enable_receiver::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod enable_receiver { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Error response #response_type")] Conflict409 {}, #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod activity_log_alerts { use super::{models, models::*, API_VERSION}; pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, activity_log_alert_name: &str, ) -> std::result::Result<ActivityLogAlertResource, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/microsoft.insights/activityLogAlerts/{}", operation_config.base_path(), subscription_id, resource_group_name, activity_log_alert_name ); let mut url = 
url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: ActivityLogAlertResource = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Err(get::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod get { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn create_or_update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, activity_log_alert_name: &str, activity_log_alert: &ActivityLogAlertResource, ) -> std::result::Result<create_or_update::Response, create_or_update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/microsoft.insights/activityLogAlerts/{}", operation_config.base_path(), subscription_id, resource_group_name, activity_log_alert_name ); let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(create_or_update::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(activity_log_alert).map_err(create_or_update::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = 
req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(create_or_update::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: ActivityLogAlertResource = serde_json::from_slice(rsp_body) .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(create_or_update::Response::Ok200(rsp_value)) } http::StatusCode::CREATED => { let rsp_body = rsp.body(); let rsp_value: ActivityLogAlertResource = serde_json::from_slice(rsp_body) .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(create_or_update::Response::Created201(rsp_value)) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body) .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?; Err(create_or_update::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod create_or_update { use super::{models, models::*, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200(ActivityLogAlertResource), Created201(ActivityLogAlertResource), } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, activity_log_alert_name: &str, activity_log_alert_patch: &ActivityLogAlertPatchBody, ) -> std::result::Result<ActivityLogAlertResource, update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/microsoft.insights/activityLogAlerts/{}", operation_config.base_path(), subscription_id, resource_group_name, activity_log_alert_name ); let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PATCH); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(update::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(activity_log_alert_patch).map_err(update::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: ActivityLogAlertResource = 
serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Err(update::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod update { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn delete( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, activity_log_alert_name: &str, ) -> std::result::Result<delete::Response, delete::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/microsoft.insights/activityLogAlerts/{}", operation_config.base_path(), subscription_id, resource_group_name, activity_log_alert_name ); let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(delete::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(delete::Response::Ok200), http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?; Err(delete::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod delete { use super::{models, models::*, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize 
response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list_by_subscription_id( operation_config: &crate::OperationConfig, subscription_id: &str, ) -> std::result::Result<ActivityLogAlertList, list_by_subscription_id::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/providers/microsoft.insights/activityLogAlerts", operation_config.base_path(), subscription_id ); let mut url = url::Url::parse(url_str).map_err(list_by_subscription_id::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_by_subscription_id::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(list_by_subscription_id::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_by_subscription_id::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: ActivityLogAlertList = serde_json::from_slice(rsp_body) .map_err(|source| list_by_subscription_id::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body) .map_err(|source| list_by_subscription_id::Error::DeserializeError(source, rsp_body.clone()))?; Err(list_by_subscription_id::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod list_by_subscription_id { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list_by_resource_group( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, ) -> std::result::Result<ActivityLogAlertList, list_by_resource_group::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/microsoft.insights/activityLogAlerts", operation_config.base_path(), subscription_id, resource_group_name ); let mut url = url::Url::parse(url_str).map_err(list_by_resource_group::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = 
token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_by_resource_group::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(list_by_resource_group::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_by_resource_group::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: ActivityLogAlertList = serde_json::from_slice(rsp_body) .map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body) .map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?; Err(list_by_resource_group::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod list_by_resource_group { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod activity_logs { use super::{models, models::*, API_VERSION}; pub async fn list( operation_config: &crate::OperationConfig, filter: &str, select: Option<&str>, subscription_id: &str, ) -> std::result::Result<EventDataCollection, list::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/providers/Microsoft.Insights/eventtypes/management/values", operation_config.base_path(), subscription_id ); let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); url.query_pairs_mut().append_pair("$filter", filter); if let Some(select) = select { url.query_pairs_mut().append_pair("$select", select); } let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: EventDataCollection = 
                    serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: ErrorResponse =
                    serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(list::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    pub mod list {
        use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
}
pub mod event_categories {
    use super::{models, models::*, API_VERSION};
    pub async fn list(operation_config: &crate::OperationConfig) -> std::result::Result<EventCategoryCollection, list::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!("{}/providers/Microsoft.Insights/eventcategories", operation_config.base_path(),);
        let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(list::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: EventCategoryCollection =
                    serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: ErrorResponse =
                    serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(list::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    pub mod list {
        use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
}
pub mod tenant_activity_logs {
    use super::{models, models::*, API_VERSION};
    pub async fn list(
        operation_config: &crate::OperationConfig,
        filter: Option<&str>,
        select: Option<&str>,
    ) -> std::result::Result<EventDataCollection, list::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/providers/Microsoft.Insights/eventtypes/management/values",
            operation_config.base_path(),
        );
        let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(list::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        if let Some(filter) = filter {
            url.query_pairs_mut().append_pair("$filter", filter);
        }
        if let Some(select) = select {
            url.query_pairs_mut().append_pair("$select", select);
        }
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: EventDataCollection =
                    serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: ErrorResponse =
                    serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(list::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    pub mod list {
        use super::{models, models::*, API_VERSION};
        #[derive(Debug, thiserror::Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
}
pub mod metric_definitions {
    use super::{models, models::*, API_VERSION};
    pub async fn list(
        operation_config: &crate::OperationConfig,
        resource_uri: &str,
        metricnamespace: Option<&str>,
    ) -> std::result::Result<MetricDefinitionCollection, list::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/{}/providers/Microsoft.Insights/metricDefinitions",
            operation_config.base_path(),
            resource_uri
        );
        let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
.await .map_err(list::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); if let Some(metricnamespace) = metricnamespace { url.query_pairs_mut().append_pair("metricnamespace", metricnamespace); } let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: MetricDefinitionCollection = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?; Err(list::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod list { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod metrics { use super::{models, models::*, API_VERSION}; pub async fn list( operation_config: &crate::OperationConfig, resource_uri: &str, timespan: Option<&str>, interval: Option<&str>, metricnames: Option<&str>, aggregation: Option<&str>, top: Option<i32>, orderby: Option<&str>, filter: Option<&str>, result_type: Option<&str>, metricnamespace: Option<&str>, ) -> std::result::Result<Response, list::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/{}/providers/Microsoft.Insights/metrics", operation_config.base_path(), resource_uri ); let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); if let Some(timespan) = timespan { url.query_pairs_mut().append_pair("timespan", timespan); } if let Some(interval) = interval { url.query_pairs_mut().append_pair("interval", interval); } if let Some(metricnames) = metricnames { url.query_pairs_mut().append_pair("metricnames", metricnames); } if let Some(aggregation) = aggregation { url.query_pairs_mut().append_pair("aggregation", aggregation); } if let Some(top) = top { url.query_pairs_mut().append_pair("top", 
top.to_string().as_str()); } if let Some(orderby) = orderby { url.query_pairs_mut().append_pair("orderby", orderby); } if let Some(filter) = filter { url.query_pairs_mut().append_pair("$filter", filter); } if let Some(result_type) = result_type { url.query_pairs_mut().append_pair("resultType", result_type); } if let Some(metricnamespace) = metricnamespace { url.query_pairs_mut().append_pair("metricnamespace", metricnamespace); } let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: Response = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?; Err(list::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod list { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod baselines { use super::{models, models::*, API_VERSION}; pub async fn list( operation_config: &crate::OperationConfig, resource_uri: &str, metricnames: Option<&str>, metricnamespace: Option<&str>, timespan: Option<&str>, interval: Option<&str>, aggregation: Option<&str>, sensitivities: Option<&str>, filter: Option<&str>, result_type: Option<&str>, ) -> std::result::Result<MetricBaselinesResponse, list::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/{}/providers/Microsoft.Insights/metricBaselines", operation_config.base_path(), resource_uri ); let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); if let Some(metricnames) = metricnames { url.query_pairs_mut().append_pair("metricnames", metricnames); } if let Some(metricnamespace) = metricnamespace { url.query_pairs_mut().append_pair("metricnamespace", metricnamespace); } if let Some(timespan) = timespan { url.query_pairs_mut().append_pair("timespan", timespan); } if let Some(interval) = interval { url.query_pairs_mut().append_pair("interval", interval); } 
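            // Editor's note: each optional argument of this `baselines::list` operation maps to
            // one query pair on the metricBaselines endpoint. A hedged usage sketch, not part of
            // the generated code (`config` and `resource_id` are assumed bindings; the metric
            // name, timespan, interval, and aggregation values are illustrative only):
            //     let baselines = baselines::list(&config, resource_id,
            //         Some("Percentage CPU"),                            // metricnames
            //         None,                                              // metricnamespace
            //         Some("2021-01-01T00:00:00Z/2021-01-02T00:00:00Z"), // timespan
            //         Some("PT1H"),                                      // interval
            //         Some("Average"),                                   // aggregation
            //         None, None, None,                                  // sensitivities, filter, result_type
            //     ).await?;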
if let Some(aggregation) = aggregation { url.query_pairs_mut().append_pair("aggregation", aggregation); } if let Some(sensitivities) = sensitivities { url.query_pairs_mut().append_pair("sensitivities", sensitivities); } if let Some(filter) = filter { url.query_pairs_mut().append_pair("$filter", filter); } if let Some(result_type) = result_type { url.query_pairs_mut().append_pair("resultType", result_type); } let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: MetricBaselinesResponse = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?; Err(list::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod list { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod metric_alerts { use super::{models, models::*, API_VERSION}; pub async fn list_by_subscription( operation_config: &crate::OperationConfig, subscription_id: &str, ) -> std::result::Result<MetricAlertResourceCollection, list_by_subscription::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/providers/Microsoft.Insights/metricAlerts", operation_config.base_path(), subscription_id ); let mut url = url::Url::parse(url_str).map_err(list_by_subscription::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_by_subscription::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list_by_subscription::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_by_subscription::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: MetricAlertResourceCollection = serde_json::from_slice(rsp_body) .map_err(|source| 
list_by_subscription::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body) .map_err(|source| list_by_subscription::Error::DeserializeError(source, rsp_body.clone()))?; Err(list_by_subscription::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod list_by_subscription { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list_by_resource_group( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, ) -> std::result::Result<MetricAlertResourceCollection, list_by_resource_group::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Insights/metricAlerts", operation_config.base_path(), subscription_id, resource_group_name ); let mut url = url::Url::parse(url_str).map_err(list_by_resource_group::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_by_resource_group::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(list_by_resource_group::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_by_resource_group::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: MetricAlertResourceCollection = serde_json::from_slice(rsp_body) .map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body) .map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?; Err(list_by_resource_group::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod list_by_resource_group { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: 
{0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, rule_name: &str, ) -> std::result::Result<MetricAlertResource, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Insights/metricAlerts/{}", operation_config.base_path(), subscription_id, resource_group_name, rule_name ); let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: MetricAlertResource = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Err(get::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod get { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn create_or_update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, rule_name: &str, parameters: &MetricAlertResource, ) -> std::result::Result<MetricAlertResource, create_or_update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Insights/metricAlerts/{}", operation_config.base_path(), subscription_id, resource_group_name, rule_name ); let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if 
let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(create_or_update::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(parameters).map_err(create_or_update::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(create_or_update::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: MetricAlertResource = serde_json::from_slice(rsp_body) .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body) .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?; Err(create_or_update::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod create_or_update { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, rule_name: &str, parameters: &MetricAlertResourcePatch, ) -> std::result::Result<MetricAlertResource, update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Insights/metricAlerts/{}", operation_config.base_path(), subscription_id, resource_group_name, rule_name ); let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PATCH); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(update::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(parameters).map_err(update::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?; let rsp = 
http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: MetricAlertResource = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Err(update::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod update { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn delete( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, rule_name: &str, ) -> std::result::Result<delete::Response, delete::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Insights/metricAlerts/{}", operation_config.base_path(), subscription_id, resource_group_name, rule_name ); let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(delete::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(delete::Response::Ok200), http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?; Err(delete::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod delete { use super::{models, models::*, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] 
ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod metric_alerts_status { use super::{models, models::*, API_VERSION}; pub async fn list( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, rule_name: &str, ) -> std::result::Result<MetricAlertStatusCollection, list::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Insights/metricAlerts/{}/status", operation_config.base_path(), subscription_id, resource_group_name, rule_name ); let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: MetricAlertStatusCollection = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?; Err(list::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod list { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list_by_name( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, rule_name: &str, status_name: &str, ) -> std::result::Result<MetricAlertStatusCollection, list_by_name::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Insights/metricAlerts/{}/status/{}", operation_config.base_path(), subscription_id, resource_group_name, rule_name, status_name ); let mut url = url::Url::parse(url_str).map_err(list_by_name::Error::ParseUrlError)?; 
let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_by_name::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list_by_name::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_by_name::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: MetricAlertStatusCollection = serde_json::from_slice(rsp_body).map_err(|source| list_by_name::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| list_by_name::Error::DeserializeError(source, rsp_body.clone()))?; Err(list_by_name::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod list_by_name { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod scheduled_query_rules { use super::{models, models::*, API_VERSION}; pub async fn get( operation_config: &crate::OperationConfig, resource_group_name: &str, rule_name: &str, subscription_id: &str, ) -> std::result::Result<LogSearchRuleResource, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.Insights/scheduledQueryRules/{}", operation_config.base_path(), subscription_id, resource_group_name, rule_name ); let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = 
rsp.body(); let rsp_value: LogSearchRuleResource = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorContract = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Err(get::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod get { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorContract, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn create_or_update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, rule_name: &str, parameters: &LogSearchRuleResource, ) -> std::result::Result<create_or_update::Response, create_or_update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.Insights/scheduledQueryRules/{}", operation_config.base_path(), subscription_id, resource_group_name, rule_name ); let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(create_or_update::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(parameters).map_err(create_or_update::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(create_or_update::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: LogSearchRuleResource = serde_json::from_slice(rsp_body) .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(create_or_update::Response::Ok200(rsp_value)) } http::StatusCode::CREATED => { let rsp_body = rsp.body(); let rsp_value: LogSearchRuleResource = serde_json::from_slice(rsp_body) .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(create_or_update::Response::Created201(rsp_value)) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorContract = serde_json::from_slice(rsp_body) .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?; Err(create_or_update::Error::DefaultResponse { status_code, value: rsp_value, }) } } 
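        // Editor's note: the PUT above surfaces both success codes distinctly, so a caller
        // can tell an in-place update (200) from a fresh creation (201). Hedged sketch, not
        // generated code (`config`, `sub`, `rg`, `name`, and `rule` are assumed bindings):
        //     match scheduled_query_rules::create_or_update(&config, sub, rg, name, &rule).await? {
        //         create_or_update::Response::Ok200(updated) => { /* existing rule replaced */ }
        //         create_or_update::Response::Created201(created) => { /* new rule created */ }
        //     }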
} pub mod create_or_update { use super::{models, models::*, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200(LogSearchRuleResource), Created201(LogSearchRuleResource), } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorContract, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, rule_name: &str, parameters: &LogSearchRuleResourcePatch, ) -> std::result::Result<LogSearchRuleResource, update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.Insights/scheduledQueryRules/{}", operation_config.base_path(), subscription_id, resource_group_name, rule_name ); let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PATCH); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(update::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(parameters).map_err(update::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: LogSearchRuleResource = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorContract = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Err(update::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod update { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorContract, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] 
GetTokenError(azure_core::Error), } } pub async fn delete( operation_config: &crate::OperationConfig, resource_group_name: &str, rule_name: &str, subscription_id: &str, ) -> std::result::Result<delete::Response, delete::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.Insights/scheduledQueryRules/{}", operation_config.base_path(), subscription_id, resource_group_name, rule_name ); let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(delete::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(delete::Response::Ok200), http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorContract = serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?; Err(delete::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod delete { use super::{models, models::*, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorContract, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list_by_subscription( operation_config: &crate::OperationConfig, filter: Option<&str>, subscription_id: &str, ) -> std::result::Result<LogSearchRuleResourceCollection, list_by_subscription::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/providers/Microsoft.Insights/scheduledQueryRules", operation_config.base_path(), subscription_id ); let mut url = url::Url::parse(url_str).map_err(list_by_subscription::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_by_subscription::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } 
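        // Editor's note: `filter` is forwarded verbatim as the OData `$filter` query pair
        // appended below. A hedged usage sketch, not generated code (`config` is an assumed
        // binding and the filter expression is illustrative only):
        //     let rules = scheduled_query_rules::list_by_subscription(
        //         &config, Some("resourceGroup eq 'my-rg'"), subscription_id).await?;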
url.query_pairs_mut().append_pair("api-version", super::API_VERSION); if let Some(filter) = filter { url.query_pairs_mut().append_pair("$filter", filter); } let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list_by_subscription::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_by_subscription::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: LogSearchRuleResourceCollection = serde_json::from_slice(rsp_body) .map_err(|source| list_by_subscription::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorContract = serde_json::from_slice(rsp_body) .map_err(|source| list_by_subscription::Error::DeserializeError(source, rsp_body.clone()))?; Err(list_by_subscription::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod list_by_subscription { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorContract, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list_by_resource_group( operation_config: &crate::OperationConfig, resource_group_name: &str, filter: Option<&str>, subscription_id: &str, ) -> std::result::Result<LogSearchRuleResourceCollection, list_by_resource_group::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.Insights/scheduledQueryRules", operation_config.base_path(), subscription_id, resource_group_name ); let mut url = url::Url::parse(url_str).map_err(list_by_resource_group::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_by_resource_group::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); if let Some(filter) = filter { url.query_pairs_mut().append_pair("$filter", filter); } let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(list_by_resource_group::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_by_resource_group::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: LogSearchRuleResourceCollection = serde_json::from_slice(rsp_body) .map_err(|source| list_by_resource_group::Error::DeserializeError(source, 
rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorContract = serde_json::from_slice(rsp_body) .map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?; Err(list_by_resource_group::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod list_by_resource_group { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorContract, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod metric_namespaces { use super::{models, models::*, API_VERSION}; pub async fn list( operation_config: &crate::OperationConfig, resource_uri: &str, start_time: Option<&str>, ) -> std::result::Result<MetricNamespaceCollection, list::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/{}/providers/microsoft.insights/metricNamespaces", operation_config.base_path(), resource_uri ); let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); if let Some(start_time) = start_time { url.query_pairs_mut().append_pair("startTime", start_time); } let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: MetricNamespaceCollection = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?; Err(list::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod list { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] 
SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod vm_insights { use super::{models, models::*, API_VERSION}; pub async fn get_onboarding_status( operation_config: &crate::OperationConfig, resource_uri: &str, ) -> std::result::Result<VmInsightsOnboardingStatus, get_onboarding_status::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/{}/providers/Microsoft.Insights/vmInsightsOnboardingStatuses/default", operation_config.base_path(), resource_uri ); let mut url = url::Url::parse(url_str).map_err(get_onboarding_status::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get_onboarding_status::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(get_onboarding_status::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(get_onboarding_status::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: VmInsightsOnboardingStatus = serde_json::from_slice(rsp_body) .map_err(|source| get_onboarding_status::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ResponseWithError = serde_json::from_slice(rsp_body) .map_err(|source| get_onboarding_status::Error::DeserializeError(source, rsp_body.clone()))?; Err(get_onboarding_status::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod get_onboarding_status { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ResponseWithError, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod private_link_scopes { use super::{models, models::*, API_VERSION}; pub async fn list( operation_config: &crate::OperationConfig, subscription_id: &str, ) -> std::result::Result<AzureMonitorPrivateLinkScopeListResult, list::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/providers/microsoft.insights/privateLinkScopes", operation_config.base_path(), subscription_id ); let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = 
req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: AzureMonitorPrivateLinkScopeListResult = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(list::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod list { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list_by_resource_group( operation_config: &crate::OperationConfig, resource_group_name: &str, subscription_id: &str, ) -> std::result::Result<AzureMonitorPrivateLinkScopeListResult, list_by_resource_group::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/microsoft.insights/privateLinkScopes", operation_config.base_path(), subscription_id, resource_group_name ); let mut url = url::Url::parse(url_str).map_err(list_by_resource_group::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_by_resource_group::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(list_by_resource_group::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_by_resource_group::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: AzureMonitorPrivateLinkScopeListResult = serde_json::from_slice(rsp_body) .map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => 
{ let rsp_body = rsp.body(); Err(list_by_resource_group::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod list_by_resource_group { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn get( operation_config: &crate::OperationConfig, resource_group_name: &str, subscription_id: &str, scope_name: &str, ) -> std::result::Result<AzureMonitorPrivateLinkScope, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/microsoft.insights/privateLinkScopes/{}", operation_config.base_path(), subscription_id, resource_group_name, scope_name ); let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: AzureMonitorPrivateLinkScope = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(get::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod get { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn create_or_update( operation_config: &crate::OperationConfig, resource_group_name: &str, subscription_id: &str, scope_name: &str, azure_monitor_private_link_scope_payload: 
&AzureMonitorPrivateLinkScope, ) -> std::result::Result<create_or_update::Response, create_or_update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/microsoft.insights/privateLinkScopes/{}", operation_config.base_path(), subscription_id, resource_group_name, scope_name ); let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(create_or_update::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(azure_monitor_private_link_scope_payload).map_err(create_or_update::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(create_or_update::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: AzureMonitorPrivateLinkScope = serde_json::from_slice(rsp_body) .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(create_or_update::Response::Ok200(rsp_value)) } http::StatusCode::CREATED => { let rsp_body = rsp.body(); let rsp_value: AzureMonitorPrivateLinkScope = serde_json::from_slice(rsp_body) .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(create_or_update::Response::Created201(rsp_value)) } status_code => { let rsp_body = rsp.body(); Err(create_or_update::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod create_or_update { use super::{models, models::*, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200(AzureMonitorPrivateLinkScope), Created201(AzureMonitorPrivateLinkScope), } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn update_tags( operation_config: &crate::OperationConfig, resource_group_name: &str, subscription_id: &str, scope_name: &str, private_link_scope_tags: &TagsResource, ) -> std::result::Result<AzureMonitorPrivateLinkScope, update_tags::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/microsoft.insights/privateLinkScopes/{}", operation_config.base_path(), subscription_id, resource_group_name, scope_name ); let mut url = 
url::Url::parse(url_str).map_err(update_tags::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PATCH); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(update_tags::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(private_link_scope_tags).map_err(update_tags::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(update_tags::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(update_tags::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: AzureMonitorPrivateLinkScope = serde_json::from_slice(rsp_body).map_err(|source| update_tags::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(update_tags::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod update_tags { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn delete( operation_config: &crate::OperationConfig, resource_group_name: &str, subscription_id: &str, scope_name: &str, ) -> std::result::Result<delete::Response, delete::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/microsoft.insights/privateLinkScopes/{}", operation_config.base_path(), subscription_id, resource_group_name, scope_name ); let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(delete::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(delete::Response::Ok200), 
http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202), status_code => { let rsp_body = rsp.body(); Err(delete::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod delete { use super::{models, models::*, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, NoContent204, Accepted202, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod private_link_scope_operation_status { use super::{models, models::*, API_VERSION}; pub async fn get( operation_config: &crate::OperationConfig, async_operation_id: &str, resource_group_name: &str, subscription_id: &str, ) -> std::result::Result<OperationStatus, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/microsoft.insights/privateLinkScopeOperationStatuses/{}", operation_config.base_path(), subscription_id, resource_group_name, async_operation_id ); let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: OperationStatus = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(get::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod get { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), 
#[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod private_link_resources { use super::{models, models::*, API_VERSION}; pub async fn list_by_private_link_scope( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, scope_name: &str, ) -> std::result::Result<PrivateLinkResourceListResult, list_by_private_link_scope::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Insights/privateLinkScopes/{}/privateLinkResources", operation_config.base_path(), subscription_id, resource_group_name, scope_name ); let mut url = url::Url::parse(url_str).map_err(list_by_private_link_scope::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_by_private_link_scope::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(list_by_private_link_scope::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_by_private_link_scope::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: PrivateLinkResourceListResult = serde_json::from_slice(rsp_body) .map_err(|source| list_by_private_link_scope::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(list_by_private_link_scope::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod list_by_private_link_scope { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, scope_name: &str, group_name: &str, ) -> std::result::Result<PrivateLinkResource, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Insights/privateLinkScopes/{}/privateLinkResources/{}", operation_config.base_path(), subscription_id, resource_group_name, scope_name, group_name ); let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = 
operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: PrivateLinkResource = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(get::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod get { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod private_endpoint_connections { use super::{models, models::*, API_VERSION}; pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, scope_name: &str, private_endpoint_connection_name: &str, ) -> std::result::Result<PrivateEndpointConnection, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Insights/privateLinkScopes/{}/privateEndpointConnections/{}", operation_config.base_path(), subscription_id, resource_group_name, scope_name, private_endpoint_connection_name ); let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: PrivateEndpointConnection = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); 
Err(get::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod get { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn create_or_update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, scope_name: &str, private_endpoint_connection_name: &str, parameters: &PrivateEndpointConnection, ) -> std::result::Result<create_or_update::Response, create_or_update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Insights/privateLinkScopes/{}/privateEndpointConnections/{}", operation_config.base_path(), subscription_id, resource_group_name, scope_name, private_endpoint_connection_name ); let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(create_or_update::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(parameters).map_err(create_or_update::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(create_or_update::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: PrivateEndpointConnection = serde_json::from_slice(rsp_body) .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(create_or_update::Response::Ok200(rsp_value)) } http::StatusCode::ACCEPTED => Ok(create_or_update::Response::Accepted202), status_code => { let rsp_body = rsp.body(); Err(create_or_update::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod create_or_update { use super::{models, models::*, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200(PrivateEndpointConnection), Accepted202, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] 
ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn delete( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, scope_name: &str, private_endpoint_connection_name: &str, ) -> std::result::Result<delete::Response, delete::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Insights/privateLinkScopes/{}/privateEndpointConnections/{}", operation_config.base_path(), subscription_id, resource_group_name, scope_name, private_endpoint_connection_name ); let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(delete::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(delete::Response::Ok200), http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let rsp_body = rsp.body(); Err(delete::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod delete { use super::{models, models::*, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, Accepted202, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list_by_private_link_scope( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, scope_name: &str, ) -> std::result::Result<PrivateEndpointConnectionListResult, list_by_private_link_scope::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Insights/privateLinkScopes/{}/privateEndpointConnections", operation_config.base_path(), subscription_id, resource_group_name, scope_name ); let mut url = url::Url::parse(url_str).map_err(list_by_private_link_scope::Error::ParseUrlError)?; let mut 
req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_by_private_link_scope::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(list_by_private_link_scope::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_by_private_link_scope::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: PrivateEndpointConnectionListResult = serde_json::from_slice(rsp_body) .map_err(|source| list_by_private_link_scope::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(list_by_private_link_scope::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod list_by_private_link_scope { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod private_link_scoped_resources { use super::{models, models::*, API_VERSION}; pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, scope_name: &str, name: &str, ) -> std::result::Result<ScopedResource, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Insights/privateLinkScopes/{}/scopedResources/{}", operation_config.base_path(), subscription_id, resource_group_name, scope_name, name ); let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let 
rsp_value: ScopedResource = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(get::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod get { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn create_or_update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, scope_name: &str, name: &str, parameters: &ScopedResource, ) -> std::result::Result<create_or_update::Response, create_or_update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Insights/privateLinkScopes/{}/scopedResources/{}", operation_config.base_path(), subscription_id, resource_group_name, scope_name, name ); let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(create_or_update::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(parameters).map_err(create_or_update::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(create_or_update::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: ScopedResource = serde_json::from_slice(rsp_body) .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(create_or_update::Response::Ok200(rsp_value)) } http::StatusCode::CREATED => { let rsp_body = rsp.body(); let rsp_value: ScopedResource = serde_json::from_slice(rsp_body) .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(create_or_update::Response::Created201(rsp_value)) } http::StatusCode::ACCEPTED => Ok(create_or_update::Response::Accepted202), status_code => { let rsp_body = rsp.body(); Err(create_or_update::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod create_or_update { use super::{models, models::*, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200(ScopedResource), Created201(ScopedResource), Accepted202, } #[derive(Debug, thiserror 
:: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn delete( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, scope_name: &str, name: &str, ) -> std::result::Result<delete::Response, delete::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Insights/privateLinkScopes/{}/scopedResources/{}", operation_config.base_path(), subscription_id, resource_group_name, scope_name, name ); let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(delete::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(delete::Response::Ok200), http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let rsp_body = rsp.body(); Err(delete::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod delete { use super::{models, models::*, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, Accepted202, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list_by_private_link_scope( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, scope_name: &str, ) -> std::result::Result<ScopedResourceListResult, list_by_private_link_scope::Error> { let http_client = operation_config.http_client(); let url_str = &format!( 
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Insights/privateLinkScopes/{}/scopedResources", operation_config.base_path(), subscription_id, resource_group_name, scope_name ); let mut url = url::Url::parse(url_str).map_err(list_by_private_link_scope::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_by_private_link_scope::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(list_by_private_link_scope::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_by_private_link_scope::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: ScopedResourceListResult = serde_json::from_slice(rsp_body) .map_err(|source| list_by_private_link_scope::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(list_by_private_link_scope::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod list_by_private_link_scope { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } }
48.059206
136
0.586384
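The generated operations above all follow the same shape: build a URL from the `OperationConfig`, attach a bearer token, and deserialize by status code. A minimal caller sketch follows; `build_operation_config()` is a hypothetical helper standing in for however the host crate assembles base path, HTTP client, and credential, and the `value`/`name` fields on the list result are the usual ARM conventions, assumed here rather than shown in the record:

// Sketch only. `build_operation_config()` is hypothetical; the generated
// `list` operation and its error type come from the module above.
async fn print_private_link_scopes(subscription_id: &str) -> Result<(), Box<dyn std::error::Error>> {
    let config = build_operation_config();
    let result = private_link_scopes::list(&config, subscription_id).await?;
    // ARM list results conventionally expose a `value` vector of resources;
    // `name` on the tracked-resource model is likewise an assumption.
    for scope in result.value {
        println!("{:?}", scope.name);
    }
    Ok(())
}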
cc8eb911b355c2275d568ae589cb98262eb8061e
73
pub(crate) mod env;
pub(crate) mod theme;

mod ui;

pub(crate) use ui::*;
12.166667
21
0.657534
1693339ce50da3267c340d07950eddb3ab593dca
1,204
use dotenv::dotenv;
use once_cell::sync::Lazy;
use slog::{FnValue, *};
use sloggers::{
    terminal::{Destination, TerminalLoggerBuilder},
    types::Severity,
    Build,
};
use std::env;
use std::fs::OpenOptions;
use std::sync::Mutex;

pub static APP_LOGGING: Lazy<slog::Logger> = Lazy::new(|| {
    if cfg!(test) {
        // Tests log straight to stdout at debug level.
        let mut builder = TerminalLoggerBuilder::new();
        builder.level(Severity::Debug);
        builder.destination(Destination::Stdout);
        builder.build().unwrap()
    } else {
        dotenv().ok();
        let log_dir = env::var("LOG_DIR").expect("LOG_DIR must be set");
        let logfile = format!("{}/log_app.txt", log_dir);
        let file = OpenOptions::new()
            .create(true)
            .write(true)
            .append(true)
            .open(logfile)
            .unwrap();
        let filter_level = "info".parse::<Level>().expect("Invalid log level filter");
        // Bunyan-formatted file logger behind a mutex; each record carries a
        // computed "location" (file:line module). This value is returned, so
        // release builds actually use it instead of a default terminal logger.
        Logger::root(
            Mutex::new(LevelFilter::new(slog_bunyan::default(file), filter_level)).fuse(),
            o!("location" => FnValue(move |info| {
                format!("{}:{} {}", info.file(), info.line(), info.module())
            })),
        )
    }
});
30.1
90
0.552326
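A short usage sketch for the lazy logger above; the only assumptions are the `slog` macro import and that `APP_LOGGING` is in scope:

use slog::info;

fn main() {
    // First dereference runs the Lazy initializer (terminal logger under
    // cfg!(test), bunyan file logger otherwise); later calls reuse it.
    let log = &*APP_LOGGING;
    info!(log, "application started"; "component" => "main");
}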
d6e4def1d0f769abde652d397e5e60e5e1c3f515
5,261
#[doc = r" Value read from the register"] pub struct R { bits: u32, } impl super::STATUS { #[doc = r" Reads the contents of the register"] #[inline] pub fn read(&self) -> R { R { bits: self.register.get(), } } } #[doc = "Possible values of the field `B`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum BR { #[doc = "EMC\n is idle (warm reset value)."] IDLE, #[doc = "EMC\n is busy performing memory transactions, commands, auto-refresh cycles,\n or is in self-refresh mode (POR reset value)."] BUSY, } impl BR { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { BR::IDLE => false, BR::BUSY => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> BR { match value { false => BR::IDLE, true => BR::BUSY, } } #[doc = "Checks if the value of the field is `IDLE`"] #[inline] pub fn is_idle(&self) -> bool { *self == BR::IDLE } #[doc = "Checks if the value of the field is `BUSY`"] #[inline] pub fn is_busy(&self) -> bool { *self == BR::BUSY } } #[doc = "Possible values of the field `S`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum SR { #[doc = "Write buffers\n empty (POR reset value)"] EMPTY, #[doc = "Write\n buffers contain data."] DATA, } impl SR { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { SR::EMPTY => false, SR::DATA => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> SR { match value { false => SR::EMPTY, true => SR::DATA, } } #[doc = "Checks if the value of the field is `EMPTY`"] #[inline] pub fn is_empty(&self) -> bool { *self == SR::EMPTY } #[doc = "Checks if the value of the field is `DATA`"] #[inline] pub fn is_data(&self) -> bool { *self == SR::DATA } } #[doc = "Possible values of the field `SA`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum SAR { #[doc = "Normal mode"] NORMAL, #[doc = "Self-refresh mode (POR reset value)."] SELFREFRESH, } impl SAR { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { SAR::NORMAL => false, SAR::SELFREFRESH => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> SAR { match value { false => SAR::NORMAL, true => SAR::SELFREFRESH, } } #[doc = "Checks if the value of the field is `NORMAL`"] #[inline] pub fn is_normal(&self) -> bool { *self == SAR::NORMAL } #[doc = "Checks if the value of the field is `SELFREFRESH`"] #[inline] pub fn is_selfrefresh(&self) -> bool { *self == SAR::SELFREFRESH } } impl R { #[doc = r" Value of the register as raw bits"] #[inline] pub fn bits(&self) -> u32 { self.bits } #[doc = "Bit 0 - Busy. 
This bit is used to ensure that the memory controller enters the low-power or disabled mode cleanly by determining if the memory controller is busy or not."] #[inline] pub fn b(&self) -> BR { BR::_from({ const MASK: bool = true; const OFFSET: u8 = 0; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bit 1 - Write buffer status.This bit enables the EMC to enter low-power mode or disabled mode cleanly."] #[inline] pub fn s(&self) -> SR { SR::_from({ const MASK: bool = true; const OFFSET: u8 = 1; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bit 2 - Self-refresh acknowledge. This bit indicates the operating mode of the EMC."] #[inline] pub fn sa(&self) -> SAR { SAR::_from({ const MASK: bool = true; const OFFSET: u8 = 2; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } }
27.835979
217
0.493442
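A sketch of how the generated reader above is typically consumed; only the API shown in the record is used, and the `&STATUS` handle is assumed to be in scope in the usual svd2rust style:

// Poll STATUS until the controller is idle and the write buffers are empty,
// e.g. before entering a low-power or disabled mode.
fn wait_until_quiescent(status: &STATUS) {
    loop {
        let r = status.read();
        if r.b().is_idle() && r.s().is_empty() {
            break;
        }
    }
}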
e8642cae4a07c917db0f83166b6d8235da5b1a88
524
#[derive(Debug)]
enum ColorNoParam {
    Red,
    Yellow,
    Blue,
}

#[derive(Debug)]
enum ColorParam {
    Red(String),
    Yellow(String),
    Blue(String),
}

fn main() {
    let color_no_param = ColorNoParam::Red;
    match color_no_param {
        ColorNoParam::Red => println!("{:?}", ColorNoParam::Red),
        ColorNoParam::Yellow => println!("{:?}", ColorNoParam::Yellow),
        ColorNoParam::Blue => println!("{:?}", ColorNoParam::Blue),
    }
    println!("{:?}", ColorParam::Blue(String::from("blue")));
}
20.96
71
0.585878
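The file above constructs `ColorParam::Blue` but never takes it apart; a small companion sketch showing how matching a payload-carrying variant binds the inner value:

fn describe(color: &ColorParam) {
    // Each arm binds the String payload carried by the variant.
    match color {
        ColorParam::Red(name) => println!("red: {}", name),
        ColorParam::Yellow(name) => println!("yellow: {}", name),
        ColorParam::Blue(name) => println!("blue: {}", name),
    }
}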
01159960f523f9ffd81896d311ce6fc0f447b0b9
6,711
// Copyright 2018 The Grin Developers // Modifications Copyright 2019 The Gotts Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use chrono::prelude::{DateTime, Utc}; use chrono::Duration; use std::sync::Arc; use crate::chain::{self, SyncState, SyncStatus}; use crate::core::core::hash::Hashed; use crate::core::global; use crate::p2p::{self, Peer}; /// Fast sync has 3 "states": /// * syncing headers /// * once all headers are sync'd, requesting the txhashset state /// * once we have the state, get blocks after that /// /// The StateSync struct implements and monitors the middle step. pub struct StateSync { sync_state: Arc<SyncState>, peers: Arc<p2p::Peers>, chain: Arc<chain::Chain>, prev_state_sync: Option<DateTime<Utc>>, state_sync_peer: Option<Arc<Peer>>, } impl StateSync { pub fn new( sync_state: Arc<SyncState>, peers: Arc<p2p::Peers>, chain: Arc<chain::Chain>, ) -> StateSync { StateSync { sync_state, peers, chain, prev_state_sync: None, state_sync_peer: None, } } /// Check whether state sync should run and triggers a state download when /// it's time (we have all headers). Returns true as long as state sync /// needs monitoring, false when it's either done or turned off. pub fn check_run( &mut self, header_head: &chain::Tip, head: &chain::Tip, tail: &chain::Tip, highest_height: u64, ) -> bool { trace!("state_sync: head.height: {}, tail.height: {}. header_head.height: {}, highest_height: {}", head.height, tail.height, header_head.height, highest_height, ); let mut sync_need_restart = false; // check sync error { let clone = self.sync_state.sync_error(); if let Some(ref sync_error) = *clone.read() { error!("state_sync: error = {:?}. restart fast sync", sync_error); sync_need_restart = true; } drop(clone); } // check peer connection status of this sync if let Some(ref peer) = self.state_sync_peer { if let SyncStatus::TxHashsetDownload { .. } = self.sync_state.status() { if !peer.is_connected() { sync_need_restart = true; info!( "state_sync: peer connection lost: {:?}. restart", peer.info.addr, ); } } } // if txhashset downloaded and validated successfully, we switch to BodySync state, // and we need call state_sync_reset() to make it ready for next possible state sync. let done = if let SyncStatus::TxHashsetDone = self.sync_state.status() { self.sync_state.update(SyncStatus::BodySync { current_height: 0, highest_height: 0, }); true } else { false }; if sync_need_restart || done { self.state_sync_reset(); self.sync_state.clear_sync_error(); } if done { return false; } // run fast sync if applicable, normally only run one-time, except restart in error if sync_need_restart || header_head.height == highest_height { let (go, download_timeout) = self.state_sync_due(); if let SyncStatus::TxHashsetDownload { .. 
} = self.sync_state.status() { if download_timeout { error!("state_sync: TxHashsetDownload status timeout in 10 minutes!"); self.sync_state.set_sync_error( chain::ErrorKind::SyncError(format!("{:?}", p2p::Error::Timeout)).into(), ); } } if go { self.state_sync_peer = None; match self.request_state(&header_head) { Ok(peer) => { self.state_sync_peer = Some(peer); } Err(e) => self .sync_state .set_sync_error(chain::ErrorKind::SyncError(format!("{:?}", e)).into()), } // to avoid the confusing log, // update the final HeaderSync state mainly for 'current_height' { let status = self.sync_state.status(); if let SyncStatus::HeaderSync { .. } = status { self.sync_state.update(SyncStatus::HeaderSync { current_height: header_head.height, highest_height, }); } } self.sync_state.update(SyncStatus::TxHashsetDownload { start_time: Utc::now(), prev_update_time: Utc::now(), update_time: Utc::now(), prev_downloaded_size: 0, downloaded_size: 0, total_size: 0, }); } } true } fn request_state(&self, header_head: &chain::Tip) -> Result<Arc<Peer>, p2p::Error> { let threshold = global::state_sync_threshold() as u64; let archive_interval = global::txhashset_archive_interval(); let mut txhashset_height = header_head.height.saturating_sub(threshold); txhashset_height = txhashset_height.saturating_sub(txhashset_height % archive_interval); if let Some(peer) = self.peers.most_work_peer() { // ask for txhashset at state_sync_threshold let mut txhashset_head = self .chain .get_block_header(&header_head.prev_block_h) .map_err(|e| { error!( "chain error during getting a block header {}: {:?}", &header_head.prev_block_h, e ); p2p::Error::Internal })?; while txhashset_head.height > txhashset_height { txhashset_head = self .chain .get_previous_header(&txhashset_head) .map_err(|e| { error!( "chain error during getting a previous block header {}: {:?}", txhashset_head.hash(), e ); p2p::Error::Internal })?; } let bhash = txhashset_head.hash(); debug!( "state_sync: before txhashset request, header head: {} / {}, txhashset_head: {} / {}", header_head.height, header_head.last_block_h, txhashset_head.height, bhash ); if let Err(e) = peer.send_txhashset_request(txhashset_head.height, bhash) { error!("state_sync: send_txhashset_request err! {:?}", e); return Err(e); } return Ok(peer.clone()); } Err(p2p::Error::PeerException) } // For now this is a one-time thing (it can be slow) at initial startup. fn state_sync_due(&mut self) -> (bool, bool) { let now = Utc::now(); let mut download_timeout = false; match self.prev_state_sync { None => { self.prev_state_sync = Some(now); (true, download_timeout) } Some(prev) => { if now - prev > Duration::minutes(10) { download_timeout = true; } (false, download_timeout) } } } fn state_sync_reset(&mut self) { self.prev_state_sync = None; self.state_sync_peer = None; } }
28.436441
100
0.664283
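`state_sync_due` above is a one-shot trigger paired with a 10-minute download timeout. A minimal standalone sketch of that pattern, assuming only the chrono crate that the original file already imports; `OneShot` is an illustrative name, not a Grin/Gotts type:

```rust
use chrono::{DateTime, Duration, Utc};

struct OneShot {
    started: Option<DateTime<Utc>>,
}

impl OneShot {
    /// Returns (should_start, timed_out): fire exactly once, then on later
    /// calls only report whether the 10-minute window has elapsed.
    fn due(&mut self) -> (bool, bool) {
        let now = Utc::now();
        match self.started {
            None => {
                self.started = Some(now); // remember when the sync began
                (true, false)
            }
            Some(prev) => (false, now - prev > Duration::minutes(10)),
        }
    }
}

fn main() {
    let mut s = OneShot { started: None };
    assert_eq!(s.due(), (true, false)); // first call triggers the sync
    assert_eq!(s.due().0, false);       // later calls only poll the timeout
}
```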
64fa00de5f3e2d911411e3aa0994899c45e814ef
16,038
//! [`std::process::Command`][Command] customized for testing. //! //! [Command]: std::process::Command use std::ffi; use std::io; use std::io::{Read, Write}; use std::path; use std::process; use crate::assert::Assert; use crate::assert::OutputAssertExt; use crate::output::DebugBuffer; use crate::output::DebugBytes; use crate::output::OutputError; use crate::output::OutputOkExt; use crate::output::OutputResult; /// [`std::process::Command`][Command] customized for testing. /// /// [Command]: std::process::Command #[derive(Debug)] pub struct Command { cmd: process::Command, stdin: Option<Vec<u8>>, timeout: Option<std::time::Duration>, } impl Command { /// Constructs a new `Command` from a `std` `Command`. pub fn from_std(cmd: process::Command) -> Self { Self { cmd, stdin: None, timeout: None, } } /// Create a `Command` to run a specific binary of the current crate. /// /// See the [`cargo` module documentation][crate::cargo] for caveats and workarounds. /// /// # Examples /// /// ```rust,no_run /// use assert_cmd::Command; /// /// let mut cmd = Command::cargo_bin(env!("CARGO_PKG_NAME")) /// .unwrap(); /// let output = cmd.unwrap(); /// println!("{:?}", output); /// ``` /// /// ```rust,no_run /// use assert_cmd::Command; /// /// let mut cmd = Command::cargo_bin("bin_fixture") /// .unwrap(); /// let output = cmd.unwrap(); /// println!("{:?}", output); /// ``` /// pub fn cargo_bin<S: AsRef<str>>(name: S) -> Result<Self, crate::cargo::CargoError> { let cmd = crate::cargo::cargo_bin_cmd(name)?; Ok(Self::from_std(cmd)) } /// Write `buffer` to `stdin` when the `Command` is run. /// /// # Examples /// /// ```rust /// use assert_cmd::Command; /// /// let mut cmd = Command::new("cat") /// .arg("-et") /// .write_stdin("42") /// .assert() /// .stdout("42"); /// ``` pub fn write_stdin<S>(&mut self, buffer: S) -> &mut Self where S: Into<Vec<u8>>, { self.stdin = Some(buffer.into()); self } /// Error out if a timeout is reached /// /// ```rust,no_run /// use assert_cmd::Command; /// /// let assert = Command::cargo_bin("bin_fixture") /// .unwrap() /// .timeout(std::time::Duration::from_secs(1)) /// .env("sleep", "100") /// .assert(); /// assert.failure(); /// ``` pub fn timeout(&mut self, timeout: std::time::Duration) -> &mut Self { self.timeout = Some(timeout); self } /// Write `path`s content to `stdin` when the `Command` is run. /// /// Paths are relative to the [`env::current_dir`][env_current_dir] and not /// [`Command::current_dir`][Command_current_dir]. /// /// [env_current_dir]: std::env::current_dir() /// [Command_current_dir]: std::process::Command::current_dir() pub fn pipe_stdin<P>(&mut self, file: P) -> io::Result<&mut Self> where P: AsRef<path::Path>, { let buffer = std::fs::read(file)?; Ok(self.write_stdin(buffer)) } /// Run a `Command`, returning an [`OutputResult`][OutputResult]. /// /// # Examples /// /// ```rust /// use assert_cmd::Command; /// /// let result = Command::new("echo") /// .args(&["42"]) /// .ok(); /// assert!(result.is_ok()); /// ``` /// pub fn ok(&mut self) -> OutputResult { OutputOkExt::ok(self) } /// Run a `Command`, unwrapping the [`OutputResult`][OutputResult]. /// /// # Examples /// /// ```rust /// use assert_cmd::Command; /// /// let output = Command::new("echo") /// .args(&["42"]) /// .unwrap(); /// ``` /// pub fn unwrap(&mut self) -> process::Output { OutputOkExt::unwrap(self) } /// Run a `Command`, unwrapping the error in the [`OutputResult`][OutputResult]. 
/// /// # Examples /// /// ```rust,no_run /// use assert_cmd::Command; /// /// let err = Command::new("a-command") /// .args(&["--will-fail"]) /// .unwrap_err(); /// ``` /// /// [Output]: std::process::Output pub fn unwrap_err(&mut self) -> OutputError { OutputOkExt::unwrap_err(self) } /// Run a `Command` and make assertions on the [`Output`]. /// /// # Examples /// /// ```rust,no_run /// use assert_cmd::Command; /// /// let mut cmd = Command::cargo_bin("bin_fixture") /// .unwrap() /// .assert() /// .success(); /// ``` /// /// [`Output`]: std::process::Output pub fn assert(&mut self) -> Assert { OutputAssertExt::assert(self) } } /// Mirror [`std::process::Command`][Command]'s API /// /// [Command]: std::process::Command impl Command { /// Constructs a new `Command` for launching the program at /// path `program`, with the following default configuration: /// /// * No arguments to the program /// * Inherit the current process's environment /// * Inherit the current process's working directory /// * Inherit stdin/stdout/stderr for `spawn` or `status`, but create pipes for `output` /// /// Builder methods are provided to change these defaults and /// otherwise configure the process. /// /// If `program` is not an absolute path, the `PATH` will be searched in /// an OS-defined way. /// /// The search path to be used may be controlled by setting the /// `PATH` environment variable on the Command, /// but this has some implementation limitations on Windows /// (see issue #37519). /// /// # Examples /// /// Basic usage: /// /// ```no_run /// use assert_cmd::Command; /// /// Command::new("sh").unwrap(); /// ``` pub fn new<S: AsRef<ffi::OsStr>>(program: S) -> Self { let cmd = process::Command::new(program); Self::from_std(cmd) } /// Adds an argument to pass to the program. /// /// Only one argument can be passed per use. So instead of: /// /// ```no_run /// # assert_cmd::Command::new("sh") /// .arg("-C /path/to/repo") /// # ; /// ``` /// /// usage would be: /// /// ```no_run /// # assert_cmd::Command::new("sh") /// .arg("-C") /// .arg("/path/to/repo") /// # ; /// ``` /// /// To pass multiple arguments see [`args`]. /// /// [`args`]: Command::args() /// /// # Examples /// /// Basic usage: /// /// ```no_run /// use assert_cmd::Command; /// /// Command::new("ls") /// .arg("-l") /// .arg("-a") /// .unwrap(); /// ``` pub fn arg<S: AsRef<ffi::OsStr>>(&mut self, arg: S) -> &mut Self { self.cmd.arg(arg); self } /// Adds multiple arguments to pass to the program. /// /// To pass a single argument see [`arg`]. /// /// [`arg`]: Command::arg() /// /// # Examples /// /// Basic usage: /// /// ```no_run /// use assert_cmd::Command; /// /// Command::new("ls") /// .args(&["-l", "-a"]) /// .unwrap(); /// ``` pub fn args<I, S>(&mut self, args: I) -> &mut Self where I: IntoIterator<Item = S>, S: AsRef<ffi::OsStr>, { self.cmd.args(args); self } /// Inserts or updates an environment variable mapping. /// /// Note that environment variable names are case-insensitive (but case-preserving) on Windows, /// and case-sensitive on all other platforms. /// /// # Examples /// /// Basic usage: /// /// ```no_run /// use assert_cmd::Command; /// /// Command::new("ls") /// .env("PATH", "/bin") /// .unwrap_err(); /// ``` pub fn env<K, V>(&mut self, key: K, val: V) -> &mut Self where K: AsRef<ffi::OsStr>, V: AsRef<ffi::OsStr>, { self.cmd.env(key, val); self } /// Adds or updates multiple environment variable mappings. 
/// /// # Examples /// /// Basic usage: /// /// ```no_run /// use assert_cmd::Command; /// use std::process::Stdio; /// use std::env; /// use std::collections::HashMap; /// /// let filtered_env : HashMap<String, String> = /// env::vars().filter(|&(ref k, _)| /// k == "TERM" || k == "TZ" || k == "LANG" || k == "PATH" /// ).collect(); /// /// Command::new("printenv") /// .env_clear() /// .envs(&filtered_env) /// .unwrap(); /// ``` pub fn envs<I, K, V>(&mut self, vars: I) -> &mut Self where I: IntoIterator<Item = (K, V)>, K: AsRef<ffi::OsStr>, V: AsRef<ffi::OsStr>, { self.cmd.envs(vars); self } /// Removes an environment variable mapping. /// /// # Examples /// /// Basic usage: /// /// ```no_run /// use assert_cmd::Command; /// /// Command::new("ls") /// .env_remove("PATH") /// .unwrap_err(); /// ``` pub fn env_remove<K: AsRef<ffi::OsStr>>(&mut self, key: K) -> &mut Self { self.cmd.env_remove(key); self } /// Clears the entire environment map for the child process. /// /// # Examples /// /// Basic usage: /// /// ```no_run /// use assert_cmd::Command; /// /// Command::new("ls") /// .env_clear() /// .unwrap_err(); /// ``` pub fn env_clear(&mut self) -> &mut Self { self.cmd.env_clear(); self } /// Sets the working directory for the child process. /// /// # Platform-specific behavior /// /// If the program path is relative (e.g., `"./script.sh"`), it's ambiguous /// whether it should be interpreted relative to the parent's working /// directory or relative to `current_dir`. The behavior in this case is /// platform specific and unstable, and it's recommended to use /// [`canonicalize`] to get an absolute program path instead. /// /// # Examples /// /// Basic usage: /// /// ```no_run /// use assert_cmd::Command; /// /// Command::new("ls") /// .current_dir("/bin") /// .unwrap(); /// ``` /// /// [`canonicalize`]: std::fs::canonicalize() pub fn current_dir<P: AsRef<path::Path>>(&mut self, dir: P) -> &mut Self { self.cmd.current_dir(dir); self } /// Executes the `Command` as a child process, waiting for it to finish and collecting all of its /// output. /// /// By default, stdout and stderr are captured (and used to provide the resulting output). /// Stdin is not inherited from the parent and any attempt by the child process to read from /// the stdin stream will result in the stream immediately closing. /// /// # Examples /// /// ```should_panic /// use assert_cmd::Command; /// use std::io::{self, Write}; /// let output = Command::new("/bin/cat") /// .arg("file.txt") /// .output() /// .expect("failed to execute process"); /// /// println!("status: {}", output.status); /// io::stdout().write_all(&output.stdout).unwrap(); /// io::stderr().write_all(&output.stderr).unwrap(); /// /// assert!(output.status.success()); /// ``` pub fn output(&mut self) -> io::Result<process::Output> { let spawn = self.spawn()?; Self::wait_with_input_output(spawn, self.stdin.clone(), self.timeout) } /// If `input`, write it to `child`'s stdin while also reading `child`'s /// stdout and stderr, then wait on `child` and return its status and output. /// /// This was lifted from `std::process::Child::wait_with_output` and modified /// to also write to stdin. 
fn wait_with_input_output( mut child: process::Child, input: Option<Vec<u8>>, timeout: Option<std::time::Duration>, ) -> io::Result<process::Output> { let stdin = input.and_then(|i| { child .stdin .take() .map(|mut stdin| std::thread::spawn(move || stdin.write_all(&i))) }); fn read<R>(mut input: R) -> std::thread::JoinHandle<io::Result<Vec<u8>>> where R: Read + Send + 'static, { std::thread::spawn(move || { let mut ret = Vec::new(); input.read_to_end(&mut ret).map(|_| ret) }) } let stdout = child.stdout.take().map(read); let stderr = child.stderr.take().map(read); // Finish writing stdin before waiting, because waiting drops stdin. stdin.and_then(|t| t.join().unwrap().ok()); let status = if let Some(timeout) = timeout { wait_timeout::ChildExt::wait_timeout(&mut child, timeout) .transpose() .unwrap_or_else(|| { let _ = child.kill(); child.wait() }) } else { child.wait() }?; let stdout = stdout .and_then(|t| t.join().unwrap().ok()) .unwrap_or_default(); let stderr = stderr .and_then(|t| t.join().unwrap().ok()) .unwrap_or_default(); Ok(process::Output { status, stdout, stderr, }) } fn spawn(&mut self) -> io::Result<process::Child> { // stdout/stderr should only be piped for `output` according to `process::Command::new`. self.cmd.stdin(process::Stdio::piped()); self.cmd.stdout(process::Stdio::piped()); self.cmd.stderr(process::Stdio::piped()); self.cmd.spawn() } } impl From<process::Command> for Command { fn from(cmd: process::Command) -> Self { Command::from_std(cmd) } } impl<'c> OutputOkExt for &'c mut Command { fn ok(self) -> OutputResult { let output = self.output().map_err(OutputError::with_cause)?; if output.status.success() { Ok(output) } else { let error = OutputError::new(output).set_cmd(format!("{:?}", self.cmd)); let error = if let Some(stdin) = self.stdin.as_ref() { error.set_stdin(stdin.clone()) } else { error }; Err(error) } } fn unwrap_err(self) -> OutputError { match self.ok() { Ok(output) => { if let Some(stdin) = self.stdin.as_ref() { panic!( "Completed successfully:\ncommand=`{:?}`\nstdin=```{}```\nstdout=```{}```", self.cmd, DebugBytes::new(stdin), DebugBytes::new(&output.stdout) ) } else { panic!( "Completed successfully:\ncommand=`{:?}`\nstdout=```{}```", self.cmd, DebugBytes::new(&output.stdout) ) } } Err(err) => err, } } } impl<'c> OutputAssertExt for &'c mut Command { fn assert(self) -> Assert { let output = match self.output() { Ok(output) => output, Err(err) => { panic!("Failed to spawn {:?}: {}", self, err); } }; let assert = Assert::new(output).append_context("command", format!("{:?}", self.cmd)); if let Some(stdin) = self.stdin.as_ref() { assert.append_context("stdin", DebugBuffer::new(stdin.clone())) } else { assert } } }
28.235915
101
0.499439
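`wait_with_input_output` above drains the child's output on background threads while stdin is being written, so neither pipe can fill up and deadlock. A cut-down sketch of the same idea using only std; the `cat` binary is an assumption for the demo and needs a Unix-like system:

```rust
use std::io::{Read, Write};
use std::process::{Command, Stdio};

fn main() -> std::io::Result<()> {
    let mut child = Command::new("cat")
        .stdin(Stdio::piped())
        .stdout(Stdio::piped())
        .spawn()?;

    let mut stdin = child.stdin.take().expect("piped stdin");
    let mut stdout = child.stdout.take().expect("piped stdout");

    // Reader thread: drains stdout so the child never blocks on a full pipe.
    let reader = std::thread::spawn(move || {
        let mut buf = Vec::new();
        stdout.read_to_end(&mut buf).map(|_| buf)
    });

    stdin.write_all(b"42")?; // write the input...
    drop(stdin);             // ...and close the pipe so `cat` sees EOF

    let output = reader.join().expect("reader thread panicked")?;
    let status = child.wait()?;
    assert!(status.success());
    assert_eq!(output, b"42");
    Ok(())
}
```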
5d1a4da04fa0a014c10233800565bbbd66d72e21
8,083
//---------------------------------------------------------------------------// // Copyright (c) 2017-2020 Ismael Gutiérrez González. All rights reserved. // // This file is part of the Rusted PackFile Manager (RPFM) project, // which can be found here: https://github.com/Frodo45127/rpfm. // // This file is licensed under the MIT license, which can be found here: // https://github.com/Frodo45127/rpfm/blob/master/LICENSE. //---------------------------------------------------------------------------// /*! Module with all the code related to the localisation of rpfm_ui. This module contains all the code needed to initialize/localize the entire UI, or at least the strings on this program (the ones from the rpfm_lib/error are another story. !*/ use qt_core::QString; use cpp_core::CppBox; use fluent_bundle::{FluentResource, FluentBundle}; use unic_langid::{langid, LanguageIdentifier, subtags::Language}; use std::fs::File; use std::io::Read; use std::path::Path; use std::sync::{Arc, RwLock, RwLockReadGuard}; use std::str::FromStr; use rpfm_error::{Error, ErrorKind, Result}; use rpfm_lib::common::get_files_from_subdir; use crate::ASSETS_PATH; use crate::LOCALE; use crate::LOCALE_FALLBACK; /// Name of the folder containing all the schemas. const LOCALE_FOLDER: &str = "locale"; /// Replace sequence used to insert data into the translations. const REPLACE_SEQUENCE: &str = "{}"; /// Include by default the english localisation, to avoid problems with idiots deleting files. const FALLBACK_LOCALE: &str = include_str!("../../../locale/English_en.ftl"); /// This struct contains a localisation use in RPFM. #[derive(Clone)] pub struct Locale(Arc<RwLock<FluentBundle<FluentResource>>>); /// Wacky fix for the "You cannot put a pointer in a static" problem. unsafe impl Sync for Locale {} /// Implementation of `Locale`. impl Locale { /// This function initializes the localisation for the provided language, if exists. pub fn initialize(file_name: &str) -> Result<Self> { // Get the list of available translations from the locale folder, and load the requested one, if found. let lang_info = file_name.split('_').collect::<Vec<&str>>(); if lang_info.len() == 2 { let lang_id = lang_info[1]; let locales = Self::get_available_locales()?; let selected_locale = locales.iter().map(|x| x.1.clone()).find(|x| x.language == lang_id).ok_or_else(|| Error::from(ErrorKind::FluentResourceLoadingError))?; let locale = format!("{}/{}/{}.ftl", ASSETS_PATH.to_string_lossy(), LOCALE_FOLDER, file_name); // If found, load the entire file to a string. let mut file = File::open(&locale)?; let mut ftl_string = String::new(); file.read_to_string(&mut ftl_string)?; // Then to a resource and a bundle. let resource = FluentResource::try_new(ftl_string)?; let mut bundle = FluentBundle::new([selected_locale].to_vec()); bundle.add_resource(resource)?; // If nothing failed, return the new translation. Ok(Self(Arc::new(RwLock::new(bundle)))) } else { Err(ErrorKind::InvalidLocalisationFileName(file_name.to_string()).into()) } } /// This function initializes the fallback localisation included in the binary. pub fn initialize_fallback() -> Result<Self> { let resource = FluentResource::try_new(FALLBACK_LOCALE.to_owned())?; let mut bundle = FluentBundle::new(vec![langid!["en"]]); bundle.add_resource(resource)?; Ok(Self(Arc::new(RwLock::new(bundle)))) } /// This function initializes an empty localisation, just in case some idiot deletes the english translation and fails to load it. 
pub fn initialize_empty() -> Self { let resource = FluentResource::try_new(String::new()).unwrap(); let mut bundle = FluentBundle::new(vec![langid!["en"]]); bundle.add_resource(resource).unwrap(); Self(Arc::new(RwLock::new(bundle))) } /// This function returns a list of all the languages we have translation files for in the `("English", "en")` form. pub fn get_available_locales() -> Result<Vec<(String, LanguageIdentifier)>> { let mut languages = vec![]; for file in get_files_from_subdir(&ASSETS_PATH.to_path_buf().join(Path::new("locale")), false)? { let language = file.file_stem().unwrap().to_string_lossy().to_string(); let lang_info = language.split('_').collect::<Vec<&str>>(); if lang_info.len() == 2 { let lang_id = Language::from_str(lang_info[1]).unwrap(); let language_id = LanguageIdentifier::from_parts(lang_id, None, None, &[]); languages.push((lang_info[0].to_owned(), language_id)); } } Ok(languages) } /// This function returns the translation for the key provided in the current language. /// /// If the key doesn't exists, it returns the equivalent from the english localisation. If it fails to find it there too, returns a warning. fn tr(key: &str) -> String { let mut _errors = vec![]; match LOCALE.get().get_message(key) { Some(message) => match message.value() { Some(pattern) => LOCALE.get().format_pattern(pattern, None, &mut _errors).to_string(), None => Self::tr_fallback(key), }, None => Self::tr_fallback(key), } } /// This function returns the translation for the key provided in the english language, or a... warning. fn tr_fallback(key: &str) -> String { let mut _errors = vec![]; match LOCALE_FALLBACK.get().get_message(key) { Some(message) => match message.value() { Some(pattern) => LOCALE_FALLBACK.get().format_pattern(pattern, None, &mut _errors).to_string(), None => "AlL YoUrS TrAnSlAtIoNs ArE BeLoNg To mE.".to_owned(), }, None => "AlL YoUrS TrAnSlAtIoNs ArE BeLoNg To mE.".to_owned(), } } /// This function returns a read-only guard to the provided `Locale`. pub fn get(&self) -> RwLockReadGuard<FluentBundle<FluentResource>> { self.0.read().unwrap() } } /// This function returns the translation as a `String` for the key provided in the current language. /// /// If the key doesn't exists, it returns the equivalent from the english localisation. If it fails to find it there too, returns a warning. pub fn tr(key: &str) -> String { Locale::tr(key) } /// This function returns the translation as a `String` for the key provided in the current language, /// replacing certain parts of the translation with the replacements provided. /// /// If the key doesn't exists, it returns the equivalent from the english localisation. If it fails to find it there too, returns a warning. #[allow(unused)] pub fn tre(key: &str, replacements: &[&str]) -> String { let mut translation = Locale::tr(key); replacements.iter().for_each(|x| translation = translation.replacen(REPLACE_SEQUENCE, x, 1)); translation } /// This function returns the translation as a `QString` for the key provided in the current language. /// /// If the key doesn't exists, it returns the equivalent from the english localisation. If it fails to find it there too, returns a warning. pub fn qtr(key: &str) -> CppBox<QString> { QString::from_std_str(Locale::tr(key)) } /// This function returns the translation as a `QString` for the key provided in the current language, /// replacing certain parts of the translation with the replacements provided. /// /// If the key doesn't exists, it returns the equivalent from the english localisation. 
If it fails to find it there too, returns a warning. pub fn qtre(key: &str, replacements: &[&str]) -> CppBox<QString> { let mut translation = Locale::tr(key); replacements.iter().for_each(|x| translation = translation.replacen(REPLACE_SEQUENCE, x, 1)); QString::from_std_str(translation) }
43.929348
169
0.653099
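`tre` and `qtre` above substitute arguments into a translation by repeatedly consuming the `{}` replace sequence, left to right. A standalone sketch of that substitution using only std; `fill` is a hypothetical stand-in name, while `REPLACE_SEQUENCE` mirrors the constant in the original file:

```rust
const REPLACE_SEQUENCE: &str = "{}";

fn fill(template: &str, replacements: &[&str]) -> String {
    let mut out = template.to_owned();
    for r in replacements {
        // Each replacement consumes exactly one `{}`, left to right,
        // matching the `replacen(.., 1)` call in the record above.
        out = out.replacen(REPLACE_SEQUENCE, r, 1);
    }
    out
}

fn main() {
    assert_eq!(fill("{} of {} files", &["3", "10"]), "3 of 10 files");
}
```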
9bfbde7f4cbcde975bfc87cb35b8a1bca37e38fb
2,750
use std::{collections::HashMap, fs};

pub fn main() {
    let test = fs::read_to_string("data/day18test.txt").unwrap();
    let input = fs::read_to_string("data/day18.txt").unwrap();

    let test1 = solve(test.clone(), 4, false);
    assert_eq!(test1, 4);
    println!("Day 18: Test 1: input has {} lights after {} iterations", test1, 4);

    let part1 = solve(input.clone(), 100, false);
    assert_eq!(part1, 768);
    println!("Day 18: Part 1: input has {} lights after {} iterations", part1, 100);

    let test2 = solve(test, 5, true);
    assert_eq!(test2, 17);
    println!("Day 18: Test 2: input has {} lights after {} iterations", test2, 5);

    let part2 = solve(input, 100, true);
    assert_eq!(part2, 781);
    println!("Day 18: Part 2: input has {} lights after {} iterations", part2, 100);
}

fn solve(input: String, iterations: u32, part2: bool) -> u32 {
    let mut old_state = HashMap::new();
    let size = input.lines().count() as i32;

    for (x, line) in input.lines().enumerate() {
        for (y, cell) in line.chars().enumerate() {
            if part2 && (x == 0 || x == (size as usize) - 1) && (y == 0 || y == (size as usize) - 1) {
                old_state.insert((x as i32, y as i32), 1);
                continue
            }
            match cell {
                '#' => old_state.insert((x as i32, y as i32), 1),
                '.' => old_state.insert((x as i32, y as i32), 0),
                _ => panic!("Invalid state detected"),
            };
        }
    }

    for _ in 0..iterations {
        let mut new_state = HashMap::new();
        for x in 0..size {
            for y in 0..size {
                if part2 && (x == 0 || x == size - 1) && (y == 0 || y == size - 1) {
                    new_state.insert((x, y), 1);
                    continue
                }
                let current = old_state.get(&(x, y)).unwrap();
                let neighbors = old_state.get(&(x - 1, y - 1)).unwrap_or(&0)
                    + old_state.get(&(x, y - 1)).unwrap_or(&0)
                    + old_state.get(&(x + 1, y - 1)).unwrap_or(&0)
                    + old_state.get(&(x - 1, y)).unwrap_or(&0)
                    + old_state.get(&(x + 1, y)).unwrap_or(&0)
                    + old_state.get(&(x - 1, y + 1)).unwrap_or(&0)
                    + old_state.get(&(x, y + 1)).unwrap_or(&0)
                    + old_state.get(&(x + 1, y + 1)).unwrap_or(&0);
                if neighbors == 3 || (neighbors == 2 && *current == 1) {
                    new_state.insert((x, y), 1);
                } else {
                    new_state.insert((x, y), 0);
                }
            }
        }
        old_state = new_state;
    }

    old_state.values().sum()
}
38.194444
84
0.467636
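The solution above counts live neighbours by summing eight HashMap lookups, with `unwrap_or(&0)` standing in for out-of-bounds cells. The same idea on a plain 2-D `Vec`, as a small illustrative sketch (not part of the original solution):

```rust
fn neighbours(grid: &[Vec<u8>], x: usize, y: usize) -> u8 {
    let mut sum = 0;
    for dx in [-1i32, 0, 1] {
        for dy in [-1i32, 0, 1] {
            if dx == 0 && dy == 0 {
                continue; // skip the cell itself
            }
            let nx = x as i32 + dx;
            let ny = y as i32 + dy;
            if nx < 0 || ny < 0 {
                continue; // out-of-bounds neighbours count as off
            }
            // `get` handles the far edges, like `unwrap_or(&0)` above.
            sum += grid
                .get(nx as usize)
                .and_then(|row| row.get(ny as usize))
                .copied()
                .unwrap_or(0);
        }
    }
    sum
}

fn main() {
    let grid = vec![
        vec![1, 1, 0],
        vec![1, 0, 0],
        vec![0, 0, 0],
    ];
    // The centre cell has three live neighbours.
    assert_eq!(neighbours(&grid, 1, 1), 3);
}
```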
29cae4e800bd9049d369afe7b73bc8e4b1060bd1
2,480
use crate::data_source::asset::AssetDataSource;
use crate::proofs::AssetProof;
use crate::secp256k1::Point;

pub struct AssetProofBuilder<'a> {
    ds: &'a mut AssetDataSource,
    g: Point,
    h: Point,
}

impl<'a> AssetProofBuilder<'a> {
    pub fn new(ds: &'a mut AssetDataSource) -> AssetProofBuilder {
        AssetProofBuilder {
            ds,
            g: crate::g(),
            h: crate::h(),
        }
    }

    pub fn build(&mut self) {
        loop {
            match self.ds.next() {
                None => break,
                Some(asset) => {
                    let proof = AssetProof::create(asset.0, &asset.1, asset.2, &self.g, &self.h);
                    self.ds.put_proof(proof).expect("put works");
                }
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::data_source::asset::AssetData;
    use crate::fields::Field256;
    use crate::secp256k1::{point_mul, Point};
    use num_bigint::BigUint;

    #[test]
    fn asset_proof_builder_builds_all_assets() {
        let asset_count = 2;
        let assets = gen_assets(asset_count);
        let mut asset_ds = MemoryAssetDataSource::new(assets);
        let mut builder = AssetProofBuilder::new(&mut asset_ds);

        builder.build();

        assert_eq!(asset_ds.assets.len(), 0);
        assert_eq!(asset_ds.proofs.len(), asset_count);
    }

    fn gen_assets(num: usize) -> Vec<AssetData> {
        (0..num)
            .into_iter()
            .map(|_| {
                let x = Field256::from(1);
                let y = point_mul(Point::g(), &x);
                let bal = BigUint::from(10u8);
                (Some(x), y, bal)
            })
            .collect()
    }

    struct MemoryAssetDataSource {
        assets: Vec<AssetData>,
        proofs: Vec<AssetProof>,
    }

    impl MemoryAssetDataSource {
        fn new(assets: Vec<AssetData>) -> MemoryAssetDataSource {
            MemoryAssetDataSource {
                assets,
                proofs: vec![],
            }
        }
    }

    impl AssetDataSource for MemoryAssetDataSource {
        fn next(&mut self) -> Option<AssetData> {
            if self.assets.len() > 0 {
                let asset = self.assets.remove(0);
                Some(asset)
            } else {
                None
            }
        }

        fn put_proof(&mut self, proof: AssetProof) -> Result<(), &str> {
            self.proofs.push(proof);
            Ok(())
        }
    }
}
25.56701
97
0.504839
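The `build` loop above is a drain-and-transform over a data source. A minimal stand-in with the same shape; the trait and type names here are illustrative, not the crate's real API, and a `while let` replaces the loop/match/break:

```rust
trait Source {
    fn next(&mut self) -> Option<u32>;
    fn put(&mut self, proof: u32);
}

struct Mem {
    inputs: Vec<u32>,
    proofs: Vec<u32>,
}

impl Source for Mem {
    fn next(&mut self) -> Option<u32> {
        if self.inputs.is_empty() {
            None
        } else {
            Some(self.inputs.remove(0))
        }
    }
    fn put(&mut self, proof: u32) {
        self.proofs.push(proof);
    }
}

fn build(ds: &mut dyn Source) {
    // Drain every input and write back its transformed result.
    while let Some(asset) = ds.next() {
        ds.put(asset * 2); // stand-in for AssetProof::create
    }
}

fn main() {
    let mut ds = Mem { inputs: vec![1, 2, 3], proofs: vec![] };
    build(&mut ds);
    assert_eq!(ds.proofs, vec![2, 4, 6]);
}
```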
1e7864323a5143a3ceab2b76005e2a9ac765f68e
276
pub trait Assign {
    type Item: Clone;
    fn assign(&mut self, e: Self::Item, len: usize);
}

impl<T: Clone> Assign for Vec<T> {
    type Item = T;
    fn assign(&mut self, e: T, len: usize) {
        self.clear();
        self.extend((0..len).map(|_| e.clone()));
    }
}
21.230769
52
0.539855
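A usage sketch for the `Assign` trait above; the trait is repeated so the example compiles standalone. Note that std's `Vec::resize(len, e)` is close, but it keeps existing elements when growing, while `assign` always clears first:

```rust
// Repeats the trait from the record above for a self-contained demo.
trait Assign {
    type Item: Clone;
    fn assign(&mut self, e: Self::Item, len: usize);
}

impl<T: Clone> Assign for Vec<T> {
    type Item = T;
    fn assign(&mut self, e: T, len: usize) {
        self.clear();
        self.extend((0..len).map(|_| e.clone()));
    }
}

fn main() {
    let mut v = vec![1, 2, 3];
    v.assign(7, 5); // drops the old contents, then fills with five 7s
    assert_eq!(v, vec![7; 5]);
}
```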
67de917f7b5297fb3f119297b700af8ccc38a56d
19,538
// This file is part of the uutils coreutils package. // // (c) Jordi Boggiano <[email protected]> // (c) Evgeniy Klyuchikov <[email protected]> // (c) Joshua S. Miller <[email protected]> // (c) Árni Dagur <[email protected]> // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // spell-checker:ignore (ToDO) nonprint nonblank nonprinting #[cfg(unix)] extern crate unix_socket; // last synced with: cat (GNU coreutils) 8.13 use clap::{crate_version, Arg, Command}; use std::fs::{metadata, File}; use std::io::{self, Read, Write}; use thiserror::Error; use uucore::display::Quotable; use uucore::error::UResult; use uucore::fs::FileInformation; #[cfg(unix)] use std::os::unix::io::AsRawFd; /// Linux splice support #[cfg(any(target_os = "linux", target_os = "android"))] mod splice; /// Unix domain socket support #[cfg(unix)] use std::net::Shutdown; #[cfg(unix)] use std::os::unix::fs::FileTypeExt; #[cfg(unix)] use unix_socket::UnixStream; use uucore::{format_usage, InvalidEncodingHandling}; static NAME: &str = "cat"; static USAGE: &str = "{} [OPTION]... [FILE]..."; static SUMMARY: &str = "Concatenate FILE(s), or standard input, to standard output With no FILE, or when FILE is -, read standard input."; #[derive(Error, Debug)] enum CatError { /// Wrapper around `io::Error` #[error("{0}")] Io(#[from] io::Error), /// Wrapper around `nix::Error` #[cfg(any(target_os = "linux", target_os = "android"))] #[error("{0}")] Nix(#[from] nix::Error), /// Unknown file type; it's not a regular file, socket, etc. #[error("unknown filetype: {}", ft_debug)] UnknownFiletype { /// A debug print of the file type ft_debug: String, }, #[error("Is a directory")] IsDirectory, #[error("input file is output file")] OutputIsInput, } type CatResult<T> = Result<T, CatError>; #[derive(PartialEq)] enum NumberingMode { None, NonEmpty, All, } struct OutputOptions { /// Line numbering mode number: NumberingMode, /// Suppress repeated empty output lines squeeze_blank: bool, /// display TAB characters as `tab` show_tabs: bool, /// Show end of lines show_ends: bool, /// use ^ and M- notation, except for LF (\\n) and TAB (\\t) show_nonprint: bool, } impl OutputOptions { fn tab(&self) -> &'static str { if self.show_tabs { "^I" } else { "\t" } } fn end_of_line(&self) -> &'static str { if self.show_ends { "$\n" } else { "\n" } } /// We can write fast if we can simply copy the contents of the file to /// stdout, without augmenting the output with e.g. line numbers. fn can_write_fast(&self) -> bool { !(self.show_tabs || self.show_nonprint || self.show_ends || self.squeeze_blank || self.number != NumberingMode::None) } } /// State that persists between output of each file. This struct is only used /// when we can't write fast. struct OutputState { /// The current line number line_number: usize, /// Whether the output cursor is at the beginning of a new line at_line_start: bool, /// Whether we skipped a \r, which still needs to be printed skipped_carriage_return: bool, /// Whether we have already printed a blank line one_blank_kept: bool, } #[cfg(unix)] trait FdReadable: Read + AsRawFd {} #[cfg(not(unix))] trait FdReadable: Read {} #[cfg(unix)] impl<T> FdReadable for T where T: Read + AsRawFd {} #[cfg(not(unix))] impl<T> FdReadable for T where T: Read {} /// Represents an open file handle, stream, or other device struct InputHandle<R: FdReadable> { reader: R, is_interactive: bool, } /// Concrete enum of recognized file types. 
/// /// *Note*: `cat`-ing a directory should result in an /// CatError::IsDirectory enum InputType { Directory, File, StdIn, SymLink, #[cfg(unix)] BlockDevice, #[cfg(unix)] CharacterDevice, #[cfg(unix)] Fifo, #[cfg(unix)] Socket, } mod options { pub static FILE: &str = "file"; pub static SHOW_ALL: &str = "show-all"; pub static NUMBER_NONBLANK: &str = "number-nonblank"; pub static SHOW_NONPRINTING_ENDS: &str = "e"; pub static SHOW_ENDS: &str = "show-ends"; pub static NUMBER: &str = "number"; pub static SQUEEZE_BLANK: &str = "squeeze-blank"; pub static SHOW_NONPRINTING_TABS: &str = "t"; pub static SHOW_TABS: &str = "show-tabs"; pub static SHOW_NONPRINTING: &str = "show-nonprinting"; } #[uucore::main] pub fn uumain(args: impl uucore::Args) -> UResult<()> { let args = args .collect_str(InvalidEncodingHandling::Ignore) .accept_any(); let matches = uu_app().try_get_matches_from(args)?; let number_mode = if matches.is_present(options::NUMBER_NONBLANK) { NumberingMode::NonEmpty } else if matches.is_present(options::NUMBER) { NumberingMode::All } else { NumberingMode::None }; let show_nonprint = vec![ options::SHOW_ALL.to_owned(), options::SHOW_NONPRINTING_ENDS.to_owned(), options::SHOW_NONPRINTING_TABS.to_owned(), options::SHOW_NONPRINTING.to_owned(), ] .iter() .any(|v| matches.is_present(v)); let show_ends = vec![ options::SHOW_ENDS.to_owned(), options::SHOW_ALL.to_owned(), options::SHOW_NONPRINTING_ENDS.to_owned(), ] .iter() .any(|v| matches.is_present(v)); let show_tabs = vec![ options::SHOW_ALL.to_owned(), options::SHOW_TABS.to_owned(), options::SHOW_NONPRINTING_TABS.to_owned(), ] .iter() .any(|v| matches.is_present(v)); let squeeze_blank = matches.is_present(options::SQUEEZE_BLANK); let files: Vec<String> = match matches.values_of(options::FILE) { Some(v) => v.clone().map(|v| v.to_owned()).collect(), None => vec!["-".to_owned()], }; let options = OutputOptions { show_ends, number: number_mode, show_nonprint, show_tabs, squeeze_blank, }; cat_files(&files, &options) } pub fn uu_app<'a>() -> Command<'a> { Command::new(uucore::util_name()) .name(NAME) .version(crate_version!()) .override_usage(format_usage(USAGE)) .about(SUMMARY) .infer_long_args(true) .arg( Arg::new(options::FILE) .hide(true) .multiple_occurrences(true), ) .arg( Arg::new(options::SHOW_ALL) .short('A') .long(options::SHOW_ALL) .help("equivalent to -vET"), ) .arg( Arg::new(options::NUMBER_NONBLANK) .short('b') .long(options::NUMBER_NONBLANK) .help("number nonempty output lines, overrides -n") .overrides_with(options::NUMBER), ) .arg( Arg::new(options::SHOW_NONPRINTING_ENDS) .short('e') .help("equivalent to -vE"), ) .arg( Arg::new(options::SHOW_ENDS) .short('E') .long(options::SHOW_ENDS) .help("display $ at end of each line"), ) .arg( Arg::new(options::NUMBER) .short('n') .long(options::NUMBER) .help("number all output lines"), ) .arg( Arg::new(options::SQUEEZE_BLANK) .short('s') .long(options::SQUEEZE_BLANK) .help("suppress repeated empty output lines"), ) .arg( Arg::new(options::SHOW_NONPRINTING_TABS) .short('t') .long(options::SHOW_NONPRINTING_TABS) .help("equivalent to -vT"), ) .arg( Arg::new(options::SHOW_TABS) .short('T') .long(options::SHOW_TABS) .help("display TAB characters at ^I"), ) .arg( Arg::new(options::SHOW_NONPRINTING) .short('v') .long(options::SHOW_NONPRINTING) .help("use ^ and M- notation, except for LF (\\n) and TAB (\\t)"), ) } fn cat_handle<R: FdReadable>( handle: &mut InputHandle<R>, options: &OutputOptions, state: &mut OutputState, ) -> CatResult<()> { if options.can_write_fast() { write_fast(handle) } else 
{ write_lines(handle, options, state) } } fn cat_path( path: &str, options: &OutputOptions, state: &mut OutputState, out_info: Option<&FileInformation>, ) -> CatResult<()> { match get_input_type(path)? { InputType::StdIn => { let stdin = io::stdin(); let mut handle = InputHandle { reader: stdin, is_interactive: atty::is(atty::Stream::Stdin), }; cat_handle(&mut handle, options, state) } InputType::Directory => Err(CatError::IsDirectory), #[cfg(unix)] InputType::Socket => { let socket = UnixStream::connect(path)?; socket.shutdown(Shutdown::Write)?; let mut handle = InputHandle { reader: socket, is_interactive: false, }; cat_handle(&mut handle, options, state) } _ => { let file = File::open(path)?; if let Some(out_info) = out_info { if out_info.file_size() != 0 && FileInformation::from_file(&file).as_ref() == Some(out_info) { return Err(CatError::OutputIsInput); } } let mut handle = InputHandle { reader: file, is_interactive: false, }; cat_handle(&mut handle, options, state) } } } fn cat_files(files: &[String], options: &OutputOptions) -> UResult<()> { let out_info = FileInformation::from_file(&std::io::stdout()); let mut state = OutputState { line_number: 1, at_line_start: true, skipped_carriage_return: false, one_blank_kept: false, }; let mut error_messages: Vec<String> = Vec::new(); for path in files { if let Err(err) = cat_path(path, options, &mut state, out_info.as_ref()) { error_messages.push(format!("{}: {}", path.maybe_quote(), err)); } } if state.skipped_carriage_return { print!("\r"); } if error_messages.is_empty() { Ok(()) } else { // each next line is expected to display "cat: …" let line_joiner = format!("\n{}: ", uucore::util_name()); Err(uucore::error::USimpleError::new( error_messages.len() as i32, error_messages.join(&line_joiner), )) } } /// Classifies the `InputType` of file at `path` if possible /// /// # Arguments /// /// * `path` - Path on a file system to classify metadata fn get_input_type(path: &str) -> CatResult<InputType> { if path == "-" { return Ok(InputType::StdIn); } let ft = metadata(path)?.file_type(); match ft { #[cfg(unix)] ft if ft.is_block_device() => Ok(InputType::BlockDevice), #[cfg(unix)] ft if ft.is_char_device() => Ok(InputType::CharacterDevice), #[cfg(unix)] ft if ft.is_fifo() => Ok(InputType::Fifo), #[cfg(unix)] ft if ft.is_socket() => Ok(InputType::Socket), ft if ft.is_dir() => Ok(InputType::Directory), ft if ft.is_file() => Ok(InputType::File), ft if ft.is_symlink() => Ok(InputType::SymLink), _ => Err(CatError::UnknownFiletype { ft_debug: format!("{:?}", ft), }), } } /// Writes handle to stdout with no configuration. This allows a /// simple memory copy. fn write_fast<R: FdReadable>(handle: &mut InputHandle<R>) -> CatResult<()> { let stdout = io::stdout(); let mut stdout_lock = stdout.lock(); #[cfg(any(target_os = "linux", target_os = "android"))] { // If we're on Linux or Android, try to use the splice() system call // for faster writing. If it works, we're done. if !splice::write_fast_using_splice(handle, &stdout_lock)? { return Ok(()); } } // If we're not on Linux or Android, or the splice() call failed, // fall back on slower writing. let mut buf = [0; 1024 * 64]; while let Ok(n) = handle.reader.read(&mut buf) { if n == 0 { break; } stdout_lock.write_all(&buf[..n])?; } Ok(()) } /// Outputs file contents to stdout in a line-by-line fashion, /// propagating any errors that might occur. 
fn write_lines<R: FdReadable>( handle: &mut InputHandle<R>, options: &OutputOptions, state: &mut OutputState, ) -> CatResult<()> { let mut in_buf = [0; 1024 * 31]; let stdout = io::stdout(); let mut writer = stdout.lock(); while let Ok(n) = handle.reader.read(&mut in_buf) { if n == 0 { break; } let in_buf = &in_buf[..n]; let mut pos = 0; while pos < n { // skip empty line_number enumerating them if needed if in_buf[pos] == b'\n' { // \r followed by \n is printed as ^M when show_ends is enabled, so that \r\n prints as ^M$ if state.skipped_carriage_return && options.show_ends { writer.write_all(b"^M")?; state.skipped_carriage_return = false; } if !state.at_line_start || !options.squeeze_blank || !state.one_blank_kept { state.one_blank_kept = true; if state.at_line_start && options.number == NumberingMode::All { write!(writer, "{0:6}\t", state.line_number)?; state.line_number += 1; } writer.write_all(options.end_of_line().as_bytes())?; if handle.is_interactive { writer.flush()?; } } state.at_line_start = true; pos += 1; continue; } if state.skipped_carriage_return { writer.write_all(b"\r")?; state.skipped_carriage_return = false; state.at_line_start = false; } state.one_blank_kept = false; if state.at_line_start && options.number != NumberingMode::None { write!(writer, "{0:6}\t", state.line_number)?; state.line_number += 1; } // print to end of line or end of buffer let offset = if options.show_nonprint { write_nonprint_to_end(&in_buf[pos..], &mut writer, options.tab().as_bytes()) } else if options.show_tabs { write_tab_to_end(&in_buf[pos..], &mut writer) } else { write_to_end(&in_buf[pos..], &mut writer) }; // end of buffer? if offset + pos == in_buf.len() { state.at_line_start = false; break; } if in_buf[pos + offset] == b'\r' { state.skipped_carriage_return = true; } else { assert_eq!(in_buf[pos + offset], b'\n'); // print suitable end of line writer.write_all(options.end_of_line().as_bytes())?; if handle.is_interactive { writer.flush()?; } state.at_line_start = true; } pos += offset + 1; } } Ok(()) } // write***_to_end methods // Write all symbols till \n or \r or end of buffer is reached // We need to stop at \r because it may be written as ^M depending on the byte after and settings; // however, write_nonprint_to_end doesn't need to stop at \r because it will always write \r as ^M. 
// Return the number of written symbols fn write_to_end<W: Write>(in_buf: &[u8], writer: &mut W) -> usize { match in_buf.iter().position(|c| *c == b'\n' || *c == b'\r') { Some(p) => { writer.write_all(&in_buf[..p]).unwrap(); p } None => { writer.write_all(in_buf).unwrap(); in_buf.len() } } } fn write_tab_to_end<W: Write>(mut in_buf: &[u8], writer: &mut W) -> usize { let mut count = 0; loop { match in_buf .iter() .position(|c| *c == b'\n' || *c == b'\t' || *c == b'\r') { Some(p) => { writer.write_all(&in_buf[..p]).unwrap(); if in_buf[p] == b'\t' { writer.write_all(b"^I").unwrap(); in_buf = &in_buf[p + 1..]; count += p + 1; } else { // b'\n' or b'\r' return count + p; } } None => { writer.write_all(in_buf).unwrap(); return in_buf.len(); } }; } } fn write_nonprint_to_end<W: Write>(in_buf: &[u8], writer: &mut W, tab: &[u8]) -> usize { let mut count = 0; for byte in in_buf.iter().copied() { if byte == b'\n' { break; } match byte { 9 => writer.write_all(tab), 0..=8 | 10..=31 => writer.write_all(&[b'^', byte + 64]), 32..=126 => writer.write_all(&[byte]), 127 => writer.write_all(&[b'^', b'?']), 128..=159 => writer.write_all(&[b'M', b'-', b'^', byte - 64]), 160..=254 => writer.write_all(&[b'M', b'-', byte - 128]), _ => writer.write_all(&[b'M', b'-', b'^', b'?']), } .unwrap(); count += 1; } count } #[cfg(test)] mod tests { use std::io::{stdout, BufWriter}; #[test] fn test_write_nonprint_to_end_new_line() { let mut writer = BufWriter::with_capacity(1024 * 64, stdout()); let in_buf = b"\n"; let tab = b""; super::write_nonprint_to_end(in_buf, &mut writer, tab); assert_eq!(writer.buffer().len(), 0); } #[test] fn test_write_nonprint_to_end_9() { let mut writer = BufWriter::with_capacity(1024 * 64, stdout()); let in_buf = &[9u8]; let tab = b"tab"; super::write_nonprint_to_end(in_buf, &mut writer, tab); assert_eq!(writer.buffer(), tab); } #[test] fn test_write_nonprint_to_end_0_to_8() { for byte in 0u8..=8u8 { let mut writer = BufWriter::with_capacity(1024 * 64, stdout()); let in_buf = &[byte]; let tab = b""; super::write_nonprint_to_end(in_buf, &mut writer, tab); assert_eq!(writer.buffer(), [b'^', byte + 64]); } } #[test] fn test_write_nonprint_to_end_10_to_31() { for byte in 11u8..=31u8 { let mut writer = BufWriter::with_capacity(1024 * 64, stdout()); let in_buf = &[byte]; let tab = b""; super::write_nonprint_to_end(in_buf, &mut writer, tab); assert_eq!(writer.buffer(), [b'^', byte + 64]); } } }
30.244582
107
0.53767
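`write_nonprint_to_end` above implements cat's caret/meta notation: control bytes become `^X`, DEL becomes `^?`, and bytes with the high bit set get an `M-` prefix. A standalone sketch of the byte mapping; TAB handling is simplified here, since the original parameterises it so `-T` can render it as `^I`:

```rust
fn nonprint(byte: u8) -> String {
    match byte {
        9 => "\t".to_string(), // parameterised in the original (`^I` under -T)
        0..=8 | 10..=31 => format!("^{}", (byte + 64) as char), // control -> ^X
        32..=126 => (byte as char).to_string(), // printable ASCII unchanged
        127 => "^?".to_string(),                // DEL
        128..=159 => format!("M-^{}", (byte - 64) as char), // meta + control
        160..=254 => format!("M-{}", (byte - 128) as char), // meta + printable
        255 => "M-^?".to_string(),              // meta + DEL
    }
}

fn main() {
    assert_eq!(nonprint(1), "^A");    // SOH -> ^A
    assert_eq!(nonprint(127), "^?");  // DEL
    assert_eq!(nonprint(200), "M-H"); // 200 - 128 = 72 = 'H'
}
```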
0a699126424736809606555a24b9b01cf10f53bb
19,468
#[doc = r" Value read from the register"] pub struct R { bits: u32, } #[doc = r" Value to write to the register"] pub struct W { bits: u32, } impl super::TIMCTL0 { #[doc = r" Modifies the contents of the register"] #[inline] pub fn modify<F>(&self, f: F) where for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W, { let bits = self.register.get(); let r = R { bits: bits }; let mut w = W { bits: bits }; f(&r, &mut w); self.register.set(w.bits); } #[doc = r" Reads the contents of the register"] #[inline] pub fn read(&self) -> R { R { bits: self.register.get() } } #[doc = r" Writes to the register"] #[inline] pub fn write<F>(&self, f: F) where F: FnOnce(&mut W) -> &mut W, { let mut w = W::reset_value(); f(&mut w); self.register.set(w.bits); } #[doc = r" Writes the reset value to the register"] #[inline] pub fn reset(&self) { self.write(|w| w) } } #[doc = "Possible values of the field `TIMOD`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum TIMODR { #[doc = "Timer Disabled."] _0, #[doc = "Dual 8-bit counters baud/bit mode."] _1, #[doc = "Dual 8-bit counters PWM mode."] _10, #[doc = "Single 16-bit counter mode."] _11, } impl TIMODR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bits(&self) -> u8 { match *self { TIMODR::_0 => 0, TIMODR::_1 => 1, TIMODR::_10 => 2, TIMODR::_11 => 3, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: u8) -> TIMODR { match value { 0 => TIMODR::_0, 1 => TIMODR::_1, 2 => TIMODR::_10, 3 => TIMODR::_11, _ => unreachable!(), } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == TIMODR::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == TIMODR::_1 } #[doc = "Checks if the value of the field is `_10`"] #[inline] pub fn is_10(&self) -> bool { *self == TIMODR::_10 } #[doc = "Checks if the value of the field is `_11`"] #[inline] pub fn is_11(&self) -> bool { *self == TIMODR::_11 } } #[doc = "Possible values of the field `PINPOL`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum PINPOLR { #[doc = "Pin is active high"] _0, #[doc = "Pin is active low"] _1, } impl PINPOLR { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { PINPOLR::_0 => false, PINPOLR::_1 => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> PINPOLR { match value { false => PINPOLR::_0, true => PINPOLR::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == PINPOLR::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == PINPOLR::_1 } } #[doc = r" Value of the field"] pub struct PINSELR { bits: u8, } impl PINSELR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bits(&self) -> u8 { self.bits } } #[doc = "Possible values of the field `PINCFG`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum PINCFGR { #[doc = "Timer pin output disabled"] _0, #[doc = "Timer pin open drain or bidirectional output enable"] _1, #[doc = "Timer pin bidirectional output data"] _10, #[doc = "Timer pin output"] _11, } impl PINCFGR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bits(&self) -> u8 { match *self { PINCFGR::_0 => 0, PINCFGR::_1 => 1, PINCFGR::_10 => 2, 
PINCFGR::_11 => 3, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: u8) -> PINCFGR { match value { 0 => PINCFGR::_0, 1 => PINCFGR::_1, 2 => PINCFGR::_10, 3 => PINCFGR::_11, _ => unreachable!(), } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == PINCFGR::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == PINCFGR::_1 } #[doc = "Checks if the value of the field is `_10`"] #[inline] pub fn is_10(&self) -> bool { *self == PINCFGR::_10 } #[doc = "Checks if the value of the field is `_11`"] #[inline] pub fn is_11(&self) -> bool { *self == PINCFGR::_11 } } #[doc = "Possible values of the field `TRGSRC`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum TRGSRCR { #[doc = "External trigger selected"] _0, #[doc = "Internal trigger selected"] _1, } impl TRGSRCR { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { TRGSRCR::_0 => false, TRGSRCR::_1 => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> TRGSRCR { match value { false => TRGSRCR::_0, true => TRGSRCR::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == TRGSRCR::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == TRGSRCR::_1 } } #[doc = "Possible values of the field `TRGPOL`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum TRGPOLR { #[doc = "Trigger active high"] _0, #[doc = "Trigger active low"] _1, } impl TRGPOLR { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { TRGPOLR::_0 => false, TRGPOLR::_1 => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> TRGPOLR { match value { false => TRGPOLR::_0, true => TRGPOLR::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == TRGPOLR::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == TRGPOLR::_1 } } #[doc = r" Value of the field"] pub struct TRGSELR { bits: u8, } impl TRGSELR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bits(&self) -> u8 { self.bits } } #[doc = "Values that can be written to the field `TIMOD`"] pub enum TIMODW { #[doc = "Timer Disabled."] _0, #[doc = "Dual 8-bit counters baud/bit mode."] _1, #[doc = "Dual 8-bit counters PWM mode."] _10, #[doc = "Single 16-bit counter mode."] _11, } impl TIMODW { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> u8 { match *self { TIMODW::_0 => 0, TIMODW::_1 => 1, TIMODW::_10 => 2, TIMODW::_11 => 3, } } } #[doc = r" Proxy"] pub struct _TIMODW<'a> { w: &'a mut W, } impl<'a> _TIMODW<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: TIMODW) -> &'a mut W { { self.bits(variant._bits()) } } #[doc = "Timer Disabled."] #[inline] pub fn _0(self) -> &'a mut W { self.variant(TIMODW::_0) } #[doc = "Dual 8-bit counters baud/bit mode."] #[inline] pub 
fn _1(self) -> &'a mut W { self.variant(TIMODW::_1) } #[doc = "Dual 8-bit counters PWM mode."] #[inline] pub fn _10(self) -> &'a mut W { self.variant(TIMODW::_10) } #[doc = "Single 16-bit counter mode."] #[inline] pub fn _11(self) -> &'a mut W { self.variant(TIMODW::_11) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bits(self, value: u8) -> &'a mut W { const MASK: u8 = 3; const OFFSET: u8 = 0; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `PINPOL`"] pub enum PINPOLW { #[doc = "Pin is active high"] _0, #[doc = "Pin is active low"] _1, } impl PINPOLW { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { PINPOLW::_0 => false, PINPOLW::_1 => true, } } } #[doc = r" Proxy"] pub struct _PINPOLW<'a> { w: &'a mut W, } impl<'a> _PINPOLW<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: PINPOLW) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "Pin is active high"] #[inline] pub fn _0(self) -> &'a mut W { self.variant(PINPOLW::_0) } #[doc = "Pin is active low"] #[inline] pub fn _1(self) -> &'a mut W { self.variant(PINPOLW::_1) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 7; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _PINSELW<'a> { w: &'a mut W, } impl<'a> _PINSELW<'a> { #[doc = r" Writes raw bits to the field"] #[inline] pub unsafe fn bits(self, value: u8) -> &'a mut W { const MASK: u8 = 7; const OFFSET: u8 = 8; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `PINCFG`"] pub enum PINCFGW { #[doc = "Timer pin output disabled"] _0, #[doc = "Timer pin open drain or bidirectional output enable"] _1, #[doc = "Timer pin bidirectional output data"] _10, #[doc = "Timer pin output"] _11, } impl PINCFGW { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> u8 { match *self { PINCFGW::_0 => 0, PINCFGW::_1 => 1, PINCFGW::_10 => 2, PINCFGW::_11 => 3, } } } #[doc = r" Proxy"] pub struct _PINCFGW<'a> { w: &'a mut W, } impl<'a> _PINCFGW<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: PINCFGW) -> &'a mut W { { self.bits(variant._bits()) } } #[doc = "Timer pin output disabled"] #[inline] pub fn _0(self) -> &'a mut W { self.variant(PINCFGW::_0) } #[doc = "Timer pin open drain or bidirectional output enable"] #[inline] pub fn _1(self) -> &'a mut W { self.variant(PINCFGW::_1) } #[doc = "Timer pin bidirectional output data"] #[inline] pub fn _10(self) -> &'a mut W { self.variant(PINCFGW::_10) } #[doc = "Timer pin output"] #[inline] pub fn _11(self) -> &'a mut W { self.variant(PINCFGW::_11) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bits(self, value: u8) -> &'a mut W { const MASK: u8 = 3; const OFFSET: u8 = 16; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `TRGSRC`"] pub enum TRGSRCW { #[doc = "External trigger selected"] _0, #[doc = "Internal trigger selected"] _1, } impl 
TRGSRCW {
    #[allow(missing_docs)]
    #[doc(hidden)]
    #[inline]
    pub fn _bits(&self) -> bool {
        match *self {
            TRGSRCW::_0 => false,
            TRGSRCW::_1 => true,
        }
    }
}
#[doc = r" Proxy"]
pub struct _TRGSRCW<'a> {
    w: &'a mut W,
}
impl<'a> _TRGSRCW<'a> {
    #[doc = r" Writes `variant` to the field"]
    #[inline]
    pub fn variant(self, variant: TRGSRCW) -> &'a mut W {
        {
            self.bit(variant._bits())
        }
    }
    #[doc = "External trigger selected"]
    #[inline]
    pub fn _0(self) -> &'a mut W {
        self.variant(TRGSRCW::_0)
    }
    #[doc = "Internal trigger selected"]
    #[inline]
    pub fn _1(self) -> &'a mut W {
        self.variant(TRGSRCW::_1)
    }
    #[doc = r" Sets the field bit"]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r" Clears the field bit"]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r" Writes raw bits to the field"]
    #[inline]
    pub fn bit(self, value: bool) -> &'a mut W {
        const MASK: bool = true;
        const OFFSET: u8 = 22;
        self.w.bits &= !((MASK as u32) << OFFSET);
        self.w.bits |= ((value & MASK) as u32) << OFFSET;
        self.w
    }
}
#[doc = "Values that can be written to the field `TRGPOL`"]
pub enum TRGPOLW {
    #[doc = "Trigger active high"]
    _0,
    #[doc = "Trigger active low"]
    _1,
}
impl TRGPOLW {
    #[allow(missing_docs)]
    #[doc(hidden)]
    #[inline]
    pub fn _bits(&self) -> bool {
        match *self {
            TRGPOLW::_0 => false,
            TRGPOLW::_1 => true,
        }
    }
}
#[doc = r" Proxy"]
pub struct _TRGPOLW<'a> {
    w: &'a mut W,
}
impl<'a> _TRGPOLW<'a> {
    #[doc = r" Writes `variant` to the field"]
    #[inline]
    pub fn variant(self, variant: TRGPOLW) -> &'a mut W {
        {
            self.bit(variant._bits())
        }
    }
    #[doc = "Trigger active high"]
    #[inline]
    pub fn _0(self) -> &'a mut W {
        self.variant(TRGPOLW::_0)
    }
    #[doc = "Trigger active low"]
    #[inline]
    pub fn _1(self) -> &'a mut W {
        self.variant(TRGPOLW::_1)
    }
    #[doc = r" Sets the field bit"]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r" Clears the field bit"]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r" Writes raw bits to the field"]
    #[inline]
    pub fn bit(self, value: bool) -> &'a mut W {
        const MASK: bool = true;
        const OFFSET: u8 = 23;
        self.w.bits &= !((MASK as u32) << OFFSET);
        self.w.bits |= ((value & MASK) as u32) << OFFSET;
        self.w
    }
}
#[doc = r" Proxy"]
pub struct _TRGSELW<'a> {
    w: &'a mut W,
}
impl<'a> _TRGSELW<'a> {
    #[doc = r" Writes raw bits to the field"]
    #[inline]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        const MASK: u8 = 15;
        const OFFSET: u8 = 24;
        self.w.bits &= !((MASK as u32) << OFFSET);
        self.w.bits |= ((value & MASK) as u32) << OFFSET;
        self.w
    }
}
impl R {
    #[doc = r" Value of the register as raw bits"]
    #[inline]
    pub fn bits(&self) -> u32 {
        self.bits
    }
    #[doc = "Bits 0:1 - Timer Mode"]
    #[inline]
    pub fn timod(&self) -> TIMODR {
        TIMODR::_from({
            const MASK: u8 = 3;
            const OFFSET: u8 = 0;
            ((self.bits >> OFFSET) & MASK as u32) as u8
        })
    }
    #[doc = "Bit 7 - Timer Pin Polarity"]
    #[inline]
    pub fn pinpol(&self) -> PINPOLR {
        PINPOLR::_from({
            const MASK: bool = true;
            const OFFSET: u8 = 7;
            ((self.bits >> OFFSET) & MASK as u32) != 0
        })
    }
    #[doc = "Bits 8:10 - Timer Pin Select"]
    #[inline]
    pub fn pinsel(&self) -> PINSELR {
        let bits = {
            const MASK: u8 = 7;
            const OFFSET: u8 = 8;
            ((self.bits >> OFFSET) & MASK as u32) as u8
        };
        PINSELR { bits }
    }
    #[doc = "Bits 16:17 - Timer Pin Configuration"]
    #[inline]
    pub fn pincfg(&self) -> PINCFGR {
        PINCFGR::_from({
            const MASK: u8 = 3;
            const OFFSET: u8 = 16;
            ((self.bits >> OFFSET) & MASK as u32) as u8
        })
    }
    #[doc = "Bit 22 - Trigger Source"]
    #[inline]
    pub fn trgsrc(&self) -> TRGSRCR {
        TRGSRCR::_from({
            const MASK: bool = true;
            const OFFSET: u8 = 22;
            ((self.bits >> OFFSET) & MASK as u32) != 0
        })
    }
    #[doc = "Bit 23 - Trigger Polarity"]
    #[inline]
    pub fn trgpol(&self) -> TRGPOLR {
        TRGPOLR::_from({
            const MASK: bool = true;
            const OFFSET: u8 = 23;
            ((self.bits >> OFFSET) & MASK as u32) != 0
        })
    }
    #[doc = "Bits 24:27 - Trigger Select"]
    #[inline]
    pub fn trgsel(&self) -> TRGSELR {
        let bits = {
            const MASK: u8 = 15;
            const OFFSET: u8 = 24;
            ((self.bits >> OFFSET) & MASK as u32) as u8
        };
        TRGSELR { bits }
    }
}
impl W {
    #[doc = r" Reset value of the register"]
    #[inline]
    pub fn reset_value() -> W {
        W { bits: 0 }
    }
    #[doc = r" Writes raw bits to the register"]
    #[inline]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
    #[doc = "Bits 0:1 - Timer Mode"]
    #[inline]
    pub fn timod(&mut self) -> _TIMODW {
        _TIMODW { w: self }
    }
    #[doc = "Bit 7 - Timer Pin Polarity"]
    #[inline]
    pub fn pinpol(&mut self) -> _PINPOLW {
        _PINPOLW { w: self }
    }
    #[doc = "Bits 8:10 - Timer Pin Select"]
    #[inline]
    pub fn pinsel(&mut self) -> _PINSELW {
        _PINSELW { w: self }
    }
    #[doc = "Bits 16:17 - Timer Pin Configuration"]
    #[inline]
    pub fn pincfg(&mut self) -> _PINCFGW {
        _PINCFGW { w: self }
    }
    #[doc = "Bit 22 - Trigger Source"]
    #[inline]
    pub fn trgsrc(&mut self) -> _TRGSRCW {
        _TRGSRCW { w: self }
    }
    #[doc = "Bit 23 - Trigger Polarity"]
    #[inline]
    pub fn trgpol(&mut self) -> _TRGPOLW {
        _TRGPOLW { w: self }
    }
    #[doc = "Bits 24:27 - Trigger Select"]
    #[inline]
    pub fn trgsel(&mut self) -> _TRGSELW {
        _TRGSELW { w: self }
    }
}
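A minimal write-side sketch for the generated API above. The peripheral handle and register path (`flexio.timctl0`) are assumptions — this record begins mid-file and never names them; only the field methods come from the code shown:

// Hypothetical register handle; configure an internal, active-high trigger.
// `trgsel().bits` is unsafe because it writes raw field bits.
flexio.timctl0.modify(|_, w| unsafe {
    w.trgsrc()._1().trgpol()._0().trgsel().bits(0)
});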
25.152455
66
0.494915
6ae8f82d75380dd1e48e15bab197af63c7639580
6,533
#[doc = "Register `PULSE3` reader"] pub struct R(crate::R<PULSE3_SPEC>); impl core::ops::Deref for R { type Target = crate::R<PULSE3_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl core::convert::From<crate::R<PULSE3_SPEC>> for R { fn from(reader: crate::R<PULSE3_SPEC>) -> Self { R(reader) } } #[doc = "Register `PULSE3` writer"] pub struct W(crate::W<PULSE3_SPEC>); impl core::ops::Deref for W { type Target = crate::W<PULSE3_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl core::ops::DerefMut for W { #[inline(always)] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl core::convert::From<crate::W<PULSE3_SPEC>> for W { fn from(writer: crate::W<PULSE3_SPEC>) -> Self { W(writer) } } #[doc = "Field `NWE_PULSE` reader - NWE Pulse Length"] pub struct NWE_PULSE_R(crate::FieldReader<u8, u8>); impl NWE_PULSE_R { pub(crate) fn new(bits: u8) -> Self { NWE_PULSE_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for NWE_PULSE_R { type Target = crate::FieldReader<u8, u8>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `NWE_PULSE` writer - NWE Pulse Length"] pub struct NWE_PULSE_W<'a> { w: &'a mut W, } impl<'a> NWE_PULSE_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !0x7f) | (value as u32 & 0x7f); self.w } } #[doc = "Field `NCS_WR_PULSE` reader - NCS Pulse Length in WRITE Access"] pub struct NCS_WR_PULSE_R(crate::FieldReader<u8, u8>); impl NCS_WR_PULSE_R { pub(crate) fn new(bits: u8) -> Self { NCS_WR_PULSE_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for NCS_WR_PULSE_R { type Target = crate::FieldReader<u8, u8>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `NCS_WR_PULSE` writer - NCS Pulse Length in WRITE Access"] pub struct NCS_WR_PULSE_W<'a> { w: &'a mut W, } impl<'a> NCS_WR_PULSE_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0x7f << 8)) | ((value as u32 & 0x7f) << 8); self.w } } #[doc = "Field `NRD_PULSE` reader - NRD Pulse Length"] pub struct NRD_PULSE_R(crate::FieldReader<u8, u8>); impl NRD_PULSE_R { pub(crate) fn new(bits: u8) -> Self { NRD_PULSE_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for NRD_PULSE_R { type Target = crate::FieldReader<u8, u8>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `NRD_PULSE` writer - NRD Pulse Length"] pub struct NRD_PULSE_W<'a> { w: &'a mut W, } impl<'a> NRD_PULSE_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0x7f << 16)) | ((value as u32 & 0x7f) << 16); self.w } } #[doc = "Field `NCS_RD_PULSE` reader - NCS Pulse Length in READ Access"] pub struct NCS_RD_PULSE_R(crate::FieldReader<u8, u8>); impl NCS_RD_PULSE_R { pub(crate) fn new(bits: u8) -> Self { NCS_RD_PULSE_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for NCS_RD_PULSE_R { type Target = crate::FieldReader<u8, u8>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `NCS_RD_PULSE` writer - NCS Pulse Length in READ Access"] pub struct NCS_RD_PULSE_W<'a> { w: &'a mut W, } impl<'a> NCS_RD_PULSE_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0x7f << 24)) | ((value as 
u32 & 0x7f) << 24); self.w } } impl R { #[doc = "Bits 0:6 - NWE Pulse Length"] #[inline(always)] pub fn nwe_pulse(&self) -> NWE_PULSE_R { NWE_PULSE_R::new((self.bits & 0x7f) as u8) } #[doc = "Bits 8:14 - NCS Pulse Length in WRITE Access"] #[inline(always)] pub fn ncs_wr_pulse(&self) -> NCS_WR_PULSE_R { NCS_WR_PULSE_R::new(((self.bits >> 8) & 0x7f) as u8) } #[doc = "Bits 16:22 - NRD Pulse Length"] #[inline(always)] pub fn nrd_pulse(&self) -> NRD_PULSE_R { NRD_PULSE_R::new(((self.bits >> 16) & 0x7f) as u8) } #[doc = "Bits 24:30 - NCS Pulse Length in READ Access"] #[inline(always)] pub fn ncs_rd_pulse(&self) -> NCS_RD_PULSE_R { NCS_RD_PULSE_R::new(((self.bits >> 24) & 0x7f) as u8) } } impl W { #[doc = "Bits 0:6 - NWE Pulse Length"] #[inline(always)] pub fn nwe_pulse(&mut self) -> NWE_PULSE_W { NWE_PULSE_W { w: self } } #[doc = "Bits 8:14 - NCS Pulse Length in WRITE Access"] #[inline(always)] pub fn ncs_wr_pulse(&mut self) -> NCS_WR_PULSE_W { NCS_WR_PULSE_W { w: self } } #[doc = "Bits 16:22 - NRD Pulse Length"] #[inline(always)] pub fn nrd_pulse(&mut self) -> NRD_PULSE_W { NRD_PULSE_W { w: self } } #[doc = "Bits 24:30 - NCS Pulse Length in READ Access"] #[inline(always)] pub fn ncs_rd_pulse(&mut self) -> NCS_RD_PULSE_W { NCS_RD_PULSE_W { w: self } } #[doc = "Writes raw bits to the register."] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.0.bits(bits); self } } #[doc = "SMC Pulse Register (CS_number = 3)\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [pulse3](index.html) module"] pub struct PULSE3_SPEC; impl crate::RegisterSpec for PULSE3_SPEC { type Ux = u32; } #[doc = "`read()` method returns [pulse3::R](R) reader structure"] impl crate::Readable for PULSE3_SPEC { type Reader = R; } #[doc = "`write(|w| ..)` method takes [pulse3::W](W) writer structure"] impl crate::Writable for PULSE3_SPEC { type Writer = W; } #[doc = "`reset()` method sets PULSE3 to value 0x0101_0101"] impl crate::Resettable for PULSE3_SPEC { #[inline(always)] fn reset_value() -> Self::Ux { 0x0101_0101 } }
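A hedged usage sketch for PULSE3. The peripheral handle `smc` is an assumption; the field writers and their `unsafe` raw-bits API come from the generated code above:

// The reset value 0x0101_0101 gives each 7-bit pulse field the value 1;
// program typical pulse widths instead. `bits` is unsafe (raw field bits).
smc.pulse3.write(|w| unsafe {
    w.nwe_pulse().bits(6).ncs_wr_pulse().bits(8).nrd_pulse().bits(6).ncs_rd_pulse().bits(8)
});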
31.408654
421
0.599571
646dc9c762c70c46e252bc7128df90c98c0b0cf0
361
use std;
use std::io::Read;

pub fn in_docker() -> bool {
    match do_in_docker() {
        Ok(b) => b,
        Err(_) => false,
    }
}

fn do_in_docker() -> std::io::Result<bool> {
    let mut f = std::fs::File::open("/proc/1/cgroup")?;
    let mut contents = String::new();
    f.read_to_string(&mut contents)?;
    Ok(contents.find("docker").is_some())
}
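A short, illustrative caller for the helper above (the `main` wrapper is not part of the original file):

fn main() {
    // `in_docker` swallows I/O errors and reports `false` on the host.
    if in_docker() {
        println!("running inside a Docker container");
    } else {
        println!("running on the host");
    }
}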
20.055556
55
0.559557
cccae2e68fd60401ebb59897e4d57aec63f8f715
4,666
use cargo::core::resolver::ResolveError;
use cargo::core::{compiler::CompileMode, Workspace};
use cargo::ops::{self, CompileOptions};
use cargo::util::{config::Config, errors::ManifestError};

use crate::support::project;

/// Tests inclusion of a `ManifestError` pointing to a member manifest
/// when that manifest fails to deserialize.
#[test]
fn toml_deserialize_manifest_error() {
    let p = project()
        .file(
            "Cargo.toml",
            r#"
            [project]
            name = "foo"
            version = "0.1.0"
            authors = []

            [dependencies]
            bar = { path = "bar" }

            [workspace]
            "#,
        )
        .file("src/main.rs", "fn main() {}")
        // The member manifest is deliberately malformed: `==` is not valid TOML.
        .file(
            "bar/Cargo.toml",
            r#"
            [project]
            name = "bar"
            version = "0.1.0"
            authors = []

            [dependencies]
            foobar == "0.55"
            "#,
        )
        .file("bar/src/main.rs", "fn main() {}")
        .build();

    let root_manifest_path = p.root().join("Cargo.toml");
    let member_manifest_path = p.root().join("bar").join("Cargo.toml");

    let error = Workspace::new(&root_manifest_path, &Config::default().unwrap()).unwrap_err();
    eprintln!("{:?}", error);

    let manifest_err: &ManifestError = error.downcast_ref().expect("Not a ManifestError");
    assert_eq!(manifest_err.manifest_path(), &root_manifest_path);

    let causes: Vec<_> = manifest_err.manifest_causes().collect();
    assert_eq!(causes.len(), 1, "{:?}", causes);
    assert_eq!(causes[0].manifest_path(), &member_manifest_path);
}

/// Tests inclusion of a `ManifestError` pointing to a member manifest
/// when that manifest has an invalid dependency path.
#[test]
fn member_manifest_path_io_error() {
    let p = project()
        .file(
            "Cargo.toml",
            r#"
            [project]
            name = "foo"
            version = "0.1.0"
            authors = []

            [dependencies]
            bar = { path = "bar" }

            [workspace]
            "#,
        )
        .file("src/main.rs", "fn main() {}")
        .file(
            "bar/Cargo.toml",
            r#"
            [project]
            name = "bar"
            version = "0.1.0"
            authors = []

            [dependencies]
            foobar = { path = "nosuch" }
            "#,
        )
        .file("bar/src/main.rs", "fn main() {}")
        .build();

    let root_manifest_path = p.root().join("Cargo.toml");
    let member_manifest_path = p.root().join("bar").join("Cargo.toml");
    let missing_manifest_path = p.root().join("bar").join("nosuch").join("Cargo.toml");

    let error = Workspace::new(&root_manifest_path, &Config::default().unwrap()).unwrap_err();
    eprintln!("{:?}", error);

    let manifest_err: &ManifestError = error.downcast_ref().expect("Not a ManifestError");
    assert_eq!(manifest_err.manifest_path(), &root_manifest_path);

    let causes: Vec<_> = manifest_err.manifest_causes().collect();
    assert_eq!(causes.len(), 2, "{:?}", causes);
    assert_eq!(causes[0].manifest_path(), &member_manifest_path);
    assert_eq!(causes[1].manifest_path(), &missing_manifest_path);
}

/// Tests that dependency version errors indicate which package failed,
/// via a `ResolveError`.
#[test]
fn member_manifest_version_error() {
    let p = project()
        .file(
            "Cargo.toml",
            r#"
            [project]
            name = "foo"
            version = "0.1.0"
            authors = []

            [dependencies]
            bar = { path = "bar" }

            [workspace]
            "#,
        )
        .file("src/main.rs", "fn main() {}")
        .file(
            "bar/Cargo.toml",
            r#"
            [project]
            name = "bar"
            version = "0.1.0"
            authors = []

            [dependencies]
            i-dont-exist = "0.55"
            "#,
        )
        .file("bar/src/main.rs", "fn main() {}")
        .build();

    let config = Config::default().unwrap();
    let ws = Workspace::new(&p.root().join("Cargo.toml"), &config).unwrap();
    let compile_options = CompileOptions::new(&config, CompileMode::Build).unwrap();
    let member_bar = ws.members().find(|m| &*m.name() == "bar").unwrap();

    let error = ops::compile(&ws, &compile_options).map(|_| ()).unwrap_err();
    eprintln!("{:?}", error);

    let resolve_err: &ResolveError = error.downcast_ref().expect("Not a ResolveError");
    let package_path = resolve_err.package_path();
    assert_eq!(package_path.len(), 1, "package_path: {:?}", package_path);
    assert_eq!(package_path[0], member_bar.package_id());
}
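Grounded only in the accessors these tests already exercise (`manifest_path`, `manifest_causes`), a small reporting helper one could write on top of this API; the function itself is illustrative, not part of the test suite:

// Walks a ManifestError chain and prints each failing manifest in order.
fn report_manifest_chain(err: &ManifestError) {
    eprintln!("failed manifest: {}", err.manifest_path().display());
    for cause in err.manifest_causes() {
        eprintln!("  caused by manifest: {}", cause.manifest_path().display());
    }
}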
30.103226
94
0.536862
61565c84fe2bb1af35b802d2f621f747f0cd727e
46,361
//! This modules takes care of rendering various definitions as completion items. use hir::{Docs, HasAttrs, HasSource, HirDisplay, ModPath, ScopeDef, StructKind, Type}; use ra_syntax::ast::NameOwner; use stdx::SepBy; use test_utils::mark; use crate::{ completion::{ completion_item::Builder, CompletionContext, CompletionItem, CompletionItemKind, CompletionKind, Completions, }, display::{const_label, macro_label, type_label, FunctionSignature}, CompletionScore, RootDatabase, }; impl Completions { pub(crate) fn add_field(&mut self, ctx: &CompletionContext, field: hir::Field, ty: &Type) { let is_deprecated = is_deprecated(field, ctx.db); let name = field.name(ctx.db); let mut completion_item = CompletionItem::new(CompletionKind::Reference, ctx.source_range(), name.to_string()) .kind(CompletionItemKind::Field) .detail(ty.display(ctx.db).to_string()) .set_documentation(field.docs(ctx.db)) .set_deprecated(is_deprecated); if let Some(score) = compute_score(ctx, &ty, &name.to_string()) { completion_item = completion_item.set_score(score); } completion_item.add_to(self); } pub(crate) fn add_tuple_field(&mut self, ctx: &CompletionContext, field: usize, ty: &Type) { CompletionItem::new(CompletionKind::Reference, ctx.source_range(), field.to_string()) .kind(CompletionItemKind::Field) .detail(ty.display(ctx.db).to_string()) .add_to(self); } pub(crate) fn add_resolution( &mut self, ctx: &CompletionContext, local_name: String, resolution: &ScopeDef, ) { use hir::ModuleDef::*; let completion_kind = match resolution { ScopeDef::ModuleDef(BuiltinType(..)) => CompletionKind::BuiltinType, _ => CompletionKind::Reference, }; let kind = match resolution { ScopeDef::ModuleDef(Module(..)) => CompletionItemKind::Module, ScopeDef::ModuleDef(Function(func)) => { return self.add_function(ctx, *func, Some(local_name)); } ScopeDef::ModuleDef(Adt(hir::Adt::Struct(_))) => CompletionItemKind::Struct, // FIXME: add CompletionItemKind::Union ScopeDef::ModuleDef(Adt(hir::Adt::Union(_))) => CompletionItemKind::Struct, ScopeDef::ModuleDef(Adt(hir::Adt::Enum(_))) => CompletionItemKind::Enum, ScopeDef::ModuleDef(EnumVariant(var)) => { return self.add_enum_variant(ctx, *var, Some(local_name)); } ScopeDef::ModuleDef(Const(..)) => CompletionItemKind::Const, ScopeDef::ModuleDef(Static(..)) => CompletionItemKind::Static, ScopeDef::ModuleDef(Trait(..)) => CompletionItemKind::Trait, ScopeDef::ModuleDef(TypeAlias(..)) => CompletionItemKind::TypeAlias, ScopeDef::ModuleDef(BuiltinType(..)) => CompletionItemKind::BuiltinType, ScopeDef::GenericParam(..) => CompletionItemKind::TypeParam, ScopeDef::Local(..) => CompletionItemKind::Binding, // (does this need its own kind?) ScopeDef::AdtSelfType(..) | ScopeDef::ImplSelfType(..) 
=> CompletionItemKind::TypeParam, ScopeDef::MacroDef(mac) => { return self.add_macro(ctx, Some(local_name), *mac); } ScopeDef::Unknown => { return self.add(CompletionItem::new( CompletionKind::Reference, ctx.source_range(), local_name, )); } }; let docs = match resolution { ScopeDef::ModuleDef(Module(it)) => it.docs(ctx.db), ScopeDef::ModuleDef(Adt(it)) => it.docs(ctx.db), ScopeDef::ModuleDef(EnumVariant(it)) => it.docs(ctx.db), ScopeDef::ModuleDef(Const(it)) => it.docs(ctx.db), ScopeDef::ModuleDef(Static(it)) => it.docs(ctx.db), ScopeDef::ModuleDef(Trait(it)) => it.docs(ctx.db), ScopeDef::ModuleDef(TypeAlias(it)) => it.docs(ctx.db), _ => None, }; let mut completion_item = CompletionItem::new(completion_kind, ctx.source_range(), local_name.clone()); if let ScopeDef::Local(local) = resolution { let ty = local.ty(ctx.db); if !ty.is_unknown() { completion_item = completion_item.detail(ty.display(ctx.db).to_string()); } }; if let ScopeDef::Local(local) = resolution { if let Some(score) = compute_score(ctx, &local.ty(ctx.db), &local_name) { completion_item = completion_item.set_score(score); } } // Add `<>` for generic types if ctx.is_path_type && !ctx.has_type_args && ctx.config.add_call_parenthesis { if let Some(cap) = ctx.config.snippet_cap { let has_non_default_type_params = match resolution { ScopeDef::ModuleDef(Adt(it)) => it.has_non_default_type_params(ctx.db), ScopeDef::ModuleDef(TypeAlias(it)) => it.has_non_default_type_params(ctx.db), _ => false, }; if has_non_default_type_params { mark::hit!(inserts_angle_brackets_for_generics); completion_item = completion_item .lookup_by(local_name.clone()) .label(format!("{}<…>", local_name)) .insert_snippet(cap, format!("{}<$0>", local_name)); } } } completion_item.kind(kind).set_documentation(docs).add_to(self) } pub(crate) fn add_macro( &mut self, ctx: &CompletionContext, name: Option<String>, macro_: hir::MacroDef, ) { // FIXME: Currently proc-macro do not have ast-node, // such that it does not have source if macro_.is_proc_macro() { return; } let name = match name { Some(it) => it, None => return, }; let ast_node = macro_.source(ctx.db).value; let detail = macro_label(&ast_node); let docs = macro_.docs(ctx.db); let mut builder = CompletionItem::new( CompletionKind::Reference, ctx.source_range(), &format!("{}!", name), ) .kind(CompletionItemKind::Macro) .set_documentation(docs.clone()) .set_deprecated(is_deprecated(macro_, ctx.db)) .detail(detail); let needs_bang = ctx.use_item_syntax.is_none() && !ctx.is_macro_call; builder = match ctx.config.snippet_cap { Some(cap) if needs_bang => { let docs = docs.as_ref().map_or("", |s| s.as_str()); let (bra, ket) = guess_macro_braces(&name, docs); builder .insert_snippet(cap, format!("{}!{}$0{}", name, bra, ket)) .label(format!("{}!{}…{}", name, bra, ket)) } None if needs_bang => builder.insert_text(format!("{}!", name)), _ => { mark::hit!(dont_insert_macro_call_parens_unncessary); builder.insert_text(name) } }; self.add(builder); } pub(crate) fn add_function( &mut self, ctx: &CompletionContext, func: hir::Function, local_name: Option<String>, ) { let has_self_param = func.has_self_param(ctx.db); let name = local_name.unwrap_or_else(|| func.name(ctx.db).to_string()); let ast_node = func.source(ctx.db).value; let function_signature = FunctionSignature::from(&ast_node); let mut builder = CompletionItem::new(CompletionKind::Reference, ctx.source_range(), name.clone()) .kind(if has_self_param { CompletionItemKind::Method } else { CompletionItemKind::Function }) .set_documentation(func.docs(ctx.db)) 
.set_deprecated(is_deprecated(func, ctx.db)) .detail(function_signature.to_string()); let params = function_signature .parameter_names .iter() .skip(if function_signature.has_self_param { 1 } else { 0 }) .map(|name| name.trim_start_matches('_').into()) .collect(); builder = builder.add_call_parens(ctx, name, Params::Named(params)); self.add(builder) } pub(crate) fn add_const(&mut self, ctx: &CompletionContext, constant: hir::Const) { let ast_node = constant.source(ctx.db).value; let name = match ast_node.name() { Some(name) => name, _ => return, }; let detail = const_label(&ast_node); CompletionItem::new(CompletionKind::Reference, ctx.source_range(), name.text().to_string()) .kind(CompletionItemKind::Const) .set_documentation(constant.docs(ctx.db)) .set_deprecated(is_deprecated(constant, ctx.db)) .detail(detail) .add_to(self); } pub(crate) fn add_type_alias(&mut self, ctx: &CompletionContext, type_alias: hir::TypeAlias) { let type_def = type_alias.source(ctx.db).value; let name = match type_def.name() { Some(name) => name, _ => return, }; let detail = type_label(&type_def); CompletionItem::new(CompletionKind::Reference, ctx.source_range(), name.text().to_string()) .kind(CompletionItemKind::TypeAlias) .set_documentation(type_alias.docs(ctx.db)) .set_deprecated(is_deprecated(type_alias, ctx.db)) .detail(detail) .add_to(self); } pub(crate) fn add_qualified_enum_variant( &mut self, ctx: &CompletionContext, variant: hir::EnumVariant, path: ModPath, ) { self.add_enum_variant_impl(ctx, variant, None, Some(path)) } pub(crate) fn add_enum_variant( &mut self, ctx: &CompletionContext, variant: hir::EnumVariant, local_name: Option<String>, ) { self.add_enum_variant_impl(ctx, variant, local_name, None) } fn add_enum_variant_impl( &mut self, ctx: &CompletionContext, variant: hir::EnumVariant, local_name: Option<String>, path: Option<ModPath>, ) { let is_deprecated = is_deprecated(variant, ctx.db); let name = local_name.unwrap_or_else(|| variant.name(ctx.db).to_string()); let qualified_name = match &path { Some(it) => it.to_string(), None => name.to_string(), }; let detail_types = variant .fields(ctx.db) .into_iter() .map(|field| (field.name(ctx.db), field.signature_ty(ctx.db))); let variant_kind = variant.kind(ctx.db); let detail = match variant_kind { StructKind::Tuple | StructKind::Unit => detail_types .map(|(_, t)| t.display(ctx.db).to_string()) .sep_by(", ") .surround_with("(", ")") .to_string(), StructKind::Record => detail_types .map(|(n, t)| format!("{}: {}", n, t.display(ctx.db).to_string())) .sep_by(", ") .surround_with("{ ", " }") .to_string(), }; let mut res = CompletionItem::new( CompletionKind::Reference, ctx.source_range(), qualified_name.clone(), ) .kind(CompletionItemKind::EnumVariant) .set_documentation(variant.docs(ctx.db)) .set_deprecated(is_deprecated) .detail(detail); if path.is_some() { res = res.lookup_by(name); } if variant_kind == StructKind::Tuple { let params = Params::Anonymous(variant.fields(ctx.db).len()); res = res.add_call_parens(ctx, qualified_name, params) } res.add_to(self); } } pub(crate) fn compute_score( ctx: &CompletionContext, ty: &Type, name: &str, ) -> Option<CompletionScore> { // FIXME: this should not fall back to string equality. 
let ty = &ty.display(ctx.db).to_string(); let (active_name, active_type) = if let Some(record_field) = &ctx.record_field_syntax { mark::hit!(test_struct_field_completion_in_record_lit); let (struct_field, _local) = ctx.sema.resolve_record_field(record_field)?; ( struct_field.name(ctx.db).to_string(), struct_field.signature_ty(ctx.db).display(ctx.db).to_string(), ) } else if let Some(active_parameter) = &ctx.active_parameter { mark::hit!(test_struct_field_completion_in_func_call); (active_parameter.name.clone(), active_parameter.ty.clone()) } else { return None; }; // Compute score // For the same type if &active_type != ty { return None; } let mut res = CompletionScore::TypeMatch; // If same type + same name then go top position if active_name == name { res = CompletionScore::TypeAndNameMatch } Some(res) } enum Params { Named(Vec<String>), Anonymous(usize), } impl Params { fn len(&self) -> usize { match self { Params::Named(xs) => xs.len(), Params::Anonymous(len) => *len, } } fn is_empty(&self) -> bool { self.len() == 0 } } impl Builder { fn add_call_parens(mut self, ctx: &CompletionContext, name: String, params: Params) -> Builder { if !ctx.config.add_call_parenthesis { return self; } if ctx.use_item_syntax.is_some() || ctx.is_call { return self; } // Don't add parentheses if the expected type is some function reference. if let Some(ty) = &ctx.expected_type { if ty.is_fn() { return self; } } let cap = match ctx.config.snippet_cap { Some(it) => it, None => return self, }; // If not an import, add parenthesis automatically. mark::hit!(inserts_parens_for_function_calls); let (snippet, label) = if params.is_empty() { (format!("{}()$0", name), format!("{}()", name)) } else { self = self.trigger_call_info(); let snippet = match (ctx.config.add_call_argument_snippets, params) { (true, Params::Named(params)) => { let function_params_snippet = params .iter() .enumerate() .map(|(index, param_name)| format!("${{{}:{}}}", index + 1, param_name)) .sep_by(", "); format!("{}({})$0", name, function_params_snippet) } _ => format!("{}($0)", name), }; (snippet, format!("{}(…)", name)) }; self.lookup_by(name).label(label).insert_snippet(cap, snippet) } } fn is_deprecated(node: impl HasAttrs, db: &RootDatabase) -> bool { node.attrs(db).by_key("deprecated").exists() } fn guess_macro_braces(macro_name: &str, docs: &str) -> (&'static str, &'static str) { let mut votes = [0, 0, 0]; for (idx, s) in docs.match_indices(&macro_name) { let (before, after) = (&docs[..idx], &docs[idx + s.len()..]); // Ensure to match the full word if after.starts_with('!') && !before.ends_with(|c: char| c == '_' || c.is_ascii_alphanumeric()) { // It may have spaces before the braces like `foo! {}` match after[1..].chars().find(|&c| !c.is_whitespace()) { Some('{') => votes[0] += 1, Some('[') => votes[1] += 1, Some('(') => votes[2] += 1, _ => {} } } } // Insert a space before `{}`. // We prefer the last one when some votes equal. 
let (_vote, (bra, ket)) = votes .iter() .zip(&[(" {", "}"), ("[", "]"), ("(", ")")]) .max_by_key(|&(&vote, _)| vote) .unwrap(); (*bra, *ket) } #[cfg(test)] mod tests { use insta::assert_debug_snapshot; use test_utils::mark; use crate::completion::{ test_utils::{do_completion, do_completion_with_options}, CompletionConfig, CompletionItem, CompletionKind, }; fn do_reference_completion(ra_fixture: &str) -> Vec<CompletionItem> { do_completion(ra_fixture, CompletionKind::Reference) } fn do_reference_completion_with_options( ra_fixture: &str, options: CompletionConfig, ) -> Vec<CompletionItem> { do_completion_with_options(ra_fixture, CompletionKind::Reference, &options) } #[test] fn enum_detail_includes_names_for_record() { assert_debug_snapshot!( do_reference_completion( r#" enum Foo { Foo {x: i32, y: i32} } fn main() { Foo::Fo<|> } "#, ), @r###" [ CompletionItem { label: "Foo", source_range: 121..123, delete: 121..123, insert: "Foo", kind: EnumVariant, detail: "{ x: i32, y: i32 }", }, ]"### ); } #[test] fn enum_detail_doesnt_include_names_for_tuple() { assert_debug_snapshot!( do_reference_completion( r#" enum Foo { Foo (i32, i32) } fn main() { Foo::Fo<|> } "#, ), @r###" [ CompletionItem { label: "Foo(…)", source_range: 115..117, delete: 115..117, insert: "Foo($0)", kind: EnumVariant, lookup: "Foo", detail: "(i32, i32)", trigger_call_info: true, }, ]"### ); } #[test] fn enum_detail_just_parentheses_for_unit() { assert_debug_snapshot!( do_reference_completion( r#" enum Foo { Foo } fn main() { Foo::Fo<|> } "#, ), @r###" [ CompletionItem { label: "Foo", source_range: 104..106, delete: 104..106, insert: "Foo", kind: EnumVariant, detail: "()", }, ]"### ); } #[test] fn sets_deprecated_flag_in_completion_items() { assert_debug_snapshot!( do_reference_completion( r#" #[deprecated] fn something_deprecated() {} #[deprecated(since = "1.0.0")] fn something_else_deprecated() {} fn main() { som<|> } "#, ), @r###" [ CompletionItem { label: "main()", source_range: 203..206, delete: 203..206, insert: "main()$0", kind: Function, lookup: "main", detail: "fn main()", }, CompletionItem { label: "something_deprecated()", source_range: 203..206, delete: 203..206, insert: "something_deprecated()$0", kind: Function, lookup: "something_deprecated", detail: "fn something_deprecated()", deprecated: true, }, CompletionItem { label: "something_else_deprecated()", source_range: 203..206, delete: 203..206, insert: "something_else_deprecated()$0", kind: Function, lookup: "something_else_deprecated", detail: "fn something_else_deprecated()", deprecated: true, }, ] "### ); } #[test] fn inserts_parens_for_function_calls() { mark::check!(inserts_parens_for_function_calls); assert_debug_snapshot!( do_reference_completion( r" fn no_args() {} fn main() { no_<|> } " ), @r###" [ CompletionItem { label: "main()", source_range: 61..64, delete: 61..64, insert: "main()$0", kind: Function, lookup: "main", detail: "fn main()", }, CompletionItem { label: "no_args()", source_range: 61..64, delete: 61..64, insert: "no_args()$0", kind: Function, lookup: "no_args", detail: "fn no_args()", }, ] "### ); assert_debug_snapshot!( do_reference_completion( r" fn with_args(x: i32, y: String) {} fn main() { with_<|> } " ), @r###" [ CompletionItem { label: "main()", source_range: 80..85, delete: 80..85, insert: "main()$0", kind: Function, lookup: "main", detail: "fn main()", }, CompletionItem { label: "with_args(…)", source_range: 80..85, delete: 80..85, insert: "with_args(${1:x}, ${2:y})$0", kind: Function, lookup: "with_args", detail: "fn with_args(x: 
i32, y: String)", trigger_call_info: true, }, ] "### ); assert_debug_snapshot!( do_reference_completion( r" fn with_ignored_args(_foo: i32, ___bar: bool, ho_ge_: String) {} fn main() { with_<|> } " ), @r###" [ CompletionItem { label: "main()", source_range: 110..115, delete: 110..115, insert: "main()$0", kind: Function, lookup: "main", detail: "fn main()", }, CompletionItem { label: "with_ignored_args(…)", source_range: 110..115, delete: 110..115, insert: "with_ignored_args(${1:foo}, ${2:bar}, ${3:ho_ge_})$0", kind: Function, lookup: "with_ignored_args", detail: "fn with_ignored_args(_foo: i32, ___bar: bool, ho_ge_: String)", trigger_call_info: true, }, ] "### ); assert_debug_snapshot!( do_reference_completion( r" struct S {} impl S { fn foo(&self) {} } fn bar(s: &S) { s.f<|> } " ), @r###" [ CompletionItem { label: "foo()", source_range: 163..164, delete: 163..164, insert: "foo()$0", kind: Method, lookup: "foo", detail: "fn foo(&self)", }, ] "### ); assert_debug_snapshot!( do_reference_completion( r" struct S {} impl S { fn foo_ignored_args(&self, _a: bool, b: i32) {} } fn bar(s: &S) { s.f<|> } " ), @r###" [ CompletionItem { label: "foo_ignored_args(…)", source_range: 194..195, delete: 194..195, insert: "foo_ignored_args(${1:a}, ${2:b})$0", kind: Method, lookup: "foo_ignored_args", detail: "fn foo_ignored_args(&self, _a: bool, b: i32)", trigger_call_info: true, }, ] "### ); } #[test] fn inserts_parens_for_tuple_enums() { assert_debug_snapshot!( do_reference_completion( r" enum Option<T> { Some(T), None } use Option::*; fn main() -> Option<i32> { Som<|> } " ), @r###" [ CompletionItem { label: "None", source_range: 144..147, delete: 144..147, insert: "None", kind: EnumVariant, detail: "()", }, CompletionItem { label: "Option", source_range: 144..147, delete: 144..147, insert: "Option", kind: Enum, }, CompletionItem { label: "Some(…)", source_range: 144..147, delete: 144..147, insert: "Some($0)", kind: EnumVariant, lookup: "Some", detail: "(T)", trigger_call_info: true, }, CompletionItem { label: "main()", source_range: 144..147, delete: 144..147, insert: "main()$0", kind: Function, lookup: "main", detail: "fn main() -> Option<i32>", }, ] "### ); assert_debug_snapshot!( do_reference_completion( r" enum Option<T> { Some(T), None } use Option::*; fn main(value: Option<i32>) { match value { Som<|> } } " ), @r###" [ CompletionItem { label: "None", source_range: 185..188, delete: 185..188, insert: "None", kind: EnumVariant, detail: "()", }, CompletionItem { label: "Option", source_range: 185..188, delete: 185..188, insert: "Option", kind: Enum, }, CompletionItem { label: "Some(…)", source_range: 185..188, delete: 185..188, insert: "Some($0)", kind: EnumVariant, lookup: "Some", detail: "(T)", trigger_call_info: true, }, ] "### ); } #[test] fn no_call_parens_if_fn_ptr_needed() { assert_debug_snapshot!( do_reference_completion( r" fn somefn(with: u8, a: u8, lot: u8, of: u8, args: u8) {} struct ManualVtable { method: fn(u8, u8, u8, u8, u8), } fn main() -> ManualVtable { ManualVtable { method: some<|> } } " ), @r###" [ CompletionItem { label: "ManualVtable", source_range: 295..299, delete: 295..299, insert: "ManualVtable", kind: Struct, }, CompletionItem { label: "main", source_range: 295..299, delete: 295..299, insert: "main", kind: Function, detail: "fn main() -> ManualVtable", }, CompletionItem { label: "somefn", source_range: 295..299, delete: 295..299, insert: "somefn", kind: Function, detail: "fn somefn(with: u8, a: u8, lot: u8, of: u8, args: u8)", }, ] "### ); } #[test] fn 
arg_snippets_for_method_call() { assert_debug_snapshot!( do_reference_completion( r" struct S {} impl S { fn foo(&self, x: i32) {} } fn bar(s: &S) { s.f<|> } " ), @r###" [ CompletionItem { label: "foo(…)", source_range: 171..172, delete: 171..172, insert: "foo(${1:x})$0", kind: Method, lookup: "foo", detail: "fn foo(&self, x: i32)", trigger_call_info: true, }, ] "### ) } #[test] fn no_arg_snippets_for_method_call() { assert_debug_snapshot!( do_reference_completion_with_options( r" struct S {} impl S { fn foo(&self, x: i32) {} } fn bar(s: &S) { s.f<|> } ", CompletionConfig { add_call_argument_snippets: false, .. Default::default() } ), @r###" [ CompletionItem { label: "foo(…)", source_range: 171..172, delete: 171..172, insert: "foo($0)", kind: Method, lookup: "foo", detail: "fn foo(&self, x: i32)", trigger_call_info: true, }, ] "### ) } #[test] fn dont_render_function_parens_in_use_item() { assert_debug_snapshot!( do_reference_completion( " //- /lib.rs mod m { pub fn foo() {} } use crate::m::f<|>; " ), @r###" [ CompletionItem { label: "foo", source_range: 40..41, delete: 40..41, insert: "foo", kind: Function, detail: "pub fn foo()", }, ] "### ); } #[test] fn dont_render_function_parens_if_already_call() { assert_debug_snapshot!( do_reference_completion( " //- /lib.rs fn frobnicate() {} fn main() { frob<|>(); } " ), @r###" [ CompletionItem { label: "frobnicate", source_range: 35..39, delete: 35..39, insert: "frobnicate", kind: Function, detail: "fn frobnicate()", }, CompletionItem { label: "main", source_range: 35..39, delete: 35..39, insert: "main", kind: Function, detail: "fn main()", }, ] "### ); assert_debug_snapshot!( do_reference_completion( " //- /lib.rs struct Foo {} impl Foo { fn new() -> Foo {} } fn main() { Foo::ne<|>(); } " ), @r###" [ CompletionItem { label: "new", source_range: 67..69, delete: 67..69, insert: "new", kind: Function, detail: "fn new() -> Foo", }, ] "### ); } #[test] fn inserts_angle_brackets_for_generics() { mark::check!(inserts_angle_brackets_for_generics); assert_debug_snapshot!( do_reference_completion( r" struct Vec<T> {} fn foo(xs: Ve<|>) " ), @r###" [ CompletionItem { label: "Vec<…>", source_range: 61..63, delete: 61..63, insert: "Vec<$0>", kind: Struct, lookup: "Vec", }, CompletionItem { label: "foo(…)", source_range: 61..63, delete: 61..63, insert: "foo(${1:xs})$0", kind: Function, lookup: "foo", detail: "fn foo(xs: Ve)", trigger_call_info: true, }, ] "### ); assert_debug_snapshot!( do_reference_completion( r" type Vec<T> = (T,); fn foo(xs: Ve<|>) " ), @r###" [ CompletionItem { label: "Vec<…>", source_range: 64..66, delete: 64..66, insert: "Vec<$0>", kind: TypeAlias, lookup: "Vec", }, CompletionItem { label: "foo(…)", source_range: 64..66, delete: 64..66, insert: "foo(${1:xs})$0", kind: Function, lookup: "foo", detail: "fn foo(xs: Ve)", trigger_call_info: true, }, ] "### ); assert_debug_snapshot!( do_reference_completion( r" struct Vec<T = i128> {} fn foo(xs: Ve<|>) " ), @r###" [ CompletionItem { label: "Vec", source_range: 68..70, delete: 68..70, insert: "Vec", kind: Struct, }, CompletionItem { label: "foo(…)", source_range: 68..70, delete: 68..70, insert: "foo(${1:xs})$0", kind: Function, lookup: "foo", detail: "fn foo(xs: Ve)", trigger_call_info: true, }, ] "### ); assert_debug_snapshot!( do_reference_completion( r" struct Vec<T> {} fn foo(xs: Ve<|><i128>) " ), @r###" [ CompletionItem { label: "Vec", source_range: 61..63, delete: 61..63, insert: "Vec", kind: Struct, }, CompletionItem { label: "foo(…)", source_range: 61..63, delete: 61..63, insert: 
"foo(${1:xs})$0", kind: Function, lookup: "foo", detail: "fn foo(xs: Ve<i128>)", trigger_call_info: true, }, ] "### ); } #[test] fn dont_insert_macro_call_parens_unncessary() { mark::check!(dont_insert_macro_call_parens_unncessary); assert_debug_snapshot!( do_reference_completion( r" //- /main.rs use foo::<|>; //- /foo/lib.rs #[macro_export] macro_rules frobnicate { () => () } " ), @r###" [ CompletionItem { label: "frobnicate!", source_range: 9..9, delete: 9..9, insert: "frobnicate", kind: Macro, detail: "#[macro_export]\nmacro_rules! frobnicate", }, ] "### ); assert_debug_snapshot!( do_reference_completion( r" //- /main.rs macro_rules frobnicate { () => () } fn main() { frob<|>!(); } " ), @r###" [ CompletionItem { label: "frobnicate!", source_range: 56..60, delete: 56..60, insert: "frobnicate", kind: Macro, detail: "macro_rules! frobnicate", }, CompletionItem { label: "main()", source_range: 56..60, delete: 56..60, insert: "main()$0", kind: Function, lookup: "main", detail: "fn main()", }, ] "### ); } #[test] fn test_struct_field_completion_in_func_call() { mark::check!(test_struct_field_completion_in_func_call); assert_debug_snapshot!( do_reference_completion( r" struct A { another_field: i64, the_field: u32, my_string: String } fn test(my_param: u32) -> u32 { my_param } fn foo(a: A) { test(a.<|>) } ", ), @r###" [ CompletionItem { label: "another_field", source_range: 201..201, delete: 201..201, insert: "another_field", kind: Field, detail: "i64", }, CompletionItem { label: "my_string", source_range: 201..201, delete: 201..201, insert: "my_string", kind: Field, detail: "{unknown}", }, CompletionItem { label: "the_field", source_range: 201..201, delete: 201..201, insert: "the_field", kind: Field, detail: "u32", score: TypeMatch, }, ] "### ); } #[test] fn test_struct_field_completion_in_func_call_with_type_and_name() { assert_debug_snapshot!( do_reference_completion( r" struct A { another_field: i64, another_good_type: u32, the_field: u32 } fn test(the_field: u32) -> u32 { the_field } fn foo(a: A) { test(a.<|>) } ", ), @r###" [ CompletionItem { label: "another_field", source_range: 208..208, delete: 208..208, insert: "another_field", kind: Field, detail: "i64", }, CompletionItem { label: "another_good_type", source_range: 208..208, delete: 208..208, insert: "another_good_type", kind: Field, detail: "u32", score: TypeMatch, }, CompletionItem { label: "the_field", source_range: 208..208, delete: 208..208, insert: "the_field", kind: Field, detail: "u32", score: TypeAndNameMatch, }, ] "### ); } #[test] fn test_struct_field_completion_in_record_lit() { mark::check!(test_struct_field_completion_in_record_lit); assert_debug_snapshot!( do_reference_completion( r" struct A { another_field: i64, another_good_type: u32, the_field: u32 } struct B { my_string: String, my_vec: Vec<u32>, the_field: u32 } fn foo(a: A) { let b = B { the_field: a.<|> }; } ", ), @r###" [ CompletionItem { label: "another_field", source_range: 270..270, delete: 270..270, insert: "another_field", kind: Field, detail: "i64", }, CompletionItem { label: "another_good_type", source_range: 270..270, delete: 270..270, insert: "another_good_type", kind: Field, detail: "u32", score: TypeMatch, }, CompletionItem { label: "the_field", source_range: 270..270, delete: 270..270, insert: "the_field", kind: Field, detail: "u32", score: TypeAndNameMatch, }, ] "### ); } #[test] fn test_struct_field_completion_in_record_lit_and_fn_call() { assert_debug_snapshot!( do_reference_completion( r" struct A { another_field: i64, another_good_type: u32, 
the_field: u32 } struct B { my_string: String, my_vec: Vec<u32>, the_field: u32 } fn test(the_field: i64) -> i64 { the_field } fn foo(a: A) { let b = B { the_field: test(a.<|>) }; } ", ), @r###" [ CompletionItem { label: "another_field", source_range: 336..336, delete: 336..336, insert: "another_field", kind: Field, detail: "i64", score: TypeMatch, }, CompletionItem { label: "another_good_type", source_range: 336..336, delete: 336..336, insert: "another_good_type", kind: Field, detail: "u32", }, CompletionItem { label: "the_field", source_range: 336..336, delete: 336..336, insert: "the_field", kind: Field, detail: "u32", }, ] "### ); } #[test] fn test_struct_field_completion_in_fn_call_and_record_lit() { assert_debug_snapshot!( do_reference_completion( r" struct A { another_field: i64, another_good_type: u32, the_field: u32 } struct B { my_string: String, my_vec: Vec<u32>, the_field: u32 } fn test(the_field: i64) -> i64 { the_field } fn foo(a: A) { test(B { the_field: a.<|> }); } ", ), @r###" [ CompletionItem { label: "another_field", source_range: 328..328, delete: 328..328, insert: "another_field", kind: Field, detail: "i64", }, CompletionItem { label: "another_good_type", source_range: 328..328, delete: 328..328, insert: "another_good_type", kind: Field, detail: "u32", score: TypeMatch, }, CompletionItem { label: "the_field", source_range: 328..328, delete: 328..328, insert: "the_field", kind: Field, detail: "u32", score: TypeAndNameMatch, }, ] "### ); } #[test] fn prioritize_exact_ref_match() { assert_debug_snapshot!( do_reference_completion( r" struct WorldSnapshot { _f: () }; fn go(world: &WorldSnapshot) { go(w<|>) } ", ), @r###" [ CompletionItem { label: "WorldSnapshot", source_range: 132..133, delete: 132..133, insert: "WorldSnapshot", kind: Struct, }, CompletionItem { label: "go(…)", source_range: 132..133, delete: 132..133, insert: "go(${1:world})$0", kind: Function, lookup: "go", detail: "fn go(world: &WorldSnapshot)", trigger_call_info: true, }, CompletionItem { label: "world", source_range: 132..133, delete: 132..133, insert: "world", kind: Binding, detail: "&WorldSnapshot", score: TypeAndNameMatch, }, ] "### ); } }
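The `compute_score` logic above reduces to a two-tier rule: a type match earns `TypeMatch`, and a type-plus-name match earns `TypeAndNameMatch`. A standalone restatement of just that rule, using the module's own `CompletionScore` variants (the free function is illustrative, not the crate's API):

fn score(active_name: &str, active_ty: &str, name: &str, ty: &str) -> Option<CompletionScore> {
    if active_ty != ty {
        return None; // no bonus unless the types line up
    }
    if active_name == name {
        Some(CompletionScore::TypeAndNameMatch) // exact name + type ranks highest
    } else {
        Some(CompletionScore::TypeMatch)
    }
}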
30.560976
100
0.430319
ef1fa2bb74b5ffede0b2cf53136cf21ba965e062
1,039
use crate::command_error::CommandError;
use datamodel::diagnostics::Diagnostics;
use introspection_connector::{ConnectorError, ErrorKind};
use thiserror::Error;

#[derive(Debug, Error)]
pub enum Error {
    #[error("Error in connector: {0}")]
    ConnectorError(ConnectorError),

    #[error("Failure during an introspection command: {0}")]
    CommandError(CommandError),

    #[error("Error in datamodel: {:?}", .0)]
    DatamodelError(Diagnostics),

    #[error("{}", _0)]
    InvalidDatabaseUrl(String),
}

impl From<ConnectorError> for Error {
    fn from(e: ConnectorError) -> Self {
        match e.kind {
            ErrorKind::InvalidDatabaseUrl(reason) => Self::InvalidDatabaseUrl(reason),
            _ => Error::ConnectorError(e),
        }
    }
}

impl From<CommandError> for Error {
    fn from(e: CommandError) -> Self {
        Error::CommandError(e)
    }
}

impl From<datamodel::diagnostics::Diagnostics> for Error {
    fn from(e: datamodel::diagnostics::Diagnostics) -> Self {
        Error::DatamodelError(e)
    }
}
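The point of the `From` impls is that `?` lifts the lower-level errors into `Error` automatically. A minimal sketch using only the types above (the function is illustrative):

// `?` converts `Diagnostics` into `Error::DatamodelError` via the
// `From<Diagnostics>` impl, with no explicit `map_err`.
fn check(res: Result<(), Diagnostics>) -> Result<(), Error> {
    res?;
    Ok(())
}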
25.341463
86
0.653513
1e80b08df1b5de4580d629c48a78b5d1a42da5a7
2,558
//! This crate contains the procedural macro implementation for the [tailcall] crate.
//! It is not designed to be used directly.
//! [tailcall]: https://crates.io/crates/tailcall
#![deny(
    missing_debug_implementations,
    missing_copy_implementations,
    trivial_casts,
    trivial_numeric_casts,
    unsafe_code,
    unstable_features,
    unused_import_braces,
    unused_qualifications
)]

extern crate proc_macro;

mod helpers;
mod transforms;

use proc_macro::TokenStream;
use quote::quote;
use syn::{parse_macro_input, ItemFn};

/// Transforms a [function definition] so that all recursive calls within the body are
/// guaranteed to use a single stack frame (via [tail recursion]).
///
/// [function definition]: https://docs.rs/syn/1.0.9/syn/struct.ItemFn.html
/// [tail recursion]: https://en.wikipedia.org/wiki/Tail_call
///
/// # Example
///
/// ```
/// use tailcall::tailcall;
///
/// fn factorial(input: u64) -> u64 {
///     #[tailcall]
///     fn factorial_inner(accumulator: u64, input: u64) -> u64 {
///         if input > 0 {
///             factorial_inner(accumulator * input, input - 1)
///         } else {
///             accumulator
///         }
///     }
///
///     factorial_inner(1, input)
/// }
/// ```
///
/// # Requirements
///
/// - All recursive calls must be in [tail form]:
///
/// ```compile_fail
/// use tailcall::tailcall;
///
/// #[tailcall]
/// fn factorial(input: u64) -> u64 {
///     if input > 0 {
///         input * factorial(input - 1)
///         //      ^^^^^^^^^ This is not allowed.
///     } else {
///         1
///     }
/// }
/// ```
///
/// - Methods (functions which bind `self` in the arguments list) are not supported:
///
/// ```compile_fail
/// trait Factorialable {
///     fn factorial(self) -> Self {
///         self.calc_factorial(1)
///     }
///
///     fn calc_factorial(self, accumulator: u64) -> u64;
/// }
///
/// impl Factorialable for u64 {
///     #[tailcall]
///     fn calc_factorial(self, accumulator: u64) -> u64 {
///         //           ^^^^ This is not allowed.
///         if self > 0 {
///             (self - 1).calc_factorial(self * accumulator)
///         } else {
///             accumulator
///         }
///     }
/// }
/// ```
///
/// [tail form]: https://en.wikipedia.org/wiki/Tail_call
#[proc_macro_attribute]
pub fn tailcall(_attr: TokenStream, tokens: TokenStream) -> TokenStream {
    let input = parse_macro_input!(tokens as ItemFn);
    let output = transforms::apply_fn_tailcall_transform(input);
    TokenStream::from(quote! { #output })
}
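The file does not show what the transform expands to. As a rough mental model, a tail-call transform rewrites the recursion into a loop; a hand-written sketch of that shape for the factorial example (illustrative only, not the macro's actual output):

fn factorial(input: u64) -> u64 {
    // Each "recursive call" becomes a state update plus another loop turn,
    // so the stack stays one frame deep regardless of `input`.
    let (mut accumulator, mut input) = (1u64, input);
    loop {
        if input > 0 {
            accumulator *= input;
            input -= 1;
        } else {
            return accumulator;
        }
    }
}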
25.078431
86
0.585223
09616e1c547f7ef00572e83ce1fbec3aaa8a6f26
2,565
mod storage_class;
pub use self::storage_class::StorageClass;
#[cfg(feature = "api")]
pub use self::storage_class::{CreateStorageClassOptional, CreateStorageClassResponse};
#[cfg(feature = "api")]
pub use self::storage_class::DeleteCollectionStorageClassResponse;
#[cfg(feature = "api")]
pub use self::storage_class::DeleteStorageClassResponse;
#[cfg(feature = "api")]
pub use self::storage_class::ListStorageClassResponse;
#[cfg(feature = "api")]
pub use self::storage_class::PatchStorageClassResponse;
#[cfg(feature = "api")]
pub use self::storage_class::{ReadStorageClassOptional, ReadStorageClassResponse};
#[cfg(feature = "api")]
pub use self::storage_class::{ReplaceStorageClassOptional, ReplaceStorageClassResponse};
#[cfg(feature = "api")]
pub use self::storage_class::WatchStorageClassResponse;

mod storage_class_list;
pub use self::storage_class_list::StorageClassList;

mod volume_attachment;
pub use self::volume_attachment::VolumeAttachment;
#[cfg(feature = "api")]
pub use self::volume_attachment::{CreateVolumeAttachmentOptional, CreateVolumeAttachmentResponse};
#[cfg(feature = "api")]
pub use self::volume_attachment::DeleteCollectionVolumeAttachmentResponse;
#[cfg(feature = "api")]
pub use self::volume_attachment::DeleteVolumeAttachmentResponse;
#[cfg(feature = "api")]
pub use self::volume_attachment::ListVolumeAttachmentResponse;
#[cfg(feature = "api")]
pub use self::volume_attachment::PatchVolumeAttachmentResponse;
#[cfg(feature = "api")]
pub use self::volume_attachment::PatchVolumeAttachmentStatusResponse;
#[cfg(feature = "api")]
pub use self::volume_attachment::{ReadVolumeAttachmentOptional, ReadVolumeAttachmentResponse};
#[cfg(feature = "api")]
pub use self::volume_attachment::{ReadVolumeAttachmentStatusOptional, ReadVolumeAttachmentStatusResponse};
#[cfg(feature = "api")]
pub use self::volume_attachment::{ReplaceVolumeAttachmentOptional, ReplaceVolumeAttachmentResponse};
#[cfg(feature = "api")]
pub use self::volume_attachment::{ReplaceVolumeAttachmentStatusOptional, ReplaceVolumeAttachmentStatusResponse};
#[cfg(feature = "api")]
pub use self::volume_attachment::WatchVolumeAttachmentResponse;

mod volume_attachment_list;
pub use self::volume_attachment_list::VolumeAttachmentList;

mod volume_attachment_source;
pub use self::volume_attachment_source::VolumeAttachmentSource;

mod volume_attachment_spec;
pub use self::volume_attachment_spec::VolumeAttachmentSpec;

mod volume_attachment_status;
pub use self::volume_attachment_status::VolumeAttachmentStatus;

mod volume_error;
pub use self::volume_error::VolumeError;
58.295455
136
0.809357
3a32f1cb903e583f24117db65fc84197e2aa3b84
9,923
use super::{
    ObligationCauseCode, OnUnimplementedDirective, OnUnimplementedNote, PredicateObligation,
};
use crate::infer::InferCtxt;
use rustc_hir as hir;
use rustc_hir::def_id::DefId;
use rustc_middle::ty::subst::Subst;
use rustc_middle::ty::{self, GenericParamDefKind};
use rustc_span::symbol::sym;
use std::iter;

use super::InferCtxtPrivExt;

crate trait InferCtxtExt<'tcx> {
    /*private*/
    fn impl_similar_to(
        &self,
        trait_ref: ty::PolyTraitRef<'tcx>,
        obligation: &PredicateObligation<'tcx>,
    ) -> Option<DefId>;

    /*private*/
    fn describe_enclosure(&self, hir_id: hir::HirId) -> Option<&'static str>;

    fn on_unimplemented_note(
        &self,
        trait_ref: ty::PolyTraitRef<'tcx>,
        obligation: &PredicateObligation<'tcx>,
    ) -> OnUnimplementedNote;
}

impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
    fn impl_similar_to(
        &self,
        trait_ref: ty::PolyTraitRef<'tcx>,
        obligation: &PredicateObligation<'tcx>,
    ) -> Option<DefId> {
        let tcx = self.tcx;
        let param_env = obligation.param_env;
        let trait_ref = tcx.erase_late_bound_regions(trait_ref);
        let trait_self_ty = trait_ref.self_ty();

        let mut self_match_impls = vec![];
        let mut fuzzy_match_impls = vec![];

        self.tcx.for_each_relevant_impl(trait_ref.def_id, trait_self_ty, |def_id| {
            let impl_substs = self.fresh_substs_for_item(obligation.cause.span, def_id);
            let impl_trait_ref = tcx.impl_trait_ref(def_id).unwrap().subst(tcx, impl_substs);

            let impl_self_ty = impl_trait_ref.self_ty();

            if let Ok(..) = self.can_eq(param_env, trait_self_ty, impl_self_ty) {
                self_match_impls.push(def_id);

                if iter::zip(
                    trait_ref.substs.types().skip(1),
                    impl_trait_ref.substs.types().skip(1),
                )
                .all(|(u, v)| self.fuzzy_match_tys(u, v))
                {
                    fuzzy_match_impls.push(def_id);
                }
            }
        });

        let impl_def_id = if self_match_impls.len() == 1 {
            self_match_impls[0]
        } else if fuzzy_match_impls.len() == 1 {
            fuzzy_match_impls[0]
        } else {
            return None;
        };

        tcx.has_attr(impl_def_id, sym::rustc_on_unimplemented).then_some(impl_def_id)
    }

    /// Used to set on_unimplemented's `ItemContext`
    /// to be the enclosing (async) block/function/closure
    fn describe_enclosure(&self, hir_id: hir::HirId) -> Option<&'static str> {
        let hir = &self.tcx.hir();
        let node = hir.find(hir_id)?;
        match &node {
            hir::Node::Item(hir::Item { kind: hir::ItemKind::Fn(sig, _, body_id), .. }) => {
                self.describe_generator(*body_id).or_else(|| {
                    Some(match sig.header {
                        hir::FnHeader { asyncness: hir::IsAsync::Async, .. } => "an async function",
                        _ => "a function",
                    })
                })
            }
            hir::Node::TraitItem(hir::TraitItem {
                kind: hir::TraitItemKind::Fn(_, hir::TraitFn::Provided(body_id)),
                ..
            }) => self.describe_generator(*body_id).or_else(|| Some("a trait method")),
            hir::Node::ImplItem(hir::ImplItem {
                kind: hir::ImplItemKind::Fn(sig, body_id),
                ..
            }) => self.describe_generator(*body_id).or_else(|| {
                Some(match sig.header {
                    hir::FnHeader { asyncness: hir::IsAsync::Async, .. } => "an async method",
                    _ => "a method",
                })
            }),
            hir::Node::Expr(hir::Expr {
                kind: hir::ExprKind::Closure(_is_move, _, body_id, _, gen_movability),
                ..
            }) => self.describe_generator(*body_id).or_else(|| {
                Some(if gen_movability.is_some() { "an async closure" } else { "a closure" })
            }),
            hir::Node::Expr(hir::Expr { .. }) => {
                let parent_hid = hir.get_parent_node(hir_id);
                if parent_hid != hir_id { self.describe_enclosure(parent_hid) } else { None }
            }
            _ => None,
        }
    }

    fn on_unimplemented_note(
        &self,
        trait_ref: ty::PolyTraitRef<'tcx>,
        obligation: &PredicateObligation<'tcx>,
    ) -> OnUnimplementedNote {
        let def_id =
            self.impl_similar_to(trait_ref, obligation).unwrap_or_else(|| trait_ref.def_id());
        let trait_ref = trait_ref.skip_binder();

        let mut flags = vec![(
            sym::ItemContext,
            self.describe_enclosure(obligation.cause.body_id).map(|s| s.to_owned()),
        )];

        match obligation.cause.code {
            ObligationCauseCode::BuiltinDerivedObligation(..)
            | ObligationCauseCode::ImplDerivedObligation(..)
            | ObligationCauseCode::DerivedObligation(..) => {}
            _ => {
                // this is a "direct", user-specified, rather than derived,
                // obligation.
                flags.push((sym::direct, None));
            }
        }

        if let ObligationCauseCode::ItemObligation(item)
        | ObligationCauseCode::BindingObligation(item, _) = obligation.cause.code
        {
            // FIXME: maybe also have some way of handling methods
            // from other traits? That would require name resolution,
            // which we might want to make hygienic in some way.
            //
            // Currently I'm leaving it for what I need for `try`.
            if self.tcx.trait_of_item(item) == Some(trait_ref.def_id) {
                let method = self.tcx.item_name(item);
                flags.push((sym::from_method, None));
                flags.push((sym::from_method, Some(method.to_string())));
            }
        }

        if let Some((t, _)) = self.get_parent_trait_ref(&obligation.cause.code) {
            flags.push((sym::parent_trait, Some(t)));
        }

        if let Some(k) = obligation.cause.span.desugaring_kind() {
            flags.push((sym::from_desugaring, None));
            flags.push((sym::from_desugaring, Some(format!("{:?}", k))));
        }

        // Add all types without trimmed paths.
        ty::print::with_no_trimmed_paths(|| {
            let generics = self.tcx.generics_of(def_id);
            let self_ty = trait_ref.self_ty();
            // This is also included through the generics list as `Self`,
            // but the parser won't allow you to use it
            flags.push((sym::_Self, Some(self_ty.to_string())));
            if let Some(def) = self_ty.ty_adt_def() {
                // We also want to be able to select self's original
                // signature with no type arguments resolved
                flags.push((sym::_Self, Some(self.tcx.type_of(def.did).to_string())));
            }

            for param in generics.params.iter() {
                let value = match param.kind {
                    GenericParamDefKind::Type { .. } | GenericParamDefKind::Const { .. } => {
                        trait_ref.substs[param.index as usize].to_string()
                    }
                    GenericParamDefKind::Lifetime => continue,
                };
                let name = param.name;
                flags.push((name, Some(value)));

                if let GenericParamDefKind::Type { .. } = param.kind {
                    let param_ty = trait_ref.substs[param.index as usize].expect_ty();
                    if let Some(def) = param_ty.ty_adt_def() {
                        // We also want to be able to select the parameter's
                        // original signature with no type arguments resolved
                        flags.push((name, Some(self.tcx.type_of(def.did).to_string())));
                    }
                }
            }

            if let Some(true) = self_ty.ty_adt_def().map(|def| def.did.is_local()) {
                flags.push((sym::crate_local, None));
            }

            // Allow targeting all integers using `{integral}`, even if the exact type was resolved
            if self_ty.is_integral() {
                flags.push((sym::_Self, Some("{integral}".to_owned())));
            }

            if let ty::Array(aty, len) = self_ty.kind() {
                flags.push((sym::_Self, Some("[]".to_owned())));
                flags.push((sym::_Self, Some(format!("[{}]", aty))));
                if let Some(def) = aty.ty_adt_def() {
                    // We also want to be able to select the array's type's original
                    // signature with no type arguments resolved
                    let type_string = self.tcx.type_of(def.did).to_string();
                    flags.push((sym::_Self, Some(format!("[{}]", type_string))));

                    let len = len.val.try_to_value().and_then(|v| v.try_to_machine_usize(self.tcx));
                    let string = match len {
                        Some(n) => format!("[{}; {}]", type_string, n),
                        None => format!("[{}; _]", type_string),
                    };
                    flags.push((sym::_Self, Some(string)));
                }
            }
            if let ty::Dynamic(traits, _) = self_ty.kind() {
                for t in traits.iter() {
                    if let ty::ExistentialPredicate::Trait(trait_ref) = t.skip_binder() {
                        flags.push((sym::_Self, Some(self.tcx.def_path_str(trait_ref.def_id))))
                    }
                }
            }
        });

        if let Ok(Some(command)) =
            OnUnimplementedDirective::of_item(self.tcx, trait_ref.def_id, def_id)
        {
            command.evaluate(self.tcx, trait_ref, &flags[..])
        } else {
            OnUnimplementedNote::default()
        }
    }
}
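The flags collected above (`_Self`, `direct`, `from_method`, `from_desugaring`, `crate_local`, ...) are what `#[rustc_on_unimplemented]` attributes match against. A hedged sketch of such an attribute, using the nightly-internal syntax as I understand it (the trait and messages are illustrative):

#[rustc_on_unimplemented(
    // `on(...)` selects a message when a collected flag matches;
    // "[]" is the `_Self` value this module pushes for array types.
    on(_Self = "[]", message = "`{Self}` is an array; consider iterating over it"),
    message = "`{Self}` does not implement `Frobnicate`",
    label = "the trait `Frobnicate` is not implemented for `{Self}`"
)]
trait Frobnicate {}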
40.835391
100
0.530888
d552ea3ec30435cf6c7c07860fa2bb09c372df24
8,479
//! Defines conversion traits between Rust types and NumPy data types. use std::{mem, os::raw::c_int, ptr}; use ndarray::{ArrayBase, Data, Dimension, IntoDimension, Ix1, OwnedRepr}; use pyo3::Python; use crate::array::PyArray; use crate::dtype::Element; use crate::error::MAX_DIMENSIONALITY_ERR; use crate::npyffi::{self, npy_intp}; use crate::sealed::Sealed; /// Conversion trait from owning Rust types into [`PyArray`]. /// /// This trait takes ownership of `self`, which means it holds a pointer into the Rust heap. /// /// In addition, some destructive methods like `resize` cannot be used with NumPy arrays constructed using this trait. /// /// # Example /// /// ``` /// use numpy::{PyArray, IntoPyArray}; /// use pyo3::Python; /// /// Python::with_gil(|py| { /// let py_array = vec![1, 2, 3].into_pyarray(py); /// /// assert_eq!(py_array.readonly().as_slice().unwrap(), &[1, 2, 3]); /// /// // Array cannot be resized when its data is owned by Rust. /// unsafe { /// assert!(py_array.resize(100).is_err()); /// } /// }); /// ``` pub trait IntoPyArray { /// The element type of resulting array. type Item: Element; /// The dimension type of the resulting array. type Dim: Dimension; /// Consumes `self` and moves its data into a NumPy array. fn into_pyarray<'py>(self, py: Python<'py>) -> &'py PyArray<Self::Item, Self::Dim>; } impl<T: Element> IntoPyArray for Box<[T]> { type Item = T; type Dim = Ix1; fn into_pyarray<'py>(self, py: Python<'py>) -> &'py PyArray<Self::Item, Self::Dim> { let dims = [self.len()]; let strides = [mem::size_of::<T>() as npy_intp]; let data_ptr = self.as_ptr(); unsafe { PyArray::from_raw_parts(py, dims, strides.as_ptr(), data_ptr, self) } } } impl<T: Element> IntoPyArray for Vec<T> { type Item = T; type Dim = Ix1; fn into_pyarray<'py>(self, py: Python<'py>) -> &'py PyArray<Self::Item, Self::Dim> { let dims = [self.len()]; let strides = [mem::size_of::<T>() as npy_intp]; let data_ptr = self.as_ptr(); unsafe { PyArray::from_raw_parts(py, dims, strides.as_ptr(), data_ptr, self) } } } impl<A, D> IntoPyArray for ArrayBase<OwnedRepr<A>, D> where A: Element, D: Dimension, { type Item = A; type Dim = D; fn into_pyarray<'py>(self, py: Python<'py>) -> &'py PyArray<Self::Item, Self::Dim> { PyArray::from_owned_array(py, self) } } /// Conversion trait from borrowing Rust types to [`PyArray`]. /// /// This trait takes `&self` by reference, which means it allocates in Python heap and then copies the elements there. /// /// # Examples /// /// ``` /// use numpy::{PyArray, ToPyArray}; /// use pyo3::Python; /// /// Python::with_gil(|py| { /// let py_array = vec![1, 2, 3].to_pyarray(py); /// /// assert_eq!(py_array.readonly().as_slice().unwrap(), &[1, 2, 3]); /// }); /// ``` /// /// Due to copying the elments, this method converts non-contiguous arrays to C-order contiguous arrays. /// /// ``` /// use numpy::{PyArray, ToPyArray}; /// use ndarray::{arr3, s}; /// use pyo3::Python; /// /// Python::with_gil(|py| { /// let array = arr3(&[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]); /// let py_array = array.slice(s![.., 0..1, ..]).to_pyarray(py); /// /// assert_eq!(py_array.readonly().as_array(), arr3(&[[[1, 2, 3]], [[7, 8, 9]]])); /// assert!(py_array.is_c_contiguous()); /// }); /// ``` pub trait ToPyArray { /// The element type of resulting array. type Item: Element; /// The dimension type of the resulting array. type Dim: Dimension; /// Copies the content pointed to by `&self` into a newly allocated NumPy array. 
fn to_pyarray<'py>(&self, py: Python<'py>) -> &'py PyArray<Self::Item, Self::Dim>; } impl<T: Element> ToPyArray for [T] { type Item = T; type Dim = Ix1; fn to_pyarray<'py>(&self, py: Python<'py>) -> &'py PyArray<Self::Item, Self::Dim> { PyArray::from_slice(py, self) } } impl<S, D, A> ToPyArray for ArrayBase<S, D> where S: Data<Elem = A>, D: Dimension, A: Element, { type Item = A; type Dim = D; fn to_pyarray<'py>(&self, py: Python<'py>) -> &'py PyArray<Self::Item, Self::Dim> { let len = self.len(); match self.order() { Some(flag) if A::IS_COPY => { // if the array is contiguous, copy it by `copy_nonoverlapping`. let strides = self.npy_strides(); unsafe { let array = PyArray::new_uninit(py, self.raw_dim(), strides.as_ptr(), flag); ptr::copy_nonoverlapping(self.as_ptr(), array.data(), len); array } } _ => { // if the array is not contiguous, copy all elements by `ArrayBase::iter`. let dim = self.raw_dim(); unsafe { let array = PyArray::<A, _>::new(py, dim, false); let mut data_ptr = array.data(); for item in self.iter() { data_ptr.write(item.clone()); data_ptr = data_ptr.add(1); } array } } } } } pub(crate) trait ArrayExt { fn npy_strides(&self) -> [npyffi::npy_intp; 32]; fn order(&self) -> Option<c_int>; } impl<A, S, D> ArrayExt for ArrayBase<S, D> where S: Data<Elem = A>, D: Dimension, { fn npy_strides(&self) -> [npyffi::npy_intp; 32] { let strides = self.strides(); let itemsize = mem::size_of::<A>() as isize; assert!(strides.len() <= 32, "{}", MAX_DIMENSIONALITY_ERR); let mut new_strides = [0; 32]; for i in 0..strides.len() { new_strides[i] = (strides[i] * itemsize) as npyffi::npy_intp; } new_strides } fn order(&self) -> Option<c_int> { if self.is_standard_layout() { Some(npyffi::NPY_ORDER::NPY_CORDER as _) } else if self.ndim() > 1 && self.raw_view().reversed_axes().is_standard_layout() { Some(npyffi::NPY_ORDER::NPY_FORTRANORDER as _) } else { None } } } /// Utility trait to specify the dimensions of an array. pub trait ToNpyDims: Dimension + Sealed { #[doc(hidden)] fn ndim_cint(&self) -> c_int { self.ndim() as c_int } #[doc(hidden)] fn as_dims_ptr(&self) -> *mut npyffi::npy_intp { self.slice().as_ptr() as *mut npyffi::npy_intp } #[doc(hidden)] fn to_npy_dims(&self) -> npyffi::PyArray_Dims { npyffi::PyArray_Dims { ptr: self.as_dims_ptr(), len: self.ndim_cint(), } } } impl<D> ToNpyDims for D where D: Dimension {} /// Trait implemented by types that can be used to index an array. /// /// This is equivalent to [`ndarray::NdIndex`] but accounts for /// NumPy strides being in units of bytes instead of elements. /// /// All types which implement [`IntoDimension`] implement this trait as well. 
/// This includes at least /// - [tuple](https://doc.rust-lang.org/stable/std/primitive.tuple.html) /// - [array](https://doc.rust-lang.org/stable/std/primitive.array.html) /// - [slice](https://doc.rust-lang.org/stable/std/primitive.slice.html) pub trait NpyIndex: IntoDimension + Sealed { #[doc(hidden)] fn get_checked<T>(self, dims: &[usize], strides: &[isize]) -> Option<isize>; #[doc(hidden)] fn get_unchecked<T>(self, strides: &[isize]) -> isize; } impl<D: IntoDimension> Sealed for D {} impl<D: IntoDimension> NpyIndex for D { fn get_checked<T>(self, dims: &[usize], strides: &[isize]) -> Option<isize> { let indices = self.into_dimension(); let indices = indices.slice(); if indices.len() != dims.len() { return None; } if indices.iter().zip(dims).any(|(i, d)| i >= d) { return None; } Some(get_unchecked_impl::<T>(indices, strides)) } fn get_unchecked<T>(self, strides: &[isize]) -> isize { let indices = self.into_dimension(); let indices = indices.slice(); get_unchecked_impl::<T>(indices, strides) } } fn get_unchecked_impl<T>(indices: &[usize], strides: &[isize]) -> isize { let size = mem::size_of::<T>() as isize; indices .iter() .zip(strides) .map(|(&i, stride)| stride * i as isize / size) .sum() }
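A short sketch contrasting the two conversion traits defined above (standard `numpy`/`pyo3` setup assumed; this example is not part of the file itself):

```rust
use numpy::{IntoPyArray, ToPyArray};
use pyo3::Python;

fn main() {
    Python::with_gil(|py| {
        // `into_pyarray` moves the Vec's buffer into NumPy: no copy is made,
        // but the array can no longer be resized from the Python side.
        let moved = vec![1_i64, 2, 3].into_pyarray(py);
        // `to_pyarray` allocates on the Python heap and copies the elements.
        let copied = [4_i64, 5, 6].to_pyarray(py);
        assert_eq!(moved.readonly().as_slice().unwrap(), &[1, 2, 3]);
        assert_eq!(copied.readonly().as_slice().unwrap(), &[4, 5, 6]);
    });
}
```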
30.282143
118
0.572001
7546d1b367c5e2907da0e0efac46a9964c32fb0b
17,572
use std::{cmp, fmt, ops}; use std::time::Duration; use std::convert::From; use libc::{timespec, timeval}; #[cfg_attr(target_env = "musl", allow(deprecated))] // https://github.com/rust-lang/libc/issues/1848 pub use libc::{time_t, suseconds_t}; pub trait TimeValLike: Sized { #[inline] fn zero() -> Self { Self::seconds(0) } #[inline] fn hours(hours: i64) -> Self { let secs = hours.checked_mul(SECS_PER_HOUR) .expect("TimeValLike::hours out of bounds"); Self::seconds(secs) } #[inline] fn minutes(minutes: i64) -> Self { let secs = minutes.checked_mul(SECS_PER_MINUTE) .expect("TimeValLike::minutes out of bounds"); Self::seconds(secs) } fn seconds(seconds: i64) -> Self; fn milliseconds(milliseconds: i64) -> Self; fn microseconds(microseconds: i64) -> Self; fn nanoseconds(nanoseconds: i64) -> Self; #[inline] fn num_hours(&self) -> i64 { self.num_seconds() / 3600 } #[inline] fn num_minutes(&self) -> i64 { self.num_seconds() / 60 } fn num_seconds(&self) -> i64; fn num_milliseconds(&self) -> i64; fn num_microseconds(&self) -> i64; fn num_nanoseconds(&self) -> i64; } #[repr(C)] #[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)] pub struct TimeSpec(timespec); const NANOS_PER_SEC: i64 = 1_000_000_000; const SECS_PER_MINUTE: i64 = 60; const SECS_PER_HOUR: i64 = 3600; #[cfg(target_pointer_width = "64")] const TS_MAX_SECONDS: i64 = (::std::i64::MAX / NANOS_PER_SEC) - 1; #[cfg(target_pointer_width = "32")] const TS_MAX_SECONDS: i64 = ::std::isize::MAX as i64; const TS_MIN_SECONDS: i64 = -TS_MAX_SECONDS; // x32 compatibility // See https://sourceware.org/bugzilla/show_bug.cgi?id=16437 #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] type timespec_tv_nsec_t = i64; #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] type timespec_tv_nsec_t = libc::c_long; impl From<timespec> for TimeSpec { fn from(ts: timespec) -> Self { Self(ts) } } impl From<Duration> for TimeSpec { fn from(duration: Duration) -> Self { #[cfg_attr(target_env = "musl", allow(deprecated))] // https://github.com/rust-lang/libc/issues/1848 TimeSpec(timespec { tv_sec: duration.as_secs() as time_t, tv_nsec: duration.subsec_nanos() as timespec_tv_nsec_t }) } } impl From<TimeSpec> for Duration { fn from(timespec: TimeSpec) -> Self { Duration::new(timespec.0.tv_sec as u64, timespec.0.tv_nsec as u32) } } impl AsRef<timespec> for TimeSpec { fn as_ref(&self) -> &timespec { &self.0 } } impl AsMut<timespec> for TimeSpec { fn as_mut(&mut self) -> &mut timespec { &mut self.0 } } impl Ord for TimeSpec { // The implementation of cmp is simplified by assuming that the struct is // normalized. That is, tv_nsec must always be within [0, 1_000_000_000) fn cmp(&self, other: &TimeSpec) -> cmp::Ordering { if self.tv_sec() == other.tv_sec() { self.tv_nsec().cmp(&other.tv_nsec()) } else { self.tv_sec().cmp(&other.tv_sec()) } } } impl PartialOrd for TimeSpec { fn partial_cmp(&self, other: &TimeSpec) -> Option<cmp::Ordering> { Some(self.cmp(other)) } } impl TimeValLike for TimeSpec { #[inline] fn seconds(seconds: i64) -> TimeSpec { assert!(seconds >= TS_MIN_SECONDS && seconds <= TS_MAX_SECONDS, "TimeSpec out of bounds; seconds={}", seconds); #[cfg_attr(target_env = "musl", allow(deprecated))] // https://github.com/rust-lang/libc/issues/1848 TimeSpec(timespec {tv_sec: seconds as time_t, tv_nsec: 0 }) } #[inline] fn milliseconds(milliseconds: i64) -> TimeSpec { let nanoseconds = milliseconds.checked_mul(1_000_000) .expect("TimeSpec::milliseconds out of bounds"); TimeSpec::nanoseconds(nanoseconds) } /// Makes a new `TimeSpec` with given number of microseconds. #[inline] fn microseconds(microseconds: i64) -> TimeSpec { let nanoseconds = microseconds.checked_mul(1_000) .expect("TimeSpec::microseconds out of bounds"); TimeSpec::nanoseconds(nanoseconds) } /// Makes a new `TimeSpec` with given number of nanoseconds. #[inline] fn nanoseconds(nanoseconds: i64) -> TimeSpec { let (secs, nanos) = div_mod_floor_64(nanoseconds, NANOS_PER_SEC); assert!(secs >= TS_MIN_SECONDS && secs <= TS_MAX_SECONDS, "TimeSpec out of bounds"); #[cfg_attr(target_env = "musl", allow(deprecated))] // https://github.com/rust-lang/libc/issues/1848 TimeSpec(timespec {tv_sec: secs as time_t, tv_nsec: nanos as timespec_tv_nsec_t }) } fn num_seconds(&self) -> i64 { if self.tv_sec() < 0 && self.tv_nsec() > 0 { (self.tv_sec() + 1) as i64 } else { self.tv_sec() as i64 } } fn num_milliseconds(&self) -> i64 { self.num_nanoseconds() / 1_000_000 } fn num_microseconds(&self) -> i64 { self.num_nanoseconds() / 1_000 } fn num_nanoseconds(&self) -> i64 { let secs = self.num_seconds() * 1_000_000_000; let nsec = self.nanos_mod_sec(); secs + nsec as i64 } } impl TimeSpec { fn nanos_mod_sec(&self) -> timespec_tv_nsec_t { if self.tv_sec() < 0 && self.tv_nsec() > 0 { self.tv_nsec() - NANOS_PER_SEC as timespec_tv_nsec_t } else { self.tv_nsec() } } #[cfg_attr(target_env = "musl", allow(deprecated))] // https://github.com/rust-lang/libc/issues/1848 pub fn tv_sec(&self) -> time_t { self.0.tv_sec } pub fn tv_nsec(&self) -> timespec_tv_nsec_t { self.0.tv_nsec } } impl ops::Neg for TimeSpec { type Output = TimeSpec; fn neg(self) -> TimeSpec { TimeSpec::nanoseconds(-self.num_nanoseconds()) } } impl ops::Add for TimeSpec { type Output = TimeSpec; fn add(self, rhs: TimeSpec) -> TimeSpec { TimeSpec::nanoseconds( self.num_nanoseconds() + rhs.num_nanoseconds()) } } impl ops::Sub for TimeSpec { type Output = TimeSpec; fn sub(self, rhs: TimeSpec) -> TimeSpec { TimeSpec::nanoseconds( self.num_nanoseconds() - rhs.num_nanoseconds()) } } impl ops::Mul<i32> for TimeSpec { type Output = TimeSpec; fn mul(self, rhs: i32) -> TimeSpec { let usec = self.num_nanoseconds().checked_mul(i64::from(rhs)) .expect("TimeSpec multiply out of bounds"); TimeSpec::nanoseconds(usec) } } impl ops::Div<i32> for TimeSpec { type Output = TimeSpec; fn div(self, rhs: i32) -> TimeSpec { let usec = self.num_nanoseconds() / i64::from(rhs); TimeSpec::nanoseconds(usec) } } impl fmt::Display for TimeSpec { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let (abs, sign) = if self.tv_sec() < 0 { (-*self, "-") } else { (*self, "") }; let sec = abs.tv_sec(); write!(f, "{}", sign)?; if
abs.tv_nsec() == 0 { if abs.tv_sec() == 1 { write!(f, "{} second", sec)?; } else { write!(f, "{} seconds", sec)?; } } else if abs.tv_nsec() % 1_000_000 == 0 { write!(f, "{}.{:03} seconds", sec, abs.tv_nsec() / 1_000_000)?; } else if abs.tv_nsec() % 1_000 == 0 { write!(f, "{}.{:06} seconds", sec, abs.tv_nsec() / 1_000)?; } else { write!(f, "{}.{:09} seconds", sec, abs.tv_nsec())?; } Ok(()) } } #[repr(transparent)] #[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)] pub struct TimeVal(timeval); const MICROS_PER_SEC: i64 = 1_000_000; #[cfg(target_pointer_width = "64")] const TV_MAX_SECONDS: i64 = (::std::i64::MAX / MICROS_PER_SEC) - 1; #[cfg(target_pointer_width = "32")] const TV_MAX_SECONDS: i64 = ::std::isize::MAX as i64; const TV_MIN_SECONDS: i64 = -TV_MAX_SECONDS; impl AsRef<timeval> for TimeVal { fn as_ref(&self) -> &timeval { &self.0 } } impl AsMut<timeval> for TimeVal { fn as_mut(&mut self) -> &mut timeval { &mut self.0 } } impl Ord for TimeVal { // The implementation of cmp is simplified by assuming that the struct is // normalized. That is, tv_usec must always be within [0, 1_000_000) fn cmp(&self, other: &TimeVal) -> cmp::Ordering { if self.tv_sec() == other.tv_sec() { self.tv_usec().cmp(&other.tv_usec()) } else { self.tv_sec().cmp(&other.tv_sec()) } } } impl PartialOrd for TimeVal { fn partial_cmp(&self, other: &TimeVal) -> Option<cmp::Ordering> { Some(self.cmp(other)) } } impl TimeValLike for TimeVal { #[inline] fn seconds(seconds: i64) -> TimeVal { assert!(seconds >= TV_MIN_SECONDS && seconds <= TV_MAX_SECONDS, "TimeVal out of bounds; seconds={}", seconds); #[cfg_attr(target_env = "musl", allow(deprecated))] // https://github.com/rust-lang/libc/issues/1848 TimeVal(timeval {tv_sec: seconds as time_t, tv_usec: 0 }) } #[inline] fn milliseconds(milliseconds: i64) -> TimeVal { let microseconds = milliseconds.checked_mul(1_000) .expect("TimeVal::milliseconds out of bounds"); TimeVal::microseconds(microseconds) } /// Makes a new `TimeVal` with given number of microseconds. #[inline] fn microseconds(microseconds: i64) -> TimeVal { let (secs, micros) = div_mod_floor_64(microseconds, MICROS_PER_SEC); assert!(secs >= TV_MIN_SECONDS && secs <= TV_MAX_SECONDS, "TimeVal out of bounds"); #[cfg_attr(target_env = "musl", allow(deprecated))] // https://github.com/rust-lang/libc/issues/1848 TimeVal(timeval {tv_sec: secs as time_t, tv_usec: micros as suseconds_t }) } /// Makes a new `TimeVal` with given number of nanoseconds. 
Some precision /// will be lost #[inline] fn nanoseconds(nanoseconds: i64) -> TimeVal { let microseconds = nanoseconds / 1000; let (secs, micros) = div_mod_floor_64(microseconds, MICROS_PER_SEC); assert!(secs >= TV_MIN_SECONDS && secs <= TV_MAX_SECONDS, "TimeVal out of bounds"); #[cfg_attr(target_env = "musl", allow(deprecated))] // https://github.com/rust-lang/libc/issues/1848 TimeVal(timeval {tv_sec: secs as time_t, tv_usec: micros as suseconds_t }) } fn num_seconds(&self) -> i64 { if self.tv_sec() < 0 && self.tv_usec() > 0 { (self.tv_sec() + 1) as i64 } else { self.tv_sec() as i64 } } fn num_milliseconds(&self) -> i64 { self.num_microseconds() / 1_000 } fn num_microseconds(&self) -> i64 { let secs = self.num_seconds() * 1_000_000; let usec = self.micros_mod_sec(); secs + usec as i64 } fn num_nanoseconds(&self) -> i64 { self.num_microseconds() * 1_000 } } impl TimeVal { fn micros_mod_sec(&self) -> suseconds_t { if self.tv_sec() < 0 && self.tv_usec() > 0 { self.tv_usec() - MICROS_PER_SEC as suseconds_t } else { self.tv_usec() } } #[cfg_attr(target_env = "musl", allow(deprecated))] // https://github.com/rust-lang/libc/issues/1848 pub fn tv_sec(&self) -> time_t { self.0.tv_sec } pub fn tv_usec(&self) -> suseconds_t { self.0.tv_usec } } impl ops::Neg for TimeVal { type Output = TimeVal; fn neg(self) -> TimeVal { TimeVal::microseconds(-self.num_microseconds()) } } impl ops::Add for TimeVal { type Output = TimeVal; fn add(self, rhs: TimeVal) -> TimeVal { TimeVal::microseconds( self.num_microseconds() + rhs.num_microseconds()) } } impl ops::Sub for TimeVal { type Output = TimeVal; fn sub(self, rhs: TimeVal) -> TimeVal { TimeVal::microseconds( self.num_microseconds() - rhs.num_microseconds()) } } impl ops::Mul<i32> for TimeVal { type Output = TimeVal; fn mul(self, rhs: i32) -> TimeVal { let usec = self.num_microseconds().checked_mul(i64::from(rhs)) .expect("TimeVal multiply out of bounds"); TimeVal::microseconds(usec) } } impl ops::Div<i32> for TimeVal { type Output = TimeVal; fn div(self, rhs: i32) -> TimeVal { let usec = self.num_microseconds() / i64::from(rhs); TimeVal::microseconds(usec) } } impl fmt::Display for TimeVal { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let (abs, sign) = if self.tv_sec() < 0 { (-*self, "-") } else { (*self, "") }; let sec = abs.tv_sec(); write!(f, "{}", sign)?; if abs.tv_usec() == 0 { if abs.tv_sec() == 1 { write!(f, "{} second", sec)?; } else { write!(f, "{} seconds", sec)?; } } else if abs.tv_usec() % 1000 == 0 { write!(f, "{}.{:03} seconds", sec, abs.tv_usec() / 1000)?; } else { write!(f, "{}.{:06} seconds", sec, abs.tv_usec())?; } Ok(()) } } impl From<timeval> for TimeVal { fn from(tv: timeval) -> Self { TimeVal(tv) } } #[inline] fn div_mod_floor_64(this: i64, other: i64) -> (i64, i64) { (div_floor_64(this, other), mod_floor_64(this, other)) } #[inline] fn div_floor_64(this: i64, other: i64) -> i64 { match div_rem_64(this, other) { (d, r) if (r > 0 && other < 0) || (r < 0 && other > 0) => d - 1, (d, _) => d, } } #[inline] fn mod_floor_64(this: i64, other: i64) -> i64 { match this % other { r if (r > 0 && other < 0) || (r < 0 && other > 0) => r + other, r => r, } } #[inline] fn div_rem_64(this: i64, other: i64) -> (i64, i64) { (this / other, this % other) } #[cfg(test)] mod test { use super::{TimeSpec, TimeVal, TimeValLike}; use std::time::Duration; #[test] pub fn test_timespec() { assert!(TimeSpec::seconds(1) != TimeSpec::zero()); assert_eq!(TimeSpec::seconds(1) + TimeSpec::seconds(2), TimeSpec::seconds(3)); assert_eq!(TimeSpec::minutes(3) + 
TimeSpec::seconds(2), TimeSpec::seconds(182)); } #[test] pub fn test_timespec_from() { let duration = Duration::new(123, 123_456_789); let timespec = TimeSpec::nanoseconds(123_123_456_789); assert_eq!(TimeSpec::from(duration), timespec); assert_eq!(Duration::from(timespec), duration); } #[test] pub fn test_timespec_neg() { let a = TimeSpec::seconds(1) + TimeSpec::nanoseconds(123); let b = TimeSpec::seconds(-1) + TimeSpec::nanoseconds(-123); assert_eq!(a, -b); } #[test] pub fn test_timespec_ord() { assert!(TimeSpec::seconds(1) == TimeSpec::nanoseconds(1_000_000_000)); assert!(TimeSpec::seconds(1) < TimeSpec::nanoseconds(1_000_000_001)); assert!(TimeSpec::seconds(1) > TimeSpec::nanoseconds(999_999_999)); assert!(TimeSpec::seconds(-1) < TimeSpec::nanoseconds(-999_999_999)); assert!(TimeSpec::seconds(-1) > TimeSpec::nanoseconds(-1_000_000_001)); } #[test] pub fn test_timespec_fmt() { assert_eq!(TimeSpec::zero().to_string(), "0 seconds"); assert_eq!(TimeSpec::seconds(42).to_string(), "42 seconds"); assert_eq!(TimeSpec::milliseconds(42).to_string(), "0.042 seconds"); assert_eq!(TimeSpec::microseconds(42).to_string(), "0.000042 seconds"); assert_eq!(TimeSpec::nanoseconds(42).to_string(), "0.000000042 seconds"); assert_eq!(TimeSpec::seconds(-86401).to_string(), "-86401 seconds"); } #[test] pub fn test_timeval() { assert!(TimeVal::seconds(1) != TimeVal::zero()); assert_eq!(TimeVal::seconds(1) + TimeVal::seconds(2), TimeVal::seconds(3)); assert_eq!(TimeVal::minutes(3) + TimeVal::seconds(2), TimeVal::seconds(182)); } #[test] pub fn test_timeval_ord() { assert!(TimeVal::seconds(1) == TimeVal::microseconds(1_000_000)); assert!(TimeVal::seconds(1) < TimeVal::microseconds(1_000_001)); assert!(TimeVal::seconds(1) > TimeVal::microseconds(999_999)); assert!(TimeVal::seconds(-1) < TimeVal::microseconds(-999_999)); assert!(TimeVal::seconds(-1) > TimeVal::microseconds(-1_000_001)); } #[test] pub fn test_timeval_neg() { let a = TimeVal::seconds(1) + TimeVal::microseconds(123); let b = TimeVal::seconds(-1) + TimeVal::microseconds(-123); assert_eq!(a, -b); } #[test] pub fn test_timeval_fmt() { assert_eq!(TimeVal::zero().to_string(), "0 seconds"); assert_eq!(TimeVal::seconds(42).to_string(), "42 seconds"); assert_eq!(TimeVal::milliseconds(42).to_string(), "0.042 seconds"); assert_eq!(TimeVal::microseconds(42).to_string(), "0.000042 seconds"); assert_eq!(TimeVal::nanoseconds(1402).to_string(), "0.000001 seconds"); assert_eq!(TimeVal::seconds(-86401).to_string(), "-86401 seconds"); } }
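The private `div_mod_floor_64` helpers above are what keep the sub-second field non-negative, the invariant the `Ord` impls rely on. A standalone illustration using the standard library's equivalent Euclidean operations:

```rust
fn main() {
    const NANOS_PER_SEC: i64 = 1_000_000_000;
    // A total of -1 ns must normalize to (-1 s, +999_999_999 ns), not
    // (0 s, -1 ns), so tv_nsec stays within [0, 1_000_000_000).
    let total: i64 = -1;
    let secs = total.div_euclid(NANOS_PER_SEC);
    let nanos = total.rem_euclid(NANOS_PER_SEC);
    assert_eq!((secs, nanos), (-1, 999_999_999));
}
```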
29.189369
108
0.580981
56ac1eb4378343a61c5513a6c5bdc9cdefe7a235
13,079
#![deny(missing_docs)] #![cfg_attr(docsrs, feature(doc_cfg))] //! Async TLS listener //! //! This library is intended to automatically initiate a TLS connection //! for each new connection in a source of new streams (such as a listening //! TCP or unix domain socket). //! //! # Features: //! - `rustls`: Support the tokio-rustls backend for tls (default) //! - `native-tls`: support the tokio-native-tls backend for tls //! - `hyper-h1`: hyper support with http/1 //! - `hyper-h2`: hyper support with http/2 //! - `tokio-net`: Implementations for tokio socket types (default) use futures_util::stream::{FuturesUnordered, Stream, StreamExt}; use pin_project_lite::pin_project; use std::future::Future; use std::io; use std::marker::Unpin; use std::pin::Pin; use std::task::{Context, Poll}; use std::time::Duration; use thiserror::Error; use tokio::io::{AsyncRead, AsyncWrite}; use tokio::time::{timeout, Timeout}; /// Default number of concurrent handshakes pub const DEFAULT_MAX_HANDSHAKES: usize = 64; /// Default timeout for the TLS handshake. pub const DEFAULT_HANDSHAKE_TIMEOUT: Duration = Duration::from_millis(200); /// Trait for TLS implementation. /// /// Implementations are provided by the rustls and native-tls features. pub trait AsyncTls<C: AsyncRead + AsyncWrite>: Clone { /// The type of the TLS stream created from the underlying stream. type Stream; /// Error type for completing the TLS handshake type Error: std::error::Error; /// Type of the Future for the TLS stream that is accepted. type AcceptFuture: Future<Output = Result<Self::Stream, Self::Error>> + Unpin; /// Accept a TLS connection on an underlying stream fn accept(&self, stream: C) -> Self::AcceptFuture; } /// Asynchronously accept connections. pub trait AsyncAccept { /// The type of the connection that is accepted. type Connection: AsyncRead + AsyncWrite; /// The type of error that may be returned. type Error; /// Poll to accept the next connection. fn poll_accept( self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll<Option<Result<Self::Connection, Self::Error>>>; /// Return a new `AsyncAccept` that stops accepting connections after /// `ender` completes. /// /// Useful for graceful shutdown. /// /// See [examples/echo.rs](https://github.com/tmccombs/tls-listener/blob/main/examples/echo.rs) /// for example of how to use. fn until<F: Future>(self, ender: F) -> Until<Self, F> where Self: Sized, { Until { acceptor: self, ender, } } } pin_project! { /// /// Wraps a `Stream` of connections (such as a TCP listener) so that each connection is itself /// encrypted using TLS. /// /// It is similar to: /// /// ```ignore /// tcpListener.and_then(|s| tlsAcceptor.accept(s)) /// ``` /// /// except that it has the ability to accept multiple transport-level connections /// simultaneously while the TLS handshake is pending for other connections. /// /// By default, if a client fails the TLS handshake, that is treated as an error, and the /// `TlsListener` will return an `Err`. If the `TlsListener` is passed directly to a hyper /// [`Server`][1], then an invalid handshake can cause the server to stop accepting connections. /// See [`http-stream.rs`][2] or [`http-low-level`][3] examples, for examples of how to avoid this. /// /// Note that if the maximum number of pending connections is greater than 1, the resulting /// [`T::Stream`][4] connections may come in a different order than the connections produced by the /// underlying listener. 
/// /// [1]: https://docs.rs/hyper/latest/hyper/server/struct.Server.html /// [2]: https://github.com/tmccombs/tls-listener/blob/main/examples/http-stream.rs /// [3]: https://github.com/tmccombs/tls-listener/blob/main/examples/http-low-level.rs /// [4]: AsyncTls::Stream /// pub struct TlsListener<A: AsyncAccept, T: AsyncTls<A::Connection>> { #[pin] listener: A, tls: T, waiting: FuturesUnordered<Timeout<T::AcceptFuture>>, max_handshakes: usize, timeout: Duration, } } /// Builder for `TlsListener`. #[derive(Clone)] pub struct Builder<T> { tls: T, max_handshakes: usize, handshake_timeout: Duration, } /// Wraps errors from either the listener or the TLS Acceptor #[derive(Debug, Error)] pub enum Error<LE: std::error::Error, TE: std::error::Error> { /// An error that arose from the listener ([AsyncAccept::Error]) #[error("{0}")] ListenerError(#[source] LE), /// An error that occurred during the TLS accept handshake #[error("{0}")] TlsAcceptError(#[source] TE), } impl<A: AsyncAccept, T> TlsListener<A, T> where T: AsyncTls<A::Connection>, { /// Create a `TlsListener` with default options. pub fn new(tls: T, listener: A) -> Self { builder(tls).listen(listener) } } impl<A, T> TlsListener<A, T> where A: AsyncAccept, A::Connection: AsyncRead + AsyncWrite + Unpin + 'static, A::Error: std::error::Error, T: AsyncTls<A::Connection>, Self: Unpin, { /// Accept the next connection /// /// This is essentially an alias to `self.next()` with a more domain-appropriate name. pub fn accept(&mut self) -> impl Future<Output = Option<<Self as Stream>::Item>> + '_ { self.next() } /// Replaces the Tls Acceptor configuration, which will be used for new connections. /// /// This can be used to change the certificate used at runtime. pub fn replace_acceptor(&mut self, acceptor: T) { self.tls = acceptor; } } impl<A, T> Stream for TlsListener<A, T> where A: AsyncAccept, A::Connection: AsyncRead + AsyncWrite + Unpin + 'static, A::Error: std::error::Error, T: AsyncTls<A::Connection>, { type Item = Result<T::Stream, Error<A::Error, T::Error>>; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { let mut this = self.project(); while this.waiting.len() < *this.max_handshakes { match this.listener.as_mut().poll_accept(cx) { Poll::Pending => break, Poll::Ready(Some(Ok(conn))) => { this.waiting .push(timeout(*this.timeout, this.tls.accept(conn))); } Poll::Ready(Some(Err(e))) => { return Poll::Ready(Some(Err(Error::ListenerError(e)))); } Poll::Ready(None) => return Poll::Ready(None), } } loop { return match this.waiting.poll_next_unpin(cx) { Poll::Ready(Some(Ok(conn))) => { Poll::Ready(Some(conn.map_err(Error::TlsAcceptError))) } // The handshake timed out, try getting another connection from the // queue Poll::Ready(Some(Err(_))) => continue, _ => Poll::Pending, }; } } } #[cfg(feature = "rustls")] impl<C: AsyncRead + AsyncWrite + Unpin> AsyncTls<C> for tokio_rustls::TlsAcceptor { type Stream = tokio_rustls::server::TlsStream<C>; type Error = io::Error; type AcceptFuture = tokio_rustls::Accept<C>; fn accept(&self, conn: C) -> Self::AcceptFuture { tokio_rustls::TlsAcceptor::accept(self, conn) } } #[cfg(feature = "native-tls")] impl<C> AsyncTls<C> for tokio_native_tls::TlsAcceptor where C: AsyncRead + AsyncWrite + Unpin + Send + 'static, { type Stream = tokio_native_tls::TlsStream<C>; type Error = tokio_native_tls::native_tls::Error; type AcceptFuture = Pin<Box<dyn Future<Output = Result<Self::Stream, Self::Error>> + Send>>; fn accept(&self, conn: C) -> Self::AcceptFuture { let tls = self.clone(); 
Box::pin(async move { tokio_native_tls::TlsAcceptor::accept(&tls, conn).await }) } } impl<T> Builder<T> { /// Set the maximum number of concurrent handshakes. /// /// At most `max` handshakes will be concurrently processed. If that limit is /// reached, the `TlsListener` will stop polling the underlying listener until a /// handshake completes and the encrypted stream has been returned. /// /// Defaults to `DEFAULT_MAX_HANDSHAKES`. pub fn max_handshakes(&mut self, max: usize) -> &mut Self { self.max_handshakes = max; self } /// Set the timeout for handshakes. /// /// If a handshake takes longer than `timeout`, then the handshake will be /// aborted and the underlying connection will be dropped. /// /// Defaults to `DEFAULT_HANDSHAKE_TIMEOUT`. pub fn handshake_timeout(&mut self, timeout: Duration) -> &mut Self { self.handshake_timeout = timeout; self } /// Create a `TlsListener` from the builder /// /// Actually build the `TlsListener`. The `listener` argument should be /// an implementation of the `AsyncAccept` trait that accepts new connections /// that the `TlsListener` will encrypt using TLS. pub fn listen<A: AsyncAccept>(&self, listener: A) -> TlsListener<A, T> where T: AsyncTls<A::Connection>, { TlsListener { listener, tls: self.tls.clone(), waiting: FuturesUnordered::new(), max_handshakes: self.max_handshakes, timeout: self.handshake_timeout, } } } /// Create a new Builder for a TlsListener /// /// `tls` will be used to configure the TLS sessions. pub fn builder<T>(tls: T) -> Builder<T> { Builder { tls, max_handshakes: DEFAULT_MAX_HANDSHAKES, handshake_timeout: DEFAULT_HANDSHAKE_TIMEOUT, } } #[cfg(feature = "tokio-net")] #[cfg_attr(docsrs, doc(cfg(feature = "tokio-net")))] impl AsyncAccept for tokio::net::TcpListener { type Connection = tokio::net::TcpStream; type Error = io::Error; fn poll_accept( self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll<Option<Result<Self::Connection, Self::Error>>> { match (*self).poll_accept(cx) { Poll::Ready(Ok((stream, _))) => Poll::Ready(Some(Ok(stream))), Poll::Ready(Err(e)) => Poll::Ready(Some(Err(e))), Poll::Pending => Poll::Pending, } } } #[cfg(all(unix, feature = "tokio-net"))] #[cfg_attr(docsrs, doc(cfg(feature = "tokio-net")))] impl AsyncAccept for tokio::net::UnixListener { type Connection = tokio::net::UnixStream; type Error = io::Error; fn poll_accept( self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll<Option<Result<Self::Connection, Self::Error>>> { match (*self).poll_accept(cx) { Poll::Ready(Ok((stream, _))) => Poll::Ready(Some(Ok(stream))), Poll::Ready(Err(e)) => Poll::Ready(Some(Err(e))), Poll::Pending => Poll::Pending, } } } pin_project! { /// See [`AsyncAccept::until`] pub struct Until<A, E> { #[pin] acceptor: A, #[pin] ender: E, } } impl<A: AsyncAccept, E: Future> AsyncAccept for Until<A, E> { type Connection = A::Connection; type Error = A::Error; fn poll_accept( self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll<Option<Result<Self::Connection, Self::Error>>> { let this = self.project(); match this.ender.poll(cx) { Poll::Pending => this.acceptor.poll_accept(cx), Poll::Ready(_) => Poll::Ready(None), } } } // Possibly make a blanket implementation for hyper::server::accept::Accept?
#[cfg(any(feature = "hyper-h1", feature = "hyper-h2"))] mod hyper_impl { use super::*; use hyper::server::accept::Accept as HyperAccept; use hyper::server::conn::{AddrIncoming, AddrStream}; #[cfg_attr(docsrs, doc(cfg(any(feature = "hyper-h1", feature = "hyper-h2"))))] impl AsyncAccept for AddrIncoming { type Connection = AddrStream; type Error = io::Error; fn poll_accept( self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll<Option<Result<Self::Connection, Self::Error>>> { match <AddrIncoming as HyperAccept>::poll_accept(self, cx) { Poll::Ready(res) => Poll::Ready(res), Poll::Pending => Poll::Pending, } } } #[cfg_attr(docsrs, doc(cfg(any(feature = "hyper-h1", feature = "hyper-h2"))))] impl<A, T> HyperAccept for TlsListener<A, T> where A: AsyncAccept, A::Connection: AsyncRead + AsyncWrite + Unpin + 'static, A::Error: std::error::Error, T: AsyncTls<A::Connection>, { type Conn = T::Stream; type Error = Error<A::Error, T::Error>; fn poll_accept( self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll<Option<Result<Self::Conn, Self::Error>>> { self.poll_next(cx) } } }
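A hedged usage sketch of the builder API above (assumes the crate is used as `tls_listener` with the `rustls` and `tokio-net` features, and that a configured `tokio_rustls::TlsAcceptor` is supplied by the caller):

```rust
use futures_util::StreamExt;
use std::time::Duration;

async fn serve(acceptor: tokio_rustls::TlsAcceptor) -> std::io::Result<()> {
    let tcp = tokio::net::TcpListener::bind("127.0.0.1:8443").await?;
    // Drive up to 128 concurrent handshakes, dropping any handshake
    // that takes longer than one second.
    let mut listener = tls_listener::builder(acceptor)
        .max_handshakes(128)
        .handshake_timeout(Duration::from_secs(1))
        .listen(tcp);
    while let Some(conn) = listener.next().await {
        match conn {
            Ok(_tls_stream) => { /* hand the encrypted stream to the app */ }
            Err(err) => eprintln!("handshake failed: {}", err),
        }
    }
    Ok(())
}
```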
33.027778
103
0.608992
56ef0d672eff4e063407d02ed58b83fb190b5d69
3,523
#[doc = "Reader of register MOSI"] pub type R = crate::R<u32, super::MOSI>; #[doc = "Writer for register MOSI"] pub type W = crate::W<u32, super::MOSI>; #[doc = "Register MOSI `reset()`'s with value 0xffff_ffff"] impl crate::ResetValue for super::MOSI { type Type = u32; #[inline(always)] fn reset_value() -> Self::Type { 0xffff_ffff } } #[doc = "Reader of field `PIN`"] pub type PIN_R = crate::R<u8, u8>; #[doc = "Write proxy for field `PIN`"] pub struct PIN_W<'a> { w: &'a mut W, } impl<'a> PIN_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !0x1f) | ((value as u32) & 0x1f); self.w } } #[doc = "Connection\n\nValue on reset: 1"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum CONNECT_A { #[doc = "1: Disconnect"] DISCONNECTED, #[doc = "0: Connect"] CONNECTED, } impl From<CONNECT_A> for bool { #[inline(always)] fn from(variant: CONNECT_A) -> Self { match variant { CONNECT_A::DISCONNECTED => true, CONNECT_A::CONNECTED => false, } } } #[doc = "Reader of field `CONNECT`"] pub type CONNECT_R = crate::R<bool, CONNECT_A>; impl CONNECT_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> CONNECT_A { match self.bits { true => CONNECT_A::DISCONNECTED, false => CONNECT_A::CONNECTED, } } #[doc = "Checks if the value of the field is `DISCONNECTED`"] #[inline(always)] pub fn is_disconnected(&self) -> bool { *self == CONNECT_A::DISCONNECTED } #[doc = "Checks if the value of the field is `CONNECTED`"] #[inline(always)] pub fn is_connected(&self) -> bool { *self == CONNECT_A::CONNECTED } } #[doc = "Write proxy for field `CONNECT`"] pub struct CONNECT_W<'a> { w: &'a mut W, } impl<'a> CONNECT_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: CONNECT_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "Disconnect"] #[inline(always)] pub fn disconnected(self) -> &'a mut W { self.variant(CONNECT_A::DISCONNECTED) } #[doc = "Connect"] #[inline(always)] pub fn connected(self) -> &'a mut W { self.variant(CONNECT_A::CONNECTED) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 31)) | (((value as u32) & 0x01) << 31); self.w } } impl R { #[doc = "Bits 0:4 - Pin number"] #[inline(always)] pub fn pin(&self) -> PIN_R { PIN_R::new((self.bits & 0x1f) as u8) } #[doc = "Bit 31 - Connection"] #[inline(always)] pub fn connect(&self) -> CONNECT_R { CONNECT_R::new(((self.bits >> 31) & 0x01) != 0) } } impl W { #[doc = "Bits 0:4 - Pin number"] #[inline(always)] pub fn pin(&mut self) -> PIN_W { PIN_W { w: self } } #[doc = "Bit 31 - Connection"] #[inline(always)] pub fn connect(&mut self) -> CONNECT_W { CONNECT_W { w: self } } }
27.310078
86
0.549532
7a0dc1881b348fa9d09364b158af34c7c73c91ed
77,637
//! Client to Spotify API endpoint // 3rd-party library use chrono::prelude::*; use failure; use reqwest::header::{HeaderMap, AUTHORIZATION, CONTENT_TYPE}; use reqwest::Client; use reqwest::Method; use reqwest::StatusCode; use serde::de::Deserialize; use serde_json; use serde_json::map::Map; use serde_json::Value; // standard library use std::borrow::Cow; use std::collections::HashMap; use std::fmt; use std::string::String; use super::model::album::{FullAlbum, FullAlbums, PageSimpliedAlbums, SavedAlbum, SimplifiedAlbum}; use super::model::artist::{CursorPageFullArtists, FullArtist, FullArtists}; use super::model::audio::{AudioAnalysis, AudioFeatures, AudioFeaturesPayload}; use super::model::category::PageCategory; use super::model::context::{FullPlayingContext, SimplifiedPlayingContext}; use super::model::cud_result::CUDResult; use super::model::device::DevicePayload; use super::model::page::{CursorBasedPage, Page}; use super::model::playing::{PlayHistory, Playing}; use super::model::playlist::{FeaturedPlaylists, FullPlaylist, PlaylistTrack, SimplifiedPlaylist}; use super::model::recommend::Recommendations; use super::model::search::{SearchAlbums, SearchArtists, SearchPlaylists, SearchTracks}; use super::model::track::{FullTrack, FullTracks, SavedTrack, SimplifiedTrack}; use super::model::user::{PrivateUser, PublicUser}; use super::oauth2::SpotifyClientCredentials; use super::senum::{AlbumType, Country, RepeatState, SearchType, TimeRange, Type}; use super::util::convert_map_to_string; lazy_static! { /// HTTP Client pub static ref CLIENT: Client = Client::new(); } /// Describes API errors #[derive(Debug)] pub enum ApiError { Unauthorized, RateLimited(Option<usize>), Other(u16), } impl failure::Fail for ApiError {} impl fmt::Display for ApiError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { ApiError::Unauthorized => write!(f, "Unauthorized request to API"), ApiError::RateLimited(e) => { if let Some(d) = e { write!(f, "Exceeded API request limit - please wait {} seconds", d) } else { write!(f, "Exceeded API request limit") } } ApiError::Other(s) => write!(f, "Spotify API reported error code {}", s), } } } impl From<&reqwest::Response> for ApiError { fn from(response: &reqwest::Response) -> Self { match response.status() { StatusCode::UNAUTHORIZED => ApiError::Unauthorized, StatusCode::TOO_MANY_REQUESTS => { if let Ok(duration) = response.headers()[reqwest::header::RETRY_AFTER].to_str() { ApiError::RateLimited(duration.parse::<usize>().ok()) } else { ApiError::RateLimited(None) } } status => ApiError::Other(status.as_u16()), } } } /// Spotify API object #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Spotify { pub prefix: String, pub access_token: Option<String>, pub client_credentials_manager: Option<SpotifyClientCredentials>, } impl Spotify { //! If you want to check examples of all API endpoints, you can check the
[examples](https://github.com/samrayleung/rspotify/tree/master/examples) in github pub fn default() -> Spotify { Spotify { prefix: "https://api.spotify.com/v1/".to_owned(), access_token: None, client_credentials_manager: None, } } // pub fn prefix(mut self, prefix: &str) -> Spotify { pub fn prefix(mut self, prefix: &str) -> Spotify { self.prefix = prefix.to_owned(); self } pub fn access_token(mut self, access_token: &str) -> Spotify { self.access_token = Some(access_token.to_owned()); self } pub fn client_credentials_manager( mut self, client_credential_manager: SpotifyClientCredentials, ) -> Spotify { self.client_credentials_manager = Some(client_credential_manager); self } pub fn build(self) -> Spotify { if self.access_token.is_none() && self.client_credentials_manager.is_none() { panic!("access_token and client_credentials_manager are none!!!"); } self } async fn auth_headers(&self) -> String { let token = match self.access_token { Some(ref token) => token.to_owned(), None => match self.client_credentials_manager { Some(ref client_credentials_manager) => { client_credentials_manager.get_access_token().await } None => panic!("client credentials manager is none"), }, }; "Bearer ".to_owned() + &token } async fn internal_call( &self, method: Method, url: &str, payload: Option<&Value>, ) -> Result<String, failure::Error> { let mut url: Cow<str> = url.into(); if !url.starts_with("http") { url = ["https://api.spotify.com/v1/", &url].concat().into(); } let mut headers = HeaderMap::new(); headers.insert(AUTHORIZATION, self.auth_headers().await.parse().unwrap()); headers.insert(CONTENT_TYPE, "application/json".parse().unwrap()); let response = { let builder = CLIENT.request(method, &url.into_owned()).headers(headers); // only add body if necessary // spotify rejects GET requests that have a body with a 400 response let builder = if let Some(json) = payload { builder.json(json) } else { builder }; builder.send().await? 
}; if response.status().is_success() { match response.text().await { Ok(text) => Ok(text), Err(e) => Err(failure::err_msg(format!( "Error getting text out of response {}", e ))), } } else { Err(failure::Error::from(ApiError::from(&response))) } } ///send get request async fn get( &self, url: &str, params: &mut HashMap<String, String>, ) -> Result<String, failure::Error> { if !params.is_empty() { let param: String = convert_map_to_string(params); let mut url_with_params = url.to_owned(); url_with_params.push('?'); url_with_params.push_str(&param); self.internal_call(Method::GET, &url_with_params, None) .await } else { self.internal_call(Method::GET, url, None).await } } ///send post request async fn post(&self, url: &str, payload: &Value) -> Result<String, failure::Error> { self.internal_call(Method::POST, url, Some(payload)).await } ///send put request async fn put(&self, url: &str, payload: &Value) -> Result<String, failure::Error> { self.internal_call(Method::PUT, url, Some(payload)).await } /// send delete request async fn delete(&self, url: &str, payload: &Value) -> Result<String, failure::Error> { self.internal_call(Method::DELETE, url, Some(payload)).await } ///[get-track](https://developer.spotify.com/web-api/get-track/) ///returns a single track given the track's ID, URI or URL ///Parameters: ///- track_id - a spotify URI, URL or ID pub async fn track(&self, track_id: &str) -> Result<FullTrack, failure::Error> { let trid = self.get_id(Type::Track, track_id); let url = format!("tracks/{}", trid); let result = self.get(&url, &mut HashMap::new()).await?; self.convert_result::<FullTrack>(&result) } ///[get-several-tracks](https://developer.spotify.com/web-api/get-several-tracks/) ///returns a list of tracks given a list of track IDs, URIs, or URLs ///Parameters: ///- track_ids - a list of spotify URIs, URLs or IDs ///- market - an ISO 3166-1 alpha-2 country code. 
pub async fn tracks( &self, track_ids: Vec<&str>, market: Option<Country>, ) -> Result<FullTracks, failure::Error> { let mut ids: Vec<String> = vec![]; for track_id in track_ids { ids.push(self.get_id(Type::Track, track_id)); } let url = format!("tracks/?ids={}", ids.join(",")); let mut params: HashMap<String, String> = HashMap::new(); if let Some(_market) = market { params.insert("market".to_owned(), _market.as_str().to_owned()); } trace!("{:?}", &url); let result = self.get(&url, &mut params).await?; self.convert_result::<FullTracks>(&result) } ///[get-artist](https://developer.spotify.com/web-api/get-artist/) ///returns a single artist given the artist's ID, URI or URL ///Parameters: ///- artist_id - an artist ID, URI or URL pub async fn artist(&self, artist_id: &str) -> Result<FullArtist, failure::Error> { let trid = self.get_id(Type::Artist, artist_id); let url = format!("artists/{}", trid); let result = self.get(&url, &mut HashMap::new()).await?; self.convert_result::<FullArtist>(&result) } ///[get-several-artists](https://developer.spotify.com/web-api/get-several-artists/) ///returns a list of artists given the artist IDs, URIs, or URLs ///Parameters: ///- artist_ids - a list of artist IDs, URIs or URLs pub async fn artists(&self, artist_ids: Vec<String>) -> Result<FullArtists, failure::Error> { let mut ids: Vec<String> = vec![]; for artist_id in artist_ids { ids.push(self.get_id(Type::Artist, &artist_id)); } let url = format!("artists/?ids={}", ids.join(",")); let result = self.get(&url, &mut HashMap::new()).await?; self.convert_result::<FullArtists>(&result) } ///[get-artists-albums](https://developer.spotify.com/web-api/get-artists-albums/) /// Get Spotify catalog information about an artist's albums /// - artist_id - the artist ID, URI or URL /// - album_type - 'album', 'single', 'appears_on', 'compilation' /// - country - limit the response to one particular country. /// - limit - the number of albums to return /// - offset - the index of the first album to return pub async fn artist_albums( &self, artist_id: &str, album_type: Option<AlbumType>, country: Option<Country>, limit: Option<u32>, offset: Option<u32>, ) -> Result<Page<SimplifiedAlbum>, failure::Error> { let mut params: HashMap<String, String> = HashMap::new(); if let Some(_limit) = limit { params.insert("limit".to_owned(), _limit.to_string()); } if let Some(_album_type) = album_type { params.insert("album_type".to_owned(), _album_type.as_str().to_owned()); } if let Some(_offset) = offset { params.insert("offset".to_owned(), _offset.to_string()); } if let Some(_country) = country { params.insert("country".to_owned(), _country.as_str().to_owned()); } let trid = self.get_id(Type::Artist, artist_id); let url = format!("artists/{}/albums", trid); let result = self.get(&url, &mut params).await?; self.convert_result::<Page<SimplifiedAlbum>>(&result) } ///[get artists to tracks](https://developer.spotify.com/web-api/get-artists-top-tracks/) /// Get Spotify catalog information about an artist's top 10 tracks by country. /// Parameters: /// - artist_id - the artist ID, URI or URL /// - country - limit the response to one particular country. 
pub async fn artist_top_tracks<T: Into<Option<Country>>>( &self, artist_id: &str, country: T, ) -> Result<FullTracks, failure::Error> { let mut params: HashMap<String, String> = HashMap::new(); let country = country .into() .unwrap_or(Country::UnitedStates) .as_str() .to_string(); params.insert("country".to_owned(), country); let trid = self.get_id(Type::Artist, artist_id); let url = format!("artists/{}/top-tracks", trid); let result = self.get(&url, &mut params).await?; self.convert_result::<FullTracks>(&result) } ///[get related artists](https://developer.spotify.com/web-api/get-related-artists/) ///Get Spotify catalog information about artists similar to an ///identified artist. Similarity is based on analysis of the ///Spotify community's listening history. ///Parameters: ///- artist_id - the artist ID, URI or URL pub async fn artist_related_artists( &self, artist_id: &str, ) -> Result<FullArtists, failure::Error> { let trid = self.get_id(Type::Artist, artist_id); let url = format!("artists/{}/related-artists", trid); let result = self.get(&url, &mut HashMap::new()).await?; self.convert_result::<FullArtists>(&result) } ///[get album](https://developer.spotify.com/web-api/get-album/) ///returns a single album given the album's ID, URIs or URL ///Parameters: ///- album_id - the album ID, URI or URL pub async fn album(&self, album_id: &str) -> Result<FullAlbum, failure::Error> { let trid = self.get_id(Type::Album, album_id); let url = format!("albums/{}", trid); let result = self.get(&url, &mut HashMap::new()).await?; self.convert_result::<FullAlbum>(&result) } ///[get several albums](https://developer.spotify.com/web-api/get-several-albums/) ///returns a list of albums given the album IDs, URIs, or URLs ///Parameters: ///- albums_ids - a list of album IDs, URIs or URLs pub async fn albums(&self, album_ids: Vec<String>) -> Result<FullAlbums, failure::Error> { let mut ids: Vec<String> = vec![]; for album_id in album_ids { ids.push(self.get_id(Type::Album, &album_id)); } let url = format!("albums/?ids={}", ids.join(",")); let result = self.get(&url, &mut HashMap::new()).await?; self.convert_result::<FullAlbums>(&result) } ///[search for items](https://developer.spotify.com/web-api/search-item/) ///Search for an Item ///Get Spotify catalog information about artists, albums, tracks or /// playlists that match a keyword string. /// Parameters: ///- q - the search query ///- limit - the number of items to return ///- offset - the index of the first item to return ///- type - the type of item to return. One of 'artist', 'album', ///'track' or 'playlist' ///- market - An ISO 3166-1 alpha-2 country code or the string from_token. 
async fn search<L: Into<Option<u32>>, O: Into<Option<u32>>>( &self, q: &str, _type: SearchType, limit: L, offset: O, market: Option<Country>, ) -> Result<String, failure::Error> { let mut params = HashMap::new(); let limit = limit.into().unwrap_or(10); let offset = offset.into().unwrap_or(0); if let Some(_market) = market { params.insert("market".to_owned(), _market.as_str().to_owned()); } params.insert("limit".to_owned(), limit.to_string()); params.insert("offset".to_owned(), offset.to_string()); params.insert("q".to_owned(), q.to_owned()); params.insert("type".to_owned(), _type.as_str().to_owned()); let url = String::from("search"); self.get(&url, &mut params).await } ///search item, type is album ///[search for items](https://developer.spotify.com/web-api/search-item/) ///Get Spotify catalog information about artists, albums, tracks or /// playlists that match a keyword string. /// Parameters: ///- q - the search query ///- limit - the number of items to return ///- offset - the index of the first item to return ///'track' or 'playlist' ///- market - An ISO 3166-1 alpha-2 country code or the string from_token. pub async fn search_album<L: Into<Option<u32>>, O: Into<Option<u32>>>( &self, q: &str, limit: L, offset: O, market: Option<Country>, ) -> Result<SearchAlbums, failure::Error> { let result = self .search(q, SearchType::Album, limit, offset, market) .await?; self.convert_result::<SearchAlbums>(&result) } ///search item, type is artist ///[search for items](https://developer.spotify.com/web-api/search-item/) ///Get Spotify catalog information about artists, albums, tracks or /// playlists that match a keyword string. /// Parameters: ///- q - the search query ///- limit - the number of items to return ///- offset - the index of the first item to return ///'track' or 'playlist' ///- market - An ISO 3166-1 alpha-2 country code or the string from_token. pub async fn search_artist<L: Into<Option<u32>>, O: Into<Option<u32>>>( &self, q: &str, limit: L, offset: O, market: Option<Country>, ) -> Result<SearchArtists, failure::Error> { let result = self .search(q, SearchType::Artist, limit, offset, market) .await?; self.convert_result::<SearchArtists>(&result) } ///search item, type is track ///[search for items](https://developer.spotify.com/web-api/search-item/) ///Get Spotify catalog information about artists, albums, tracks or /// playlists that match a keyword string. /// Parameters: ///- q - the search query ///- limit - the number of items to return ///- offset - the index of the first item to return ///'track' or 'playlist' ///- market - An ISO 3166-1 alpha-2 country code or the string from_token. pub async fn search_track<L: Into<Option<u32>>, O: Into<Option<u32>>>( &self, q: &str, limit: L, offset: O, market: Option<Country>, ) -> Result<SearchTracks, failure::Error> { let result = self .search(q, SearchType::Track, limit, offset, market) .await?; self.convert_result::<SearchTracks>(&result) } ///search item, type is playlist ///[search for items](https://developer.spotify.com/web-api/search-item/) ///Get Spotify catalog information about artists, albums, tracks or /// playlists that match a keyword string. /// Parameters: ///- q - the search query ///- limit - the number of items to return ///- offset - the index of the first item to return ///'track' or 'playlist' ///- market - An ISO 3166-1 alpha-2 country code or the string from_token. 
pub async fn search_playlist<L: Into<Option<u32>>, O: Into<Option<u32>>>( &self, q: &str, limit: L, offset: O, market: Option<Country>, ) -> Result<SearchPlaylists, failure::Error> { let result = self .search(q, SearchType::Playlist, limit, offset, market) .await?; self.convert_result::<SearchPlaylists>(&result) } ///[get albums tracks](https://developer.spotify.com/web-api/get-albums-tracks/) ///Get Spotify catalog information about an album's tracks ///Parameters: ///- album_id - the album ID, URI or URL ///- limit - the number of items to return ///- offset - the index of the first item to return pub async fn album_track<L: Into<Option<u32>>, O: Into<Option<u32>>>( &self, album_id: &str, limit: L, offset: O, ) -> Result<Page<SimplifiedTrack>, failure::Error> { let mut params = HashMap::new(); let trid = self.get_id(Type::Album, album_id); let url = format!("albums/{}/tracks", trid); params.insert("limit".to_owned(), limit.into().unwrap_or(50).to_string()); params.insert("offset".to_owned(), offset.into().unwrap_or(0).to_string()); let result = self.get(&url, &mut params).await?; self.convert_result::<Page<SimplifiedTrack>>(&result) } ///[get users profile](https://developer.spotify.com/web-api/get-users-profile/) ///Gets basic profile information about a Spotify User ///Parameters: ///- user_id - the id of the user pub async fn user(&self, user_id: &str) -> Result<PublicUser, failure::Error> { let url = format!("users/{}", user_id); let result = self.get(&url, &mut HashMap::new()).await?; self.convert_result::<PublicUser>(&result) } ///[get playlist](https://developer.spotify.com/documentation/web-api/reference/playlists/get-playlist/) ///Get full details about a Spotify playlist ///Parameters: ///- playlist_id - the id of the playlist ///- market - an ISO 3166-1 alpha-2 country code. pub async fn playlist( &self, playlist_id: &str, fields: Option<&str>, market: Option<Country>, ) -> Result<FullPlaylist, failure::Error> { let mut params = HashMap::new(); if let Some(_fields) = fields { params.insert("fields".to_owned(), _fields.to_string()); } if let Some(_market) = market { params.insert("market".to_owned(), _market.as_str().to_owned()); } let plid = self.get_id(Type::Playlist, playlist_id); let url = format!("playlists/{}", plid); let result = self.get(&url, &mut params).await?; self.convert_result::<FullPlaylist>(&result) } ///[get users playlists](https://developer.spotify.com/web-api/get-a-list-of-current-users-playlists/) ///Get the current user's playlists without needing to fetch their profile ///Parameters: ///- limit - the number of items to return ///- offset - the index of the first item to return pub async fn current_user_playlists<L: Into<Option<u32>>, O: Into<Option<u32>>>( &self, limit: L, offset: O, ) -> Result<Page<SimplifiedPlaylist>, failure::Error> { let mut params = HashMap::new(); params.insert("limit".to_owned(), limit.into().unwrap_or(50).to_string()); params.insert("offset".to_owned(), offset.into().unwrap_or(0).to_string()); let url = String::from("me/playlists"); let result = self.get(&url, &mut params).await?; self.convert_result::<Page<SimplifiedPlaylist>>(&result) } ///[get list users playlists](https://developer.spotify.com/web-api/get-list-users-playlists/) ///Gets playlists of a user ///Parameters: ///- user_id - the id of the user ///- limit - the number of items to return ///- offset - the index of the first item to return pub async fn user_playlists<L: Into<Option<u32>>, O: Into<Option<u32>>>( &self, user_id: &str, limit: L, offset: O, ) -> Result<Page<SimplifiedPlaylist>, failure::Error> { let mut params = HashMap::new(); params.insert("limit".to_owned(), limit.into().unwrap_or(50).to_string()); params.insert("offset".to_owned(), offset.into().unwrap_or(0).to_string()); let url = format!("users/{}/playlists", user_id); let result = self.get(&url, &mut params).await?; self.convert_result::<Page<SimplifiedPlaylist>>(&result) } ///[get list users playlists](https://developer.spotify.com/web-api/get-list-users-playlists/) ///Gets a playlist of a user ///Parameters: ///- user_id - the id of the user ///- playlist_id - the id of the playlist ///- fields - which fields to return pub async fn user_playlist( &self, user_id: &str, playlist_id: Option<&mut str>, fields: Option<&str>, market: Option<Country>, ) -> Result<FullPlaylist, failure::Error> { let mut params = HashMap::new(); if let Some(_fields) = fields { params.insert("fields".to_owned(), _fields.to_string()); } if let Some(_market) = market { params.insert("market".to_owned(), _market.as_str().to_owned()); } match playlist_id { Some(_playlist_id) => { let plid = self.get_id(Type::Playlist, _playlist_id); let url = format!("users/{}/playlists/{}", user_id, plid); let result = self.get(&url, &mut params).await?; self.convert_result::<FullPlaylist>(&result) } None => { let url = format!("users/{}/starred", user_id); let result = self.get(&url, &mut params).await?; self.convert_result::<FullPlaylist>(&result) } } } ///[get playlists tracks](https://developer.spotify.com/web-api/get-playlists-tracks/) ///Get full details of the tracks of a playlist owned by a user ///Parameters: ///- user_id - the id of the user ///- playlist_id - the id of the playlist ///- fields - which fields to return ///- limit - the maximum number of tracks to return ///- offset - the index of the first track to return ///-
market - an ISO 3166-1 alpha-2 country code. pub async fn user_playlist_tracks<L: Into<Option<u32>>, O: Into<Option<u32>>>( &self, user_id: &str, playlist_id: &str, fields: Option<&str>, limit: L, offset: O, market: Option<Country>, ) -> Result<Page<PlaylistTrack>, failure::Error> { let mut params = HashMap::new(); params.insert("limit".to_owned(), limit.into().unwrap_or(50).to_string()); params.insert("offset".to_owned(), offset.into().unwrap_or(0).to_string()); if let Some(_market) = market { params.insert("market".to_owned(), _market.as_str().to_owned()); } if let Some(_fields) = fields { params.insert("fields".to_owned(), _fields.to_string()); } let plid = self.get_id(Type::Playlist, playlist_id); let url = format!("users/{}/playlists/{}/tracks", user_id, plid); let result = self.get(&url, &mut params).await?; self.convert_result::<Page<PlaylistTrack>>(&result) } ///[create playlist](https://developer.spotify.com/web-api/create-playlist/) ///Creates a playlist for a user ///Parameters: ///- user_id - the id of the user ///- name - the name of the playlist ///- public - is the created playlist public ///- description - the description of the playlist pub async fn user_playlist_create<P: Into<Option<bool>>, D: Into<Option<String>>>( &self, user_id: &str, name: &str, public: P, description: D, ) -> Result<FullPlaylist, failure::Error> { let public = public.into().unwrap_or(true); let description = description.into().unwrap_or_else(|| "".to_owned()); let params = json!({ "name": name, "public": public, "description": description }); let url = format!("users/{}/playlists", user_id); let result = self.post(&url, &params).await?; self.convert_result::<FullPlaylist>(&result) } ///[change playlists details](https://developer.spotify.com/web-api/change-playlist-details/) ///Changes a playlist's name and/or public/private state ///Parameters: ///- user_id - the id of the user ///- playlist_id - the id of the playlist ///- name - optional name of the playlist ///- public - optional is the playlist public ///- collaborative - optional is the playlist collaborative ///- description - optional description of the playlist pub async fn user_playlist_change_detail( &self, user_id: &str, playlist_id: &str, name: Option<&str>, public: Option<bool>, description: Option<String>, collaborative: Option<bool>, ) -> Result<String, failure::Error> { let mut params = Map::new(); if let Some(_name) = name { params.insert("name".to_owned(), _name.into()); } if let Some(_public) = public { params.insert("public".to_owned(), _public.into()); } if let Some(_collaborative) = collaborative { params.insert("collaborative".to_owned(), _collaborative.into()); } if let Some(_description) = description { params.insert("description".to_owned(), _description.into()); } let url = format!("users/{}/playlists/{}", user_id, playlist_id); self.put(&url, &Value::Object(params)).await } ///[unfollow playlist](https://developer.spotify.com/web-api/unfollow-playlist/) ///Unfollows (deletes) a playlist for a user ///Parameters: ///- user_id - the id of the user ///- playlist_id - the id of the playlist pub async fn user_playlist_unfollow( &self, user_id: &str, playlist_id: &str, ) -> Result<String, failure::Error> { let url = format!("users/{}/playlists/{}/followers", user_id, playlist_id); self.delete(&url, &json!({})).await } ///[add tracks to playlist](https://developer.spotify.com/web-api/add-tracks-to-playlist/) ///Adds tracks to a playlist ///Parameters: ///- user_id - the id of the user ///- playlist_id - the id of the playlist 
///- track_ids - a list of track URIs, URLs or IDs
    ///- position - the position to add the tracks
    pub async fn user_playlist_add_tracks(
        &self,
        user_id: &str,
        playlist_id: &str,
        track_ids: &[String],
        position: Option<i32>,
    ) -> Result<CUDResult, failure::Error> {
        let plid = self.get_id(Type::Playlist, playlist_id);
        let uris: Vec<String> = track_ids
            .iter()
            .map(|id| self.get_uri(Type::Track, id))
            .collect();
        let mut params = Map::new();
        if let Some(_position) = position {
            params.insert("position".to_owned(), _position.into());
        }
        params.insert("uris".to_owned(), uris.into());
        let url = format!("users/{}/playlists/{}/tracks", user_id, plid);
        let result = self.post(&url, &Value::Object(params)).await?;
        self.convert_result::<CUDResult>(&result)
    }

    ///[replace playlists tracks](https://developer.spotify.com/web-api/replace-playlists-tracks/)
    ///Replace all tracks in a playlist
    ///Parameters:
    ///- user_id - the id of the user
    ///- playlist_id - the id of the playlist
    ///- track_ids - the list of track ids to place in the playlist
    pub async fn user_playlist_replace_tracks(
        &self,
        user_id: &str,
        playlist_id: &str,
        track_ids: &[String],
    ) -> Result<(), failure::Error> {
        let plid = self.get_id(Type::Playlist, playlist_id);
        let uris: Vec<String> = track_ids
            .iter()
            .map(|id| self.get_uri(Type::Track, id))
            .collect();
        let params = json!({ "uris": uris });
        let url = format!("users/{}/playlists/{}/tracks", user_id, plid);
        match self.put(&url, &params).await {
            Ok(_) => Ok(()),
            Err(e) => Err(e),
        }
    }

    ///[reorder playlists tracks](https://developer.spotify.com/web-api/reorder-playlists-tracks/)
    ///Reorder tracks in a playlist
    ///Parameters:
    ///- user_id - the id of the user
    ///- playlist_id - the id of the playlist
    ///- range_start - the position of the first track to be reordered
    ///- range_length - optional the number of tracks to be reordered (default: 1)
    ///- insert_before - the position where the tracks should be inserted
    ///- snapshot_id - optional playlist's snapshot ID
    pub async fn user_playlist_reorder_tracks<R: Into<Option<u32>>>(
        &self,
        user_id: &str,
        playlist_id: &str,
        range_start: i32,
        range_length: R,
        insert_before: i32,
        snapshot_id: Option<String>,
    ) -> Result<CUDResult, failure::Error> {
        let plid = self.get_id(Type::Playlist, playlist_id);
        let range_length = range_length.into().unwrap_or(1);
        let mut params = Map::new();
        if let Some(_snapshot_id) = snapshot_id {
            params.insert("snapshot_id".to_owned(), _snapshot_id.into());
        }
        params.insert("range_start".to_owned(), range_start.into());
        params.insert("range_length".to_owned(), range_length.into());
        params.insert("insert_before".to_owned(), insert_before.into());
        let url = format!("users/{}/playlists/{}/tracks", user_id, plid);
        let result = self.put(&url, &Value::Object(params)).await?;
        self.convert_result::<CUDResult>(&result)
    }

    ///[remove tracks playlist](https://developer.spotify.com/web-api/remove-tracks-playlist/)
    ///Removes all occurrences of the given tracks from the given playlist
    ///Parameters:
    ///- user_id - the id of the user
    ///- playlist_id - the id of the playlist
    ///- track_ids - the list of track ids to remove from the playlist
    ///- snapshot_id - optional id of the playlist snapshot
    pub async fn user_playlist_remove_all_occurrences_of_tracks(
        &self,
        user_id: &str,
        playlist_id: &str,
        track_ids: &[String],
        snapshot_id: Option<String>,
    ) -> Result<CUDResult, failure::Error> {
        let plid = self.get_id(Type::Playlist, playlist_id);
        let uris: Vec<String> = track_ids
            .iter()
            .map(|id| self.get_uri(Type::Track, id))
            .collect();
        let mut params = Map::new();
        let mut tracks: Vec<Map<String, Value>> = vec![];
        for uri in uris {
            let mut map = Map::new();
            map.insert("uri".to_owned(), uri.into());
            tracks.push(map);
        }
        params.insert("tracks".to_owned(), tracks.into());
        if let Some(_snapshot_id) = snapshot_id {
            params.insert("snapshot_id".to_owned(), _snapshot_id.into());
        }
        let url = format!("users/{}/playlists/{}/tracks", user_id, plid);
        let result = self.delete(&url, &Value::Object(params)).await?;
        self.convert_result::<CUDResult>(&result)
    }

    ///[remove tracks playlist](https://developer.spotify.com/web-api/remove-tracks-playlist/)
    ///Removes specific occurrences of the given tracks from the given playlist
    ///Parameters:
    ///- user_id - the id of the user
    ///- playlist_id - the id of the playlist
    ///- tracks - an array of maps containing Spotify URIs of the tracks
    /// to remove with their current positions in the playlist. For example:
    ///{ "tracks": [{ "uri": "spotify:track:4iV5W9uYEdYUVa79Axb7Rh", "positions": [0,3] },{
    ///"uri": "spotify:track:1301WleyT98MSxVHPZCA6M", "positions": [7] }] }
    ///- snapshot_id - optional id of the playlist snapshot
    pub async fn user_playlist_remove_specific_occurrences_of_tracks(
        &self,
        user_id: &str,
        playlist_id: &str,
        tracks: Vec<Map<String, Value>>,
        snapshot_id: Option<String>,
    ) -> Result<CUDResult, failure::Error> {
        let mut params = Map::new();
        let plid = self.get_id(Type::Playlist, playlist_id);
        let mut ftracks: Vec<Map<String, Value>> = vec![];
        for track in tracks {
            let mut map = Map::new();
            if let Some(_uri) = track.get("uri") {
                let uri = self.get_uri(Type::Track, &_uri.as_str().unwrap().to_owned());
                map.insert("uri".to_owned(), uri.into());
            }
            // The Spotify API expects the key "positions" (see the doc example above).
            if let Some(_positions) = track.get("positions") {
                map.insert("positions".to_owned(), _positions.to_owned());
            }
            ftracks.push(map);
        }
        params.insert("tracks".to_owned(), ftracks.into());
        if let Some(_snapshot_id) = snapshot_id {
            params.insert("snapshot_id".to_owned(), _snapshot_id.into());
        }
        let url = format!("users/{}/playlists/{}/tracks", user_id, plid);
        let result = self.delete(&url, &Value::Object(params)).await?;
        self.convert_result::<CUDResult>(&result)
    }

    ///[follow playlist](https://developer.spotify.com/web-api/follow-playlist/)
    ///Add the current authenticated user as a follower of a playlist.
    ///Parameters:
    ///- playlist_owner_id - the user id of the playlist owner
    ///- playlist_id - the id of the playlist
    pub async fn user_playlist_follow_playlist<P: Into<Option<bool>>>(
        &self,
        playlist_owner_id: &str,
        playlist_id: &str,
        public: P,
    ) -> Result<(), failure::Error> {
        let mut map = Map::new();
        let public = public.into().unwrap_or(true);
        map.insert("public".to_owned(), public.into());
        let url = format!(
            "users/{}/playlists/{}/followers",
            playlist_owner_id, playlist_id
        );
        match self.put(&url, &Value::Object(map)).await {
            Ok(_) => Ok(()),
            Err(e) => Err(e),
        }
    }

    ///[check user following playlist](https://developer.spotify.com/web-api/check-user-following-playlist/)
    ///Check to see if the given users are following the given playlist
    ///Parameters:
    ///- playlist_owner_id - the user id of the playlist owner
    ///- playlist_id - the id of the playlist
    ///- user_ids - the ids of the users that you want to
    ///check to see if they follow the playlist. Maximum: 5 ids.
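    ///
    ///Hedged usage sketch (added for illustration; assumes an authorized
    ///`Spotify` client named `spotify` in an async context, with placeholder ids):
    ///
    ///```ignore
    ///let user_ids = vec!["user-a".to_owned(), "user-b".to_owned()];
    ///let follows = spotify
    ///    .user_playlist_check_follow("owner-id", "playlist-id", &user_ids)
    ///    .await?;
    ///// One bool per id, in the same order as `user_ids`.
    ///println!("{:?}", follows);
    ///```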
pub async fn user_playlist_check_follow( &self, playlist_owner_id: &str, playlist_id: &str, user_ids: &[String], ) -> Result<Vec<bool>, failure::Error> { if user_ids.len() > 5 { error!("The maximum length of user ids is limited to 5 :-)"); } let url = format!( "users/{}/playlists/{}/followers/contains?ids={}", playlist_owner_id, playlist_id, user_ids.join(",") ); let mut dumb: HashMap<String, String> = HashMap::new(); let result = self.get(&url, &mut dumb).await?; self.convert_result::<Vec<bool>>(&result) } ///[get current users profile](https://developer.spotify.com/web-api/get-current-users-profile/) ///Get detailed profile information about the current user. ///An alias for the 'current_user' method. pub async fn me(&self) -> Result<PrivateUser, failure::Error> { let mut dumb: HashMap<String, String> = HashMap::new(); let url = String::from("me/"); let result = self.get(&url, &mut dumb).await?; self.convert_result::<PrivateUser>(&result) } ///Get detailed profile information about the current user. ///An alias for the 'me' method. pub async fn current_user(&self) -> Result<PrivateUser, failure::Error> { self.me().await } /// [get the users currently playing track](https://developer.spotify.com/web-api/get-the-users-currently-playing-track/) /// Get information about the current users currently playing track. pub async fn current_user_playing_track(&self) -> Result<Option<Playing>, failure::Error> { let mut dumb = HashMap::new(); let url = String::from("me/player/currently-playing"); match self.get(&url, &mut dumb).await { Ok(result) => { if result.is_empty() { Ok(None) } else { self.convert_result::<Option<Playing>>(&result) } } Err(e) => Err(e), } } ///[get user saved albums](https://developer.spotify.com/web-api/get-users-saved-albums/) ///Gets a list of the albums saved in the current authorized user's ///"Your Music" library ///Parameters: ///- limit - the number of albums to return ///- offset - the index of the first album to return ///- market - Provide this parameter if you want to apply Track Relinking. pub async fn current_user_saved_albums<L: Into<Option<u32>>, O: Into<Option<u32>>>( &self, limit: L, offset: O, ) -> Result<Page<SavedAlbum>, failure::Error> { let limit = limit.into().unwrap_or(20); let offset = offset.into().unwrap_or(0); let mut params = HashMap::new(); params.insert("limit".to_owned(), limit.to_string()); params.insert("offset".to_owned(), offset.to_string()); let url = String::from("me/albums"); let result = self.get(&url, &mut params).await?; self.convert_result::<Page<SavedAlbum>>(&result) } ///[get users saved tracks](https://developer.spotify.com/web-api/get-users-saved-tracks/) ///Parameters: ///- limit - the number of tracks to return ///- offset - the index of the first track to return ///- market - Provide this parameter if you want to apply Track Relinking. 
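    ///
    ///Hedged usage sketch (added for illustration; assumes an authorized
    ///`Spotify` client named `spotify` in an async context, and that each
    ///`SavedTrack` item exposes its `FullTrack` as `track`):
    ///
    ///```ignore
    ///// Fetch the first page of saved tracks, 20 at a time.
    ///let page = spotify.current_user_saved_tracks(20, 0).await?;
    ///for item in page.items {
    ///    println!("{}", item.track.name);
    ///}
    ///```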
    pub async fn current_user_saved_tracks<L: Into<Option<u32>>, O: Into<Option<u32>>>(
        &self,
        limit: L,
        offset: O,
    ) -> Result<Page<SavedTrack>, failure::Error> {
        let limit = limit.into().unwrap_or(20);
        let offset = offset.into().unwrap_or(0);
        let mut params = HashMap::new();
        params.insert("limit".to_owned(), limit.to_string());
        params.insert("offset".to_owned(), offset.to_string());
        let url = String::from("me/tracks");
        let result = self.get(&url, &mut params).await?;
        self.convert_result::<Page<SavedTrack>>(&result)
    }

    ///[get followed artists](https://developer.spotify.com/web-api/get-followed-artists/)
    ///Gets a list of the artists followed by the current authorized user
    ///Parameters:
    ///- limit - the number of artists to return
    ///- after - the last artist ID retrieved from the previous request
    pub async fn current_user_followed_artists<L: Into<Option<u32>>>(
        &self,
        limit: L,
        after: Option<String>,
    ) -> Result<CursorPageFullArtists, failure::Error> {
        let limit = limit.into().unwrap_or(20);
        let mut params = HashMap::new();
        params.insert("limit".to_owned(), limit.to_string());
        if let Some(_after) = after {
            params.insert("after".to_owned(), _after);
        }
        params.insert("type".to_owned(), Type::Artist.as_str().to_owned());
        let url = String::from("me/following");
        let result = self.get(&url, &mut params).await?;
        self.convert_result::<CursorPageFullArtists>(&result)
    }

    ///[remove tracks users](https://developer.spotify.com/web-api/remove-tracks-user/)
    ///Remove one or more tracks from the current user's
    ///"Your Music" library.
    ///Parameters:
    ///- track_ids - a list of track URIs, URLs or IDs
    pub async fn current_user_saved_tracks_delete(
        &self,
        track_ids: &[String],
    ) -> Result<(), failure::Error> {
        let uris: Vec<String> = track_ids
            .iter()
            .map(|id| self.get_id(Type::Track, id))
            .collect();
        let url = format!("me/tracks/?ids={}", uris.join(","));
        match self.delete(&url, &json!({})).await {
            Ok(_) => Ok(()),
            Err(e) => Err(e),
        }
    }

    ///[check users saved tracks](https://developer.spotify.com/web-api/check-users-saved-tracks/)
    ///Check if one or more tracks is already saved in
    ///the current Spotify user’s “Your Music” library.
    ///Parameters:
    ///- track_ids - a list of track URIs, URLs or IDs
    pub async fn current_user_saved_tracks_contains(
        &self,
        track_ids: &[String],
    ) -> Result<Vec<bool>, failure::Error> {
        let uris: Vec<String> = track_ids
            .iter()
            .map(|id| self.get_id(Type::Track, id))
            .collect();
        let url = format!("me/tracks/contains/?ids={}", uris.join(","));
        let mut dumb = HashMap::new();
        let result = self.get(&url, &mut dumb).await?;
        self.convert_result::<Vec<bool>>(&result)
    }

    ///[save tracks user](https://developer.spotify.com/web-api/save-tracks-user/)
    ///Save one or more tracks to the current user's
    ///"Your Music" library.
///Parameters: ///- track_ids - a list of track URIs, URLs or IDs pub async fn current_user_saved_tracks_add( &self, track_ids: &[String], ) -> Result<(), failure::Error> { let uris: Vec<String> = track_ids .iter() .map(|id| self.get_id(Type::Track, id)) .collect(); let url = format!("me/tracks/?ids={}", uris.join(",")); match self.put(&url, &json!({})).await { Ok(_) => Ok(()), Err(e) => Err(e), } } ///[get users top artists and tracks](https://developer.spotify.com/web-api/get-users-top-artists-and-tracks/) ///Get the current user's top artists ///Parameters: ///- limit - the number of entities to return ///- offset - the index of the first entity to return ///- time_range - Over what time frame are the affinities computed pub async fn current_user_top_artists< L: Into<Option<u32>>, O: Into<Option<u32>>, T: Into<Option<TimeRange>>, >( &self, limit: L, offset: O, time_range: T, ) -> Result<Page<FullArtist>, failure::Error> { let limit = limit.into().unwrap_or(20); let offset = offset.into().unwrap_or(0); let time_range = time_range.into().unwrap_or(TimeRange::MediumTerm); let mut params = HashMap::new(); params.insert("limit".to_owned(), limit.to_string()); params.insert("offset".to_owned(), offset.to_string()); params.insert("time_range".to_owned(), time_range.as_str().to_owned()); let url = String::from("me/top/artists"); let result = self.get(&url, &mut params).await?; self.convert_result::<Page<FullArtist>>(&result) } ///[get users top artists and tracks](https://developer.spotify.com/web-api/get-users-top-artists-and-tracks/) ///Get the current user's top tracks ///Parameters: ///- limit - the number of entities to return ///- offset - the index of the first entity to return ///- time_range - Over what time frame are the affinities computed pub async fn current_user_top_tracks< L: Into<Option<u32>>, O: Into<Option<u32>>, T: Into<Option<TimeRange>>, >( &self, limit: L, offset: O, time_range: T, ) -> Result<Page<FullTrack>, failure::Error> { let limit = limit.into().unwrap_or(20); let offset = offset.into().unwrap_or(0); let time_range = time_range.into().unwrap_or(TimeRange::MediumTerm); let mut params = HashMap::new(); params.insert("limit".to_owned(), limit.to_string()); params.insert("offset".to_owned(), offset.to_string()); params.insert("time_range".to_owned(), time_range.as_str().to_owned()); let url = String::from("me/top/tracks"); let result = self.get(&url, &mut params).await?; self.convert_result::<Page<FullTrack>>(&result) } ///[get recently played](https://developer.spotify.com/web-api/web-api-personalization-endpoints/get-recently-played/) ///Get the current user's recently played tracks ///Parameters: ///- limit - the number of entities to return pub async fn current_user_recently_played<L: Into<Option<u32>>>( &self, limit: L, ) -> Result<CursorBasedPage<PlayHistory>, failure::Error> { let limit = limit.into().unwrap_or(50); let mut params = HashMap::new(); params.insert("limit".to_owned(), limit.to_string()); let url = String::from("me/player/recently-played"); let result = self.get(&url, &mut params).await?; self.convert_result::<CursorBasedPage<PlayHistory>>(&result) } ///[save albums user](https://developer.spotify.com/web-api/save-albums-user/) ///Add one or more albums to the current user's ///"Your Music" library. 
///Parameters:
    ///- album_ids - a list of album URIs, URLs or IDs
    pub async fn current_user_saved_albums_add(
        &self,
        album_ids: &[String],
    ) -> Result<(), failure::Error> {
        let uris: Vec<String> = album_ids
            .iter()
            .map(|id| self.get_id(Type::Album, id))
            .collect();
        let url = format!("me/albums/?ids={}", uris.join(","));
        match self.put(&url, &json!({})).await {
            Ok(_) => Ok(()),
            Err(e) => Err(e),
        }
    }

    ///[remove albums user](https://developer.spotify.com/documentation/web-api/reference/library/remove-albums-user/)
    ///Remove one or more albums from the current user's
    ///"Your Music" library.
    ///Parameters:
    ///- album_ids - a list of album URIs, URLs or IDs
    pub async fn current_user_saved_albums_delete(
        &self,
        album_ids: &[String],
    ) -> Result<(), failure::Error> {
        let uris: Vec<String> = album_ids
            .iter()
            .map(|id| self.get_id(Type::Album, id))
            .collect();
        let url = format!("me/albums/?ids={}", uris.join(","));
        match self.delete(&url, &json!({})).await {
            Ok(_) => Ok(()),
            Err(e) => Err(e),
        }
    }

    ///[check users saved albums](https://developer.spotify.com/documentation/web-api/reference/library/check-users-saved-albums/)
    ///Check if one or more albums is already saved in
    ///the current Spotify user’s “Your Music” library.
    ///Parameters:
    ///- album_ids - a list of album URIs, URLs or IDs
    pub async fn current_user_saved_albums_contains(
        &self,
        album_ids: &[String],
    ) -> Result<Vec<bool>, failure::Error> {
        let uris: Vec<String> = album_ids
            .iter()
            .map(|id| self.get_id(Type::Album, id))
            .collect();
        let url = format!("me/albums/contains/?ids={}", uris.join(","));
        let mut dumb = HashMap::new();
        let result = self.get(&url, &mut dumb).await?;
        self.convert_result::<Vec<bool>>(&result)
    }

    ///[follow artists users](https://developer.spotify.com/web-api/follow-artists-users/)
    ///Follow one or more artists
    ///Parameters:
    ///- artist_ids - a list of artist IDs
    pub async fn user_follow_artists(&self, artist_ids: &[String]) -> Result<(), failure::Error> {
        let url = format!("me/following?type=artist&ids={}", artist_ids.join(","));
        match self.put(&url, &json!({})).await {
            Ok(_) => Ok(()),
            Err(e) => Err(e),
        }
    }

    ///[unfollow artists users](https://developer.spotify.com/documentation/web-api/reference/follow/unfollow-artists-users/)
    ///Unfollow one or more artists
    ///Parameters:
    ///- artist_ids - a list of artist IDs
    pub async fn user_unfollow_artists(&self, artist_ids: &[String]) -> Result<(), failure::Error> {
        let url = format!("me/following?type=artist&ids={}", artist_ids.join(","));
        match self.delete(&url, &json!({})).await {
            Ok(_) => Ok(()),
            Err(e) => Err(e),
        }
    }

    ///[check user following
    ///artists](https://developer.spotify.com/web-api/checkcurrent-user-follows/)
    ///Check to see if the current user is following the given artists
    ///Parameters:
    ///- artist_ids - the ids of the artists that you want to check
    pub async fn user_artist_check_follow(
        &self,
        artist_ids: &[String],
    ) -> Result<Vec<bool>, failure::Error> {
        let url = format!(
            "me/following/contains?type=artist&ids={}",
            artist_ids.join(",")
        );
        let mut dumb = HashMap::new();
        let result = self.get(&url, &mut dumb).await?;
        self.convert_result::<Vec<bool>>(&result)
    }

    ///[follow artists users](https://developer.spotify.com/web-api/follow-artists-users/)
    ///Follow one or more users
    ///Parameters:
    ///- user_ids - a list of user IDs
    pub async fn user_follow_users(&self, user_ids: &[String]) -> Result<(), failure::Error> {
        let url = format!("me/following?type=user&ids={}", user_ids.join(","));
        match self.put(&url, &json!({})).await {
            Ok(_) => Ok(()),
            Err(e) => Err(e),
        }
    }

    ///[unfollow artists users](https://developer.spotify.com/documentation/web-api/reference/follow/unfollow-artists-users/)
    ///Unfollow one or more users
    ///Parameters:
    ///- user_ids - a list of user IDs
    pub async fn user_unfollow_users(&self, user_ids: &[String]) -> Result<(), failure::Error> {
        let url = format!("me/following?type=user&ids={}", user_ids.join(","));
        match self.delete(&url, &json!({})).await {
            Ok(_) => Ok(()),
            Err(e) => Err(e),
        }
    }

    ///[get list featured playlists](https://developer.spotify.com/web-api/get-list-featured-playlists/)
    ///Get a list of Spotify featured playlists
    ///Parameters:
    ///- locale - The desired language, consisting of a lowercase ISO
    ///639 language code and an uppercase ISO 3166-1 alpha-2 country
    ///code, joined by an underscore.
    ///- country - An ISO 3166-1 alpha-2 country code.
    ///- timestamp - A timestamp in ISO 8601 format:
    ///yyyy-MM-ddTHH:mm:ss. Use this parameter to specify the user's
    ///local time to get results tailored for that specific date and
    ///time in the day
    ///- limit - The maximum number of items to return. Default: 20.
    ///Minimum: 1. Maximum: 50
    ///- offset - The index of the first item to return. Default: 0
    ///(the first object). Use with limit to get the next set of
    ///items.
    pub async fn featured_playlists<L: Into<Option<u32>>, O: Into<Option<u32>>>(
        &self,
        locale: Option<String>,
        country: Option<Country>,
        timestamp: Option<DateTime<Utc>>,
        limit: L,
        offset: O,
    ) -> Result<FeaturedPlaylists, failure::Error> {
        let mut params = HashMap::new();
        let limit = limit.into().unwrap_or(20);
        let offset = offset.into().unwrap_or(0);
        if let Some(_locale) = locale {
            params.insert("locale".to_owned(), _locale);
        }
        if let Some(_country) = country {
            params.insert("country".to_owned(), _country.as_str().to_owned());
        }
        if let Some(_timestamp) = timestamp {
            params.insert("timestamp".to_owned(), _timestamp.to_rfc3339());
        }
        params.insert("limit".to_owned(), limit.to_string());
        params.insert("offset".to_owned(), offset.to_string());
        let url = String::from("browse/featured-playlists");
        let result = self.get(&url, &mut params).await?;
        self.convert_result::<FeaturedPlaylists>(&result)
    }

    ///[get list new releases](https://developer.spotify.com/web-api/get-list-new-releases/)
    ///Get a list of new album releases featured in Spotify
    ///Parameters:
    ///- country - An ISO 3166-1 alpha-2 country code.
    ///- limit - The maximum number of items to return. Default: 20.
    ///Minimum: 1. Maximum: 50
    ///- offset - The index of the first item to return. Default: 0
    ///(the first object). Use with limit to get the next set of
    ///items.
    pub async fn new_releases<L: Into<Option<u32>>, O: Into<Option<u32>>>(
        &self,
        country: Option<Country>,
        limit: L,
        offset: O,
    ) -> Result<PageSimpliedAlbums, failure::Error> {
        let mut params = HashMap::new();
        let limit = limit.into().unwrap_or(20);
        let offset = offset.into().unwrap_or(0);
        if let Some(_country) = country {
            params.insert("country".to_owned(), _country.as_str().to_owned());
        }
        params.insert("limit".to_owned(), limit.to_string());
        params.insert("offset".to_owned(), offset.to_string());
        let url = String::from("browse/new-releases");
        let result = self.get(&url, &mut params).await?;
        self.convert_result::<PageSimpliedAlbums>(&result)
    }

    ///[get list categories](https://developer.spotify.com/web-api/get-list-categories/)
    ///Get a list of categories used to tag items in Spotify
    ///Parameters:
    ///- country - An ISO 3166-1 alpha-2 country code.
///- locale - The desired language, consisting of an ISO 639
    ///language code and an ISO 3166-1 alpha-2 country code, joined
    ///by an underscore.
    ///- limit - The maximum number of items to return. Default: 20.
    ///Minimum: 1. Maximum: 50
    ///- offset - The index of the first item to return. Default: 0
    ///(the first object). Use with limit to get the next set of
    ///items.
    pub async fn categories<L: Into<Option<u32>>, O: Into<Option<u32>>>(
        &self,
        locale: Option<String>,
        country: Option<Country>,
        limit: L,
        offset: O,
    ) -> Result<PageCategory, failure::Error> {
        let mut params = HashMap::new();
        let limit = limit.into().unwrap_or(20);
        let offset = offset.into().unwrap_or(0);
        if let Some(_locale) = locale {
            params.insert("locale".to_owned(), _locale);
        }
        if let Some(_country) = country {
            params.insert("country".to_owned(), _country.as_str().to_owned());
        }
        params.insert("limit".to_owned(), limit.to_string());
        params.insert("offset".to_owned(), offset.to_string());
        let url = String::from("browse/categories");
        let result = self.get(&url, &mut params).await?;
        self.convert_result::<PageCategory>(&result)
    }

    ///[get recommendations](https://developer.spotify.com/web-api/get-recommendations/)
    ///Get Recommendations Based on Seeds
    /// Parameters:
    /// - seed_artists - a list of artist IDs, URIs or URLs
    /// - seed_tracks - a list of track IDs, URIs or URLs
    /// - seed_genres - a list of genre names. Available genres can be
    /// retrieved from the available genre seeds endpoint
    /// - country - An ISO 3166-1 alpha-2 country code. If provided, all
    /// results will be playable in this country.
    /// - limit - The maximum number of items to return. Default: 20.
    /// Minimum: 1. Maximum: 100
    /// - min/max/target_<attribute> - For the tuneable track attributes listed
    /// in the documentation, these values provide filters and targeting on
    /// results.
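    ///
    /// Hedged usage sketch (added for illustration; assumes an authorized
    /// `Spotify` client named `spotify` in an async context, with a
    /// placeholder seed-artist id):
    ///
    /// ```ignore
    /// use serde_json::{json, Map};
    ///
    /// // Optional tunable attributes go into the payload map.
    /// let mut payload = Map::new();
    /// payload.insert("min_energy".to_owned(), json!(0.4));
    /// let recs = spotify
    ///     .recommendations(
    ///         Some(vec!["4NHQUGzhtTLFvgF5SZesLK".to_owned()]), // seed artists
    ///         None,                                            // seed genres
    ///         None,                                            // seed tracks
    ///         10,
    ///         None,
    ///         &payload,
    ///     )
    ///     .await?;
    /// ```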
pub async fn recommendations<L: Into<Option<u32>>>(
        &self,
        seed_artists: Option<Vec<String>>,
        seed_genres: Option<Vec<String>>,
        seed_tracks: Option<Vec<String>>,
        limit: L,
        country: Option<Country>,
        payload: &Map<String, Value>,
    ) -> Result<Recommendations, failure::Error> {
        let mut params = HashMap::new();
        let limit = limit.into().unwrap_or(20);
        params.insert("limit".to_owned(), limit.to_string());
        if let Some(_seed_artists) = seed_artists {
            let seed_artists_ids: Vec<String> = _seed_artists
                .iter()
                .map(|id| self.get_id(Type::Artist, id))
                .collect();
            params.insert("seed_artists".to_owned(), seed_artists_ids.join(","));
        }
        if let Some(_seed_genres) = seed_genres {
            params.insert("seed_genres".to_owned(), _seed_genres.join(","));
        }
        if let Some(_seed_tracks) = seed_tracks {
            let seed_tracks_ids: Vec<String> = _seed_tracks
                .iter()
                .map(|id| self.get_id(Type::Track, id))
                .collect();
            params.insert("seed_tracks".to_owned(), seed_tracks_ids.join(","));
        }
        if let Some(_country) = country {
            params.insert("market".to_owned(), _country.as_str().to_owned());
        }
        let attributes = vec![
            "acousticness",
            "danceability",
            "duration_ms",
            "energy",
            "instrumentalness",
            "key",
            "liveness",
            "loudness",
            "mode",
            "popularity",
            "speechiness",
            "tempo",
            "time_signature",
            "valence",
        ];
        let prefixes = vec!["min_", "max_", "target_"];
        // Copy every min_/max_/target_<attribute> entry present in the payload
        // through to the query parameters.
        for (attribute, prefix) in iproduct!(attributes, prefixes) {
            let param = prefix.to_owned() + attribute;
            if let Some(value) = payload.get(&param) {
                params.insert(param, value.to_string());
            }
        }
        let url = String::from("recommendations");
        let result = self.get(&url, &mut params).await?;
        self.convert_result::<Recommendations>(&result)
    }

    ///[get audio features](https://developer.spotify.com/web-api/get-audio-features/)
    ///Get audio features for a track
    ///- track - track URI, URL or ID
    pub async fn audio_features(&self, track: &str) -> Result<AudioFeatures, failure::Error> {
        let track_id = self.get_id(Type::Track, track);
        let url = format!("audio-features/{}", track_id);
        let mut dumb = HashMap::new();
        let result = self.get(&url, &mut dumb).await?;
        self.convert_result::<AudioFeatures>(&result)
    }

    ///[get several audio features](https://developer.spotify.com/web-api/get-several-audio-features/)
    ///Get Audio Features for Several Tracks
    ///- tracks - a list of track URIs, URLs or IDs
    pub async fn audios_features(
        &self,
        tracks: &[String],
    ) -> Result<Option<AudioFeaturesPayload>, failure::Error> {
        let ids: Vec<String> = tracks
            .iter()
            .map(|track| self.get_id(Type::Track, track))
            .collect();
        let url = format!("audio-features/?ids={}", ids.join(","));
        let mut dumb = HashMap::new();
        match self.get(&url, &mut dumb).await {
            Ok(result) => {
                if result.is_empty() {
                    Ok(None)
                } else {
                    self.convert_result::<Option<AudioFeaturesPayload>>(&result)
                }
            }
            Err(e) => Err(e),
        }
    }

    ///[get audio analysis](https://developer.spotify.com/web-api/get-audio-analysis/)
    ///Get Audio Analysis for a Track
    ///Parameters:
    ///- track_id - a track URI, URL or ID
    pub async fn audio_analysis(&self, track: &str) -> Result<AudioAnalysis, failure::Error> {
        let trid = self.get_id(Type::Track, track);
        let url = format!("audio-analysis/{}", trid);
        let mut dumb = HashMap::new();
        let result = self.get(&url, &mut dumb).await?;
        self.convert_result::<AudioAnalysis>(&result)
    }

    ///[get a users available devices](https://developer.spotify.com/web-api/get-a-users-available-devices/)
    ///Get a User’s Available Devices
    pub async fn device(&self) -> Result<DevicePayload, failure::Error> {
        let url = String::from("me/player/devices");
        let mut dumb = HashMap::new();
        let result = self.get(&url, &mut dumb).await?;
        self.convert_result::<DevicePayload>(&result)
    }

    ///[get information about the users current playback](https://developer.spotify.com/web-api/get-information-about-the-users-current-playback/)
    ///Get Information About The User’s Current Playback
    /// Parameters:
    /// - market - an ISO 3166-1 alpha-2 country code.
    pub async fn current_playback(
        &self,
        market: Option<Country>,
    ) -> Result<Option<FullPlayingContext>, failure::Error> {
        let url = String::from("me/player");
        let mut params = HashMap::new();
        if let Some(_market) = market {
            params.insert("country".to_owned(), _market.as_str().to_owned());
        }
        match self.get(&url, &mut params).await {
            Ok(result) => {
                if result.is_empty() {
                    Ok(None)
                } else {
                    self.convert_result::<Option<FullPlayingContext>>(&result)
                }
            }
            Err(e) => Err(e),
        }
    }

    ///[get the users currently playing track](https://developer.spotify.com/web-api/get-the-users-currently-playing-track/)
    /// Get the User’s Currently Playing Track
    /// Parameters:
    /// - market - an ISO 3166-1 alpha-2 country code.
    pub async fn current_playing(
        &self,
        market: Option<Country>,
    ) -> Result<Option<SimplifiedPlayingContext>, failure::Error> {
        let url = String::from("me/player/currently-playing");
        let mut params = HashMap::new();
        if let Some(_market) = market {
            params.insert("country".to_owned(), _market.as_str().to_owned());
        }
        match self.get(&url, &mut params).await {
            Ok(result) => {
                if result.is_empty() {
                    Ok(None)
                } else {
                    self.convert_result::<Option<SimplifiedPlayingContext>>(&result)
                }
            }
            Err(e) => Err(e),
        }
    }

    ///[transfer a users playback](https://developer.spotify.com/web-api/transfer-a-users-playback/)
    ///Transfer a User’s Playback
    ///Note: Although an array is accepted, only a single device_id is currently
    /// supported. Supplying more than one will return 400 Bad Request
    /// Parameters:
    ///- device_id - transfer playback to this device
    ///- force_play - true: after transfer, play. false:
    ///keep current state.
    pub async fn transfer_playback<T: Into<Option<bool>>>(
        &self,
        device_id: &str,
        force_play: T,
    ) -> Result<(), failure::Error> {
        let device_ids = vec![device_id.to_owned()];
        let force_play = force_play.into().unwrap_or(true);
        let mut payload = Map::new();
        // The Spotify API expects the key "device_ids" in this payload.
        payload.insert("device_ids".to_owned(), device_ids.into());
        payload.insert("play".to_owned(), force_play.into());
        let url = String::from("me/player");
        match self.put(&url, &Value::Object(payload)).await {
            Ok(_) => Ok(()),
            Err(e) => Err(e),
        }
    }

    ///[start a users playback](https://developer.spotify.com/web-api/start-a-users-playback/)
    ///Start/Resume a User’s Playback
    ///Provide a `context_uri` to start playback of an album,
    ///artist, or playlist.
    ///
    ///Provide a `uris` list to start playback of one or more
    ///tracks.
    ///
    ///Provide `offset` as {"position": <int>} or {"uri": "<track uri>"}
    ///to start playback at a particular offset.
    ///
    ///Parameters:
    ///- device_id - device target for playback
    ///- context_uri - spotify context uri to play
    ///- uris - spotify track uris
    ///- offset - offset into context by index or track
    ///- position_ms - Indicates from what position to start playback.
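    ///
    ///Hedged usage sketch (added for illustration; assumes an authorized
    ///`Spotify` client named `spotify` in an async context and a placeholder
    ///album URI):
    ///
    ///```ignore
    ///// Start playback of an album on the currently active device.
    ///spotify
    ///    .start_playback(
    ///        None,                                                   // device_id
    ///        Some("spotify:album:5ht7ItJgpBH7W6vJ5BqpPr".to_owned()), // context_uri
    ///        None,                                                   // uris
    ///        None,                                                   // offset
    ///        None,                                                   // position_ms
    ///    )
    ///    .await?;
    ///```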
pub async fn start_playback(
        &self,
        device_id: Option<String>,
        context_uri: Option<String>,
        uris: Option<Vec<String>>,
        offset: Option<super::model::offset::Offset>,
        position_ms: Option<u32>,
    ) -> Result<(), failure::Error> {
        if context_uri.is_some() && uris.is_some() {
            error!("specify either context uri or uris, not both");
        }
        let mut params = Map::new();
        if let Some(_context_uri) = context_uri {
            params.insert("context_uri".to_owned(), _context_uri.into());
        }
        if let Some(_uris) = uris {
            params.insert("uris".to_owned(), _uris.into());
        }
        if let Some(_offset) = offset {
            if let Some(_position) = _offset.position {
                let mut offset_map = Map::new();
                offset_map.insert("position".to_owned(), _position.into());
                params.insert("offset".to_owned(), offset_map.into());
            } else if let Some(_uri) = _offset.uri {
                let mut offset_map = Map::new();
                offset_map.insert("uri".to_owned(), _uri.into());
                params.insert("offset".to_owned(), offset_map.into());
            }
        }
        if let Some(_position_ms) = position_ms {
            params.insert("position_ms".to_owned(), _position_ms.into());
        };
        let url = self.append_device_id("me/player/play", device_id);
        match self.put(&url, &Value::Object(params)).await {
            Ok(_) => Ok(()),
            Err(e) => Err(e),
        }
    }

    ///[pause a users playback](https://developer.spotify.com/web-api/pause-a-users-playback/)
    ///Pause a User’s Playback
    ///Parameters:
    ///- device_id - device target for playback
    pub async fn pause_playback(&self, device_id: Option<String>) -> Result<(), failure::Error> {
        let url = self.append_device_id("me/player/pause", device_id);
        match self.put(&url, &json!({})).await {
            Ok(_) => Ok(()),
            Err(e) => Err(e),
        }
    }

    ///[skip users playback to the next track](https://developer.spotify.com/web-api/skip-users-playback-to-next-track/)
    ///Skip User’s Playback To Next Track
    /// Parameters:
    /// - device_id - device target for playback
    pub async fn next_track(&self, device_id: Option<String>) -> Result<(), failure::Error> {
        let url = self.append_device_id("me/player/next", device_id);
        match self.post(&url, &json!({})).await {
            Ok(_) => Ok(()),
            Err(e) => Err(e),
        }
    }

    ///[skip users playback to previous track](https://developer.spotify.com/web-api/skip-users-playback-to-previous-track/)
    ///Skip User’s Playback To Previous Track
    /// Parameters:
    /// - device_id - device target for playback
    pub async fn previous_track(&self, device_id: Option<String>) -> Result<(), failure::Error> {
        let url = self.append_device_id("me/player/previous", device_id);
        match self.post(&url, &json!({})).await {
            Ok(_) => Ok(()),
            Err(e) => Err(e),
        }
    }

    ///[seek to position in currently playing track](https://developer.spotify.com/web-api/seek-to-position-in-currently-playing-track/)
    ///Seek To Position In Currently Playing Track
    /// Parameters:
    /// - position_ms - position in milliseconds to seek to
    /// - device_id - device target for playback
    pub async fn seek_track(
        &self,
        position_ms: u32,
        device_id: Option<String>,
    ) -> Result<(), failure::Error> {
        let url = self.append_device_id(
            &format!("me/player/seek?position_ms={}", position_ms),
            device_id,
        );
        match self.put(&url, &json!({})).await {
            Ok(_) => Ok(()),
            Err(e) => Err(e),
        }
    }

    ///[set repeat mode on users playback](https://developer.spotify.com/web-api/set-repeat-mode-on-users-playback/)
    ///Set Repeat Mode On User’s Playback
    /// Parameters:
    /// - state - `track`, `context`, or `off`
    /// - device_id - device target for playback
    pub async fn repeat(
        &self,
        state: RepeatState,
        device_id: Option<String>,
    ) -> Result<(), failure::Error> {
        let url = self.append_device_id(
&format!("me/player/repeat?state={}", state.as_str()), device_id, ); match self.put(&url, &json!({})).await { Ok(_) => Ok(()), Err(e) => Err(e), } } ///[set-volume-for-users-playback](https://developer.spotify.com/web-api/set-volume-for-users-playback/) ///Set Volume For User’s Playback /// Parameters: ///- volume_percent - volume between 0 and 100 ///- device_id - device target for playback pub async fn volume( &self, volume_percent: u8, device_id: Option<String>, ) -> Result<(), failure::Error> { if volume_percent > 100u8 { error!("volume must be between 0 and 100, inclusive"); } let url = self.append_device_id( &format!("me/player/volume?volume_percent={}", volume_percent), device_id, ); match self.put(&url, &json!({})).await { Ok(_) => Ok(()), Err(e) => Err(e), } } ///[toggle shuffle for user playback](https://developer.spotify.com/web-api/toggle-shuffle-for-users-playback/) ///Toggle Shuffle For User’s Playback /// Parameters: /// - state - true or false /// - device_id - device target for playback pub async fn shuffle( &self, state: bool, device_id: Option<String>, ) -> Result<(), failure::Error> { let url = self.append_device_id(&format!("me/player/shuffle?state={}", state), device_id); match self.put(&url, &json!({})).await { Ok(_) => Ok(()), Err(e) => Err(e), } } pub fn convert_result<'a, T: Deserialize<'a>>( &self, input: &'a str, ) -> Result<T, failure::Error> { let result = serde_json::from_str::<T>(input).map_err(|e| { format_err!( "convert result failed, reason: {:?}; content: [{:?}]", e, input ) })?; Ok(result) } ///Append device ID to API path. fn append_device_id(&self, path: &str, device_id: Option<String>) -> String { let mut new_path = path.to_string(); if let Some(_device_id) = device_id { if path.contains('?') { new_path.push_str(&format!("&device_id={}", _device_id)); } else { new_path.push_str(&format!("?device_id={}", _device_id)); } } new_path } fn get_uri(&self, _type: Type, _id: &str) -> String { let mut uri = String::from("spotify:"); uri.push_str(_type.as_str()); uri.push(':'); uri.push_str(&self.get_id(_type, _id)); uri } /// get spotify id by type and id fn get_id(&self, _type: Type, id: &str) -> String { let mut _id = id.to_owned(); let fields: Vec<&str> = _id.split(':').collect(); let len = fields.len(); if len >= 3 { if _type.as_str() != fields[len - 2] { error!( "expected id of type {:?} but found type {:?} {:?}", _type, fields[len - 2], _id ); } else { return fields[len - 1].to_owned(); } } let sfields: Vec<&str> = _id.split('/').collect(); let len: usize = sfields.len(); if len >= 3 { if _type.as_str() != sfields[len - 2] { error!( "expected id of type {:?} but found type {:?} {:?}", _type, sfields[len - 2], _id ); } else { return sfields[len - 1].to_owned(); } } _id.to_owned() } } #[cfg(test)] mod tests { use super::*; #[test] fn test_get_id() { // assert artist let spotify = Spotify::default().access_token("test-access").build(); let mut artist_id = String::from("spotify:artist:2WX2uTcsvV5OnS0inACecP"); let id = spotify.get_id(Type::Artist, &mut artist_id); assert_eq!("2WX2uTcsvV5OnS0inACecP", &id); // assert album let mut artist_id_a = String::from("spotify/album/2WX2uTcsvV5OnS0inACecP"); assert_eq!( "2WX2uTcsvV5OnS0inACecP", &spotify.get_id(Type::Album, &mut artist_id_a) ); // mismatch type let mut artist_id_b = String::from("spotify:album:2WX2uTcsvV5OnS0inACecP"); assert_eq!( "spotify:album:2WX2uTcsvV5OnS0inACecP", &spotify.get_id(Type::Artist, &mut artist_id_b) ); // could not split let mut artist_id_c = 
String::from("spotify-album-2WX2uTcsvV5OnS0inACecP"); assert_eq!( "spotify-album-2WX2uTcsvV5OnS0inACecP", &spotify.get_id(Type::Artist, &mut artist_id_c) ); let mut playlist_id = String::from("spotify:playlist:59ZbFPES4DQwEjBpWHzrtC"); assert_eq!( "59ZbFPES4DQwEjBpWHzrtC", &spotify.get_id(Type::Playlist, &mut playlist_id) ); } #[test] fn test_get_uri() { let spotify = Spotify::default().access_token("test-access").build(); let track_id1 = "spotify:track:4iV5W9uYEdYUVa79Axb7Rh"; let track_id2 = "1301WleyT98MSxVHPZCA6M"; let uri1 = spotify.get_uri(Type::Track, track_id1); let uri2 = spotify.get_uri(Type::Track, track_id2); assert_eq!(track_id1, uri1); assert_eq!("spotify:track:1301WleyT98MSxVHPZCA6M", &uri2); } }
40.840084
149
0.583742
c17cf62ed46c63c0014b783a0c86f15bdfc865e8
38872
// Copyright 2018 foundationdb-rs developers, https://github.com/bluejekyll/foundationdb-rs/graphs/contributors
// Copyright 2013-2018 Apple, Inc and the FoundationDB project authors.
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.

//! Implementations of the FDBTransaction C API
//!
//! https://apple.github.io/foundationdb/api-c.html#transaction

use foundationdb_sys as fdb;
use futures::{ready, FutureExt, Stream};
use std::{
    self, fmt,
    future::Future,
    pin::Pin,
    result::Result as StdResult,
    sync::Arc,
    task::{self, Poll},
};

use crate::{
    database::*,
    error::{self, *},
    future::*,
    keyselector::*,
    options,
    subspace::Subspace,
    tuple::Encode,
};

/// In FoundationDB, a transaction is a mutable snapshot of a database.
///
/// All read and write operations on a transaction see and modify an otherwise-unchanging version of the database and only change the underlying database if and when the transaction is committed. Read operations do see the effects of previous write operations on the same transaction. Committing a transaction usually succeeds in the absence of conflicts.
///
/// Applications must provide error handling and an appropriate retry loop around the application code for a transaction. See the documentation for [fdb_transaction_on_error()](https://apple.github.io/foundationdb/api-c.html#transaction).
///
/// Transactions group operations into a unit with the properties of atomicity, isolation, and durability. Transactions also provide the ability to maintain an application’s invariants or integrity constraints, supporting the property of consistency. Together these properties are known as ACID.
///
/// Transactions are also causally consistent: once a transaction has been successfully committed, all subsequently created transactions will see the modifications made by it.
#[derive(Clone)]
pub struct Transaction {
    // Order of fields should not be changed, because Rust drops fields top-to-bottom, and the
    // transaction should be dropped before the cluster.
    inner: Arc<TransactionInner>,
    database: Database,
}

impl std::fmt::Debug for Transaction {
    fn fmt(&self, f: &mut fmt::Formatter) -> StdResult<(), fmt::Error> {
        f.write_str("Transaction")?;
        Ok(())
    }
}

/// Converts Rust `bool` into `fdb::fdb_bool_t`
fn fdb_bool(v: bool) -> fdb::fdb_bool_t {
    if v {
        1
    } else {
        0
    }
}

/// The FoundationDB API uses the `c_int` type for the length of a value, while the Rust
/// interface uses `usize` wherever it represents a length or size. Those `usize` values
/// should be converted to `c_int` before being passed to the ffi, because naive casting
/// with `v as i32` will turn large `usize` values into negative ones.
/// TODO: check if an inverse function is needed, `cint_to_usize(v: c_int) -> usize`?
fn usize_trunc(v: usize) -> std::os::raw::c_int {
    if v > std::i32::MAX as usize {
        std::i32::MAX
    } else {
        v as i32
    }
}

/// `RangeOption` represents the query parameters for a range scan query.
#[derive(Debug, Clone)]
pub struct RangeOption {
    begin: KeySelector,
    end: KeySelector,
    limit: Option<usize>,
    target_bytes: usize,
    mode: options::StreamingMode,
    //TODO: move snapshot out from `RangeOption`, as other methods like `Transaction::get` do?
snapshot: bool,
    reverse: bool,
}

impl Default for RangeOption {
    fn default() -> Self {
        Self {
            begin: KeySelector::first_greater_or_equal(&[]).to_owned(),
            end: KeySelector::first_greater_or_equal(&[]).to_owned(),
            limit: None,
            target_bytes: 0,
            mode: options::StreamingMode::Iterator,
            snapshot: false,
            reverse: false,
        }
    }
}

/// A builder for the options used by a range query.
pub struct RangeOptionBuilder(RangeOption);

impl<T: Encode> From<T> for RangeOptionBuilder {
    fn from(t: T) -> Self {
        let (begin, end) = Subspace::from(t).range();

        Self::new(
            KeySelector::first_greater_or_equal(&begin),
            KeySelector::first_greater_or_equal(&end),
        )
    }
}

impl RangeOptionBuilder {
    /// Creates new builder with given key selectors.
    pub fn new(begin: KeySelector, end: KeySelector) -> Self {
        let mut opt = RangeOption::default();
        opt.begin = begin.to_owned();
        opt.end = end.to_owned();
        RangeOptionBuilder(opt)
    }

    /// If non-zero, indicates the maximum number of key-value pairs to return.
    pub fn limit(mut self, limit: usize) -> Self {
        if limit > 0 {
            self.0.limit = Some(limit);
        }
        self
    }

    /// If non-zero, indicates a (soft) cap on the combined number of bytes of keys and values to
    /// return for each item.
    pub fn target_bytes(mut self, target_bytes: usize) -> Self {
        self.0.target_bytes = target_bytes;
        self
    }

    /// One of the options::StreamingMode values indicating how the caller would like the data in
    /// the range returned.
    pub fn mode(mut self, mode: options::StreamingMode) -> Self {
        self.0.mode = mode;
        self
    }

    /// True if this is a snapshot read.
    pub fn snapshot(mut self, snapshot: bool) -> Self {
        self.0.snapshot = snapshot;
        self
    }

    /// If true, key-value pairs will be returned in reverse lexicographical order beginning at
    /// the end of the range.
    pub fn reverse(mut self, reverse: bool) -> Self {
        self.0.reverse = reverse;
        self
    }

    /// Finalizes the construction of the RangeOption
    pub fn build(self) -> RangeOption {
        self.0
    }
}

// TODO: many implementations left
impl Transaction {
    pub(crate) fn new(database: Database, trx: *mut fdb::FDBTransaction) -> Self {
        let inner = Arc::new(TransactionInner::new(trx));
        Self { database, inner }
    }

    /// Called to set an option on an FDBTransaction.
    pub fn set_option(&self, opt: options::TransactionOption) -> Result<()> {
        unsafe { opt.apply(self.inner.inner) }
    }

    /// Returns a clone of this transaction's Database
    pub fn database(&self) -> Database {
        self.database.clone()
    }

    fn into_database(self) -> Database {
        self.database
    }

    /// Modify the database snapshot represented by transaction to change the given key to have the given value.
    ///
    /// If the given key was not previously present in the database it is inserted. The modification affects the actual database only if transaction is later committed with `Transaction::commit`.
    ///
    /// # Arguments
    ///
    /// * `key_name` - the name of the key to be inserted into the database.
    /// * `value` - the value to be inserted into the database
    pub fn set(&self, key: &[u8], value: &[u8]) {
        let trx = self.inner.inner;
        unsafe {
            fdb::fdb_transaction_set(
                trx,
                key.as_ptr(),
                key.len() as i32,
                value.as_ptr(),
                value.len() as i32,
            )
        }
    }

    /// Modify the database snapshot represented by transaction to remove the given key from the database.
    ///
    /// If the key was not previously present in the database, there is no effect. The modification affects the actual database only if transaction is later committed with `Transaction::commit`.
    ///
    /// # Arguments
    ///
    /// * `key_name` - the name of the key to be removed from the database.
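    ///
    /// Hedged usage sketch (added for illustration; assumes a `Transaction`
    /// named `trx` obtained from a `Database`):
    ///
    /// ```ignore
    /// trx.set(b"hello", b"world"); // buffered write in the transaction
    /// trx.clear(b"hello");         // buffered removal of the same key
    /// // Neither change reaches the database until `trx.commit()` succeeds.
    /// ```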
pub fn clear(&self, key: &[u8]) { let trx = self.inner.inner; unsafe { fdb::fdb_transaction_clear(trx, key.as_ptr(), key.len() as i32) } } /// Reads a value from the database snapshot represented by transaction. /// /// Returns an FDBFuture which will be set to the value of key_name in the database. You must first wait for the FDBFuture to be ready, check for errors, call fdb_future_get_value() to extract the value, and then destroy the FDBFuture with fdb_future_destroy(). /// /// See `FdbFutureResult::value` to see exactly how results are unpacked. If key_name is not present in the database, the result is not an error, but a zero for *out_present returned from that function. /// /// # Arguments /// /// * `key_name` - the name of the key to be looked up in the database /// /// TODO: implement: snapshot Non-zero if this is a snapshot read. pub fn get(&self, key: &[u8], snapshot: bool) -> TrxGet { let trx = self.inner.inner; let f = unsafe { fdb::fdb_transaction_get( trx, key.as_ptr() as *const _, key.len() as i32, fdb_bool(snapshot), ) }; TrxGet { inner: self.new_fut_trx(f), } } /// Modify the database snapshot represented by transaction to perform the operation indicated /// by operationType with operand param to the value stored by the given key. /// /// An atomic operation is a single database command that carries out several logical steps: /// reading the value of a key, performing a transformation on that value, and writing the /// result. Different atomic operations perform different transformations. Like other database /// operations, an atomic operation is used within a transaction; however, its use within a /// transaction will not cause the transaction to conflict. /// /// Atomic operations do not expose the current value of the key to the client but simply send /// the database the transformation to apply. In regard to conflict checking, an atomic /// operation is equivalent to a write without a read. It can only cause other transactions /// performing reads of the key to conflict. /// /// By combining these logical steps into a single, read-free operation, FoundationDB can /// guarantee that the transaction will not conflict due to the operation. This makes atomic /// operations ideal for operating on keys that are frequently modified. A common example is /// the use of a key-value pair as a counter. pub fn atomic_op(&self, key: &[u8], param: &[u8], op_type: options::MutationType) { let trx = self.inner.inner; unsafe { fdb::fdb_transaction_atomic_op( trx, key.as_ptr() as *const _, key.len() as i32, param.as_ptr() as *const _, param.len() as i32, op_type.code(), ) } } /// Resolves a key selector against the keys in the database snapshot represented by /// transaction. /// /// Returns an FDBFuture which will be set to the key in the database matching the key /// selector. You must first wait for the FDBFuture to be ready, check for errors, call /// fdb_future_get_key() to extract the key, and then destroy the FDBFuture with /// fdb_future_destroy(). 
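    ///
    /// Hedged usage sketch (added for illustration; assumes a `Transaction`
    /// named `trx` in an async context):
    ///
    /// ```ignore
    /// // Resolve the first key greater than or equal to b"begin".
    /// let res = trx
    ///     .get_key(KeySelector::first_greater_or_equal(b"begin"), false)
    ///     .await?;
    /// println!("resolved key: {:?}", res.value());
    /// ```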
pub fn get_key(&self, selector: KeySelector, snapshot: bool) -> TrxGetKey {
        let trx = self.inner.inner;

        let key = selector.key();

        let f = unsafe {
            fdb::fdb_transaction_get_key(
                trx,
                key.as_ptr() as *const _,
                key.len() as i32,
                fdb_bool(selector.or_equal()),
                selector.offset() as i32,
                fdb_bool(snapshot),
            )
        };
        TrxGetKey {
            inner: self.new_fut_trx(f),
        }
    }

    /// Returns a stream of all key-value pairs matching the given range option,
    /// issuing follow-up `get_range` reads as long as more results are available.
    pub fn get_ranges(&self, opt: RangeOption) -> RangeStream {
        let iteration = 1;
        let inner = self.get_range(opt, iteration);

        RangeStream {
            iteration,
            trx: self.clone(),
            inner: Some(inner),
        }
    }

    /// Reads all key-value pairs in the database snapshot represented by transaction (potentially
    /// limited by limit, target_bytes, or mode) which have a key lexicographically greater than or
    /// equal to the key resolved by the begin key selector and lexicographically less than the key
    /// resolved by the end key selector.
    pub fn get_range(&self, opt: RangeOption, iteration: usize) -> TrxGetRange {
        let trx = self.inner.inner;

        let f = unsafe {
            let begin = &opt.begin;
            let end = &opt.end;
            let key_begin = begin.key();
            let key_end = end.key();

            fdb::fdb_transaction_get_range(
                trx,
                key_begin.as_ptr() as *const _,
                key_begin.len() as i32,
                fdb_bool(begin.or_equal()),
                begin.offset() as i32,
                key_end.as_ptr() as *const _,
                key_end.len() as i32,
                fdb_bool(end.or_equal()),
                end.offset() as i32,
                usize_trunc(opt.limit.unwrap_or(0)),
                usize_trunc(opt.target_bytes),
                opt.mode.code(),
                iteration as i32,
                fdb_bool(opt.snapshot),
                fdb_bool(opt.reverse),
            )
        };

        TrxGetRange {
            inner: self.new_fut_trx(f),
            opt: Some(opt),
        }
    }

    /// Modify the database snapshot represented by transaction to remove all keys (if any) which
    /// are lexicographically greater than or equal to the given begin key and lexicographically
    /// less than the given end_key.
    ///
    /// The modification affects the actual database only if transaction is later committed with
    /// `Transaction::commit`.
    pub fn clear_range(&self, begin: &[u8], end: &[u8]) {
        let trx = self.inner.inner;
        unsafe {
            fdb::fdb_transaction_clear_range(
                trx,
                begin.as_ptr() as *const _,
                begin.len() as i32,
                end.as_ptr() as *const _,
                end.len() as i32,
            )
        }
    }

    /// Clears all keys based on the range of the Subspace
    pub fn clear_subspace_range<S: Into<Subspace>>(&self, subspace: S) {
        let subspace = subspace.into();
        let range = subspace.range();
        self.clear_range(&range.0, &range.1)
    }

    /// Attempts to commit the sets and clears previously applied to the database snapshot represented by transaction to the actual database.
    ///
    /// The commit may or may not succeed – in particular, if a conflicting transaction previously committed, then the commit must fail in order to preserve transactional isolation. If the commit does succeed, the transaction is durably committed to the database and all subsequently started transactions will observe its effects.
    ///
    /// It is not necessary to commit a read-only transaction – you can simply call fdb_transaction_destroy().
    ///
    /// Returns a `TrxCommit` representing an empty value.
    ///
    /// Callers will usually want to retry a transaction if the commit or a prior fdb_transaction_get_*() returns a retryable error (see fdb_transaction_on_error()).
    ///
    /// As with other client/server databases, in some failure scenarios a client may be unable to determine whether a transaction succeeded. In these cases, `Transaction::commit` will return a commit_unknown_result error. The fdb_transaction_on_error() function treats this error as retryable, so retry loops that don’t check for commit_unknown_result could execute the transaction twice. In these cases, you must consider the idempotence of the transaction. For more information, see Transactions with unknown results.
    ///
    /// Normally, commit will wait for outstanding reads to return. However, if those reads were snapshot reads or the transaction option for disabling “read-your-writes” has been invoked, any outstanding reads will immediately return errors.
    pub async fn commit(self) -> Result<Transaction> {
        let trx = self.inner.inner;
        let f = unsafe { fdb::fdb_transaction_commit(trx) };
        let (trx, _) = self.new_fut_trx(f).await?;
        Ok(trx)
    }

    /// Cancels the transaction. All pending or future uses of the transaction will return a
    /// transaction_cancelled error. The transaction can be used again after it is reset.
    ///
    /// # Warning
    ///
    /// * Be careful if you are using fdb_transaction_reset() and fdb_transaction_cancel()
    /// concurrently with the same transaction. Since they negate each other’s effects, a race
    /// condition between these calls will leave the transaction in an unknown state.
    ///
    /// * If your program attempts to cancel a transaction after fdb_transaction_commit() has been
    /// called but before it returns, unpredictable behavior will result. While it is guaranteed
    /// that the transaction will eventually end up in a cancelled state, the commit may or may not
    /// occur. Moreover, even if the call to fdb_transaction_commit() appears to return a
    /// transaction_cancelled error, the commit may have occurred or may occur in the future. This
    /// can make it more difficult to reason about the order in which transactions occur.
    pub fn cancel(self) {
        let trx = self.inner.inner;
        unsafe { fdb::fdb_transaction_cancel(trx) }
    }

    /// Retrieves the database version number at which a given transaction was committed.
    /// fdb_transaction_commit() must have been called on transaction and the resulting future must
    /// be ready and not an error before this function is called, or the behavior is undefined.
    /// Read-only transactions do not modify the database when committed and will have a committed
    /// version of -1. Keep in mind that a transaction which reads keys and then sets them to their
    /// current values may be optimized to a read-only transaction.
    ///
    /// Note that database versions are not necessarily unique to a given transaction and so cannot
    /// be used to determine in what order two transactions completed. The only use for this
    /// function is to manually enforce causal consistency when calling
    /// fdb_transaction_set_read_version() on another subsequent transaction.
    ///
    /// Most applications will not call this function.
    pub fn committed_version(&self) -> Result<i64> {
        let trx = self.inner.inner;

        let mut version: i64 = 0;
        let e = unsafe { fdb::fdb_transaction_get_committed_version(trx, &mut version as *mut _) };
        error::eval(e)?;
        Ok(version)
    }

    /// Returns a list of public network addresses as strings, one for each of the storage servers
    /// responsible for storing key_name and its associated value.
    ///
    /// Returns an FDBFuture which will be set to an array of strings. You must first wait for the
    /// FDBFuture to be ready, check for errors, call fdb_future_get_string_array() to extract the
    /// string array, and then destroy the FDBFuture with fdb_future_destroy().
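    ///
    /// Hedged usage sketch (added for illustration; assumes a `Transaction`
    /// named `trx` in an async context; accessors on the returned
    /// `GetAddressResult` are not shown in this excerpt):
    ///
    /// ```ignore
    /// // `GetAddressResult` wraps the raw string array of storage-server
    /// // addresses responsible for the given key.
    /// let addresses = trx.get_addresses_for_key(b"some-key").await?;
    /// ```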
pub async fn get_addresses_for_key(&self, key: &[u8]) -> Result<GetAddressResult> { let trx = self.inner.inner; let f = unsafe { fdb::fdb_transaction_get_addresses_for_key( trx, key.as_ptr() as *const _, key.len() as i32, ) }; let (trx, inner) = self.new_fut_trx(f).await?; Ok(GetAddressResult { trx, inner }) } /// A watch’s behavior is relative to the transaction that created it. A watch will report a /// change in relation to the key’s value as readable by that transaction. The initial value /// used for comparison is either that of the transaction’s read version or the value as /// modified by the transaction itself prior to the creation of the watch. If the value changes /// and then changes back to its initial value, the watch might not report the change. /// /// Until the transaction that created it has been committed, a watch will not report changes /// made by other transactions. In contrast, a watch will immediately report changes made by /// the transaction itself. Watches cannot be created if the transaction has set the /// READ_YOUR_WRITES_DISABLE transaction option, and an attempt to do so will return an /// watches_disabled error. /// /// If the transaction used to create a watch encounters an error during commit, then the watch /// will be set with that error. A transaction whose commit result is unknown will set all of /// its watches with the commit_unknown_result error. If an uncommitted transaction is reset or /// destroyed, then any watches it created will be set with the transaction_cancelled error. /// /// Returns an FDBFuture representing an empty value that will be set once the watch has /// detected a change to the value at the specified key. You must first wait for the FDBFuture /// to be ready, check for errors, and then destroy the FDBFuture with fdb_future_destroy(). /// /// By default, each database connection can have no more than 10,000 watches that have not yet /// reported a change. When this number is exceeded, an attempt to create a watch will return a /// too_many_watches error. This limit can be changed using the MAX_WATCHES database option. /// Because a watch outlives the transaction that creates it, any watch that is no longer /// needed should be cancelled by calling fdb_future_cancel() on its returned future. pub fn watch(&self, key: &[u8]) -> impl Future<Output = Result<()>> { let trx = self.inner.inner; let f = unsafe { fdb::fdb_transaction_watch(trx, key.as_ptr() as *const _, key.len() as i32) }; let f = self.new_fut_non_trx(f); WatchFuture::new(f) } /// Returns an FDBFuture which will be set to the versionstamp which was used by any /// versionstamp operations in this transaction. You must first wait for the FDBFuture to be /// ready, check for errors, call fdb_future_get_key() to extract the key, and then destroy the /// FDBFuture with fdb_future_destroy(). /// /// The future will be ready only after the successful completion of a call to /// fdb_transaction_commit() on this Transaction. Read-only transactions do not modify the /// database when committed and will result in the future completing with an error. Keep in /// mind that a transaction which reads keys and then sets them to their current values may be /// optimized to a read-only transaction. /// /// Most applications will not call this function. 
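    ///
    /// Hedged usage sketch (added for illustration; assumes a `Transaction`
    /// named `trx` in an async context):
    ///
    /// ```ignore
    /// // Request the versionstamp future *before* committing...
    /// let vs_fut = trx.get_versionstamp();
    /// let trx = trx.commit().await?;
    /// // ...and await it afterwards; it only resolves once the commit succeeds.
    /// let versionstamp = vs_fut.await?;
    /// ```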
pub fn get_versionstamp(&self) -> impl Future<Output = Result<Versionstamp>> { let trx = self.inner.inner; let f = unsafe { fdb::fdb_transaction_get_versionstamp(trx) }; VersionstampFuture::new(self.new_fut_non_trx(f)) } /// The transaction obtains a snapshot read version automatically at the time of the first call /// to fdb_transaction_get_*() (including this one) and (unless causal consistency has been /// deliberately compromised by transaction options) is guaranteed to represent all /// transactions which were reported committed before that call. pub async fn get_read_version(&self) -> Result<i64> { let trx = self.inner.inner; let f = unsafe { fdb::fdb_transaction_get_read_version(trx) }; let (_, res) = self.new_fut_trx(f).await?; res.get_version() } /// Sets the snapshot read version used by a transaction. This is not needed in simple cases. /// If the given version is too old, subsequent reads will fail with error_code_past_version; /// if it is too new, subsequent reads may be delayed indefinitely and/or fail with /// error_code_future_version. If any of fdb_transaction_get_*() have been called on this /// transaction already, the result is undefined. pub fn set_read_version(&self, version: i64) { let trx = self.inner.inner; unsafe { fdb::fdb_transaction_set_read_version(trx, version) } } /// Reset transaction to its initial state. This is similar to calling /// fdb_transaction_destroy() followed by fdb_database_create_transaction(). It is not /// necessary to call fdb_transaction_reset() when handling an error with /// fdb_transaction_on_error() since the transaction has already been reset. /// /// # Warning /// /// The API is exposed mainly for `bindingtester`, and it is not recommended to call the API /// directly from application. #[doc(hidden)] pub fn reset(&self) { let trx = self.inner.inner; unsafe { fdb::fdb_transaction_reset(trx) } } /// Implements the recommended retry and backoff behavior for a transaction. This function /// knows which of the error codes generated by other fdb_transaction_*() functions represent /// temporary error conditions and which represent application errors that should be handled by /// the application. It also implements an exponential backoff strategy to avoid swamping the /// database cluster with excessive retries when there is a high level of conflict between /// transactions. /// /// # Warning /// /// The API is exposed mainly for `bindingtester`, and it is not recommended to call the API /// directly from application. Use `Database::transact` instead. #[doc(hidden)] pub fn on_error(&self, error: Error) -> TrxErrFuture { TrxErrFuture::new(self.clone(), error) } /// Adds a conflict range to a transaction without performing the associated read or write. /// /// # Note /// /// Most applications will use the serializable isolation that transactions provide by default /// and will not need to manipulate conflict ranges. 
    pub fn add_conflict_range(
        &self,
        begin: &[u8],
        end: &[u8],
        ty: options::ConflictRangeType,
    ) -> Result<()> {
        let trx = self.inner.inner;
        unsafe {
            eval(fdb::fdb_transaction_add_conflict_range(
                trx,
                begin.as_ptr() as *const _,
                begin.len() as i32,
                end.as_ptr() as *const _,
                end.len() as i32,
                ty.code(),
            ))
        }
    }

    fn new_fut_trx(&self, f: *mut fdb::FDBFuture) -> TrxFuture {
        TrxFuture::new(self.clone(), f)
    }

    fn new_fut_non_trx(&self, f: *mut fdb::FDBFuture) -> NonTrxFuture {
        NonTrxFuture::new(self.database(), f)
    }
}

struct TransactionInner {
    inner: *mut fdb::FDBTransaction,
}

impl TransactionInner {
    fn new(inner: *mut fdb::FDBTransaction) -> Self {
        Self { inner }
    }
}

impl Drop for TransactionInner {
    fn drop(&mut self) {
        unsafe {
            fdb::fdb_transaction_destroy(self.inner);
        }
    }
}

unsafe impl Send for TransactionInner {}
unsafe impl Sync for TransactionInner {}

/// Represents the data of a `Transaction::get`
pub struct GetResult {
    trx: Transaction,
    inner: FdbFutureResult,
}

impl GetResult {
    /// Returns a clone of the Transaction this get is a part of
    pub fn transaction(&self) -> Transaction {
        self.trx.clone()
    }

    /// Returns the value associated with this get
    pub fn value(&self) -> Option<&[u8]> {
        self.inner
            .get_value()
            .expect("inner should resolve into value")
    }
}

impl std::fmt::Debug for GetResult {
    fn fmt(&self, f: &mut fmt::Formatter) -> StdResult<(), fmt::Error> {
        f.write_str("GetResult")?;
        Ok(())
    }
}

/// A future result of a get operation
pub struct TrxGet {
    inner: TrxFuture,
}

impl Future for TrxGet {
    type Output = error::Result<GetResult>;

    fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context) -> Poll<Self::Output> {
        let (trx, inner) = ready!(self.inner.poll_unpin(cx))?;
        // Probe the result once so an error future is surfaced here instead of
        // panicking later inside `GetResult::value`.
        inner.get_value()?;
        Poll::Ready(Ok(GetResult { trx, inner }))
    }
}

/// Represents the data of a `Transaction::get_key`
pub struct GetKeyResult {
    trx: Transaction,
    inner: FdbFutureResult,
}

impl GetKeyResult {
    /// Returns a clone of the Transaction this get is a part of
    pub fn transaction(&self) -> Transaction {
        self.trx.clone()
    }

    /// Returns the key resolved by this get
    pub fn value(&self) -> &[u8] {
        self.inner.get_key().expect("inner should resolve into key")
    }
}

/// A future result of a `get_key` operation
pub struct TrxGetKey {
    inner: TrxFuture,
}

impl Future for TrxGetKey {
    type Output = Result<GetKeyResult>;

    fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context) -> Poll<Self::Output> {
        let (trx, inner) = ready!(self.inner.poll_unpin(cx))?;
        inner.get_key()?;
        Poll::Ready(Ok(GetKeyResult { trx, inner }))
    }
}

/// Represents the data of a `Transaction::get_range`. The result might not contain all the rows
/// matching the requested range. The caller can detect an incomplete result by checking that
/// `GetRangeResult::key_values().more()` is `true`, or equivalently that `GetRangeResult::next`
/// returns `Some(_)`.
/// To fetch the remaining rows, the caller should call `Transaction::get_range` again with the
/// `RangeOption` returned by `GetRangeResult::next`. Alternatively, `Transaction::get_ranges`
/// keeps fetching until the whole range is exhausted.
pub struct GetRangeResult {
    trx: Transaction,
    opt: RangeOption,

    // This future should always resolve to a keyvalue array.
    inner: FdbFutureResult,
}

impl GetRangeResult {
    /// Returns a clone of the Transaction this get is a part of
    pub fn transaction(&self) -> Transaction {
        self.trx.clone()
    }

    /// Returns the key-value pairs associated with this get
    pub fn key_values(&self) -> KeyValues<'_> {
        self.inner
            .get_keyvalue_array()
            .expect("inner should resolve into keyvalue array")
    }

    /// Returns `None` if all results are returned, and returns `Some(_)` if there are more results
    /// to fetch. In this case, the user can fetch the remaining results by calling
    /// `Transaction::get_range` with the returned `RangeOption`.
    pub fn next(&self) -> Option<RangeOption> {
        let kva = self.key_values();
        if !kva.more() {
            return None;
        }

        let slice = kva.as_ref();
        if slice.is_empty() {
            return None;
        }

        let last = slice.last().unwrap();
        let last_key = last.key();

        let mut opt = self.opt.clone();
        if let Some(limit) = opt.limit.as_mut() {
            *limit -= slice.len();
            if *limit == 0 {
                return None;
            }
        }

        if opt.reverse {
            opt.end = KeySelector::first_greater_or_equal(last_key).to_owned();
        } else {
            opt.begin = KeySelector::first_greater_than(last_key).to_owned();
        }
        Some(opt)
    }
}

/// A future result of a `get_range` operation
pub struct TrxGetRange {
    inner: TrxFuture,
    opt: Option<RangeOption>,
}

impl Future for TrxGetRange {
    type Output = Result<GetRangeResult>;

    fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context) -> Poll<Self::Output> {
        let (trx, inner) = ready!(self.inner.poll_unpin(cx))?;
        // Probe the result once so an error future is surfaced here instead of
        // panicking later inside `GetRangeResult::key_values`.
        inner.get_keyvalue_array()?;

        let opt = self.opt.take().expect("should not poll after ready");
        Poll::Ready(Ok(GetRangeResult { trx, inner, opt }))
    }
}

//TODO: proper naming
/// `RangeStream` represents a stream of `GetRangeResult`
pub struct RangeStream {
    iteration: usize,
    trx: Transaction,
    inner: Option<TrxGetRange>,
}

impl RangeStream {
    fn update_inner(&mut self, opt: RangeOption) {
        self.iteration += 1;
        self.inner = Some(self.trx.get_range(opt, self.iteration));
    }

    fn advance(&mut self, res: &GetRangeResult) {
        if let Some(opt) = res.next() {
            self.update_inner(opt)
        }
    }
}

impl From<(RangeOption, Error)> for error::Error {
    fn from(error: (RangeOption, Error)) -> Self {
        error.1
    }
}

impl Stream for RangeStream {
    type Item = std::result::Result<GetRangeResult, (RangeOption, Error)>;

    fn poll_next(self: Pin<&mut Self>, cx: &mut task::Context) -> Poll<Option<Self::Item>> {
        if self.inner.is_none() {
            return Poll::Ready(None);
        }

        let stream = self.get_mut();
        let mut inner = stream.inner.take().unwrap();
        match Pin::new(&mut inner).poll(cx) {
            Poll::Pending => {
                stream.inner = Some(inner);
                Poll::Pending
            }
            Poll::Ready(Ok(res)) => {
                stream.advance(&res);
                Poll::Ready(Some(Ok(res)))
            }
            Poll::Ready(Err(e)) => {
                // `inner.opt` is only taken when the future resolves successfully, so on
                // this error path `inner.opt.unwrap()` cannot fail.
                Poll::Ready(Some(Err((inner.opt.unwrap(), e))))
            }
        }
    }
}

/// Represents the data of a `Transaction::get_addresses_for_key`
pub struct GetAddressResult {
    trx: Transaction,
    inner: FdbFutureResult,
}

impl GetAddressResult {
    /// Returns a clone of the Transaction this get is a part of
    pub fn transaction(&self) -> Transaction {
        self.trx.clone()
    }

    /// Returns the addresses for the key
    pub fn address(&self) -> Vec<&[u8]> {
        self.inner
            .get_string_array()
            .expect("inner should resolve into string array")
    }
}

struct WatchFuture {
    inner: NonTrxFuture,
}

impl WatchFuture {
    fn new(inner: NonTrxFuture) -> Self {
        Self { inner }
    }
}

impl Future for WatchFuture {
    type Output = Result<()>;

    fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context) -> Poll<Self::Output> {
        ready!(self.inner.poll_unpin(cx))?;
        Poll::Ready(Ok(()))
    }
}

struct VersionstampFuture {
    inner: NonTrxFuture,
}

impl VersionstampFuture {
    fn new(inner: NonTrxFuture) -> Self {
        Self { inner }
    }
}

impl Future for VersionstampFuture {
    type Output = Result<Versionstamp>;

    fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context) -> Poll<Self::Output> {
        let res = ready!(self.inner.poll_unpin(cx))?;
        let key = res.get_key()?;
        let mut buf: [u8; 10] = Default::default();
        buf.copy_from_slice(key);
        Poll::Ready(Ok(Versionstamp(buf)))
    }
}

/// A versionstamp is a 10 byte, unique, monotonically (but not sequentially) increasing value for
/// each committed transaction. The first 8 bytes are the committed version of the database. The
/// last 2 bytes are monotonic in the serialization order for transactions.
#[derive(Clone, Copy)]
pub struct Versionstamp([u8; 10]);

impl Versionstamp {
    /// Returns the raw 10-byte versionstamp.
    pub fn versionstamp(self) -> [u8; 10] {
        self.0
    }
}

/// Futures that may outlive the transaction.
struct NonTrxFuture {
    // Order of fields should not be changed, because Rust drops fields top-to-bottom, and the
    // future should be dropped before the database.
    inner: FdbFuture,
    // We should maintain a refcount for the database, so that the FdbFuture does not outlive it.
    #[allow(unused)]
    db: Database,
}

impl NonTrxFuture {
    fn new(db: Database, f: *mut fdb::FDBFuture) -> Self {
        let inner = unsafe { FdbFuture::new(f) };
        Self { inner, db }
    }
}

impl Future for NonTrxFuture {
    type Output = Result<FdbFutureResult>;

    fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context) -> Poll<Self::Output> {
        self.inner.poll_unpin(cx)
    }
}

/// Abstraction over `fdb_transaction_on_error`.
pub struct TrxErrFuture {
    // A future from `fdb_transaction_on_error`. It resolves to `Ok(_)` after a backoff interval
    // if the underlying transaction should be retried, and resolves to `Err(e)` if the error
    // should be reported to the user without retry.
    inner: NonTrxFuture,
    err: Option<Error>,
}

impl TrxErrFuture {
    fn new(trx: Transaction, err: Error) -> Self {
        let inner = unsafe { fdb::fdb_transaction_on_error(trx.inner.inner, err.code()) };

        Self {
            inner: NonTrxFuture::new(trx.into_database(), inner),
            err: Some(err),
        }
    }
}

impl Future for TrxErrFuture {
    type Output = Error;

    fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context) -> Poll<Self::Output> {
        match ready!(self.inner.poll_unpin(cx)) {
            Ok(_) => {
                let mut e = self.err.take().expect("should not poll after ready");
                e.set_should_retry(true);
                Poll::Ready(e)
            }
            Err(e) => Poll::Ready(e),
        }
    }
}

/// Future for a transaction, which supports retry/backoff with `Database::transact`.
struct TrxFuture {
    // Order of fields should not be changed, because Rust drops fields top-to-bottom, and the
    // future should be dropped before the transaction.
    inner: FdbFuture,
    trx: Option<Transaction>,
    f_err: Option<TrxErrFuture>,
}

impl TrxFuture {
    fn new(trx: Transaction, f: *mut fdb::FDBFuture) -> Self {
        let inner = unsafe { FdbFuture::new(f) };
        Self {
            inner,
            trx: Some(trx),
            f_err: None,
        }
    }
}

impl Future for TrxFuture {
    type Output = Result<(Transaction, FdbFutureResult)>;

    fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context) -> Poll<Self::Output> {
        if self.f_err.is_none() {
            match self.inner.poll_unpin(cx) {
                Poll::Pending => return Poll::Pending,
                Poll::Ready(Ok(res)) => {
                    return Poll::Ready(Ok((
                        self.trx.take().expect("should not poll after ready"),
                        res,
                    )));
                }
                Poll::Ready(Err(e)) => {
                    // The transaction handle is moved into `TrxErrFuture::new`. Since `trx`
                    // holds the last reference to the transaction here, the underlying
                    // transaction is destroyed at this point.
                    let trx = self.trx.take().expect("should not poll after error");
                    self.f_err = Some(TrxErrFuture::new(trx, e));
                    return self.poll_unpin(cx);
                }
            }
        }

        match self.f_err.as_mut().unwrap().poll_unpin(cx) {
            Poll::Pending => Poll::Pending,
            Poll::Ready(e) => Poll::Ready(Err(e)),
        }
    }
}
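
// A hedged usage sketch: a minimal pagination loop built from the types above.
// `get_range` returns a `GetRangeResult`, and `GetRangeResult::next()` yields
// the follow-up `RangeOption` until the range is exhausted. Constructing the
// initial `RangeOption` is assumed to happen elsewhere (its builder is not
// shown in this file); everything else uses signatures visible above.
#[allow(dead_code)]
async fn read_whole_range(trx: &Transaction, mut opt: RangeOption) -> Result<usize> {
    let mut total = 0;
    let mut iteration = 1;
    loop {
        let res = trx.get_range(opt, iteration).await?;
        total += res.key_values().as_ref().len();
        match res.next() {
            // `Some(_)` means `more()` was true: issue the follow-up request.
            Some(next_opt) => {
                opt = next_opt;
                iteration += 1;
            }
            // `None` means the range is exhausted.
            None => return Ok(total),
        }
    }
}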
38.184676
520
0.636191
f827a43019fc3afba222570e13b3510fb7d4f5dc
115
mod find_record; mod save_record; pub use self::find_record::find_record; pub use self::save_record::save_record;
19.166667
39
0.8
1d3ead1905cba897acfc361df058524ade72688e
3,639
#![allow(clippy::module_inception)] #![allow(clippy::upper_case_acronyms)] #![allow(clippy::large_enum_variant)] #![allow(clippy::wrong_self_convention)] #![allow(clippy::should_implement_trait)] #![allow(clippy::blacklisted_name)] #![allow(clippy::vec_init_then_push)] #![allow(rustdoc::bare_urls)] #![warn(missing_docs)] //! <important> //! <p> //! <b>Product update</b> //! </p> //! <p>As of March 31, 2022, Amazon Web Services will discontinue Server Migration Service (Amazon Web Services SMS). //! Going forward, we recommend <a href="http://aws.amazon.com/application-migration-service">Amazon Web Services Application Migration Service</a> (Amazon Web Services MGN) as the primary migration //! service for lift-and-shift migrations.</p> //! <p>You can initiate new migration jobs in Server Migration Service until January 1, 2022. //! Complete these active migration projects by March 31, 2022. For more information, see //! <a href="http://aws.amazon.com/application-migration-service/when-to-choose-aws-mgn/">When //! to Choose AWS Application Migration Service</a>.</p> //! </important> //! //! <p>Server Migration Service (Server Migration Service) makes it easier and faster for you to migrate your //! on-premises workloads to Amazon Web Services. To learn more about Server Migration Service, see the following //! resources:</p> //! <ul> //! <li> //! <p> //! <a href="http://aws.amazon.com/server-migration-service/">Server Migration Service //! product page</a> //! </p> //! </li> //! <li> //! <p> //! <a href="https://docs.aws.amazon.com/server-migration-service/latest/userguide/">Server Migration Service User Guide</a> //! </p> //! </li> //! </ul> //! //! # Crate Organization //! //! The entry point for most customers will be [`Client`]. [`Client`] exposes one method for each API offered //! by the service. //! //! Some APIs require complex or nested arguments. These exist in [`model`](crate::model). //! //! Lastly, errors that can be returned by the service are contained within [`error`]. [`Error`] defines a meta //! error encompassing all possible errors that can be returned by the service. //! //! The other modules within this crate are not required for normal usage. // Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. pub use error_meta::Error; #[doc(inline)] pub use config::Config; mod aws_endpoint; /// Client and fluent builders for calling the service. pub mod client; /// Configuration for the service. pub mod config; /// Errors that can occur when calling the service. pub mod error; mod error_meta; /// Input structures for operations. pub mod input; mod json_deser; mod json_errors; mod json_ser; /// Generated accessors for nested fields mod lens; pub mod middleware; /// Data structures used by operation inputs/outputs. pub mod model; mod no_credentials; /// All operations that this crate can perform. pub mod operation; mod operation_deser; mod operation_ser; /// Output structures for operations. pub mod output; /// Paginators for the service pub mod paginator; /// Crate version number. pub static PKG_VERSION: &str = env!("CARGO_PKG_VERSION"); /// Re-exported types from supporting crates. 
pub mod types { pub use aws_smithy_http::result::SdkError; pub use aws_smithy_types::DateTime; } static API_METADATA: aws_http::user_agent::ApiMetadata = aws_http::user_agent::ApiMetadata::new("sms", PKG_VERSION); pub use aws_smithy_http::endpoint::Endpoint; pub use aws_smithy_types::retry::RetryConfig; pub use aws_types::app_name::AppName; pub use aws_types::region::Region; pub use aws_types::Credentials; #[doc(inline)] pub use client::Client;
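
// A hedged usage sketch: the crate docs above point to `Client` as the entry
// point, with one fluent builder per API. The commented snippet below shows
// the typical shape; the `aws_config` crate, the `get_replication_jobs`
// builder name, and the constructor form are assumptions based on how
// generated smithy-rs clients of this vintage are normally used, not
// something this file defines.
//
//     let shared_config = aws_config::load_from_env().await;
//     let client = aws_sdk_sms::Client::new(&shared_config);
//     let jobs = client.get_replication_jobs().send().await?;
//     println!("{:?}", jobs);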
36.029703
198
0.73152
56566def294296863f6a28863aa43a42178d2d29
1,601
#![cfg_attr(feature = "cargo-clippy", allow(blacklisted_name))] extern crate seckey; use seckey::{ TempKey, CmpKey }; #[test] fn cmpkey_cmp_test() { #[derive(Debug)] struct I32Array([u8; 4]); impl I32Array { fn new(n: i32) -> I32Array { I32Array(n.to_le_bytes()) } } impl AsRef<[u8]> for I32Array { fn as_ref(&self) -> &[u8] { &self.0[..] } } assert!(CmpKey(I32Array::new(1)) > I32Array::new(0)); assert!(CmpKey(I32Array::new(0)) < I32Array::new(1)); assert_eq!(CmpKey(I32Array::new(0)), I32Array::new(0)); assert_ne!(CmpKey(I32Array::new(1)), I32Array::new(0)); assert!(CmpKey(I32Array::new(-1)) > I32Array::new(0)); // ^- NOTE 4294967295 > 0 let a = [2; 3]; let b = [1; 4]; assert_eq!(&a[..] > &b[..], CmpKey(&a[..]) > CmpKey(&b[..])); } #[test] fn tempkey_slice_test() { // fixed size let mut key = [42u8; 32]; { let mut tempkey = TempKey::new(&mut key); assert_eq!(CmpKey(&tempkey[..]), &[42u8; 32][..]); tempkey[1] = 0; let mut res = [42u8; 32]; res[1] = 0; assert_eq!(CmpKey(&tempkey[..]), &res[..]); } assert_eq!(key, [0; 32]); // dyn size let mut key = [42u8; 32]; { let mut tempkey = TempKey::new(&mut key[1..7]); assert_eq!(CmpKey(&tempkey[..]), &[42; 6][..]); tempkey[1] = 0; assert_eq!(CmpKey(&tempkey[..]), &[42, 0, 42, 42, 42, 42][..]); } assert_eq!(&key[1..7], [0; 6]); assert_eq!(key[0], 42); assert_eq!(key[7], 42); }
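
// A hedged extra test (not part of the original suite): it isolates the
// property the tests above rely on, namely that dropping a `TempKey`
// zeroizes the borrowed buffer. It assumes `TempKey::new` accepts any
// fixed-size byte array, as the existing tests suggest.
#[test]
fn tempkey_zeroizes_on_drop() {
    let mut key = [0xAAu8; 16];
    {
        let _tempkey = TempKey::new(&mut key);
        // The key is usable while the guard is alive...
    }
    // ...and the backing storage is wiped once the guard is dropped.
    assert_eq!(key, [0u8; 16]);
}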
22.549296
71
0.501562
16a3a37779088a9ef07fcbdf0faabe98c1632805
362
fn main() {
    println!("Hello, Latex in docs!");
    println!(
        r#"Set the environment variable RUSTDOCFLAGS to "--html-in-header src/docs-header.html"."#
    );
    println!("If you cloned the repo, this is not necessary because it is already configured in .cargo/config.");
    println!("Then execute:");
    println!("cargo doc --open");
}
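
// For reference, the `.cargo/config` entry that the messages above point to
// would look roughly like this (hypothetical contents, inferred from the
// printed instructions rather than shipped alongside this file):
//
//     [build]
//     rustdocflags = ["--html-in-header", "src/docs-header.html"]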
36.2
125
0.643646
d5522ff5a1f56f713434d57e9d183a0dafb616d1
3,735
use crate::error; use scroll::{Pread, Pwrite, SizeWith}; #[repr(C)] #[derive(Debug, PartialEq, Copy, Clone, Default)] #[derive(Pread, Pwrite, SizeWith)] pub struct DataDirectory { pub virtual_address: u32, pub size: u32, } pub const SIZEOF_DATA_DIRECTORY: usize = 8; const NUM_DATA_DIRECTORIES: usize = 16; impl DataDirectory { pub fn parse(bytes: &[u8], offset: &mut usize) -> error::Result<Self> { let dd = bytes.gread_with(offset, scroll::LE)?; Ok (dd) } } #[derive(Debug, PartialEq, Copy, Clone, Default)] pub struct DataDirectories { pub data_directories: [Option<DataDirectory>; NUM_DATA_DIRECTORIES], } impl DataDirectories { pub fn parse(bytes: &[u8], count: usize, offset: &mut usize) -> error::Result<Self> { let mut data_directories = [None; NUM_DATA_DIRECTORIES]; if count > NUM_DATA_DIRECTORIES { return Err (error::Error::Malformed(format!("data directory count ({}) is greater than maximum number of data directories ({})", count, NUM_DATA_DIRECTORIES))) } for dir in data_directories.iter_mut().take(count) { let dd = DataDirectory::parse(bytes, offset)?; let dd = if dd.virtual_address == 0 && dd.size == 0 { None } else { Some (dd) }; *dir = dd; } Ok (DataDirectories { data_directories }) } pub fn get_export_table(&self) -> &Option<DataDirectory> { let idx = 0; unsafe { self.data_directories.get_unchecked(idx) } } pub fn get_import_table(&self) -> &Option<DataDirectory> { let idx = 1; unsafe { self.data_directories.get_unchecked(idx) } } pub fn get_resource_table(&self) -> &Option<DataDirectory> { let idx = 2; unsafe { self.data_directories.get_unchecked(idx) } } pub fn get_exception_table(&self) -> &Option<DataDirectory> { let idx = 3; unsafe { self.data_directories.get_unchecked(idx) } } pub fn get_certificate_table(&self) -> &Option<DataDirectory> { let idx = 4; unsafe { self.data_directories.get_unchecked(idx) } } pub fn get_base_relocation_table(&self) -> &Option<DataDirectory> { let idx = 5; unsafe { self.data_directories.get_unchecked(idx) } } pub fn get_debug_table(&self) -> &Option<DataDirectory> { let idx = 6; unsafe { self.data_directories.get_unchecked(idx) } } pub fn get_architecture(&self) -> &Option<DataDirectory> { let idx = 7; unsafe { self.data_directories.get_unchecked(idx) } } pub fn get_global_ptr(&self) -> &Option<DataDirectory> { let idx = 8; unsafe { self.data_directories.get_unchecked(idx) } } pub fn get_tls_table(&self) -> &Option<DataDirectory> { let idx = 9; unsafe { self.data_directories.get_unchecked(idx) } } pub fn get_load_config_table(&self) -> &Option<DataDirectory> { let idx = 10; unsafe { self.data_directories.get_unchecked(idx) } } pub fn get_bound_import_table(&self) -> &Option<DataDirectory> { let idx = 11; unsafe { self.data_directories.get_unchecked(idx) } } pub fn get_import_address_table(&self) -> &Option<DataDirectory> { let idx = 12; unsafe { self.data_directories.get_unchecked(idx) } } pub fn get_delay_import_descriptor(&self) -> &Option<DataDirectory> { let idx = 13; unsafe { self.data_directories.get_unchecked(idx) } } pub fn get_clr_runtime_header(&self) -> &Option<DataDirectory> { let idx = 14; unsafe { self.data_directories.get_unchecked(idx) } } }
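
// A hedged usage sketch: parsing the directories out of an optional-header
// byte slice and inspecting one entry. Where `bytes`, `count`, and the
// starting offset come from is assumed (the surrounding PE parser is not
// shown here); the calls themselves match the definitions above.
//
//     let mut offset = 0;
//     let dirs = DataDirectories::parse(bytes, count, &mut offset)?;
//     if let Some(import) = dirs.get_import_table() {
//         println!("import table: rva={:#x} size={:#x}",
//                  import.virtual_address, import.size);
//     }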
37.727273
203
0.621687
b975151b1ce1c24c232a04a679f6abaac28c03be
1,945
use std::{io, fmt, error}; use protocol::{self, ntt}; use hyper; use cbor_event; #[derive(Debug)] pub enum Error { IoError(io::Error), NttError(ntt::Error), ProtocolError(protocol::Error), CborError(cbor_event::Error), HyperError(hyper::Error), ConnectionTimedOut, HttpError(String, hyper::StatusCode), } impl From<io::Error> for Error { fn from(e: io::Error) -> Self { Error::IoError(e) } } impl From<protocol::Error> for Error { fn from(e: protocol::Error) -> Self { Error::ProtocolError(e) } } impl From<hyper::Error> for Error { fn from(e: hyper::Error) -> Self { Error::HyperError(e) } } impl From<ntt::Error> for Error { fn from(e: ntt::Error) -> Self { Error::NttError(e) } } impl From<cbor_event::Error> for Error { fn from(e: cbor_event::Error) -> Self { Error::CborError(e) } } impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { Error::IoError(_) => write!(f, "I/O Error"), Error::NttError(_) => write!(f, "Low level protocol error"), Error::ProtocolError(_) => write!(f, "Blockchain protocol error"), Error::CborError(_) => write!(f, "Data encoding error"), Error::HyperError(_) => write!(f, "Error in HTTP engine"), Error::ConnectionTimedOut => write!(f, "connection time out"), Error::HttpError(err, code) => write!(f, "HTTP error {}: {}", code, err), } } } impl error::Error for Error { fn cause(&self) -> Option<& error::Error> { match self { Error::IoError(ref err) => Some(err), Error::NttError(ref err) => Some(err), Error::ProtocolError(ref err) => Some(err), Error::CborError(ref err) => Some(err), Error::HyperError(ref err) => Some(err), Error::ConnectionTimedOut => None, Error::HttpError(_, _) => None, } } }
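
// A hedged usage sketch (every function named below is hypothetical and only
// illustrates the pattern): the `From` impls above are what let callers
// bubble any of the wrapped error types with `?` while returning this
// crate's `Error`.
//
//     fn fetch_tip(conn: &mut NttConnection) -> Result<Tip, Error> {
//         let raw = conn.recv()?;        // ntt::Error converted via From
//         let tip = decode_tip(&raw)?;   // cbor_event::Error likewise
//         Ok(tip)
//     }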
34.122807
85
0.582005
ff0170f305bca2886941f0e46bfa28277dfa232e
302
extern crate heapsize; use self::heapsize::HeapSizeOf; use std::hash::{Hash, BuildHasher}; use LruCache; impl<K: Eq + Hash + HeapSizeOf, V: HeapSizeOf, S: BuildHasher> HeapSizeOf for LruCache<K, V, S> { fn heap_size_of_children(&self) -> usize { self.map.heap_size_of_children() } }
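
// A hedged usage sketch (illustrative only): with the impl above, the heap
// usage of a cache's keys, values, and map storage can be reported directly.
// `LruCache::new` taking a capacity is an assumption about this crate's API.
//
//     let mut cache: LruCache<String, Vec<u8>> = LruCache::new(1024);
//     cache.insert("key".to_string(), vec![0u8; 4096]);
//     println!("cache heap usage: {} bytes", cache.heap_size_of_children());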
23.230769
97
0.688742
76e4b5f01b775cd4c099ccac8f6c7e6c938e9bd2
9,503
use crate::hir::map::definitions::Definitions; use crate::hir::map::DefPathHash; use crate::ich::{self, CachingSourceMapView}; use crate::middle::cstore::CrateStore; use crate::session::Session; use crate::ty::{fast_reject, TyCtxt}; use rustc_data_structures::fx::{FxHashMap, FxHashSet}; use rustc_data_structures::stable_hasher::{HashStable, StableHasher, ToStableHashKey}; use rustc_data_structures::sync::Lrc; use rustc_hir as hir; use rustc_hir::def_id::{DefId, DefIndex}; use rustc_span::source_map::SourceMap; use rustc_span::symbol::Symbol; use rustc_span::{BytePos, SourceFile}; use syntax::ast; use smallvec::SmallVec; use std::cmp::Ord; fn compute_ignored_attr_names() -> FxHashSet<Symbol> { debug_assert!(ich::IGNORED_ATTRIBUTES.len() > 0); ich::IGNORED_ATTRIBUTES.iter().map(|&s| s).collect() } /// This is the context state available during incr. comp. hashing. It contains /// enough information to transform `DefId`s and `HirId`s into stable `DefPath`s (i.e., /// a reference to the `TyCtxt`) and it holds a few caches for speeding up various /// things (e.g., each `DefId`/`DefPath` is only hashed once). #[derive(Clone)] pub struct StableHashingContext<'a> { sess: &'a Session, definitions: &'a Definitions, cstore: &'a dyn CrateStore, pub(super) body_resolver: BodyResolver<'a>, hash_spans: bool, hash_bodies: bool, pub(super) node_id_hashing_mode: NodeIdHashingMode, // Very often, we are hashing something that does not need the // `CachingSourceMapView`, so we initialize it lazily. raw_source_map: &'a SourceMap, caching_source_map: Option<CachingSourceMapView<'a>>, } #[derive(PartialEq, Eq, Clone, Copy)] pub enum NodeIdHashingMode { Ignore, HashDefPath, } /// The `BodyResolver` allows mapping a `BodyId` to the corresponding `hir::Body`. /// We could also just store a plain reference to the `hir::Crate` but we want /// to avoid that the crate is used to get untracked access to all of the HIR. #[derive(Clone, Copy)] pub(super) struct BodyResolver<'tcx>(&'tcx hir::Crate<'tcx>); impl<'tcx> BodyResolver<'tcx> { /// Returns a reference to the `hir::Body` with the given `BodyId`. /// **Does not do any tracking**; use carefully. pub(super) fn body(self, id: hir::BodyId) -> &'tcx hir::Body<'tcx> { self.0.body(id) } } impl<'a> StableHashingContext<'a> { /// The `krate` here is only used for mapping `BodyId`s to `Body`s. /// Don't use it for anything else or you'll run the risk of /// leaking data out of the tracking system. 
#[inline] pub fn new( sess: &'a Session, krate: &'a hir::Crate<'a>, definitions: &'a Definitions, cstore: &'a dyn CrateStore, ) -> Self { let hash_spans_initial = !sess.opts.debugging_opts.incremental_ignore_spans; StableHashingContext { sess, body_resolver: BodyResolver(krate), definitions, cstore, caching_source_map: None, raw_source_map: sess.source_map(), hash_spans: hash_spans_initial, hash_bodies: true, node_id_hashing_mode: NodeIdHashingMode::HashDefPath, } } #[inline] pub fn sess(&self) -> &'a Session { self.sess } #[inline] pub fn while_hashing_hir_bodies<F: FnOnce(&mut Self)>(&mut self, hash_bodies: bool, f: F) { let prev_hash_bodies = self.hash_bodies; self.hash_bodies = hash_bodies; f(self); self.hash_bodies = prev_hash_bodies; } #[inline] pub fn while_hashing_spans<F: FnOnce(&mut Self)>(&mut self, hash_spans: bool, f: F) { let prev_hash_spans = self.hash_spans; self.hash_spans = hash_spans; f(self); self.hash_spans = prev_hash_spans; } #[inline] pub fn with_node_id_hashing_mode<F: FnOnce(&mut Self)>( &mut self, mode: NodeIdHashingMode, f: F, ) { let prev = self.node_id_hashing_mode; self.node_id_hashing_mode = mode; f(self); self.node_id_hashing_mode = prev; } #[inline] pub fn def_path_hash(&self, def_id: DefId) -> DefPathHash { if def_id.is_local() { self.definitions.def_path_hash(def_id.index) } else { self.cstore.def_path_hash(def_id) } } #[inline] pub fn local_def_path_hash(&self, def_index: DefIndex) -> DefPathHash { self.definitions.def_path_hash(def_index) } #[inline] pub fn node_to_hir_id(&self, node_id: ast::NodeId) -> hir::HirId { self.definitions.node_to_hir_id(node_id) } #[inline] pub fn hash_bodies(&self) -> bool { self.hash_bodies } #[inline] pub fn source_map(&mut self) -> &mut CachingSourceMapView<'a> { match self.caching_source_map { Some(ref mut cm) => cm, ref mut none => { *none = Some(CachingSourceMapView::new(self.raw_source_map)); none.as_mut().unwrap() } } } #[inline] pub fn is_ignored_attr(&self, name: Symbol) -> bool { thread_local! { static IGNORED_ATTRIBUTES: FxHashSet<Symbol> = compute_ignored_attr_names(); } IGNORED_ATTRIBUTES.with(|attrs| attrs.contains(&name)) } pub fn hash_hir_item_like<F: FnOnce(&mut Self)>(&mut self, f: F) { let prev_hash_node_ids = self.node_id_hashing_mode; self.node_id_hashing_mode = NodeIdHashingMode::Ignore; f(self); self.node_id_hashing_mode = prev_hash_node_ids; } } /// Something that can provide a stable hashing context. 
pub trait StableHashingContextProvider<'a> { fn get_stable_hashing_context(&self) -> StableHashingContext<'a>; } impl<'a, 'b, T: StableHashingContextProvider<'a>> StableHashingContextProvider<'a> for &'b T { fn get_stable_hashing_context(&self) -> StableHashingContext<'a> { (**self).get_stable_hashing_context() } } impl<'a, 'b, T: StableHashingContextProvider<'a>> StableHashingContextProvider<'a> for &'b mut T { fn get_stable_hashing_context(&self) -> StableHashingContext<'a> { (**self).get_stable_hashing_context() } } impl StableHashingContextProvider<'tcx> for TyCtxt<'tcx> { fn get_stable_hashing_context(&self) -> StableHashingContext<'tcx> { (*self).create_stable_hashing_context() } } impl<'a> StableHashingContextProvider<'a> for StableHashingContext<'a> { fn get_stable_hashing_context(&self) -> StableHashingContext<'a> { self.clone() } } impl<'a> crate::dep_graph::DepGraphSafe for StableHashingContext<'a> {} impl<'a> ToStableHashKey<StableHashingContext<'a>> for hir::HirId { type KeyType = (DefPathHash, hir::ItemLocalId); #[inline] fn to_stable_hash_key( &self, hcx: &StableHashingContext<'a>, ) -> (DefPathHash, hir::ItemLocalId) { let def_path_hash = hcx.local_def_path_hash(self.owner); (def_path_hash, self.local_id) } } impl<'a> HashStable<StableHashingContext<'a>> for ast::NodeId { fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { match hcx.node_id_hashing_mode { NodeIdHashingMode::Ignore => { // Don't do anything. } NodeIdHashingMode::HashDefPath => { hcx.definitions.node_to_hir_id(*self).hash_stable(hcx, hasher); } } } } impl<'a> ToStableHashKey<StableHashingContext<'a>> for ast::NodeId { type KeyType = (DefPathHash, hir::ItemLocalId); #[inline] fn to_stable_hash_key( &self, hcx: &StableHashingContext<'a>, ) -> (DefPathHash, hir::ItemLocalId) { hcx.definitions.node_to_hir_id(*self).to_stable_hash_key(hcx) } } impl<'a> rustc_span::HashStableContext for StableHashingContext<'a> { fn hash_spans(&self) -> bool { self.hash_spans } #[inline] fn hash_def_id(&mut self, def_id: DefId, hasher: &mut StableHasher) { let hcx = self; hcx.def_path_hash(def_id).hash_stable(hcx, hasher); } fn byte_pos_to_line_and_col( &mut self, byte: BytePos, ) -> Option<(Lrc<SourceFile>, usize, BytePos)> { self.source_map().byte_pos_to_line_and_col(byte) } } pub fn hash_stable_trait_impls<'a>( hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher, blanket_impls: &[DefId], non_blanket_impls: &FxHashMap<fast_reject::SimplifiedType, Vec<DefId>>, ) { { let mut blanket_impls: SmallVec<[_; 8]> = blanket_impls.iter().map(|&def_id| hcx.def_path_hash(def_id)).collect(); if blanket_impls.len() > 1 { blanket_impls.sort_unstable(); } blanket_impls.hash_stable(hcx, hasher); } { let mut keys: SmallVec<[_; 8]> = non_blanket_impls.keys().map(|k| (k, k.map_def(|d| hcx.def_path_hash(d)))).collect(); keys.sort_unstable_by(|&(_, ref k1), &(_, ref k2)| k1.cmp(k2)); keys.len().hash_stable(hcx, hasher); for (key, ref stable_key) in keys { stable_key.hash_stable(hcx, hasher); let mut impls: SmallVec<[_; 8]> = non_blanket_impls[key].iter().map(|&impl_id| hcx.def_path_hash(impl_id)).collect(); if impls.len() > 1 { impls.sort_unstable(); } impls.hash_stable(hcx, hasher); } } }
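
// A hedged usage sketch: the usual pattern with the context above is to flip
// a hashing mode for the duration of a closure. Obtaining `hcx` is assumed to
// go through `TyCtxt::create_stable_hashing_context()` as in the provider
// impl; `value` is any `HashStable` type, and the concrete result type of
// `StableHasher::finish` is an assumption glossed over here.
//
//     let mut hasher = StableHasher::new();
//     hcx.while_hashing_spans(false, |hcx| {
//         hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| {
//             value.hash_stable(hcx, &mut hasher);
//         });
//     });
//     let fingerprint: u64 = hasher.finish();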
31.571429
99
0.635694
9b04a604a3fa43ed86a250849a87bbb7a296feb5
746
pub struct MyCalendar {
    // Non-overlapping half-open intervals [start, end), kept sorted by start.
    books: Vec<(i32, i32)>,
}

impl MyCalendar {
    pub fn new() -> Self {
        Self { books: vec![] }
    }

    pub fn book(&mut self, start: i32, end: i32) -> bool {
        // Intervals [s, e) and [start, end) overlap iff start < e && s < end.
        if self.books.iter().any(|&(s, e)| start < e && s < end) {
            return false;
        }
        // Insert before the first interval that starts at or after `end`,
        // which keeps the list sorted by start time.
        let pos = self
            .books
            .iter()
            .position(|&(s, _)| s >= end)
            .unwrap_or(self.books.len());
        self.books.insert(pos, (start, end));
        true
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn example_1() {
        let mut obj = MyCalendar::new();
        assert!(obj.book(10, 20));
        assert!(!obj.book(15, 25));
        assert!(obj.book(20, 30));
    }
}
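
// A hedged alternative sketch (not part of the original solution): the Vec
// scan above is O(n) per booking. A `BTreeMap` keyed by interval start
// answers the same query in O(log n) by looking only at the predecessor.
#[allow(dead_code)]
mod btree_variant {
    use std::collections::BTreeMap;

    pub struct MyCalendar {
        // start -> end, non-overlapping half-open intervals
        books: BTreeMap<i32, i32>,
    }

    impl MyCalendar {
        pub fn new() -> Self {
            Self { books: BTreeMap::new() }
        }

        pub fn book(&mut self, start: i32, end: i32) -> bool {
            // Only the last interval starting before `end` can overlap:
            // every earlier interval ends at or before that one starts.
            if let Some((_, &e)) = self.books.range(..end).next_back() {
                if e > start {
                    return false;
                }
            }
            self.books.insert(start, end);
            true
        }
    }
}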
19.631579
58
0.424933
5d7b0042c18864ee0a9d8cadd383c281b770c2a3
10,990
use syn::*; /// Traverse the AST in pre-order, which also happens to be the order of subtrees in the /// pretty-printed output. pub trait Traversal: Sized { fn traverse_stmt(&mut self, s: Stmt) -> Stmt { traverse_stmt_def(self, s) } fn traverse_expr(&mut self, s: Expr) -> Expr { traverse_expr_def(self, s) } fn traverse_trait_item(&mut self, ti: TraitItem) -> TraitItem { traverse_trait_item_def(self, ti) } fn traverse_impl_item(&mut self, ii: ImplItem) -> ImplItem { traverse_impl_item_def(self, ii) } fn traverse_block(&mut self, b: Block) -> Block { traverse_block_def(self, b) } fn traverse_local(&mut self, l: Local) -> Local { traverse_local_def(self, l) } fn traverse_arm(&mut self, a: Arm) -> Arm { traverse_arm_def(self, a) } fn traverse_field(&mut self, f: FieldValue) -> FieldValue { traverse_field_def(self, f) } fn traverse_mod(&mut self, m: ItemMod) -> ItemMod { traverse_mod_def(self, m) } fn traverse_foreign_mod(&mut self, m: ItemForeignMod) -> ItemForeignMod { traverse_foreign_mod_def(self, m) } fn traverse_item(&mut self, i: Item) -> Item { traverse_item_def(self, i) } fn traverse_foreign_item(&mut self, i: ForeignItem) -> ForeignItem { i } } /// Apply a `Traversal` to an AST node. trait Traversable { fn traverse<T: Traversal>(self, t: &mut T) -> Self; } macro_rules! traversable_impl { ( $traversable_ty:ty, $method_name:ident ) => { impl Traversable for $traversable_ty { fn traverse<T: Traversal>(self, t: &mut T) -> Self { t.$method_name(self) } } }; } traversable_impl!(Stmt, traverse_stmt); traversable_impl!(Expr, traverse_expr); traversable_impl!(TraitItem, traverse_trait_item); traversable_impl!(ImplItem, traverse_impl_item); traversable_impl!(Block, traverse_block); traversable_impl!(Local, traverse_local); traversable_impl!(Arm, traverse_arm); traversable_impl!(FieldValue, traverse_field); traversable_impl!(ItemMod, traverse_mod); traversable_impl!(ItemForeignMod, traverse_foreign_mod); traversable_impl!(Item, traverse_item); traversable_impl!(ForeignItem, traverse_foreign_item); impl<A: Traversable> Traversable for Vec<A> { fn traverse<T: Traversal>(self, t: &mut T) -> Self { self.into_iter().map(|x| x.traverse(t)).collect() } } impl<A: Clone + Traversable, B> Traversable for syn::punctuated::Punctuated<A, B> { fn traverse<T: Traversal>(mut self, t: &mut T) -> Self { self.iter_mut().for_each(|x| *x = x.clone().traverse(t)); self } } impl<A: Traversable> Traversable for Option<A> { fn traverse<T: Traversal>(self, t: &mut T) -> Self { self.map(|x| x.traverse(t)) } } impl<A, B: Traversable> Traversable for Option<(A, B)> { fn traverse<T: Traversal>(self, t: &mut T) -> Self { self.map(|x| (x.0, x.1.traverse(t))) } } impl<A: Traversable + 'static> Traversable for Box<A> { fn traverse<T: Traversal>(self, t: &mut T) -> Self { Box::new((*self).traverse(t)) } } pub fn traverse_stmt_def<W: Traversal>(walk: &mut W, s: Stmt) -> Stmt { match s { Stmt::Local(p_local) => Stmt::Local(p_local.traverse(walk)), Stmt::Item(p_item) => Stmt::Item(p_item.traverse(walk)), Stmt::Expr(p_expr) => Stmt::Expr(p_expr.traverse(walk)), Stmt::Semi(p_expr, semi) => Stmt::Semi(p_expr.traverse(walk), semi), } } pub fn traverse_expr_def<W: Traversal>(walk: &mut W, e: Expr) -> Expr { match e { Expr::Box(e) => Expr::Box(ExprBox {expr: Box::new(walk.traverse_expr(*e.expr)), ..e}), Expr::Array(e) => Expr::Array(ExprArray { elems: e.elems.traverse(walk), ..e }), Expr::Call(e) => Expr::Call(ExprCall { func: e.func.traverse(walk), args: e.args.traverse(walk), ..e }), Expr::MethodCall(e) => { 
Expr::MethodCall(ExprMethodCall { receiver: e.receiver.traverse(walk), args: e.args.traverse(walk), ..e }) }, Expr::Tuple(e) => Expr::Tuple(ExprTuple { elems: e.elems.traverse(walk), ..e }), Expr::Binary(e) => { Expr::Binary(ExprBinary { left: e.left.traverse(walk), right: e.right.traverse(walk), ..e }) } Expr::Unary(e) => Expr::Unary(ExprUnary { expr: e.expr.traverse(walk), ..e }), Expr::Cast(e) => Expr::Cast(ExprCast { expr: e.expr.traverse(walk), ..e }), Expr::Type(e) => Expr::Type(ExprType { expr: e.expr.traverse(walk), ..e }), Expr::Let(e) => Expr::Let(ExprLet { expr: e.expr.traverse(walk), ..e }), Expr::If(e) => { Expr::If(ExprIf { cond: e.cond.traverse(walk), then_branch: e.then_branch.traverse(walk), else_branch: e.else_branch.traverse(walk), ..e }) } Expr::While(e) => { Expr::While(ExprWhile { cond: e.cond.traverse(walk), body: e.body.traverse(walk), ..e }) } Expr::ForLoop(e) => { Expr::ForLoop(ExprForLoop { expr: e.expr.traverse(walk), body: e.body.traverse(walk), ..e }) } Expr::Loop(e) => Expr::Loop(ExprLoop { body: e.body.traverse(walk), ..e }), Expr::Match(e) => Expr::Match(ExprMatch { expr: e.expr.traverse(walk), arms: e.arms.traverse(walk), ..e }), Expr::Closure(e) => { Expr::Closure(ExprClosure { body: e.body.traverse(walk), ..e }) } Expr::Block(e) => Expr::Block(ExprBlock { block: e.block.traverse(walk), ..e }), Expr::Assign(e) => Expr::Assign(ExprAssign { left: e.left.traverse(walk), right: e.right.traverse(walk), ..e }), Expr::AssignOp(e) => { Expr::AssignOp(ExprAssignOp { left: e.left.traverse(walk), right: e.right.traverse(walk), ..e }) } Expr::Field(e) => Expr::Field(ExprField { base: e.base.traverse(walk), ..e }), Expr::Index(e) => Expr::Index(ExprIndex { expr: e.expr.traverse(walk), index: e.index.traverse(walk), ..e }), Expr::Range(e) => Expr::Range(ExprRange { from: e.from.traverse(walk), to: e.to.traverse(walk), ..e }), Expr::Path(e) => Expr::Path(ExprPath { ..e }), Expr::Reference(e) => Expr::Reference(ExprReference { expr: e.expr.traverse(walk), ..e }), Expr::Break(e) => Expr::Break(ExprBreak { expr: e.expr.traverse(walk), ..e }), Expr::Continue(e) => Expr::Continue(ExprContinue { ..e }), Expr::Return(e) => Expr::Return(ExprReturn { expr: e.expr.traverse(walk), ..e }), //Expr::InlineAsm(e) => Expr::InlineAsm(ExprInlineAsm { asm, ..e }), Expr::Macro(e) => Expr::Macro(ExprMacro { ..e }), Expr::Struct(e) => Expr::Struct(ExprStruct { fields: e.fields.traverse(walk), rest: e.rest.traverse(walk), ..e }), Expr::Repeat(e) => Expr::Repeat(ExprRepeat { expr: e.expr.traverse(walk), len: e.len.traverse(walk), ..e }), Expr::Paren(e) => Expr::Paren(ExprParen { expr: e.expr.traverse(walk), ..e }), Expr::Try(e) => Expr::Try(ExprTry { expr: e.expr.traverse(walk), ..e }), Expr::Yield(e) => Expr::Yield(ExprYield { expr: e.expr.traverse(walk), ..e }), Expr::Lit(e) => Expr::Lit(ExprLit { ..e }), Expr::Async(e) => Expr::Async(ExprAsync { block: e.block.traverse(walk), ..e }), Expr::TryBlock(e) => Expr::TryBlock(ExprTryBlock { block: e.block.traverse(walk), ..e }), Expr::Unsafe(e) => Expr::Unsafe(ExprUnsafe { block: e.block.traverse(walk), ..e }), Expr::Verbatim(_tokens) => unimplemented!(), Expr::Await(e) => Expr::Await(ExprAwait { base: e.base.traverse(walk), ..e }), _ => unimplemented!(), } } pub fn traverse_trait_item_def<W: Traversal>(walk: &mut W, ti: TraitItem) -> TraitItem { match ti { TraitItem::Const(ti) => TraitItem::Const(TraitItemConst {default: ti.default.traverse(walk), ..ti}), TraitItem::Method(ti) => TraitItem::Method(TraitItemMethod {default: 
ti.default.traverse(walk), ..ti}), TraitItem::Type(ti) => TraitItem::Type(TraitItemType {..ti}), TraitItem::Macro(ti) => TraitItem::Macro(TraitItemMacro {..ti}), _ => unimplemented!(), } } pub fn traverse_impl_item_def<W: Traversal>(walk: &mut W, ii: ImplItem) -> ImplItem { match ii { ImplItem::Const(ii) => ImplItem::Const(ImplItemConst {expr: ii.expr.traverse(walk), ..ii}), ImplItem::Method(ii) => ImplItem::Method(ImplItemMethod {block: ii.block.traverse(walk), ..ii}), ImplItem::Type(ii) => ImplItem::Type(ImplItemType {..ii}), ImplItem::Macro(ii) => ImplItem::Macro(ImplItemMacro {..ii}), _ => unimplemented!(), } } pub fn traverse_block_def<W: Traversal>(walk: &mut W, mut b: Block) -> Block { b.stmts = b.stmts.traverse(walk); b } pub fn traverse_local_def<W: Traversal>(walk: &mut W, mut l: Local) -> Local { l.init = l.init.traverse(walk); l } pub fn traverse_arm_def<W: Traversal>(walk: &mut W, mut a: Arm) -> Arm { a.guard = a.guard.traverse(walk); a.body = a.body.traverse(walk); a } pub fn traverse_field_def<W: Traversal>(walk: &mut W, mut f: FieldValue) -> FieldValue { f.expr = f.expr.traverse(walk); f } pub fn traverse_mod_def<W: Traversal>(walk: &mut W, mut m: ItemMod) -> ItemMod { m.content = m.content.traverse(walk); m } pub fn traverse_foreign_mod_def<W: Traversal>(walk: &mut W, mut m: ItemForeignMod) -> ItemForeignMod { m.items = m.items.traverse(walk); m } pub fn traverse_item_def<W: Traversal>(walk: &mut W, i: Item) -> Item { match i { Item::Static(item) => Item::Static(ItemStatic { expr: item.expr.traverse(walk), ..item}), Item::Const(item) => Item::Const(ItemConst { expr: item.expr.traverse(walk), ..item}), Item::Fn(item) => Item::Fn(ItemFn { block: item.block.traverse(walk), ..item}), Item::Mod(item) => Item::Mod(ItemMod { content: item.content.traverse(walk), ..item}), Item::ForeignMod(item) => Item::ForeignMod(ItemForeignMod { items: item.items.traverse(walk), ..item}), Item::Trait(item) => Item::Trait(ItemTrait { items: item.items.traverse(walk), ..item}), Item::Impl(item) => Item::Impl(ItemImpl { items: item.items.traverse(walk), ..item}), Item::Use(item) => Item::Use(ItemUse { ..item}), Item::ExternCrate(item) => Item::ExternCrate(ItemExternCrate { ..item}), //Item::GlobalAsm(ItemGlobalAsm { u}) => Item::GlobalAsm(ItemGlobalAsm { u}), Item::Type(item) => Item::Type(ItemType { ..item}), Item::Enum(item) => Item::Enum(ItemEnum { ..item}), Item::Struct(item) => Item::Struct(ItemStruct { ..item}), Item::Union(item) => Item::Union(ItemUnion { ..item}), Item::TraitAlias(item) => Item::TraitAlias(ItemTraitAlias { ..item}), Item::Macro(item) => Item::Macro(ItemMacro { ..item}), Item::Macro2(item) => Item::Macro2(ItemMacro2 { ..item}), _ => unimplemented!() } }
42.762646
122
0.607188
766180c22bc6526debd5aee37722a976c1fe0eaa
211
//! @ The |align_state| and |preamble| variables are initialized elsewhere. //! //! @<Set init...@>= //! align_ptr:=null; cur_align:=null; cur_span:=null; cur_loop:=null; //! cur_head:=null; cur_tail:=null; //!
30.142857
75
0.654028
09afe5bf21af44850861fed61cd256c9823d50b1
7,670
use rand::Rng; use serde::Serialize; use crate::geo::Point; #[derive(Serialize, Debug)] pub struct SpacePartitionStep { pub base_segment: (Point, Point), pub mid_point: Option<Point>, pub split_line: Option<(Point, Point)>, pub left_points: Vec<Point>, pub right_points: Vec<Point>, pub current_chain_len: usize, pub is_leaf: bool, } #[derive(Serialize, Debug, Default)] pub struct SpacePartitionSteppingResult { pub points: Vec<Point>, pub polygon: Vec<Point>, pub steps: Vec<SpacePartitionStep>, } impl SpacePartitionSteppingResult { pub fn new(points: &[Point]) -> Self { Self { points: points.to_vec(), polygon: vec![], steps: vec![], } } fn get_current_chain_len(&self) -> usize { if let Some(last) = self.steps.last() { if last.is_leaf { last.current_chain_len + 1 } else { last.current_chain_len } } else { 1 } } pub fn push_unsplitted(&mut self, base_segment: (Point, Point), sub_points: Vec<Point>) { self.steps.push(SpacePartitionStep { base_segment, mid_point: None, split_line: None, left_points: sub_points, right_points: vec![], current_chain_len: self.get_current_chain_len(), is_leaf: false, }); } pub fn push_splitted( &mut self, base_segment: (Point, Point), mid_point: Point, split_line: (Point, Point), left_points: Vec<Point>, right_points: Vec<Point>, ) { self.steps.push(SpacePartitionStep { base_segment, mid_point: Some(mid_point), split_line: Some(split_line), left_points, right_points, current_chain_len: self.get_current_chain_len(), is_leaf: false, }); } pub fn push_leaf(&mut self, base_segment: (Point, Point)) { self.steps.push(SpacePartitionStep { base_segment, mid_point: None, split_line: None, left_points: vec![], right_points: vec![], current_chain_len: self.get_current_chain_len(), is_leaf: true, }); } } struct SpacePartitionGenerator<'a, R: Rng> { n: usize, points: &'a [Point], idx: Vec<usize>, rng: &'a mut R, stepping: bool, res: SpacePartitionSteppingResult, } impl<'a, R: Rng> SpacePartitionGenerator<'a, R> { pub fn new(points: &'a [Point], rng: &'a mut R, stepping: bool) -> Self { let n = points.len(); Self { n, idx: (0..n).collect::<Vec<_>>(), points, rng, stepping, res: if stepping { SpacePartitionSteppingResult::new(points) } else { Default::default() }, } } fn collect_points(&self, first: usize, last: usize) -> Vec<Point> { self.idx[first..last + 1] .iter() .map(|&i| self.points[i]) .collect() } /// Returns a random point between the segment AB. fn random_point_between(a: &Point, b: &Point, rng: &mut impl Rng) -> Point { *a + (*b - *a) * rng.gen::<f64>() } /// Split the point set into a left and a right set by the `line`, returns /// the position of `p` (which the line passes through) after partition. fn split_points( &mut self, first: usize, last: usize, p: usize, line: &(Point, Point), ) -> usize { self.idx.swap(first, p); let mut i = first + 1; let mut j = last; while i <= j { while i <= j && self.points[self.idx[i]].to_left(&line.0, &line.1) > 0 { i += 1; } while i <= j && self.points[self.idx[j]].to_left(&line.0, &line.1) < 0 { j -= 1; } if i > j { break; } self.idx.swap(i, j); i += 1; j -= 1; } self.idx.swap(first, i - 1); i - 1 } fn partition(&mut self, first: usize, last: usize) { let pf = self.points[self.idx[first]]; let pl = self.points[self.idx[last]]; if self.stepping { self.res .push_unsplitted((pf, pl), self.collect_points(first, last)); } if first + 1 >= last { if self.stepping { self.res.push_leaf((pf, pl)); } return; } let mid = self.rng.gen_range(first + 1, last); // pick A randomly. 
let pm = self.points[self.idx[mid]]; let line_point = if Point::collinear(&pf, &pl, &pm) { // the first and the last points are collinear, any line through A is OK. pm + Point::new(self.rng.gen_range(-1.0, 1.0), self.rng.gen_range(-1.0, 1.0)) } else { // a random line passes through A and intersects the segment. Self::random_point_between(&pf, &pl, &mut self.rng) }; // make sure the first point is to the left of the line. let line = if pf.to_left(&line_point, &pm) > 0 { (line_point, pm) } else { (pm, line_point) }; let new_mid = self.split_points(first + 1, last - 1, mid, &line); if self.stepping { let left_points = self.collect_points(first + 1, new_mid - 1); let right_points = self.collect_points(new_mid + 1, last - 1); self.res.push_splitted( (pf, pl), pm, line, left_points.clone(), right_points.clone(), ); self.partition(first, new_mid); self.res .push_splitted((pf, pl), pm, line, left_points, right_points); self.partition(new_mid, last); } else { self.partition(first, new_mid); self.partition(new_mid, last); } } pub fn generate(&mut self) { let mut a; let mut b; loop { // make sure no other points are collinear with the initial two points. a = self.rng.gen_range(0, self.n); b = self.rng.gen_range(0, self.n); if a >= b { continue; } let mut ok = true; for i in 0..self.n { if i == a || i == b { continue; } if Point::collinear(&self.points[a], &self.points[b], &self.points[i]) { ok = false; break; } } if ok { break; } } self.idx.swap(0, a); self.idx.push(self.idx[0]); let new_b = self.split_points(1, self.n - 1, b, &(self.points[a], self.points[b])); self.partition(0, new_b); self.partition(new_b, self.n); self.idx.pop(); if self.stepping { self.res.polygon = self.collect_points(0, self.n - 1); } } } pub fn generate(points: &[Point], rng: &mut impl Rng) -> Vec<usize> { let mut sp = SpacePartitionGenerator::new(points, rng, false); sp.generate(); sp.idx } pub fn generate_stepping( points: &[Point], rng: &mut impl Rng, ) -> (Vec<usize>, SpacePartitionSteppingResult) { let mut sp = SpacePartitionGenerator::new(points, rng, true); sp.generate(); (sp.idx, sp.res) }
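
// A hedged usage sketch (not part of the original file): driving the
// generator with a seeded RNG for reproducible output. `StdRng` comes from
// the `rand` crate already used above; the two-argument `gen_range` calls in
// this file suggest rand 0.7, where `SeedableRng::seed_from_u64` exists.
#[allow(dead_code)]
fn random_simple_polygon(points: &[Point], seed: u64) -> Vec<Point> {
    use rand::SeedableRng;

    let mut rng = rand::rngs::StdRng::seed_from_u64(seed);
    // `generate` returns a permutation of indices describing the polygon.
    let order = generate(points, &mut rng);
    order.into_iter().map(|i| points[i]).collect()
}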
28.619403
93
0.501695
d547e1d8c7412de43a1c7d0103c2c76c82aff860
8,199
use wasm_bindgen::prelude::*; use iota::crypto::Kerl; use iota::signing::{IotaSeed, Seed}; use iota::ternary::{T1B1Buf, TryteBuf}; use iota::bundle::{Address, TransactionField}; use iota::client::response::Transfer; use iota_conversion::Trinary; use iota_bundle_preview::{Hash, Transaction}; use serde::{Serialize, Deserialize}; #[wasm_bindgen] extern { #[wasm_bindgen(js_namespace = console)] pub fn log(s: &str); #[wasm_bindgen(js_namespace = console)] pub fn error(s: &str); } macro_rules! console_log { ($($t:tt)*) => (log(&format_args!($($t)*).to_string())) } macro_rules! console_error { ($($t:tt)*) => (error(&format_args!($($t)*).to_string())) } #[wasm_bindgen] #[derive(Serialize)] pub struct NewAddress { pub index: u64, address: String, } #[wasm_bindgen] impl NewAddress { #[wasm_bindgen(getter)] pub fn address(&self) -> String { self.address.clone() } } #[wasm_bindgen] #[derive(Deserialize)] pub struct NewTransfer { value: u64 } #[wasm_bindgen] #[derive(Serialize)] pub struct SentTransaction { #[serde(rename = "isTail")] is_tail: bool } #[wasm_bindgen] pub struct Client { client: iota::Client, } fn response_to_js_value<T: Serialize>(response: T) -> Result<JsValue, JsValue> { JsValue::from_serde(&response) .map_err(js_error) } fn js_error<T: std::fmt::Debug>(e: T) -> JsValue { JsValue::from(format!("{:?}", e)) } fn create_hash(bytes: &[i8]) -> Hash { let mut array = [0; 243]; let bytes = &bytes[..array.len()]; array.copy_from_slice(bytes); Hash(array) } #[wasm_bindgen] impl Client { #[wasm_bindgen(constructor)] pub fn new(uri: &str) -> Result<Client, JsValue> { console_error_panic_hook::set_once(); let client = Client { client: iota::Client::new(uri) .map_err(|e| e.to_string())? }; Ok(client) } #[wasm_bindgen(js_name = "getNodeInfo")] pub async fn get_node_info(self) -> Result<JsValue, JsValue> { let node_info = self.client.get_node_info() .await .map_err(|e| JsValue::from(e.to_string()))?; let res = response_to_js_value(node_info)?; Ok(res) } #[wasm_bindgen(js_name = "getNewAddress")] pub async fn get_new_address(self, seed: String, index: Option<u64>, security: Option<u8>) -> Result<JsValue, JsValue> { let encoded_seed = IotaSeed::<Kerl>::from_buf( TryteBuf::try_from_str(&seed) .map_err(js_error)? .as_trits() .encode::<T1B1Buf>(), ) .map_err(js_error)?; let mut builder = self.client.get_new_address(); builder = builder.seed(&encoded_seed); if let Some(index) = index { builder = builder.index(index); } if let Some(security) = security { builder = builder.security(security); } let (index, address) = builder .generate() .await .map_err(js_error)?; let new_address = NewAddress { index, address: address .to_inner() .as_i8_slice() .trytes() .map_err(js_error)? 
}; let res = response_to_js_value(new_address)?; Ok(res) } #[wasm_bindgen(js_name = "addNeighbors")] pub async fn add_neighbors(self, uris: JsValue) -> Result<JsValue, JsValue> { let uris: Vec<String> = uris.into_serde().map_err(js_error)?; let builder = self.client.add_neighbors() .uris(uris) .map_err(js_error)?; let added_neighbords = builder .send() .await .map_err(js_error)?; let res = response_to_js_value(added_neighbords)?; Ok(res) } #[wasm_bindgen(js_name = "attachToTangle")] pub async fn attach_to_tangle( self, trunk_transaction_hash_bytes: JsValue, branch_transaction_hash_bytes: JsValue, min_weight_magnitude: Option<u8>, transactions_trytes: JsValue, ) -> Result<JsValue, JsValue> { let mut builder = self.client.attach_to_tangle(); if trunk_transaction_hash_bytes.is_truthy() { let hash_vec: Vec<i8> = trunk_transaction_hash_bytes.into_serde().map_err(js_error)?; let hash = create_hash(&hash_vec[..]); builder = builder.trunk_transaction(&hash); } if branch_transaction_hash_bytes.is_truthy() { let hash_vec: Vec<i8> = branch_transaction_hash_bytes.into_serde().map_err(js_error)?; let hash = create_hash(&hash_vec[..]); builder = builder.branch_transaction(&hash); } if transactions_trytes.is_truthy() { // let transactions_trytes: Vec<Transaction> = transactions_trytes.into_serde().map_err(js_error)?; } if let Some(min_weight_magnitude) = min_weight_magnitude { builder = builder.min_weight_magnitude(min_weight_magnitude); } let attach_response = builder .send() .await .map_err(js_error)?; // TODO this needs impl Serialize on bee > bundle > Transaction // let response = response_to_js_value(&attach_response)?; Ok(JsValue::from("")) } #[wasm_bindgen(js_name = "broadcastBundle")] pub async fn broadcast_bundle(self, tail_transaction_hash_bytes: JsValue) -> Result<JsValue, JsValue> { let tail_transaction_hash_vec: Vec<i8> = tail_transaction_hash_bytes.into_serde().map_err(js_error)?; let tail_transaction_hash = create_hash(&tail_transaction_hash_vec); let broadcast_response = self.client.broadcast_bundle(&tail_transaction_hash) .await .map_err(js_error)?; // TODO this needs impl Serialize on bee > bundle > Transaction // let response = response_to_js_value(&broadcast_response)?; Ok(JsValue::from("")) } #[wasm_bindgen(js_name = "checkConsistency")] pub async fn check_consistency(self, tails: JsValue) -> Result<JsValue, JsValue> { let tails_vec: Vec<Vec<i8>> = tails.into_serde().map_err(js_error)?; let mut tails = Vec::new(); for tail_vec in tails_vec { tails.push(create_hash(&tail_vec)); } let builder = self.client.check_consistency() .tails(&tails); let consistency_response = builder .send() .await .map_err(js_error)?; let response = response_to_js_value(consistency_response)?; Ok(response) } #[wasm_bindgen(js_name = "sendTransfers")] pub async fn send_transfers(self, seed: String, transfers: JsValue, min_weight_magnitude: Option<u8>) -> Result<JsValue, JsValue> { let encoded_seed = IotaSeed::<Kerl>::from_buf( TryteBuf::try_from_str(&seed) .map_err(js_error)? .as_trits() .encode::<T1B1Buf>(), ) .map_err(js_error)?; let address = Address::from_inner_unchecked( TryteBuf::try_from_str(&seed) .map_err(js_error)? 
.as_trits() .encode(), ); let js_transfers: Vec<NewTransfer> = transfers.into_serde().map_err(js_error)?; let transfers = js_transfers.iter().map(|transfer| Transfer { address: address.clone(), value: transfer.value, message: None, tag: None, }).collect(); let mut builder = self.client.send_transfers() .seed(&encoded_seed) .transfers(transfers); if let Some(min_weight_magnitude) = min_weight_magnitude { builder = builder.min_weight_magnitude(min_weight_magnitude); } let transactions = builder .send() .await .map_err(js_error)?; let response: Vec<SentTransaction> = transactions.iter().map(|transaction| SentTransaction { is_tail: transaction.is_tail() }).collect(); let res = response_to_js_value(response)?; Ok(res) } }
29.923358
135
0.599585
dd53a1c71a5bce538a8474502bc15c60457d5957
617
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![cfg_attr(stage0, feature(macro_vis_matcher))] macro_rules! foo { ($($p:vis)*) => {} //~ ERROR repetition matches empty token tree } foo!(a);
34.277778
68
0.719611
18d78c514b83affdaa21e571693498e69000cb2b
66,027
// Copyright 2018-2020 the Deno authors. All rights reserved. MIT license. use crate::deno_error::DenoError; use crate::deno_error::ErrorKind; use crate::deno_error::GetErrorKind; use crate::disk_cache::DiskCache; use crate::http_util; use crate::http_util::create_http_client; use crate::http_util::FetchOnceResult; use crate::http_util::ResultPayload; use crate::msg; use crate::progress::Progress; use deno_core::ErrBox; use deno_core::ModuleSpecifier; use futures::future::Either; use futures::future::FutureExt; use regex::Regex; use reqwest; use serde_json; use std; use std::collections::HashMap; use std::fs; use std::future::Future; use std::path::Path; use std::path::PathBuf; use std::pin::Pin; use std::result::Result; use std::str; use std::sync::Arc; use std::sync::Mutex; use url; use url::Url; pub fn source_header_cache_failed_error( module_name: &str, reason: &str, ) -> ErrBox { DenoError::new( ErrorKind::Other, format!( "Source code header cache failed for '{}': {}", module_name, reason ), ) .into() } pub fn source_cache_failed_error(module_name: &str, reason: &str) -> ErrBox { DenoError::new( ErrorKind::Other, format!("Source code cache failed for '{}': {}", module_name, reason), ) .into() } /// Structure representing local or remote file. /// /// In case of remote file `url` might be different than originally requested URL, if so /// `redirect_source_url` will contain original URL and `url` will be equal to final location. #[derive(Debug, Clone)] pub struct SourceFile { pub url: Url, pub filename: PathBuf, pub types_url: Option<Url>, pub media_type: msg::MediaType, pub source_code: Vec<u8>, } pub type SourceFileFuture = dyn Future<Output = Result<SourceFile, ErrBox>> + Send; /// Simple struct implementing in-process caching to prevent multiple /// fs reads/net fetches for same file. #[derive(Clone, Default)] pub struct SourceFileCache(Arc<Mutex<HashMap<String, SourceFile>>>); impl SourceFileCache { pub fn set(&self, key: String, source_file: SourceFile) { let mut c = self.0.lock().unwrap(); c.insert(key, source_file); } pub fn get(&self, key: String) -> Option<SourceFile> { let c = self.0.lock().unwrap(); match c.get(&key) { Some(source_file) => Some(source_file.clone()), None => None, } } } const SUPPORTED_URL_SCHEMES: [&str; 3] = ["http", "https", "file"]; /// `DenoDir` serves as coordinator for multiple `DiskCache`s containing them /// in single directory that can be controlled with `$DENO_DIR` env variable. #[derive(Clone)] pub struct SourceFileFetcher { deps_cache: DiskCache, progress: Progress, source_file_cache: SourceFileCache, cache_blacklist: Vec<String>, use_disk_cache: bool, no_remote: bool, cached_only: bool, http_client: reqwest::Client, } impl SourceFileFetcher { pub fn new( deps_cache: DiskCache, progress: Progress, use_disk_cache: bool, cache_blacklist: Vec<String>, no_remote: bool, cached_only: bool, ) -> std::io::Result<Self> { let file_fetcher = Self { deps_cache, progress, source_file_cache: SourceFileCache::default(), cache_blacklist, use_disk_cache, no_remote, cached_only, http_client: create_http_client(), }; Ok(file_fetcher) } fn check_if_supported_scheme(url: &Url) -> Result<(), ErrBox> { if !SUPPORTED_URL_SCHEMES.contains(&url.scheme()) { return Err( DenoError::new( ErrorKind::Other, format!("Unsupported scheme \"{}\" for module \"{}\". Supported schemes: {:#?}", url.scheme(), url, SUPPORTED_URL_SCHEMES), ).into() ); } Ok(()) } /// Required for TS compiler and source maps. 
pub fn fetch_cached_source_file( &self, specifier: &ModuleSpecifier, ) -> Option<SourceFile> { let maybe_source_file = self.source_file_cache.get(specifier.to_string()); if maybe_source_file.is_some() { return maybe_source_file; } // If the file is not in the memory cache, check if it can be found // in the local cache - which effectively means trying to fetch // using the "--cached-only" flag. We can safely block on this // future, because it doesn't do any asynchronous action // in that path. let fut = self.get_source_file_async(specifier.as_url(), true, false, true); futures::executor::block_on(fut).ok() } pub fn fetch_source_file_async( &self, specifier: &ModuleSpecifier, maybe_referrer: Option<ModuleSpecifier>, ) -> Pin<Box<SourceFileFuture>> { let module_url = specifier.as_url().to_owned(); debug!("fetch_source_file. specifier {} ", &module_url); // Check if this file was already fetched and can be retrieved from in-process cache. if let Some(source_file) = self.source_file_cache.get(specifier.to_string()) { return Box::pin(async { Ok(source_file) }); } let source_file_cache = self.source_file_cache.clone(); let specifier_ = specifier.clone(); let source_file = self.get_source_file_async( &module_url, self.use_disk_cache, self.no_remote, self.cached_only, ); Box::pin(async move { match source_file.await { Ok(mut file) => { // TODO: move somewhere? if file.source_code.starts_with(b"#!") { file.source_code = filter_shebang(file.source_code); } // Cache in-process for subsequent access. source_file_cache.set(specifier_.to_string(), file.clone()); Ok(file) } Err(err) => { let err_kind = err.kind(); let referrer_suffix = if let Some(referrer) = maybe_referrer { format!(r#" from "{}""#, referrer) } else { "".to_owned() }; // Hack: Check error message for "--cached-only" because the kind // conflicts with other errors. let err = if err.to_string().contains("--cached-only") { let msg = format!( r#"Cannot find module "{}"{} in cache, --cached-only is specified"#, module_url, referrer_suffix ); DenoError::new(ErrorKind::NotFound, msg).into() } else if err_kind == ErrorKind::NotFound { let msg = format!( r#"Cannot resolve module "{}"{}"#, module_url, referrer_suffix ); DenoError::new(ErrorKind::NotFound, msg).into() } else { err }; Err(err) } } }) } /// This is the main method responsible for fetching local or remote files. /// /// If this is a remote module, and it has not yet been cached, the resulting /// download will be cached on disk for subsequent access. /// /// If `use_disk_cache` is true then remote files are fetched from disk cache. /// /// If `no_remote` is true then this method will fail for remote files. /// /// If `cached_only` is true then this method will fail for remote files /// not already cached. fn get_source_file_async( &self, module_url: &Url, use_disk_cache: bool, no_remote: bool, cached_only: bool, ) -> impl Future<Output = Result<SourceFile, ErrBox>> { let url_scheme = module_url.scheme(); let is_local_file = url_scheme == "file"; if let Err(err) = SourceFileFetcher::check_if_supported_scheme(&module_url) { return Either::Left(futures::future::err(err)); } // Local files are always fetched from disk bypassing cache entirely. if is_local_file { match self.fetch_local_file(&module_url) { Ok(source_file) => { return Either::Left(futures::future::ok(source_file)); } Err(err) => { return Either::Left(futures::future::err(err)); } } } // The file is remote, fail if `no_remote` is true.
if no_remote { return Either::Left(futures::future::err( std::io::Error::new( std::io::ErrorKind::NotFound, format!( "Not allowed to get remote file '{}'", module_url.to_string() ), ) .into(), )); } // Fetch remote file and cache on-disk for subsequent access Either::Right(self.fetch_remote_source_async( &module_url, use_disk_cache, cached_only, 10, )) } /// Fetch local source file. fn fetch_local_file(&self, module_url: &Url) -> Result<SourceFile, ErrBox> { let filepath = module_url.to_file_path().map_err(|()| { ErrBox::from(DenoError::new( ErrorKind::InvalidPath, "File URL contains invalid path".to_owned(), )) })?; let source_code = match fs::read(filepath.clone()) { Ok(c) => c, Err(e) => return Err(e.into()), }; let media_type = map_content_type(&filepath, None); let types_url = match media_type { msg::MediaType::JavaScript | msg::MediaType::JSX => { get_types_url(&module_url, &source_code, None) } _ => None, }; Ok(SourceFile { url: module_url.clone(), filename: filepath, media_type, source_code, types_url, }) } /// Fetch cached remote file. /// /// This is a recursive operation if the source file has redirections. /// /// It will keep reading <filename>.headers.json for information about redirection. /// `module_initial_source_name` would be None on first call, /// and becomes the name of the very first module that initiates the call /// in subsequent recursions. /// /// AKA if redirection occurs, module_initial_source_name is the source path /// that the user provides, and the final module_name is the resolved path /// after following all redirections. fn fetch_cached_remote_source( &self, module_url: &Url, ) -> Result<Option<SourceFile>, ErrBox> { let source_code_headers = self.get_source_code_headers(&module_url); // If the source code headers say that it would redirect elsewhere, // (meaning that the source file might not exist; only .headers.json is present) // Abort reading attempts on the cached source file and follow the redirect. if let Some(redirect_to) = source_code_headers.redirect_to { // E.g. // module_name https://import-meta.now.sh/redirect.js // filename /Users/kun/Library/Caches/deno/deps/https/import-meta.now.sh/redirect.js // redirect_to https://import-meta.now.sh/sub/final1.js // real_filename /Users/kun/Library/Caches/deno/deps/https/import-meta.now.sh/sub/final1.js // real_module_name = https://import-meta.now.sh/sub/final1.js let redirect_url = Url::parse(&redirect_to).expect("Should be valid URL"); // Recurse. // TODO(bartlomieju): I'm pretty sure we should call `fetch_remote_source_async` here. // Should we expect that all redirects are cached? return self.fetch_cached_remote_source(&redirect_url); } // No redirect needed or end of redirects. // We can try to read the file let filepath = self .deps_cache .location .join(self.deps_cache.get_cache_filename(&module_url)); let source_code = match fs::read(filepath.clone()) { Err(e) => { if e.kind() == std::io::ErrorKind::NotFound { return Ok(None); } else { return Err(e.into()); } } Ok(c) => c, }; let media_type = map_content_type( &filepath, source_code_headers.mime_type.as_ref().map(String::as_str), ); let types_url = match media_type { msg::MediaType::JavaScript | msg::MediaType::JSX => get_types_url( &module_url, &source_code, source_code_headers .x_typescript_types .as_ref() .map(String::as_str), ), _ => None, }; Ok(Some(SourceFile { url: module_url.clone(), filename: filepath, media_type, source_code, types_url, })) } /// Asynchronously fetch a remote source file specified by the URL, following redirects.
fn fetch_remote_source_async( &self, module_url: &Url, use_disk_cache: bool, cached_only: bool, redirect_limit: i64, ) -> Pin<Box<SourceFileFuture>> { if redirect_limit < 0 { let e = DenoError::new(ErrorKind::Http, "too many redirects".to_string()); return futures::future::err(e.into()).boxed(); } let is_blacklisted = check_cache_blacklist(module_url, self.cache_blacklist.as_ref()); // First try local cache if use_disk_cache && !is_blacklisted { match self.fetch_cached_remote_source(&module_url) { Ok(Some(source_file)) => { return futures::future::ok(source_file).boxed(); } Ok(None) => { // there's no cached version } Err(err) => { return futures::future::err(err).boxed(); } } } // If file wasn't found in cache check if we can fetch it if cached_only { // We can't fetch remote file - bail out return futures::future::err( std::io::Error::new( std::io::ErrorKind::NotFound, format!( "Cannot find remote file '{}' in cache, --cached-only is specified", module_url.to_string() ), ) .into(), ) .boxed(); } let download_job = self.progress.add("Download", &module_url.to_string()); let dir = self.clone(); let module_url = module_url.clone(); let headers = self.get_source_code_headers(&module_url); let module_etag = headers.etag; let http_client = self.http_client.clone(); // Single pass fetch, either yields code or yields redirect. let f = async move { match http_util::fetch_once(http_client, &module_url, module_etag).await? { FetchOnceResult::NotModified => { let source_file = dir.fetch_cached_remote_source(&module_url)?.unwrap(); // Explicit drop to keep reference alive until future completes. drop(download_job); Ok(source_file) } FetchOnceResult::Redirect(new_module_url) => { // If redirects, update module_name and filename for next looped call. if let Err(e) = dir.save_source_code_headers( &module_url, None, Some(new_module_url.to_string()), None, None, ) { return Err(source_header_cache_failed_error( module_url.as_str(), &e.to_string(), )); } // Explicit drop to keep reference alive until future completes. drop(download_job); // Recurse dir .fetch_remote_source_async( &new_module_url, use_disk_cache, cached_only, redirect_limit - 1, ) .await } FetchOnceResult::Code(ResultPayload { body: source, content_type: maybe_content_type, etag, x_typescript_types, }) => { // We land on the code. if let Err(e) = dir.save_source_code_headers( &module_url, maybe_content_type.clone(), None, etag, x_typescript_types.clone(), ) { return Err(source_header_cache_failed_error( module_url.as_str(), &e.to_string(), )); } if let Err(e) = dir.save_source_code(&module_url, &source) { return Err(source_cache_failed_error( module_url.as_str(), &e.to_string(), )); } let filepath = dir .deps_cache .location .join(dir.deps_cache.get_cache_filename(&module_url)); let media_type = map_content_type( &filepath, maybe_content_type.as_ref().map(String::as_str), ); let types_url = match media_type { msg::MediaType::JavaScript | msg::MediaType::JSX => get_types_url( &module_url, &source, x_typescript_types.as_ref().map(String::as_str), ), _ => None, }; let source_file = SourceFile { url: module_url.clone(), filename: filepath, media_type, source_code: source, types_url, }; // Explicit drop to keep reference alive until future completes. drop(download_job); Ok(source_file) } } }; f.boxed() } /// Get header metadata associated with a remote file. /// /// NOTE: chances are that the source file was downloaded due to redirects. 
/// In this case, the headers file provides info about where we should go and get /// the file that the redirect eventually points to. fn get_source_code_headers(&self, url: &Url) -> SourceCodeHeaders { let cache_key = self .deps_cache .get_cache_filename_with_extension(url, "headers.json"); if let Ok(bytes) = self.deps_cache.get(&cache_key) { if let Ok(json_string) = std::str::from_utf8(&bytes) { return SourceCodeHeaders::from_json_string(json_string.to_string()); } } SourceCodeHeaders::default() } /// Save contents of downloaded remote file in on-disk cache for subsequent access. fn save_source_code(&self, url: &Url, source: &[u8]) -> std::io::Result<()> { let cache_key = self.deps_cache.get_cache_filename(url); // May not exist. DON'T unwrap. let _ = self.deps_cache.remove(&cache_key); self.deps_cache.set(&cache_key, source) } /// Save headers related to a source file to the {filename}.headers.json file, /// only when there is actually something necessary to save. /// /// For example, if the extension ".js" already means a JS file and we have /// a content type of "text/javascript", then we would not save the mime type. /// /// If nothing needs to be saved, the headers file is not created. fn save_source_code_headers( &self, url: &Url, mime_type: Option<String>, redirect_to: Option<String>, etag: Option<String>, x_typescript_types: Option<String>, ) -> std::io::Result<()> { let cache_key = self .deps_cache .get_cache_filename_with_extension(url, "headers.json"); // Remove possibly existing stale .headers.json file. // May not exist. DON'T unwrap. let _ = self.deps_cache.remove(&cache_key); let headers = SourceCodeHeaders { mime_type, redirect_to, etag, x_typescript_types, }; let cache_filename = self.deps_cache.get_cache_filename(url); if let Ok(maybe_json_string) = headers.to_json_string(&cache_filename) { if let Some(json_string) = maybe_json_string { return self.deps_cache.set(&cache_key, json_string.as_bytes()); } } Ok(()) } } fn map_file_extension(path: &Path) -> msg::MediaType { match path.extension() { None => msg::MediaType::Unknown, Some(os_str) => match os_str.to_str() { Some("ts") => msg::MediaType::TypeScript, Some("tsx") => msg::MediaType::TSX, Some("js") => msg::MediaType::JavaScript, Some("jsx") => msg::MediaType::JSX, Some("mjs") => msg::MediaType::JavaScript, Some("json") => msg::MediaType::Json, Some("wasm") => msg::MediaType::Wasm, _ => msg::MediaType::Unknown, }, } } // convert a ContentType string into an enumerated MediaType fn map_content_type(path: &Path, content_type: Option<&str>) -> msg::MediaType { match content_type { Some(content_type) => { // sometimes there is additional data after the media type in // Content-Type so we have to do a bit of manipulation so we are only // dealing with the actual media type let ct_vector: Vec<&str> = content_type.split(';').collect(); let ct: &str = ct_vector.first().unwrap(); match ct.to_lowercase().as_ref() { "application/typescript" | "text/typescript" | "video/vnd.dlna.mpeg-tts" | "video/mp2t" | "application/x-typescript" => { map_js_like_extension(path, msg::MediaType::TypeScript) } "application/javascript" | "text/javascript" | "application/ecmascript" | "text/ecmascript" | "application/x-javascript" => { map_js_like_extension(path, msg::MediaType::JavaScript) } "application/json" | "text/json" => msg::MediaType::Json, // Handle plain and possibly webassembly "text/plain" | "application/octet-stream" => map_file_extension(path), _ => { debug!("unknown content type: {}", content_type); msg::MediaType::Unknown } } } None =>
map_file_extension(path), } } fn map_js_like_extension( path: &Path, default: msg::MediaType, ) -> msg::MediaType { match path.extension() { None => default, Some(os_str) => match os_str.to_str() { None => default, Some("jsx") => msg::MediaType::JSX, Some("tsx") => msg::MediaType::TSX, Some(_) => default, }, } } /// Takes a module URL and source code and determines if the source code contains /// a type directive, and if so, returns the parsed URL for that type directive. fn get_types_url( module_url: &Url, source_code: &[u8], maybe_types_header: Option<&str>, ) -> Option<Url> { lazy_static! { /// Matches reference type directives in strings, which provide /// type files that should be used by the compiler instead of the /// JavaScript file. static ref DIRECTIVE_TYPES: Regex = Regex::new( r#"(?m)^/{3}\s*<reference\s+types\s*=\s*["']([^"']+)["']\s*/>"# ) .unwrap(); } match maybe_types_header { Some(types_header) => match Url::parse(&types_header) { Ok(url) => Some(url), _ => Some(module_url.join(&types_header).unwrap()), }, _ => match DIRECTIVE_TYPES.captures(str::from_utf8(source_code).unwrap()) { Some(cap) => { let val = cap.get(1).unwrap().as_str(); match Url::parse(&val) { Ok(url) => Some(url), _ => Some(module_url.join(&val).unwrap()), } } _ => None, }, } } fn filter_shebang(bytes: Vec<u8>) -> Vec<u8> { let string = str::from_utf8(&bytes).unwrap(); if let Some(i) = string.find('\n') { let (_, rest) = string.split_at(i); rest.as_bytes().to_owned() } else { Vec::new() } } fn check_cache_blacklist(url: &Url, black_list: &[String]) -> bool { let mut url_without_fragments = url.clone(); url_without_fragments.set_fragment(None); if black_list.contains(&String::from(url_without_fragments.as_str())) { return true; } let mut url_without_query_strings = url_without_fragments; url_without_query_strings.set_query(None); let mut path_buf = PathBuf::from(url_without_query_strings.as_str()); loop { if black_list.contains(&String::from(path_buf.to_str().unwrap())) { return true; } if !path_buf.pop() { break; } } false } #[derive(Debug, Default)] /// Header metadata associated with a particular "symbolic" source code file. /// (the associated source code file might not be cached, while remaining /// a user-accessible entity through imports (due to redirects)). pub struct SourceCodeHeaders { /// MIME type of the source code. pub mime_type: Option<String>, /// Where should we actually look for source code. /// This should be an absolute path!
pub redirect_to: Option<String>, /// ETag of the remote source file pub etag: Option<String>, /// X-TypeScript-Types defines the location of a .d.ts file pub x_typescript_types: Option<String>, } static MIME_TYPE: &str = "mime_type"; static REDIRECT_TO: &str = "redirect_to"; static ETAG: &str = "etag"; static X_TYPESCRIPT_TYPES: &str = "x_typescript_types"; impl SourceCodeHeaders { pub fn from_json_string(headers_string: String) -> Self { // TODO: use serde for deserialization let maybe_headers_json: serde_json::Result<serde_json::Value> = serde_json::from_str(&headers_string); if let Ok(headers_json) = maybe_headers_json { let mime_type = headers_json[MIME_TYPE].as_str().map(String::from); let redirect_to = headers_json[REDIRECT_TO].as_str().map(String::from); let etag = headers_json[ETAG].as_str().map(String::from); let x_typescript_types = headers_json[X_TYPESCRIPT_TYPES].as_str().map(String::from); return SourceCodeHeaders { mime_type, redirect_to, etag, x_typescript_types, }; } SourceCodeHeaders::default() } // TODO: remove this nonsense `cache_filename` param, this should be // done when instantiating SourceCodeHeaders pub fn to_json_string( &self, cache_filename: &Path, ) -> Result<Option<String>, serde_json::Error> { // TODO(kevinkassimo): consider introducing serde::Deserialize to make things simpler. // This is super ugly at this moment... // Had trouble making serde_derive work: I'm unable to build proc-macro2. let mut value_map = serde_json::map::Map::new(); if let Some(mime_type) = &self.mime_type { let resolved_mime_type = map_content_type(Path::new(""), Some(mime_type.clone().as_str())); // TODO: fix this let ext_based_mime_type = map_file_extension(cache_filename); // Add mime to headers only when content type is different from extension. if ext_based_mime_type == msg::MediaType::Unknown || resolved_mime_type != ext_based_mime_type { value_map.insert(MIME_TYPE.to_string(), json!(mime_type)); } } if let Some(redirect_to) = &self.redirect_to { value_map.insert(REDIRECT_TO.to_string(), json!(redirect_to)); } if let Some(etag) = &self.etag { value_map.insert(ETAG.to_string(), json!(etag)); } if let Some(x_typescript_types) = &self.x_typescript_types { value_map .insert(X_TYPESCRIPT_TYPES.to_string(), json!(x_typescript_types)); } if value_map.is_empty() { return Ok(None); } serde_json::to_string(&value_map) .and_then(|serialized| Ok(Some(serialized))) } } #[cfg(test)] mod tests { use super::*; use crate::fs as deno_fs; use tempfile::TempDir; fn setup_file_fetcher(dir_path: &Path) -> SourceFileFetcher { SourceFileFetcher::new( DiskCache::new(&dir_path.to_path_buf().join("deps")), Progress::new(), true, vec![], false, false, ) .expect("setup fail") } fn test_setup() -> (TempDir, SourceFileFetcher) { let temp_dir = TempDir::new().expect("tempdir fail"); let fetcher = setup_file_fetcher(temp_dir.path()); (temp_dir, fetcher) } macro_rules!
file_url { ($path:expr) => { if cfg!(target_os = "windows") { concat!("file:///C:", $path) } else { concat!("file://", $path) } }; } #[test] fn test_cache_blacklist() { let args = crate::flags::resolve_urls(vec![ String::from("http://deno.land/std"), String::from("http://github.com/example/mod.ts"), String::from("http://fragment.com/mod.ts#fragment"), String::from("http://query.com/mod.ts?foo=bar"), String::from("http://queryandfragment.com/mod.ts?foo=bar#fragment"), ]); let u: Url = "http://deno.land/std/fs/mod.ts".parse().unwrap(); assert_eq!(check_cache_blacklist(&u, &args), true); let u: Url = "http://github.com/example/file.ts".parse().unwrap(); assert_eq!(check_cache_blacklist(&u, &args), false); let u: Url = "http://github.com/example/mod.ts".parse().unwrap(); assert_eq!(check_cache_blacklist(&u, &args), true); let u: Url = "http://github.com/example/mod.ts?foo=bar".parse().unwrap(); assert_eq!(check_cache_blacklist(&u, &args), true); let u: Url = "http://github.com/example/mod.ts#fragment".parse().unwrap(); assert_eq!(check_cache_blacklist(&u, &args), true); let u: Url = "http://fragment.com/mod.ts".parse().unwrap(); assert_eq!(check_cache_blacklist(&u, &args), true); let u: Url = "http://query.com/mod.ts".parse().unwrap(); assert_eq!(check_cache_blacklist(&u, &args), false); let u: Url = "http://fragment.com/mod.ts#fragment".parse().unwrap(); assert_eq!(check_cache_blacklist(&u, &args), true); let u: Url = "http://query.com/mod.ts?foo=bar".parse().unwrap(); assert_eq!(check_cache_blacklist(&u, &args), true); let u: Url = "http://queryandfragment.com/mod.ts".parse().unwrap(); assert_eq!(check_cache_blacklist(&u, &args), false); let u: Url = "http://queryandfragment.com/mod.ts?foo=bar" .parse() .unwrap(); assert_eq!(check_cache_blacklist(&u, &args), true); let u: Url = "http://queryandfragment.com/mod.ts#fragment" .parse() .unwrap(); assert_eq!(check_cache_blacklist(&u, &args), false); let u: Url = "http://query.com/mod.ts?foo=bar#fragment".parse().unwrap(); assert_eq!(check_cache_blacklist(&u, &args), true); let u: Url = "http://fragment.com/mod.ts?foo=bar#fragment" .parse() .unwrap(); assert_eq!(check_cache_blacklist(&u, &args), true); } #[test] fn test_source_code_headers_get_and_save() { let (_temp_dir, fetcher) = test_setup(); let url = Url::parse("http://example.com/f.js").unwrap(); let headers_filepath = fetcher.deps_cache.location.join( fetcher .deps_cache .get_cache_filename_with_extension(&url, "headers.json"), ); if let Some(ref parent) = headers_filepath.parent() { fs::create_dir_all(parent).unwrap(); }; let _ = deno_fs::write_file( headers_filepath.as_path(), "{\"mime_type\":\"text/javascript\",\"redirect_to\":\"http://example.com/a.js\"}", 0o666, ); let headers = fetcher.get_source_code_headers(&url); assert_eq!(headers.mime_type.clone().unwrap(), "text/javascript"); assert_eq!(headers.redirect_to.unwrap(), "http://example.com/a.js"); assert_eq!(headers.etag, None); assert_eq!(headers.x_typescript_types, None); let _ = fetcher.save_source_code_headers( &url, Some("text/typescript".to_owned()), Some("http://deno.land/a.js".to_owned()), Some("W/\"04572f4749af993f4961a7e5daa1e4d5\"".to_owned()), Some("./a.d.ts".to_owned()), ); let headers2 = fetcher.get_source_code_headers(&url); assert_eq!(headers2.mime_type.clone().unwrap(), "text/typescript"); assert_eq!(headers2.redirect_to.unwrap(), "http://deno.land/a.js"); assert_eq!( headers2.etag.unwrap(), "W/\"04572f4749af993f4961a7e5daa1e4d5\"" ); assert_eq!(headers2.x_typescript_types.unwrap(), "./a.d.ts") } #[test] fn 
test_fetch_local_file_no_panic() { let (_temp_dir, fetcher) = test_setup(); if cfg!(windows) { // Should fail: missing drive letter. let u = Url::parse("file:///etc/passwd").unwrap(); fetcher.fetch_local_file(&u).unwrap_err(); } else { // Should fail: local network paths are not supported on unix. let u = Url::parse("file://server/etc/passwd").unwrap(); fetcher.fetch_local_file(&u).unwrap_err(); } } #[tokio::test] async fn test_get_source_code_1() { let http_server_guard = crate::test_util::http_server(); let (temp_dir, fetcher) = test_setup(); let fetcher_1 = fetcher.clone(); let fetcher_2 = fetcher.clone(); let module_url = Url::parse("http://localhost:4545/cli/tests/subdir/mod2.ts").unwrap(); let module_url_1 = module_url.clone(); let module_url_2 = module_url.clone(); let headers_file_name = fetcher.deps_cache.location.join( fetcher .deps_cache .get_cache_filename_with_extension(&module_url, "headers.json"), ); let headers_file_name_1 = headers_file_name.clone(); let headers_file_name_2 = headers_file_name.clone(); let headers_file_name_3 = headers_file_name; let result = fetcher .get_source_file_async(&module_url, true, false, false) .await; assert!(result.is_ok()); let r = result.unwrap(); assert_eq!( r.source_code, &b"export { printHello } from \"./print_hello.ts\";\n"[..] ); assert_eq!(&(r.media_type), &msg::MediaType::TypeScript); // Should not create .headers.json file due to matching ext assert!(fs::read_to_string(&headers_file_name_1).is_err()); // Modify .headers.json, write using fs write and read using save_source_code_headers let _ = fs::write( &headers_file_name_1, "{ \"mime_type\": \"text/javascript\" }", ); let result2 = fetcher_1 .get_source_file_async(&module_url, true, false, false) .await; assert!(result2.is_ok()); let r2 = result2.unwrap(); assert_eq!( r2.source_code, &b"export { printHello } from \"./print_hello.ts\";\n"[..] ); // If get_source_file_async does not call remote, this should be JavaScript // as we modified before! (we do not overwrite .headers.json due to no http fetch) assert_eq!(&(r2.media_type), &msg::MediaType::JavaScript); assert_eq!( fetcher_2 .get_source_code_headers(&module_url_1) .mime_type .unwrap(), "text/javascript" ); // Modify .headers.json again, but the other way around let _ = fetcher_2.save_source_code_headers( &module_url_1, Some("application/json".to_owned()), None, None, None, ); let result3 = fetcher_2 .get_source_file_async(&module_url_1, true, false, false) .await; assert!(result3.is_ok()); let r3 = result3.unwrap(); assert_eq!( r3.source_code, &b"export { printHello } from \"./print_hello.ts\";\n"[..] ); // If get_source_file_async does not call remote, this should be Json // as we modified before! (we do not overwrite .headers.json due to no http fetch) assert_eq!(&(r3.media_type), &msg::MediaType::Json); assert!(fs::read_to_string(&headers_file_name_2) .unwrap() .contains("application/json")); // let's create a fresh instance of DenoDir (simulating another fresh Deno process) // and don't use the cache let fetcher = setup_file_fetcher(temp_dir.path()); let result4 = fetcher .get_source_file_async(&module_url_2, false, false, false) .await; assert!(result4.is_ok()); let r4 = result4.unwrap(); let expected4 = &b"export { printHello } from \"./print_hello.ts\";\n"[..]; assert_eq!(r4.source_code, expected4); // Now the old .headers.json file should have gone!
Resolved back to TypeScript assert_eq!(&(r4.media_type), &msg::MediaType::TypeScript); assert!(fs::read_to_string(&headers_file_name_3).is_err()); drop(http_server_guard); } #[tokio::test] async fn test_get_source_code_2() { let http_server_guard = crate::test_util::http_server(); let (temp_dir, fetcher) = test_setup(); let fetcher_1 = fetcher.clone(); let module_url = Url::parse("http://localhost:4545/cli/tests/subdir/mismatch_ext.ts") .unwrap(); let module_url_1 = module_url.clone(); let module_url_2 = module_url.clone(); let headers_file_name = fetcher.deps_cache.location.join( fetcher .deps_cache .get_cache_filename_with_extension(&module_url, "headers.json"), ); let result = fetcher .get_source_file_async(&module_url, true, false, false) .await; assert!(result.is_ok()); let r = result.unwrap(); let expected = b"export const loaded = true;\n"; assert_eq!(r.source_code, expected); // Mismatch ext with content type, create .headers.json assert_eq!(&(r.media_type), &msg::MediaType::JavaScript); assert_eq!( fetcher .get_source_code_headers(&module_url) .mime_type .unwrap(), "text/javascript" ); // Modify .headers.json let _ = fetcher.save_source_code_headers( &module_url, Some("text/typescript".to_owned()), None, None, None, ); let result2 = fetcher .get_source_file_async(&module_url, true, false, false) .await; assert!(result2.is_ok()); let r2 = result2.unwrap(); let expected2 = b"export const loaded = true;\n"; assert_eq!(r2.source_code, expected2); // If get_source_file_async does not call remote, this should be TypeScript // as we modified before! (we do not overwrite .headers.json due to no http // fetch) assert_eq!(&(r2.media_type), &msg::MediaType::TypeScript); assert!(fs::read_to_string(&headers_file_name).is_err()); // let's create a fresh instance of DenoDir (simulating another fresh Deno // process) and don't use the cache let fetcher = setup_file_fetcher(temp_dir.path()); let result3 = fetcher .get_source_file_async(&module_url_1, false, false, false) .await; assert!(result3.is_ok()); let r3 = result3.unwrap(); let expected3 = b"export const loaded = true;\n"; assert_eq!(r3.source_code, expected3); // Now the old .headers.json file should be overwritten back to JavaScript!
// (due to http fetch) assert_eq!(&(r3.media_type), &msg::MediaType::JavaScript); assert_eq!( fetcher_1 .get_source_code_headers(&module_url_2) .mime_type .unwrap(), "text/javascript" ); drop(http_server_guard); } #[tokio::test] async fn test_get_source_code_multiple_downloads_of_same_file() { let http_server_guard = crate::test_util::http_server(); let (_temp_dir, fetcher) = test_setup(); let specifier = ModuleSpecifier::resolve_url( "http://localhost:4545/cli/tests/subdir/mismatch_ext.ts", ) .unwrap(); let headers_file_name = fetcher.deps_cache.location.join( fetcher .deps_cache .get_cache_filename_with_extension(specifier.as_url(), "headers.json"), ); // first download let r = fetcher.fetch_source_file_async(&specifier, None).await; assert!(r.is_ok()); let result = fs::File::open(&headers_file_name); assert!(result.is_ok()); let headers_file = result.unwrap(); // save modified timestamp for headers file let headers_file_metadata = headers_file.metadata().unwrap(); let headers_file_modified = headers_file_metadata.modified().unwrap(); // download file again, it should use already fetched file even though // `use_disk_cache` is set to false, this can be verified using source // header file creation timestamp (should be the same as after first // download) let r = fetcher.fetch_source_file_async(&specifier, None).await; assert!(r.is_ok()); let result = fs::File::open(&headers_file_name); assert!(result.is_ok()); let headers_file_2 = result.unwrap(); // save modified timestamp for headers file let headers_file_metadata_2 = headers_file_2.metadata().unwrap(); let headers_file_modified_2 = headers_file_metadata_2.modified().unwrap(); assert_eq!(headers_file_modified, headers_file_modified_2); drop(http_server_guard); } #[tokio::test] async fn test_get_source_code_3() { let http_server_guard = crate::test_util::http_server(); let (_temp_dir, fetcher) = test_setup(); let redirect_module_url = Url::parse( "http://localhost:4546/cli/tests/subdir/redirects/redirect1.js", ) .unwrap(); let redirect_source_filepath = fetcher .deps_cache .location .join("http/localhost_PORT4546/cli/tests/subdir/redirects/redirect1.js"); let redirect_source_filename = redirect_source_filepath.to_str().unwrap().to_string(); let target_module_url = Url::parse( "http://localhost:4545/cli/tests/subdir/redirects/redirect1.js", ) .unwrap(); let redirect_target_filepath = fetcher .deps_cache .location .join("http/localhost_PORT4545/cli/tests/subdir/redirects/redirect1.js"); let redirect_target_filename = redirect_target_filepath.to_str().unwrap().to_string(); // Test basic follow and headers recording let result = fetcher .get_source_file_async(&redirect_module_url, true, false, false) .await; assert!(result.is_ok()); let mod_meta = result.unwrap(); // File that requires redirection is not downloaded. assert!(fs::read_to_string(&redirect_source_filename).is_err()); // ... but its .headers.json is created. let redirect_source_headers = fetcher.get_source_code_headers(&redirect_module_url); assert_eq!( redirect_source_headers.redirect_to.unwrap(), "http://localhost:4545/cli/tests/subdir/redirects/redirect1.js" ); // The target of redirection is downloaded instead. assert_eq!( fs::read_to_string(&redirect_target_filename).unwrap(), "export const redirect = 1;\n" ); let redirect_target_headers = fetcher.get_source_code_headers(&target_module_url); assert!(redirect_target_headers.redirect_to.is_none()); // Examine the meta result. 
assert_eq!(mod_meta.url, target_module_url); drop(http_server_guard); } #[tokio::test] async fn test_get_source_code_4() { let http_server_guard = crate::test_util::http_server(); let (_temp_dir, fetcher) = test_setup(); let double_redirect_url = Url::parse( "http://localhost:4548/cli/tests/subdir/redirects/redirect1.js", ) .unwrap(); let double_redirect_path = fetcher .deps_cache .location .join("http/localhost_PORT4548/cli/tests/subdir/redirects/redirect1.js"); let redirect_url = Url::parse( "http://localhost:4546/cli/tests/subdir/redirects/redirect1.js", ) .unwrap(); let redirect_path = fetcher .deps_cache .location .join("http/localhost_PORT4546/cli/tests/subdir/redirects/redirect1.js"); let target_url = Url::parse( "http://localhost:4545/cli/tests/subdir/redirects/redirect1.js", ) .unwrap(); let target_path = fetcher .deps_cache .location .join("http/localhost_PORT4545/cli/tests/subdir/redirects/redirect1.js"); // Test double redirects and headers recording let result = fetcher .get_source_file_async(&double_redirect_url, true, false, false) .await; assert!(result.is_ok()); let mod_meta = result.unwrap(); assert!(fs::read_to_string(&double_redirect_path).is_err()); assert!(fs::read_to_string(&redirect_path).is_err()); let double_redirect_headers = fetcher.get_source_code_headers(&double_redirect_url); assert_eq!( double_redirect_headers.redirect_to.unwrap(), redirect_url.to_string() ); let redirect_headers = fetcher.get_source_code_headers(&redirect_url); assert_eq!( redirect_headers.redirect_to.unwrap(), target_url.to_string() ); // The target of redirection is downloaded instead. assert_eq!( fs::read_to_string(&target_path).unwrap(), "export const redirect = 1;\n" ); let redirect_target_headers = fetcher.get_source_code_headers(&target_url); assert!(redirect_target_headers.redirect_to.is_none()); // Examine the meta result. assert_eq!(mod_meta.url, target_url); drop(http_server_guard); } #[tokio::test] async fn test_get_source_code_5() { let http_server_guard = crate::test_util::http_server(); let (_temp_dir, fetcher) = test_setup(); let double_redirect_url = Url::parse( "http://localhost:4548/cli/tests/subdir/redirects/redirect1.js", ) .unwrap(); let redirect_url = Url::parse( "http://localhost:4546/cli/tests/subdir/redirects/redirect1.js", ) .unwrap(); let target_path = fetcher .deps_cache .location .join("http/localhost_PORT4545/cli/tests/subdir/redirects/redirect1.js"); let target_path_ = target_path.clone(); // Test that the redirect target is not downloaded twice for a different redirect source. let result = fetcher .get_source_file_async(&double_redirect_url, true, false, false) .await; assert!(result.is_ok()); let result = fs::File::open(&target_path); assert!(result.is_ok()); let file = result.unwrap(); // save modified timestamp for headers file of redirect target let file_metadata = file.metadata().unwrap(); let file_modified = file_metadata.modified().unwrap(); // When another file is fetched that also points to the redirect target, then the // redirect target shouldn't be downloaded again.
It can be verified // using source header file creation timestamp (should be the same as // after first `get_source_file`) let result = fetcher .get_source_file_async(&redirect_url, true, false, false) .await; assert!(result.is_ok()); let result = fs::File::open(&target_path_); assert!(result.is_ok()); let file_2 = result.unwrap(); // save modified timestamp for headers file let file_metadata_2 = file_2.metadata().unwrap(); let file_modified_2 = file_metadata_2.modified().unwrap(); assert_eq!(file_modified, file_modified_2); drop(http_server_guard); } #[tokio::test] async fn test_get_source_code_6() { let http_server_guard = crate::test_util::http_server(); let (_temp_dir, fetcher) = test_setup(); let double_redirect_url = Url::parse( "http://localhost:4548/cli/tests/subdir/redirects/redirect1.js", ) .unwrap(); // Test that redirections can be limited let result = fetcher .fetch_remote_source_async(&double_redirect_url, false, false, 2) .await; assert!(result.is_ok()); let result = fetcher .fetch_remote_source_async(&double_redirect_url, false, false, 1) .await; assert!(result.is_err()); let err = result.err().unwrap(); assert_eq!(err.kind(), ErrorKind::Http); drop(http_server_guard); } #[tokio::test] async fn test_get_source_no_remote() { let http_server_guard = crate::test_util::http_server(); let (_temp_dir, fetcher) = test_setup(); let module_url = Url::parse("http://localhost:4545/cli/tests/002_hello.ts").unwrap(); // Remote modules are not allowed let result = fetcher .get_source_file_async(&module_url, true, true, false) .await; assert!(result.is_err()); let err = result.err().unwrap(); assert_eq!(err.kind(), ErrorKind::NotFound); drop(http_server_guard); } #[tokio::test] async fn test_get_source_cached_only() { let http_server_guard = crate::test_util::http_server(); let (_temp_dir, fetcher) = test_setup(); let fetcher_1 = fetcher.clone(); let fetcher_2 = fetcher.clone(); let module_url = Url::parse("http://localhost:4545/cli/tests/002_hello.ts").unwrap(); let module_url_1 = module_url.clone(); let module_url_2 = module_url.clone(); // file hasn't been cached before let result = fetcher .get_source_file_async(&module_url, true, false, true) .await; assert!(result.is_err()); let err = result.err().unwrap(); assert_eq!(err.kind(), ErrorKind::NotFound); // download and cache file let result = fetcher_1 .get_source_file_async(&module_url_1, true, false, false) .await; assert!(result.is_ok()); // module is already cached, should be ok even with `cached_only` let result = fetcher_2 .get_source_file_async(&module_url_2, true, false, true) .await; assert!(result.is_ok()); drop(http_server_guard); } #[tokio::test] async fn test_fetch_source_async_1() { let http_server_guard = crate::test_util::http_server(); let (_temp_dir, fetcher) = test_setup(); let module_url = Url::parse("http://127.0.0.1:4545/cli/tests/subdir/mt_video_mp2t.t3.ts") .unwrap(); let headers_file_name = fetcher.deps_cache.location.join( fetcher .deps_cache .get_cache_filename_with_extension(&module_url, "headers.json"), ); let result = fetcher .fetch_remote_source_async(&module_url, false, false, 10) .await; assert!(result.is_ok()); let r = result.unwrap(); assert_eq!(r.source_code, b"export const loaded = true;\n"); assert_eq!(&(r.media_type), &msg::MediaType::TypeScript); // matching ext, no .headers.json file created assert!(fs::read_to_string(&headers_file_name).is_err()); // Modify .headers.json, make sure read from local let _ = fetcher.save_source_code_headers( &module_url, Some("text/javascript".to_owned()), None, 
None, None, ); let result2 = fetcher.fetch_cached_remote_source(&module_url); assert!(result2.is_ok()); let r2 = result2.unwrap().unwrap(); assert_eq!(r2.source_code, b"export const loaded = true;\n"); // Not MediaType::TypeScript due to .headers.json modification assert_eq!(&(r2.media_type), &msg::MediaType::JavaScript); drop(http_server_guard); } #[tokio::test] async fn test_fetch_source_1() { let http_server_guard = crate::test_util::http_server(); let (_temp_dir, fetcher) = test_setup(); let module_url = Url::parse("http://localhost:4545/cli/tests/subdir/mt_video_mp2t.t3.ts") .unwrap(); let headers_file_name = fetcher.deps_cache.location.join( fetcher .deps_cache .get_cache_filename_with_extension(&module_url, "headers.json"), ); let result = fetcher .fetch_remote_source_async(&module_url, false, false, 10) .await; assert!(result.is_ok()); let r = result.unwrap(); assert_eq!(r.source_code, b"export const loaded = true;\n"); assert_eq!(&(r.media_type), &msg::MediaType::TypeScript); // matching ext, no .headers.json file created assert!(fs::read_to_string(&headers_file_name).is_err()); // Modify .headers.json, make sure read from local let _ = fetcher.save_source_code_headers( &module_url, Some("text/javascript".to_owned()), None, None, None, ); let result2 = fetcher.fetch_cached_remote_source(&module_url); assert!(result2.is_ok()); let r2 = result2.unwrap().unwrap(); assert_eq!(r2.source_code, b"export const loaded = true;\n"); // Not MediaType::TypeScript due to .headers.json modification assert_eq!(&(r2.media_type), &msg::MediaType::JavaScript); drop(http_server_guard); } #[tokio::test] async fn test_fetch_source_2() { let http_server_guard = crate::test_util::http_server(); let (_temp_dir, fetcher) = test_setup(); let fetcher_1 = fetcher.clone(); let fetcher_2 = fetcher.clone(); let fetcher_3 = fetcher.clone(); let module_url = Url::parse("http://localhost:4545/cli/tests/subdir/no_ext").unwrap(); let module_url_2 = Url::parse("http://localhost:4545/cli/tests/subdir/mismatch_ext.ts") .unwrap(); let module_url_2_ = module_url_2.clone(); let module_url_3 = Url::parse("http://localhost:4545/cli/tests/subdir/unknown_ext.deno") .unwrap(); let module_url_3_ = module_url_3.clone(); let result = fetcher .fetch_remote_source_async(&module_url, false, false, 10) .await; assert!(result.is_ok()); let r = result.unwrap(); assert_eq!(r.source_code, b"export const loaded = true;\n"); assert_eq!(&(r.media_type), &msg::MediaType::TypeScript); // no ext, should create .headers.json file assert_eq!( fetcher_1 .get_source_code_headers(&module_url) .mime_type .unwrap(), "text/typescript" ); let result = fetcher_1 .fetch_remote_source_async(&module_url_2, false, false, 10) .await; assert!(result.is_ok()); let r2 = result.unwrap(); assert_eq!(r2.source_code, b"export const loaded = true;\n"); assert_eq!(&(r2.media_type), &msg::MediaType::JavaScript); // mismatch ext, should create .headers.json file assert_eq!( fetcher_2 .get_source_code_headers(&module_url_2_) .mime_type .unwrap(), "text/javascript" ); // test unknown extension let result = fetcher_2 .fetch_remote_source_async(&module_url_3, false, false, 10) .await; assert!(result.is_ok()); let r3 = result.unwrap(); assert_eq!(r3.source_code, b"export const loaded = true;\n"); assert_eq!(&(r3.media_type), &msg::MediaType::TypeScript); // unknown ext, should create .headers.json file assert_eq!( fetcher_3 .get_source_code_headers(&module_url_3_) .mime_type .unwrap(), "text/typescript" ); drop(http_server_guard); } #[tokio::test] async fn 
test_fetch_source_file() { let (_temp_dir, fetcher) = test_setup(); // Test failure case. let specifier = ModuleSpecifier::resolve_url(file_url!("/baddir/hello.ts")).unwrap(); let r = fetcher.fetch_source_file_async(&specifier, None).await; assert!(r.is_err()); let p = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("js/main.ts"); let specifier = ModuleSpecifier::resolve_url_or_path(p.to_str().unwrap()).unwrap(); let r = fetcher.fetch_source_file_async(&specifier, None).await; assert!(r.is_ok()); } #[tokio::test] async fn test_fetch_source_file_1() { /*recompile ts file*/ let (_temp_dir, fetcher) = test_setup(); // Test failure case. let specifier = ModuleSpecifier::resolve_url(file_url!("/baddir/hello.ts")).unwrap(); let r = fetcher.fetch_source_file_async(&specifier, None).await; assert!(r.is_err()); let p = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("js/main.ts"); let specifier = ModuleSpecifier::resolve_url_or_path(p.to_str().unwrap()).unwrap(); let r = fetcher.fetch_source_file_async(&specifier, None).await; assert!(r.is_ok()); } #[tokio::test] async fn test_fetch_source_file_2() { /*recompile ts file*/ let (_temp_dir, fetcher) = test_setup(); let p = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR")) .join("tests/001_hello.js"); let specifier = ModuleSpecifier::resolve_url_or_path(p.to_str().unwrap()).unwrap(); let r = fetcher.fetch_source_file_async(&specifier, None).await; assert!(r.is_ok()); } #[test] fn test_resolve_module_3() { // unsupported schemes let test_cases = [ "ftp://localhost:4545/testdata/subdir/print_hello.ts", "blob:https://whatwg.org/d0360e2f-caee-469f-9a2f-87d5b0456f6f", ]; for &test in test_cases.iter() { let url = Url::parse(test).unwrap(); assert_eq!( SourceFileFetcher::check_if_supported_scheme(&url) .unwrap_err() .kind(), ErrorKind::Other ); } } #[test] fn test_map_file_extension() { assert_eq!( map_file_extension(Path::new("foo/bar.ts")), msg::MediaType::TypeScript ); assert_eq!( map_file_extension(Path::new("foo/bar.tsx")), msg::MediaType::TSX ); assert_eq!( map_file_extension(Path::new("foo/bar.d.ts")), msg::MediaType::TypeScript ); assert_eq!( map_file_extension(Path::new("foo/bar.js")), msg::MediaType::JavaScript ); assert_eq!( map_file_extension(Path::new("foo/bar.jsx")), msg::MediaType::JSX ); assert_eq!( map_file_extension(Path::new("foo/bar.json")), msg::MediaType::Json ); assert_eq!( map_file_extension(Path::new("foo/bar.wasm")), msg::MediaType::Wasm ); assert_eq!( map_file_extension(Path::new("foo/bar.txt")), msg::MediaType::Unknown ); assert_eq!( map_file_extension(Path::new("foo/bar")), msg::MediaType::Unknown ); } #[test] fn test_map_content_type_extension_only() { // Extension only assert_eq!( map_content_type(Path::new("foo/bar.ts"), None), msg::MediaType::TypeScript ); assert_eq!( map_content_type(Path::new("foo/bar.tsx"), None), msg::MediaType::TSX ); assert_eq!( map_content_type(Path::new("foo/bar.d.ts"), None), msg::MediaType::TypeScript ); assert_eq!( map_content_type(Path::new("foo/bar.js"), None), msg::MediaType::JavaScript ); assert_eq!( map_content_type(Path::new("foo/bar.txt"), None), msg::MediaType::Unknown ); assert_eq!( map_content_type(Path::new("foo/bar.jsx"), None), msg::MediaType::JSX ); assert_eq!( map_content_type(Path::new("foo/bar.json"), None), msg::MediaType::Json ); assert_eq!( map_content_type(Path::new("foo/bar.wasm"), None), msg::MediaType::Wasm ); assert_eq!( map_content_type(Path::new("foo/bar"), None), msg::MediaType::Unknown ); } #[test] fn 
test_map_content_type_media_type_with_no_extension() { // Media Type assert_eq!( map_content_type(Path::new("foo/bar"), Some("application/typescript")), msg::MediaType::TypeScript ); assert_eq!( map_content_type(Path::new("foo/bar"), Some("text/typescript")), msg::MediaType::TypeScript ); assert_eq!( map_content_type(Path::new("foo/bar"), Some("video/vnd.dlna.mpeg-tts")), msg::MediaType::TypeScript ); assert_eq!( map_content_type(Path::new("foo/bar"), Some("video/mp2t")), msg::MediaType::TypeScript ); assert_eq!( map_content_type(Path::new("foo/bar"), Some("application/x-typescript")), msg::MediaType::TypeScript ); assert_eq!( map_content_type(Path::new("foo/bar"), Some("application/javascript")), msg::MediaType::JavaScript ); assert_eq!( map_content_type(Path::new("foo/bar"), Some("text/javascript")), msg::MediaType::JavaScript ); assert_eq!( map_content_type(Path::new("foo/bar"), Some("application/ecmascript")), msg::MediaType::JavaScript ); assert_eq!( map_content_type(Path::new("foo/bar"), Some("text/ecmascript")), msg::MediaType::JavaScript ); assert_eq!( map_content_type(Path::new("foo/bar"), Some("application/x-javascript")), msg::MediaType::JavaScript ); assert_eq!( map_content_type(Path::new("foo/bar"), Some("application/json")), msg::MediaType::Json ); assert_eq!( map_content_type(Path::new("foo/bar"), Some("text/json")), msg::MediaType::Json ); } #[test] fn test_map_file_extension_media_type_with_extension() { assert_eq!( map_content_type(Path::new("foo/bar.ts"), Some("text/plain")), msg::MediaType::TypeScript ); assert_eq!( map_content_type(Path::new("foo/bar.ts"), Some("foo/bar")), msg::MediaType::Unknown ); assert_eq!( map_content_type( Path::new("foo/bar.tsx"), Some("application/typescript"), ), msg::MediaType::TSX ); assert_eq!( map_content_type( Path::new("foo/bar.tsx"), Some("application/javascript"), ), msg::MediaType::TSX ); assert_eq!( map_content_type( Path::new("foo/bar.tsx"), Some("application/x-typescript"), ), msg::MediaType::TSX ); assert_eq!( map_content_type( Path::new("foo/bar.tsx"), Some("video/vnd.dlna.mpeg-tts"), ), msg::MediaType::TSX ); assert_eq!( map_content_type(Path::new("foo/bar.tsx"), Some("video/mp2t")), msg::MediaType::TSX ); assert_eq!( map_content_type( Path::new("foo/bar.jsx"), Some("application/javascript"), ), msg::MediaType::JSX ); assert_eq!( map_content_type( Path::new("foo/bar.jsx"), Some("application/x-typescript"), ), msg::MediaType::JSX ); assert_eq!( map_content_type( Path::new("foo/bar.jsx"), Some("application/ecmascript"), ), msg::MediaType::JSX ); assert_eq!( map_content_type(Path::new("foo/bar.jsx"), Some("text/ecmascript")), msg::MediaType::JSX ); assert_eq!( map_content_type( Path::new("foo/bar.jsx"), Some("application/x-javascript"), ), msg::MediaType::JSX ); } #[test] fn test_filter_shebang() { assert_eq!(filter_shebang(b"#!"[..].to_owned()), b""); assert_eq!(filter_shebang(b"#!\n\n"[..].to_owned()), b"\n\n"); let code = b"#!/usr/bin/env deno\nconsole.log('hello');\n"[..].to_owned(); assert_eq!(filter_shebang(code), b"\nconsole.log('hello');\n"); } #[tokio::test] async fn test_fetch_with_etag() { let http_server_guard = crate::test_util::http_server(); let (_temp_dir, fetcher) = test_setup(); let module_url = Url::parse("http://127.0.0.1:4545/etag_script.ts").unwrap(); let source = fetcher .fetch_remote_source_async(&module_url, false, false, 1) .await; assert!(source.is_ok()); let source = source.unwrap(); assert_eq!(source.source_code, b"console.log('etag')"); assert_eq!(&(source.media_type), &msg::MediaType::TypeScript); 
let headers = fetcher.get_source_code_headers(&module_url); assert_eq!(headers.etag, Some("33a64df551425fcc55e".to_string())); let header_path = fetcher.deps_cache.location.join( fetcher .deps_cache .get_cache_filename_with_extension(&module_url, "headers.json"), ); let modified1 = header_path.metadata().unwrap().modified().unwrap(); // Forcibly change the contents of the cache file and request // it again with the cache parameters turned off. // If the fetched content changes, the cached content is used. fetcher .save_source_code(&module_url, b"changed content") .unwrap(); let cached_source = fetcher .fetch_remote_source_async(&module_url, false, false, 1) .await .unwrap(); assert_eq!(cached_source.source_code, b"changed content"); let modified2 = header_path.metadata().unwrap().modified().unwrap(); // Assert that the file has not been modified assert_eq!(modified1, modified2); drop(http_server_guard); } #[test] fn test_get_types_url_1() { let module_url = Url::parse("https://example.com/mod.js").unwrap(); let source_code = b"console.log(\"foo\");".to_owned(); let result = get_types_url(&module_url, &source_code, None); assert_eq!(result, None); } #[test] fn test_get_types_url_2() { let module_url = Url::parse("https://example.com/mod.js").unwrap(); let source_code = r#"/// <reference types="./mod.d.ts" /> console.log("foo");"# .as_bytes() .to_owned(); let result = get_types_url(&module_url, &source_code, None); assert_eq!( result, Some(Url::parse("https://example.com/mod.d.ts").unwrap()) ); } #[test] fn test_get_types_url_3() { let module_url = Url::parse("https://example.com/mod.js").unwrap(); let source_code = r#"/// <reference types="https://deno.land/mod.d.ts" /> console.log("foo");"# .as_bytes() .to_owned(); let result = get_types_url(&module_url, &source_code, None); assert_eq!( result, Some(Url::parse("https://deno.land/mod.d.ts").unwrap()) ); } #[test] fn test_get_types_url_4() { let module_url = Url::parse("file:///foo/bar/baz.js").unwrap(); let source_code = r#"/// <reference types="../qat/baz.d.ts" /> console.log("foo");"# .as_bytes() .to_owned(); let result = get_types_url(&module_url, &source_code, None); assert_eq!( result, Some(Url::parse("file:///foo/qat/baz.d.ts").unwrap()) ); } #[test] fn test_get_types_url_5() { let module_url = Url::parse("https://example.com/mod.js").unwrap(); let source_code = b"console.log(\"foo\");".to_owned(); let result = get_types_url(&module_url, &source_code, Some("./mod.d.ts")); assert_eq!( result, Some(Url::parse("https://example.com/mod.d.ts").unwrap()) ); } #[test] fn test_get_types_url_6() { let module_url = Url::parse("https://example.com/mod.js").unwrap(); let source_code = r#"/// <reference types="./mod.d.ts" /> console.log("foo");"# .as_bytes() .to_owned(); let result = get_types_url( &module_url, &source_code, Some("https://deno.land/mod.d.ts"), ); assert_eq!( result, Some(Url::parse("https://deno.land/mod.d.ts").unwrap()) ); } #[tokio::test] async fn test_fetch_with_types_header() { let http_server_guard = crate::test_util::http_server(); let (_temp_dir, fetcher) = test_setup(); let module_url = Url::parse("http://127.0.0.1:4545/xTypeScriptTypes.js").unwrap(); let source = fetcher .fetch_remote_source_async(&module_url, false, false, 1) .await; assert!(source.is_ok()); let source = source.unwrap(); assert_eq!(source.source_code, b"export const foo = 'foo';"); assert_eq!(&(source.media_type), &msg::MediaType::JavaScript); assert_eq!( source.types_url, Some(Url::parse("http://127.0.0.1:4545/xTypeScriptTypes.d.ts").unwrap()) ); 
drop(http_server_guard); } #[tokio::test] async fn test_fetch_with_types_reference() { let http_server_guard = crate::test_util::http_server(); let (_temp_dir, fetcher) = test_setup(); let module_url = Url::parse("http://127.0.0.1:4545/referenceTypes.js").unwrap(); let source = fetcher .fetch_remote_source_async(&module_url, false, false, 1) .await; assert!(source.is_ok()); let source = source.unwrap(); assert_eq!(&(source.media_type), &msg::MediaType::JavaScript); assert_eq!( source.types_url, Some(Url::parse("http://127.0.0.1:4545/xTypeScriptTypes.d.ts").unwrap()) ); drop(http_server_guard); } }
31.866313
133
0.636194
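One detail of the row above worth isolating: `check_cache_blacklist` strips the fragment, then the query, then pops path segments one at a time, so blacklisting a prefix such as http://deno.land/std also covers every module underneath it. A standalone sketch of just that prefix walk (the function name is illustrative; the real implementation takes a parsed `Url`):

use std::path::PathBuf;

fn matches_blacklist_prefix(url: &str, black_list: &[String]) -> bool {
    // Walk up the path one segment at a time, checking each prefix.
    let mut path_buf = PathBuf::from(url);
    loop {
        if black_list.contains(&path_buf.to_string_lossy().to_string()) {
            return true;
        }
        if !path_buf.pop() {
            break;
        }
    }
    false
}

fn main() {
    let list = vec![String::from("http://deno.land/std")];
    // A module under the blacklisted prefix matches...
    assert!(matches_blacklist_prefix("http://deno.land/std/fs/mod.ts", &list));
    // ...while an unrelated path does not.
    assert!(!matches_blacklist_prefix("http://deno.land/x/mod.ts", &list));
}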
9cf872f8f0295c8458b7d2b53ad114aeab1aaaa8
4,194
#[macro_use] pub mod build; mod expect; pub use expect::*; use gdnative::{ api::{GDNativeLibrary, NativeScript, Node}, prelude::Unique, GodotObject, Ref, TRef, }; static mut ROOT_NODE: Option<Ref<Node>> = None; pub fn get_root_node() -> Result<TRef<'static, Node>, String> { unsafe { ROOT_NODE.map(|n| n.assume_safe()) }.ok_or_else(|| "ERR: Unable to get root node".to_string()) } pub fn set_root_node(node: Ref<Node>) { unsafe { ROOT_NODE = Some(node) }; } pub fn process_frame(node: &Node) { node.notification(Node::NOTIFICATION_PROCESS, false); node.notification(Node::NOTIFICATION_PHYSICS_PROCESS, false); } pub fn process_frames(n: i32, node: &Node) { for _ in 0..n { process_frame(node); } } pub fn cleanup(root: TRef<'static, Node>) { while root.get_child_count() > 0 { let child = root.get_child(0).unwrap(); root.remove_child(child); unsafe { child.assume_safe().queue_free() }; } } pub fn get_script(class_name: &str) -> Ref<NativeScript, Unique> { let gdn = GDNativeLibrary::current_library(); let ns = NativeScript::new(); ns.set_class_name(class_name); ns.set_library(unsafe { gdn.assume_shared() }); ns } #[macro_export] macro_rules! get_path { () => { stdext::function_name!().to_string().replace("quintessence_tests::", "") }; } #[macro_export] macro_rules! run_tests { { $( $test:ident ; )* } => { $(mod $test;)* fn _run_tests(pattern: &Option<String>) -> godot_testicles::AssertionResult { $($test::run(stringify!($test), pattern)?;)* Ok(()) } #[no_mangle] pub extern "C" fn run_tests( _data: *mut gdnative::libc::c_void, args: *const gdnative::core_types::VariantArray, ) -> gdnative::sys::godot_variant { #![allow(clippy::not_unsafe_ptr_arg_deref)] let args = unsafe { args.as_ref() }; let args = args.unwrap(); // Passed args let root = args.get(0).try_to_object::<gdnative::api::Node>().unwrap(); let test_pattern = args.get(1).try_to_string(); if let Some(p) = &test_pattern { gdnative::godot_print!(">> Running tests matching \"{}\"\n", p); } // Run tests godot_testicles::set_root_node(root); let status = _run_tests(&test_pattern); // Error forwarding if let Err(msg) = &status { gdnative::godot_print!("{}", msg); } gdnative::core_types::Variant::from_bool(status.is_ok()).forget() } } } #[macro_export] macro_rules! d { ($val:expr) => { gdnative::godot_print!("[ {} ] => {}", godot_testicles::get_path!(), $val); }; } #[macro_export] macro_rules! testicles { ($(fn $fn_name:ident() $body:block)*) => { $(#[inline] fn $fn_name() -> godot_testicles::AssertionResult { $body godot_testicles::cleanup(godot_testicles::get_root_node()?); Ok(()) })* pub fn run(prefix: &str, pattern: &Option<String>) -> godot_testicles::AssertionResult { let skip_prefix = "__skip_"; match pattern { Some(pattern) => { let pattern = regex::Regex::new(&pattern[..]).map_err(|_| "Invalid test path pattern".to_string())?; $( let path = format!("{}::{}", prefix, stringify!($fn_name)); if !stringify!($fn_name).starts_with(skip_prefix) && pattern.is_match(&path[..]) { $fn_name()?; } )* } None => { $( if !stringify!($fn_name).starts_with(skip_prefix) { $fn_name()?; } )* } } Ok(()) } }; } #[macro_export] macro_rules! node { ( $type:ty, { $($key:ident : $value:expr),* $(,)? }, [ $($child:expr),* $(,)? ] ) => {{ let node = unsafe { <$type>::new().into_shared().assume_safe() }; $(node.set(stringify!($key), $value);)* $(node.add_child($child, false);)* node }}; ( $type:ty, { $($key:ident : $value:expr),* $(,)? }, $setup: expr, [ $($child:expr),* $(,)? 
] ) => {{ let node = unsafe { <$type>::new().into_shared().assume_safe() }; $(node.set(stringify!($key), $value);)* $setup(node); $(node.add_child($child, false);)* node }}; }
25.113772
110
0.575584
f89f8d15b3f7e9fd9aaa2670d557cd558c7a5154
6,001
// Copyright 2020 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use diagnostics_message::fx_log_packet_t; use fidl::{ endpoints::{ClientEnd, ProtocolMarker, ServerEnd}, Socket, SocketOpts, }; use fidl_fuchsia_diagnostics_test::ControllerMarker; use fidl_fuchsia_logger::{LogFilterOptions, LogLevelFilter, LogMarker, LogMessage, LogSinkMarker}; use fidl_fuchsia_sys::LauncherMarker; use fidl_fuchsia_sys_internal::{ LogConnection, LogConnectionListenerMarker, LogConnectorMarker, LogConnectorRequest, LogConnectorRequestStream, SourceIdentity, }; use fuchsia_async as fasync; use fuchsia_component::{ client::{connect_to_protocol, launch_with_options, LaunchOptions}, server::ServiceFs, }; use fuchsia_syslog::levels::INFO; use fuchsia_syslog_listener::{run_log_listener_with_proxy, LogProcessor}; use fuchsia_zircon as zx; use futures::{channel::mpsc, SinkExt, StreamExt, TryStreamExt}; #[fuchsia::test] async fn same_log_sink_simultaneously_via_connector() { let (client, server) = zx::Channel::create().unwrap(); let mut serverend = Some(ServerEnd::new(server)); // connect multiple identical log sinks let mut sockets = Vec::new(); for _ in 0..50 { let (message_client, message_server) = Socket::create(SocketOpts::DATAGRAM).unwrap(); sockets.push(message_server); // each with the same message repeated multiple times let mut packet = fx_log_packet_t::default(); packet.metadata.pid = 1000; packet.metadata.tid = 2000; packet.metadata.severity = LogLevelFilter::Info.into_primitive().into(); packet.data[0] = 0; packet.add_data(1, "repeated log".as_bytes()); for _ in 0..5 { message_client.write(&mut packet.as_bytes()).unwrap(); } } { let listener = ClientEnd::<LogConnectionListenerMarker>::new(client).into_proxy().unwrap(); for socket in sockets { let (client, server) = zx::Channel::create().unwrap(); let log_request = ServerEnd::<LogSinkMarker>::new(server); let source_identity = SourceIdentity { realm_path: Some(vec![]), component_name: Some("testing123".to_string()), instance_id: Some("0".to_string()), component_url: Some( "fuchsia-pkg://fuchsia.com/test-logs-connector#meta/test-logs-connector.cmx" .to_string(), ), ..SourceIdentity::EMPTY }; listener .on_new_connection(&mut LogConnection { log_request, source_identity }) .unwrap(); let log_sink = ClientEnd::<LogSinkMarker>::new(client).into_proxy().unwrap(); log_sink.connect(socket).unwrap(); } } let (dir_client, dir_server) = zx::Channel::create().unwrap(); let mut fs = ServiceFs::new(); let (take_log_listener_response_snd, mut take_log_listener_response_rcv) = futures::channel::mpsc::channel(1); fs.add_fidl_service_at( LogConnectorMarker::NAME, move |mut stream: LogConnectorRequestStream| { let mut serverend = serverend.take(); let mut sender_mut = Some(take_log_listener_response_snd.clone()); fasync::Task::spawn(async move { while let Some(LogConnectorRequest::TakeLogConnectionListener { responder }) = stream.try_next().await.unwrap() { responder.send(serverend.take()).unwrap(); if let Some(mut sender) = sender_mut.take() { sender.send(()).await.unwrap(); } } }) .detach() }, ) .serve_connection(dir_server) .unwrap(); fasync::Task::spawn(fs.collect()).detach(); // launch archivist-for-embedding.cmx let launcher = connect_to_protocol::<LauncherMarker>().unwrap(); let mut options = LaunchOptions::new(); options.set_additional_services(vec![LogConnectorMarker::NAME.to_string()], dir_client); let mut archivist = launch_with_options( &launcher, 
"fuchsia-pkg://fuchsia.com/archivist-for-embedding#meta/archivist-for-embedding.cmx" .to_owned(), None, options, ) .unwrap(); take_log_listener_response_rcv.next().await.unwrap(); // run log listener let log_proxy = archivist.connect_to_protocol::<LogMarker>().unwrap(); let (send_logs, recv_logs) = mpsc::unbounded(); fasync::Task::spawn(async move { let listen = Listener { send_logs }; let mut options = LogFilterOptions { filter_by_pid: true, pid: 1000, filter_by_tid: true, tid: 2000, verbosity: 0, min_severity: LogLevelFilter::None, tags: Vec::new(), }; run_log_listener_with_proxy(&log_proxy, listen, Some(&mut options), false, None) .await .unwrap(); }) .detach(); // connect to controller and call stop let controller = archivist.connect_to_protocol::<ControllerMarker>().unwrap(); controller.stop().unwrap(); // collect all logs let logs = recv_logs.map(|message| (message.severity, message.msg)).collect::<Vec<_>>().await; // recv_logs returned, means archivist_for_test must be dead. check. assert!(archivist.wait().await.unwrap().success()); assert_eq!( logs, std::iter::repeat((INFO, "repeated log".to_owned())).take(250).collect::<Vec<_>>() ); } struct Listener { send_logs: mpsc::UnboundedSender<LogMessage>, } impl LogProcessor for Listener { fn log(&mut self, message: LogMessage) { self.send_logs.unbounded_send(message).unwrap(); } fn done(&mut self) { panic!("this should not be called"); } }
38.22293
99
0.628062
c1794801e87ff2eb1e422812494c64e88611bc58
1,357
use ansi_term::Colour::Cyan; use serde::Serialize; use structopt::StructOpt; use rover_client::operations::subgraph::list::{self, SubgraphListInput}; use rover_client::shared::GraphRef; use crate::command::RoverOutput; use crate::utils::client::StudioClientConfig; use crate::Result; #[derive(Debug, Serialize, StructOpt)] pub struct List { /// <NAME>@<VARIANT> of graph in Apollo Studio to list subgraphs from. /// @<VARIANT> may be left off, defaulting to @current #[structopt(name = "GRAPH_REF")] #[serde(skip_serializing)] graph: GraphRef, /// Name of configuration profile to use #[structopt(long = "profile", default_value = "default")] #[serde(skip_serializing)] profile_name: String, } impl List { pub fn run(&self, client_config: StudioClientConfig) -> Result<RoverOutput> { let client = client_config.get_authenticated_client(&self.profile_name)?; eprintln!( "Listing subgraphs for {} using credentials from the {} profile.", Cyan.normal().paint(self.graph.to_string()), Cyan.normal().paint(&self.profile_name) ); let list_details = list::run( SubgraphListInput { graph_ref: self.graph.clone(), }, &client, )?; Ok(RoverOutput::SubgraphList(list_details)) } }
29.5
81
0.646279
1174c2c11e933b783e355f57de7f6ba360c7110f
880
// This file is auto generated by [`cg`] from [`schema`]. // // **DO NOT EDIT THIS FILE**, // // Edit `cg` or `schema` instead. // // [cg]: https://github.com/teloxide/cg // [`schema`]: https://github.com/WaffleLapkin/tg-methods-schema use serde::Serialize; use crate::types::{Chat, Recipient}; impl_payload! { /// Use this method to get up to date information about the chat (current name of the user for one-on-one conversations, current username of a user, group or channel, etc.). Returns a [`Chat`] object on success. /// /// [`Chat`]: crate::types::Chat #[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize)] pub GetChat (GetChatSetters) => Chat { required { /// Unique identifier for the target chat or username of the target channel (in the format `@channelusername`) pub chat_id: Recipient [into], } } }
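
// Illustrative call sketch (added for context, not part of the generated
// file): this payload backs something like the following call on the usual
// `Requester` surface. `bot` and the channel username are assumptions.
//
// let chat: Chat = bot
//     .get_chat(Recipient::ChannelUsername("@channelusername".to_owned()))
//     .await?;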
35.2
215
0.642045
71fd5937d10e40d27d084050199fd53781d22fad
5,722
extern crate clap; extern crate noria; extern crate rand; extern crate slog; mod test_populate; use noria::{ControllerBuilder, DataType, LocalAuthority, LocalControllerHandle, ReuseConfigType}; use std::collections::HashMap; use std::fs::File; use std::io::Write; use std::{thread, time}; pub struct Backend { g: LocalControllerHandle<LocalAuthority>, } impl Backend { pub fn new(partial: bool, _shard: bool, reuse: &str) -> Backend { let mut cb = ControllerBuilder::default(); let log = noria::logger_pls(); let blender_log = log.clone(); if !partial { cb.disable_partial(); } cb.log_with(blender_log); match reuse.as_ref() { "finkelstein" => cb.set_reuse(ReuseConfigType::Finkelstein), "full" => cb.set_reuse(ReuseConfigType::Full), "noreuse" => cb.set_reuse(ReuseConfigType::NoReuse), "relaxed" => cb.set_reuse(ReuseConfigType::Relaxed), _ => panic!("reuse configuration not supported"), } let g = cb.build_local().unwrap(); Backend { g: g } } fn login(&mut self, user_context: HashMap<String, DataType>) -> Result<(), String> { self.g.create_universe(user_context.clone()); Ok(()) } fn set_security_config(&mut self, config_file: &str) { use std::io::Read; let mut config = String::new(); let mut cf = File::open(config_file).unwrap(); cf.read_to_string(&mut config).unwrap(); // Install recipe with policies self.g.set_security_config(config); } fn migrate(&mut self, schema_file: &str, query_file: Option<&str>) -> Result<(), String> { use std::fs::File; use std::io::Read; // Read schema file let mut sf = File::open(schema_file).unwrap(); let mut s = String::new(); sf.read_to_string(&mut s).unwrap(); let mut rs = s.clone(); s.clear(); // Read query file match query_file { None => (), Some(qf) => { let mut qf = File::open(qf).unwrap(); qf.read_to_string(&mut s).unwrap(); rs.push_str("\n"); rs.push_str(&s); } } // Install recipe self.g.install_recipe(&rs).unwrap(); Ok(()) } } fn make_user(name: &str) -> HashMap<String, DataType> { let mut user = HashMap::new(); user.insert(String::from("id"), name.into()); user } fn main() { use clap::{App, Arg}; let args = App::new("SecureCRP") .version("0.1") .about("Benchmarks HotCRP-like application with security policies.") .arg( Arg::with_name("schema") .short("s") .required(true) .default_value("benchmarks/securecrp/jeeves_schema.sql") .help("SQL schema file"), ) .arg( Arg::with_name("queries") .short("q") .required(true) .default_value("benchmarks/securecrp/jeeves_queries.sql") .help("SQL query file"), ) .arg( Arg::with_name("policies") .long("policies") .required(true) .default_value("benchmarks/securecrp/jeeves_policies.json") .help("Security policies file"), ) .arg( Arg::with_name("graph") .short("g") .default_value("graph.gv") .help("File to dump graph"), ) .arg( Arg::with_name("reuse") .long("reuse") .default_value("full") .possible_values(&["noreuse", "finkelstein", "relaxed", "full"]) .help("Query reuse algorithm"), ) .arg( Arg::with_name("shard") .long("shard") .help("Enable sharding"), ) .arg( Arg::with_name("partial") .long("partial") .help("Enable partial materialization"), ) .arg( Arg::with_name("populate") .long("populate") .help("Populate app with randomly generated data"), ) .arg(Arg::with_name("user").long("user").default_value("malte")) .get_matches(); println!("Starting SecureCRP..."); // Read arguments let sloc = args.value_of("schema").unwrap(); let qloc = args.value_of("queries").unwrap(); let ploc = args.value_of("policies").unwrap(); let gloc = args.value_of("graph"); let partial = args.is_present("partial"); let shard = 
args.is_present("shard"); let reuse = args.value_of("reuse").unwrap(); let user = args.value_of("user").unwrap(); let mut backend = Backend::new(partial, shard, reuse); backend.migrate(sloc, None).unwrap(); backend.set_security_config(ploc); backend.migrate(sloc, Some(qloc)).unwrap(); if args.is_present("populate") { test_populate::create_users(&mut backend); } thread::sleep(time::Duration::from_millis(2000)); backend.login(make_user(user)).is_ok(); if args.is_present("populate") { test_populate::create_papers(&mut backend); test_populate::dump_papers(&mut backend, user); } test_populate::dump_all_papers(&mut backend); if gloc.is_some() { let graph_fname = gloc.unwrap(); let mut gf = File::create(graph_fname).unwrap(); assert!(write!(gf, "{}", backend.g.graphviz().unwrap()).is_ok()); } // sleep "forever" thread::sleep(time::Duration::from_millis(200000000)); }
29.647668
97
0.545963
677b9d1b73d92bf937e58bba7705b85be6ae7257
2,290
// structs3.rs // Structs contain data, but can also have logic. In this exercise we have // defined the Package struct and we want to test some logic attached to it. // Make the code compile and the tests pass! // If you have issues execute `rustlings hint structs3` #[derive(Debug)] struct Package { sender_country: String, recipient_country: String, weight_in_grams: i32, } impl Package { fn new(sender_country: String, recipient_country: String, weight_in_grams: i32) -> Package { if weight_in_grams <= 0 { // Something goes here... panic!("fail_creating_weightless_package") } else { Package { sender_country, recipient_country, weight_in_grams, } } } fn is_international(&self) -> bool { // Something goes here... self.sender_country != self.recipient_country } fn get_fees(&self, cents_per_gram: i32) -> i32 { // Something goes here... cents_per_gram * self.weight_in_grams } } #[cfg(test)] mod tests { use super::*; #[test] #[should_panic] fn fail_creating_weightless_package() { let sender_country = String::from("Spain"); let recipient_country = String::from("Austria"); Package::new(sender_country, recipient_country, -2210); } #[test] fn create_international_package() { let sender_country = String::from("Spain"); let recipient_country = String::from("Russia"); let package = Package::new(sender_country, recipient_country, 1200); assert!(package.is_international()); } #[test] fn create_local_package() { let sender_country = String::from("Canada"); let recipient_country = sender_country.clone(); let package = Package::new(sender_country, recipient_country, 1200); assert!(!package.is_international()); } #[test] fn calculate_transport_fees() { let sender_country = String::from("Spain"); let recipient_country = String::from("Spain"); let cents_per_gram = 3; let package = Package::new(sender_country, recipient_country, 1500); assert_eq!(package.get_fees(cents_per_gram), 4500); } }
27.261905
96
0.625764
db51d59d80f5ceb75f71c5e565f6968076af3d91
32,021
#![doc = "generated by AutoRust 0.1.0"] #![allow(non_camel_case_types)] #![allow(unused_imports)] use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct OperationListResult { #[serde(skip_serializing)] pub value: Vec<OperationValue>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct OperationValue { #[serde(skip_serializing)] pub origin: Option<String>, #[serde(skip_serializing)] pub name: Option<String>, #[serde(skip_serializing_if = "Option::is_none")] pub display: Option<OperationValueDisplay>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct OperationValueDisplay { #[serde(skip_serializing)] pub operation: Option<String>, #[serde(skip_serializing)] pub resource: Option<String>, #[serde(skip_serializing)] pub description: Option<String>, #[serde(skip_serializing)] pub provider: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Resource { #[serde(skip_serializing)] pub id: Option<String>, #[serde(skip_serializing)] pub name: Option<String>, #[serde(rename = "type", skip_serializing)] pub type_: Option<String>, pub location: String, #[serde(skip_serializing_if = "Option::is_none")] pub tags: Option<serde_json::Value>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct SubResource { #[serde(skip_serializing)] pub id: Option<String>, #[serde(skip_serializing)] pub name: Option<String>, #[serde(rename = "type", skip_serializing)] pub type_: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct TagsObject { #[serde(skip_serializing_if = "Option::is_none")] pub tags: Option<serde_json::Value>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ContainerServiceOsDisk {} #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum ContainerServiceStorageProfile { StorageAccount, ManagedDisks, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ContainerServiceVnetSubnetId {} #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum ContainerServiceVmSize { #[serde(rename = "Standard_A1")] StandardA1, #[serde(rename = "Standard_A10")] StandardA10, #[serde(rename = "Standard_A11")] StandardA11, #[serde(rename = "Standard_A1_v2")] StandardA1V2, #[serde(rename = "Standard_A2")] StandardA2, #[serde(rename = "Standard_A2_v2")] StandardA2V2, #[serde(rename = "Standard_A2m_v2")] StandardA2mV2, #[serde(rename = "Standard_A3")] StandardA3, #[serde(rename = "Standard_A4")] StandardA4, #[serde(rename = "Standard_A4_v2")] StandardA4V2, #[serde(rename = "Standard_A4m_v2")] StandardA4mV2, #[serde(rename = "Standard_A5")] StandardA5, #[serde(rename = "Standard_A6")] StandardA6, #[serde(rename = "Standard_A7")] StandardA7, #[serde(rename = "Standard_A8")] StandardA8, #[serde(rename = "Standard_A8_v2")] StandardA8V2, #[serde(rename = "Standard_A8m_v2")] StandardA8mV2, #[serde(rename = "Standard_A9")] StandardA9, #[serde(rename = "Standard_B2ms")] StandardB2ms, #[serde(rename = "Standard_B2s")] StandardB2s, #[serde(rename = "Standard_B4ms")] StandardB4ms, #[serde(rename = "Standard_B8ms")] StandardB8ms, #[serde(rename = "Standard_D1")] StandardD1, #[serde(rename = "Standard_D11")] StandardD11, #[serde(rename = "Standard_D11_v2")] StandardD11V2, #[serde(rename = "Standard_D11_v2_Promo")] StandardD11V2Promo, #[serde(rename = "Standard_D12")] StandardD12, #[serde(rename = "Standard_D12_v2")] StandardD12V2, #[serde(rename = "Standard_D12_v2_Promo")] StandardD12V2Promo, 
#[serde(rename = "Standard_D13")] StandardD13, #[serde(rename = "Standard_D13_v2")] StandardD13V2, #[serde(rename = "Standard_D13_v2_Promo")] StandardD13V2Promo, #[serde(rename = "Standard_D14")] StandardD14, #[serde(rename = "Standard_D14_v2")] StandardD14V2, #[serde(rename = "Standard_D14_v2_Promo")] StandardD14V2Promo, #[serde(rename = "Standard_D15_v2")] StandardD15V2, #[serde(rename = "Standard_D16_v3")] StandardD16V3, #[serde(rename = "Standard_D16s_v3")] StandardD16sV3, #[serde(rename = "Standard_D1_v2")] StandardD1V2, #[serde(rename = "Standard_D2")] StandardD2, #[serde(rename = "Standard_D2_v2")] StandardD2V2, #[serde(rename = "Standard_D2_v2_Promo")] StandardD2V2Promo, #[serde(rename = "Standard_D2_v3")] StandardD2V3, #[serde(rename = "Standard_D2s_v3")] StandardD2sV3, #[serde(rename = "Standard_D3")] StandardD3, #[serde(rename = "Standard_D32_v3")] StandardD32V3, #[serde(rename = "Standard_D32s_v3")] StandardD32sV3, #[serde(rename = "Standard_D3_v2")] StandardD3V2, #[serde(rename = "Standard_D3_v2_Promo")] StandardD3V2Promo, #[serde(rename = "Standard_D4")] StandardD4, #[serde(rename = "Standard_D4_v2")] StandardD4V2, #[serde(rename = "Standard_D4_v2_Promo")] StandardD4V2Promo, #[serde(rename = "Standard_D4_v3")] StandardD4V3, #[serde(rename = "Standard_D4s_v3")] StandardD4sV3, #[serde(rename = "Standard_D5_v2")] StandardD5V2, #[serde(rename = "Standard_D5_v2_Promo")] StandardD5V2Promo, #[serde(rename = "Standard_D64_v3")] StandardD64V3, #[serde(rename = "Standard_D64s_v3")] StandardD64sV3, #[serde(rename = "Standard_D8_v3")] StandardD8V3, #[serde(rename = "Standard_D8s_v3")] StandardD8sV3, #[serde(rename = "Standard_DS1")] StandardDs1, #[serde(rename = "Standard_DS11")] StandardDs11, #[serde(rename = "Standard_DS11_v2")] StandardDs11V2, #[serde(rename = "Standard_DS11_v2_Promo")] StandardDs11V2Promo, #[serde(rename = "Standard_DS12")] StandardDs12, #[serde(rename = "Standard_DS12_v2")] StandardDs12V2, #[serde(rename = "Standard_DS12_v2_Promo")] StandardDs12V2Promo, #[serde(rename = "Standard_DS13")] StandardDs13, #[serde(rename = "Standard_DS13-2_v2")] StandardDs132V2, #[serde(rename = "Standard_DS13-4_v2")] StandardDs134V2, #[serde(rename = "Standard_DS13_v2")] StandardDs13V2, #[serde(rename = "Standard_DS13_v2_Promo")] StandardDs13V2Promo, #[serde(rename = "Standard_DS14")] StandardDs14, #[serde(rename = "Standard_DS14-4_v2")] StandardDs144V2, #[serde(rename = "Standard_DS14-8_v2")] StandardDs148V2, #[serde(rename = "Standard_DS14_v2")] StandardDs14V2, #[serde(rename = "Standard_DS14_v2_Promo")] StandardDs14V2Promo, #[serde(rename = "Standard_DS15_v2")] StandardDs15V2, #[serde(rename = "Standard_DS1_v2")] StandardDs1V2, #[serde(rename = "Standard_DS2")] StandardDs2, #[serde(rename = "Standard_DS2_v2")] StandardDs2V2, #[serde(rename = "Standard_DS2_v2_Promo")] StandardDs2V2Promo, #[serde(rename = "Standard_DS3")] StandardDs3, #[serde(rename = "Standard_DS3_v2")] StandardDs3V2, #[serde(rename = "Standard_DS3_v2_Promo")] StandardDs3V2Promo, #[serde(rename = "Standard_DS4")] StandardDs4, #[serde(rename = "Standard_DS4_v2")] StandardDs4V2, #[serde(rename = "Standard_DS4_v2_Promo")] StandardDs4V2Promo, #[serde(rename = "Standard_DS5_v2")] StandardDs5V2, #[serde(rename = "Standard_DS5_v2_Promo")] StandardDs5V2Promo, #[serde(rename = "Standard_E16_v3")] StandardE16V3, #[serde(rename = "Standard_E16s_v3")] StandardE16sV3, #[serde(rename = "Standard_E2_v3")] StandardE2V3, #[serde(rename = "Standard_E2s_v3")] StandardE2sV3, #[serde(rename = "Standard_E32-16s_v3")] StandardE3216sV3, 
#[serde(rename = "Standard_E32-8s_v3")] StandardE328sV3, #[serde(rename = "Standard_E32_v3")] StandardE32V3, #[serde(rename = "Standard_E32s_v3")] StandardE32sV3, #[serde(rename = "Standard_E4_v3")] StandardE4V3, #[serde(rename = "Standard_E4s_v3")] StandardE4sV3, #[serde(rename = "Standard_E64-16s_v3")] StandardE6416sV3, #[serde(rename = "Standard_E64-32s_v3")] StandardE6432sV3, #[serde(rename = "Standard_E64_v3")] StandardE64V3, #[serde(rename = "Standard_E64s_v3")] StandardE64sV3, #[serde(rename = "Standard_E8_v3")] StandardE8V3, #[serde(rename = "Standard_E8s_v3")] StandardE8sV3, #[serde(rename = "Standard_F1")] StandardF1, #[serde(rename = "Standard_F16")] StandardF16, #[serde(rename = "Standard_F16s")] StandardF16s, #[serde(rename = "Standard_F16s_v2")] StandardF16sV2, #[serde(rename = "Standard_F1s")] StandardF1s, #[serde(rename = "Standard_F2")] StandardF2, #[serde(rename = "Standard_F2s")] StandardF2s, #[serde(rename = "Standard_F2s_v2")] StandardF2sV2, #[serde(rename = "Standard_F32s_v2")] StandardF32sV2, #[serde(rename = "Standard_F4")] StandardF4, #[serde(rename = "Standard_F4s")] StandardF4s, #[serde(rename = "Standard_F4s_v2")] StandardF4sV2, #[serde(rename = "Standard_F64s_v2")] StandardF64sV2, #[serde(rename = "Standard_F72s_v2")] StandardF72sV2, #[serde(rename = "Standard_F8")] StandardF8, #[serde(rename = "Standard_F8s")] StandardF8s, #[serde(rename = "Standard_F8s_v2")] StandardF8sV2, #[serde(rename = "Standard_G1")] StandardG1, #[serde(rename = "Standard_G2")] StandardG2, #[serde(rename = "Standard_G3")] StandardG3, #[serde(rename = "Standard_G4")] StandardG4, #[serde(rename = "Standard_G5")] StandardG5, #[serde(rename = "Standard_GS1")] StandardGs1, #[serde(rename = "Standard_GS2")] StandardGs2, #[serde(rename = "Standard_GS3")] StandardGs3, #[serde(rename = "Standard_GS4")] StandardGs4, #[serde(rename = "Standard_GS4-4")] StandardGs44, #[serde(rename = "Standard_GS4-8")] StandardGs48, #[serde(rename = "Standard_GS5")] StandardGs5, #[serde(rename = "Standard_GS5-16")] StandardGs516, #[serde(rename = "Standard_GS5-8")] StandardGs58, #[serde(rename = "Standard_H16")] StandardH16, #[serde(rename = "Standard_H16m")] StandardH16m, #[serde(rename = "Standard_H16mr")] StandardH16mr, #[serde(rename = "Standard_H16r")] StandardH16r, #[serde(rename = "Standard_H8")] StandardH8, #[serde(rename = "Standard_H8m")] StandardH8m, #[serde(rename = "Standard_L16s")] StandardL16s, #[serde(rename = "Standard_L32s")] StandardL32s, #[serde(rename = "Standard_L4s")] StandardL4s, #[serde(rename = "Standard_L8s")] StandardL8s, #[serde(rename = "Standard_M128-32ms")] StandardM12832ms, #[serde(rename = "Standard_M128-64ms")] StandardM12864ms, #[serde(rename = "Standard_M128ms")] StandardM128ms, #[serde(rename = "Standard_M128s")] StandardM128s, #[serde(rename = "Standard_M64-16ms")] StandardM6416ms, #[serde(rename = "Standard_M64-32ms")] StandardM6432ms, #[serde(rename = "Standard_M64ms")] StandardM64ms, #[serde(rename = "Standard_M64s")] StandardM64s, #[serde(rename = "Standard_NC12")] StandardNc12, #[serde(rename = "Standard_NC12s_v2")] StandardNc12sV2, #[serde(rename = "Standard_NC12s_v3")] StandardNc12sV3, #[serde(rename = "Standard_NC24")] StandardNc24, #[serde(rename = "Standard_NC24r")] StandardNc24r, #[serde(rename = "Standard_NC24rs_v2")] StandardNc24rsV2, #[serde(rename = "Standard_NC24rs_v3")] StandardNc24rsV3, #[serde(rename = "Standard_NC24s_v2")] StandardNc24sV2, #[serde(rename = "Standard_NC24s_v3")] StandardNc24sV3, #[serde(rename = "Standard_NC6")] StandardNc6, #[serde(rename = 
"Standard_NC6s_v2")] StandardNc6sV2, #[serde(rename = "Standard_NC6s_v3")] StandardNc6sV3, #[serde(rename = "Standard_ND12s")] StandardNd12s, #[serde(rename = "Standard_ND24rs")] StandardNd24rs, #[serde(rename = "Standard_ND24s")] StandardNd24s, #[serde(rename = "Standard_ND6s")] StandardNd6s, #[serde(rename = "Standard_NV12")] StandardNv12, #[serde(rename = "Standard_NV24")] StandardNv24, #[serde(rename = "Standard_NV6")] StandardNv6, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ManagedClusterServicePrincipalProfile { #[serde(rename = "clientId")] pub client_id: String, #[serde(skip_serializing_if = "Option::is_none")] pub secret: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ContainerServiceMasterProfile { #[serde(skip_serializing_if = "Option::is_none")] pub count: Option<container_service_master_profile::Count>, #[serde(rename = "dnsPrefix")] pub dns_prefix: String, #[serde(rename = "vmSize")] pub vm_size: ContainerServiceVmSize, #[serde(rename = "osDiskSizeGB", skip_serializing_if = "Option::is_none")] pub os_disk_size_gb: Option<ContainerServiceOsDisk>, #[serde(rename = "vnetSubnetID", skip_serializing_if = "Option::is_none")] pub vnet_subnet_id: Option<ContainerServiceVnetSubnetId>, #[serde(rename = "firstConsecutiveStaticIP", skip_serializing_if = "Option::is_none")] pub first_consecutive_static_ip: Option<String>, #[serde(rename = "storageProfile", skip_serializing_if = "Option::is_none")] pub storage_profile: Option<ContainerServiceStorageProfile>, #[serde(skip_serializing)] pub fqdn: Option<String>, } pub mod container_service_master_profile { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Count {} } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ManagedClusterAgentPoolProfileProperties { #[serde(skip_serializing_if = "Option::is_none")] pub count: Option<i32>, #[serde(rename = "vmSize", skip_serializing_if = "Option::is_none")] pub vm_size: Option<ContainerServiceVmSize>, #[serde(rename = "osDiskSizeGB", skip_serializing_if = "Option::is_none")] pub os_disk_size_gb: Option<ContainerServiceOsDisk>, #[serde(rename = "vnetSubnetID", skip_serializing_if = "Option::is_none")] pub vnet_subnet_id: Option<ContainerServiceVnetSubnetId>, #[serde(rename = "maxPods", skip_serializing_if = "Option::is_none")] pub max_pods: Option<i32>, #[serde(rename = "osType", skip_serializing_if = "Option::is_none")] pub os_type: Option<OsType>, #[serde(rename = "maxCount", skip_serializing_if = "Option::is_none")] pub max_count: Option<i32>, #[serde(rename = "minCount", skip_serializing_if = "Option::is_none")] pub min_count: Option<i32>, #[serde(rename = "enableAutoScaling", skip_serializing_if = "Option::is_none")] pub enable_auto_scaling: Option<bool>, #[serde(rename = "type", skip_serializing_if = "Option::is_none")] pub type_: Option<AgentPoolType>, #[serde(rename = "orchestratorVersion", skip_serializing_if = "Option::is_none")] pub orchestrator_version: Option<String>, #[serde(rename = "provisioningState", skip_serializing)] pub provisioning_state: Option<String>, #[serde(rename = "availabilityZones", skip_serializing_if = "Vec::is_empty")] pub availability_zones: Vec<String>, #[serde(rename = "enableNodePublicIP", skip_serializing_if = "Option::is_none")] pub enable_node_public_ip: Option<bool>, #[serde(rename = "scaleSetPriority", skip_serializing_if = "Option::is_none")] pub scale_set_priority: Option<ScaleSetPriority>, #[serde(rename = "scaleSetEvictionPolicy", 
skip_serializing_if = "Option::is_none")] pub scale_set_eviction_policy: Option<ScaleSetEvictionPolicy>, #[serde(skip_serializing_if = "Option::is_none")] pub tags: Option<serde_json::Value>, #[serde(rename = "nodeLabels", skip_serializing_if = "Option::is_none")] pub node_labels: Option<serde_json::Value>, #[serde(rename = "nodeTaints", skip_serializing_if = "Vec::is_empty")] pub node_taints: Vec<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ManagedClusterAgentPoolProfile { #[serde(flatten)] pub managed_cluster_agent_pool_profile_properties: ManagedClusterAgentPoolProfileProperties, #[serde(flatten)] pub serde_json_value: serde_json::Value, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum AgentPoolType { VirtualMachineScaleSets, AvailabilitySet, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AgentPoolListResult { #[serde(skip_serializing_if = "Vec::is_empty")] pub value: Vec<AgentPool>, #[serde(rename = "nextLink", skip_serializing)] pub next_link: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AgentPool { #[serde(flatten)] pub sub_resource: SubResource, #[serde(flatten)] pub serde_json_value: serde_json::Value, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ManagedClusterWindowsProfile { #[serde(rename = "adminUsername")] pub admin_username: String, #[serde(rename = "adminPassword", skip_serializing_if = "Option::is_none")] pub admin_password: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ContainerServiceLinuxProfile { #[serde(rename = "adminUsername")] pub admin_username: String, pub ssh: ContainerServiceSshConfiguration, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ContainerServiceNetworkProfile { #[serde(rename = "networkPlugin", skip_serializing_if = "Option::is_none")] pub network_plugin: Option<container_service_network_profile::NetworkPlugin>, #[serde(rename = "networkPolicy", skip_serializing_if = "Option::is_none")] pub network_policy: Option<container_service_network_profile::NetworkPolicy>, #[serde(rename = "podCidr", skip_serializing_if = "Option::is_none")] pub pod_cidr: Option<String>, #[serde(rename = "serviceCidr", skip_serializing_if = "Option::is_none")] pub service_cidr: Option<String>, #[serde(rename = "dnsServiceIP", skip_serializing_if = "Option::is_none")] pub dns_service_ip: Option<String>, #[serde(rename = "dockerBridgeCidr", skip_serializing_if = "Option::is_none")] pub docker_bridge_cidr: Option<String>, #[serde(rename = "outboundType", skip_serializing_if = "Option::is_none")] pub outbound_type: Option<container_service_network_profile::OutboundType>, #[serde(rename = "loadBalancerSku", skip_serializing_if = "Option::is_none")] pub load_balancer_sku: Option<container_service_network_profile::LoadBalancerSku>, #[serde(rename = "loadBalancerProfile", skip_serializing_if = "Option::is_none")] pub load_balancer_profile: Option<ManagedClusterLoadBalancerProfile>, } pub mod container_service_network_profile { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum NetworkPlugin { #[serde(rename = "azure")] Azure, #[serde(rename = "kubenet")] Kubenet, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum NetworkPolicy { #[serde(rename = "calico")] Calico, #[serde(rename = "azure")] Azure, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum OutboundType { #[serde(rename = "loadBalancer")] 
LoadBalancer, #[serde(rename = "userDefinedRouting")] UserDefinedRouting, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum LoadBalancerSku { #[serde(rename = "standard")] Standard, #[serde(rename = "basic")] Basic, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ManagedClusterLoadBalancerProfile { #[serde(rename = "managedOutboundIPs", skip_serializing_if = "Option::is_none")] pub managed_outbound_i_ps: Option<managed_cluster_load_balancer_profile::ManagedOutboundIPs>, #[serde(rename = "outboundIPPrefixes", skip_serializing_if = "Option::is_none")] pub outbound_ip_prefixes: Option<managed_cluster_load_balancer_profile::OutboundIpPrefixes>, #[serde(rename = "outboundIPs", skip_serializing_if = "Option::is_none")] pub outbound_i_ps: Option<managed_cluster_load_balancer_profile::OutboundIPs>, #[serde(rename = "effectiveOutboundIPs", skip_serializing_if = "Vec::is_empty")] pub effective_outbound_i_ps: Vec<ResourceReference>, #[serde(rename = "allocatedOutboundPorts", skip_serializing_if = "Option::is_none")] pub allocated_outbound_ports: Option<i32>, #[serde(rename = "idleTimeoutInMinutes", skip_serializing_if = "Option::is_none")] pub idle_timeout_in_minutes: Option<i32>, } pub mod managed_cluster_load_balancer_profile { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ManagedOutboundIPs { #[serde(skip_serializing_if = "Option::is_none")] pub count: Option<i32>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct OutboundIpPrefixes { #[serde(rename = "publicIPPrefixes", skip_serializing_if = "Vec::is_empty")] pub public_ip_prefixes: Vec<ResourceReference>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct OutboundIPs { #[serde(rename = "publicIPs", skip_serializing_if = "Vec::is_empty")] pub public_i_ps: Vec<ResourceReference>, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ResourceReference { #[serde(skip_serializing_if = "Option::is_none")] pub id: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ContainerServiceSshConfiguration { #[serde(rename = "publicKeys")] pub public_keys: Vec<ContainerServiceSshPublicKey>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ContainerServiceSshPublicKey { #[serde(rename = "keyData")] pub key_data: String, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ContainerServiceDiagnosticsProfile { #[serde(rename = "vmDiagnostics")] pub vm_diagnostics: ContainerServiceVmDiagnostics, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ContainerServiceVmDiagnostics { pub enabled: bool, #[serde(rename = "storageUri", skip_serializing)] pub storage_uri: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ManagedClusterListResult { #[serde(skip_serializing_if = "Vec::is_empty")] pub value: Vec<ManagedCluster>, #[serde(rename = "nextLink", skip_serializing)] pub next_link: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ManagedCluster { #[serde(flatten)] pub resource: Resource, #[serde(flatten)] pub serde_json_value: serde_json::Value, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ManagedClusterProperties { #[serde(rename = "provisioningState", skip_serializing)] pub provisioning_state: Option<String>, #[serde(rename = "maxAgentPools", skip_serializing)] pub max_agent_pools: Option<i32>, #[serde(rename = 
"kubernetesVersion", skip_serializing_if = "Option::is_none")] pub kubernetes_version: Option<String>, #[serde(rename = "dnsPrefix", skip_serializing_if = "Option::is_none")] pub dns_prefix: Option<String>, #[serde(skip_serializing)] pub fqdn: Option<String>, #[serde(rename = "privateFQDN", skip_serializing)] pub private_fqdn: Option<String>, #[serde(rename = "agentPoolProfiles", skip_serializing_if = "Vec::is_empty")] pub agent_pool_profiles: Vec<ManagedClusterAgentPoolProfile>, #[serde(rename = "linuxProfile", skip_serializing_if = "Option::is_none")] pub linux_profile: Option<ContainerServiceLinuxProfile>, #[serde(rename = "windowsProfile", skip_serializing_if = "Option::is_none")] pub windows_profile: Option<ManagedClusterWindowsProfile>, #[serde(rename = "servicePrincipalProfile", skip_serializing_if = "Option::is_none")] pub service_principal_profile: Option<ManagedClusterServicePrincipalProfile>, #[serde(rename = "addonProfiles", skip_serializing_if = "Option::is_none")] pub addon_profiles: Option<serde_json::Value>, #[serde(rename = "nodeResourceGroup", skip_serializing_if = "Option::is_none")] pub node_resource_group: Option<String>, #[serde(rename = "enableRBAC", skip_serializing_if = "Option::is_none")] pub enable_rbac: Option<bool>, #[serde(rename = "enablePodSecurityPolicy", skip_serializing_if = "Option::is_none")] pub enable_pod_security_policy: Option<bool>, #[serde(rename = "networkProfile", skip_serializing_if = "Option::is_none")] pub network_profile: Option<ContainerServiceNetworkProfile>, #[serde(rename = "aadProfile", skip_serializing_if = "Option::is_none")] pub aad_profile: Option<ManagedClusterAadProfile>, #[serde(rename = "apiServerAccessProfile", skip_serializing_if = "Option::is_none")] pub api_server_access_profile: Option<ManagedClusterApiServerAccessProfile>, #[serde(rename = "identityProfile", skip_serializing_if = "Option::is_none")] pub identity_profile: Option<serde_json::Value>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ManagedClusterApiServerAccessProfile { #[serde(rename = "authorizedIPRanges", skip_serializing_if = "Vec::is_empty")] pub authorized_ip_ranges: Vec<String>, #[serde(rename = "enablePrivateCluster", skip_serializing_if = "Option::is_none")] pub enable_private_cluster: Option<bool>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ManagedClusterIdentity { #[serde(rename = "principalId", skip_serializing)] pub principal_id: Option<String>, #[serde(rename = "tenantId", skip_serializing)] pub tenant_id: Option<String>, #[serde(rename = "type", skip_serializing_if = "Option::is_none")] pub type_: Option<managed_cluster_identity::Type>, } pub mod managed_cluster_identity { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Type { SystemAssigned, None, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct UserAssignedIdentity { #[serde(rename = "resourceId", skip_serializing_if = "Option::is_none")] pub resource_id: Option<String>, #[serde(rename = "clientId", skip_serializing_if = "Option::is_none")] pub client_id: Option<String>, #[serde(rename = "objectId", skip_serializing_if = "Option::is_none")] pub object_id: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ManagedClusterAccessProfile { #[serde(flatten)] pub resource: Resource, #[serde(flatten)] pub serde_json_value: serde_json::Value, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AccessProfile { #[serde(rename = 
"kubeConfig", skip_serializing_if = "Option::is_none")] pub kube_config: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ManagedClusterPoolUpgradeProfile { #[serde(rename = "kubernetesVersion")] pub kubernetes_version: String, #[serde(skip_serializing_if = "Option::is_none")] pub name: Option<String>, #[serde(rename = "osType")] pub os_type: OsType, #[serde(skip_serializing_if = "Vec::is_empty")] pub upgrades: Vec<serde_json::Value>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ManagedClusterUpgradeProfileProperties { #[serde(rename = "controlPlaneProfile")] pub control_plane_profile: ManagedClusterPoolUpgradeProfile, #[serde(rename = "agentPoolProfiles")] pub agent_pool_profiles: Vec<ManagedClusterPoolUpgradeProfile>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ManagedClusterAadProfile { #[serde(rename = "clientAppID")] pub client_app_id: String, #[serde(rename = "serverAppID")] pub server_app_id: String, #[serde(rename = "serverAppSecret", skip_serializing_if = "Option::is_none")] pub server_app_secret: Option<String>, #[serde(rename = "tenantID", skip_serializing_if = "Option::is_none")] pub tenant_id: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ManagedClusterAddonProfile { pub enabled: bool, #[serde(skip_serializing_if = "Option::is_none")] pub config: Option<serde_json::Value>, #[serde(skip_serializing)] pub identity: Option<serde_json::Value>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ManagedClusterUpgradeProfile { #[serde(skip_serializing)] pub id: Option<String>, #[serde(skip_serializing)] pub name: Option<String>, #[serde(rename = "type", skip_serializing)] pub type_: Option<String>, pub properties: ManagedClusterUpgradeProfileProperties, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AgentPoolUpgradeProfile { #[serde(skip_serializing)] pub id: Option<String>, #[serde(skip_serializing)] pub name: Option<String>, #[serde(rename = "type", skip_serializing)] pub type_: Option<String>, pub properties: AgentPoolUpgradeProfileProperties, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AgentPoolUpgradeProfileProperties { #[serde(rename = "kubernetesVersion")] pub kubernetes_version: String, #[serde(rename = "osType")] pub os_type: OsType, #[serde(skip_serializing_if = "Vec::is_empty")] pub upgrades: Vec<serde_json::Value>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AgentPoolAvailableVersions { #[serde(skip_serializing)] pub id: Option<String>, #[serde(skip_serializing)] pub name: Option<String>, #[serde(rename = "type", skip_serializing)] pub type_: Option<String>, pub properties: AgentPoolAvailableVersionsProperties, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AgentPoolAvailableVersionsProperties { #[serde(rename = "agentPoolVersions", skip_serializing_if = "Vec::is_empty")] pub agent_pool_versions: Vec<serde_json::Value>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum OsType { Linux, Windows, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum ScaleSetPriority { Low, Regular, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum ScaleSetEvictionPolicy { Delete, Deallocate, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct CredentialResults { #[serde(skip_serializing)] pub kubeconfigs: Vec<CredentialResult>, } #[derive(Clone, Debug, 
PartialEq, Serialize, Deserialize)] pub struct CredentialResult { #[serde(skip_serializing)] pub name: Option<String>, #[serde(skip_serializing)] pub value: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct CloudError { #[serde(skip_serializing_if = "Option::is_none")] pub error: Option<CloudErrorBody>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct CloudErrorBody { #[serde(skip_serializing_if = "Option::is_none")] pub code: Option<String>, #[serde(skip_serializing_if = "Option::is_none")] pub message: Option<String>, #[serde(skip_serializing_if = "Option::is_none")] pub target: Option<String>, #[serde(skip_serializing_if = "Vec::is_empty")] pub details: Vec<CloudErrorBody>, }
37.233721
97
0.696824
acc6c8d5ba4820d0503b854a821966ca03243168
2,346
#[doc = "Register `TIMER` reader"] pub struct R(crate::R<TIMER_SPEC>); impl core::ops::Deref for R { type Target = crate::R<TIMER_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl core::convert::From<crate::R<TIMER_SPEC>> for R { fn from(reader: crate::R<TIMER_SPEC>) -> Self { R(reader) } } #[doc = "Field `VALUE` reader - Holds the current timer value of the down-counter. The initial value of the TIMERn register is loaded as IVALUE - 1 from the INTVALn register either at the end of the time interval or immediately in the following cases: INTVALn register is updated in the idle state. INTVALn register is updated with LOAD = 1. When the timer is in idle state, reading this bit fields returns -1 (0x00FF FFFF)."] pub struct VALUE_R(crate::FieldReader<u32, u32>); impl VALUE_R { pub(crate) fn new(bits: u32) -> Self { VALUE_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for VALUE_R { type Target = crate::FieldReader<u32, u32>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl R { #[doc = "Bits 0:23 - Holds the current timer value of the down-counter. The initial value of the TIMERn register is loaded as IVALUE - 1 from the INTVALn register either at the end of the time interval or immediately in the following cases: INTVALn register is updated in the idle state. INTVALn register is updated with LOAD = 1. When the timer is in idle state, reading this bit fields returns -1 (0x00FF FFFF)."] #[inline(always)] pub fn value(&self) -> VALUE_R { VALUE_R::new((self.bits & 0x00ff_ffff) as u32) } } #[doc = "MRT Timer register. This register reads the value of the down-counter.\n\nThis register you can [`read`](crate::generic::Reg::read). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [timer](index.html) module"] pub struct TIMER_SPEC; impl crate::RegisterSpec for TIMER_SPEC { type Ux = u32; } #[doc = "`read()` method returns [timer::R](R) reader structure"] impl crate::Readable for TIMER_SPEC { type Reader = R; } #[doc = "`reset()` method sets TIMER to value 0x00ff_ffff"] impl crate::Resettable for TIMER_SPEC { #[inline(always)] fn reset_value() -> Self::Ux { 0x00ff_ffff } }
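
// Illustrative read sketch (added for context; `mrt` below is an assumed
// peripheral handle obtained from the device crate's `Peripherals::take()`,
// and the exact path to this register depends on the generated device
// layout):
//
// let remaining = mrt.timer.read().value().bits(); // current down-counter value
// // A value of 0x00ff_ffff means the channel is idle (reads as -1, per the
// // field documentation above).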
45.115385
426
0.680733
4871320fdb5c08e11e1cbae159cb00407eaab561
7,385
use rustc_index::bit_set::BitSet;
use rustc_middle::mir::visit::{MutatingUseContext, NonMutatingUseContext, PlaceContext, Visitor};
use rustc_middle::mir::{self, Local, Location};

use crate::{AnalysisDomain, Backward, CallReturnPlaces, GenKill, GenKillAnalysis};

/// A [live-variable dataflow analysis][liveness].
///
/// This analysis considers references as being used only at the point of the
/// borrow. In other words, this analysis does not track uses because of references that already
/// exist. See [this `mir-dataflow` test][flow-test] for an example. You almost never want to use
/// this analysis without also looking at the results of [`MaybeBorrowedLocals`].
///
/// ## Field-(in)sensitivity
///
/// As the name suggests, this analysis is field insensitive. If a projection of a variable `x` is
/// assigned to (e.g. `x.0 = 42`), it does not "define" `x` as far as liveness is concerned. In fact,
/// such an assignment is currently marked as a "use" of `x` in an attempt to be maximally
/// conservative.
///
/// ## Enums and `SetDiscriminant`
///
/// Assigning a literal value to an `enum` (e.g. `Option<i32>`) does not result in a simple
/// assignment of the form `_1 = /*...*/` in the MIR. For example, the following assignment to `x`:
///
/// ```
/// x = Some(4);
/// ```
///
/// compiles to this MIR
///
/// ```
/// ((_1 as Some).0: i32) = const 4_i32;
/// discriminant(_1) = 1;
/// ```
///
/// However, `MaybeLiveLocals` **does** mark `x` (`_1`) as "killed" after a statement like this.
/// That's because it treats the `SetDiscriminant` operation as a definition of `x`, even though
/// the writes that actually initialized the locals happened earlier.
///
/// This makes `MaybeLiveLocals` unsuitable for certain classes of optimization normally associated
/// with a live variables analysis, notably dead-store elimination. It's a dirty hack, but it works
/// okay for the generator state transform (currently the main consumer of this analysis).
/// /// [`MaybeBorrowedLocals`]: super::MaybeBorrowedLocals /// [flow-test]: https://github.com/rust-lang/rust/blob/a08c47310c7d49cbdc5d7afb38408ba519967ecd/src/test/ui/mir-dataflow/liveness-ptr.rs /// [liveness]: https://en.wikipedia.org/wiki/Live_variable_analysis pub struct MaybeLiveLocals; impl MaybeLiveLocals { fn transfer_function<'a, T>(&self, trans: &'a mut T) -> TransferFunction<'a, T> { TransferFunction(trans) } } impl<'tcx> AnalysisDomain<'tcx> for MaybeLiveLocals { type Domain = BitSet<Local>; type Direction = Backward; const NAME: &'static str = "liveness"; fn bottom_value(&self, body: &mir::Body<'tcx>) -> Self::Domain { // bottom = not live BitSet::new_empty(body.local_decls.len()) } fn initialize_start_block(&self, _: &mir::Body<'tcx>, _: &mut Self::Domain) { // No variables are live until we observe a use } } impl<'tcx> GenKillAnalysis<'tcx> for MaybeLiveLocals { type Idx = Local; fn statement_effect( &self, trans: &mut impl GenKill<Self::Idx>, statement: &mir::Statement<'tcx>, location: Location, ) { self.transfer_function(trans).visit_statement(statement, location); } fn terminator_effect( &self, trans: &mut impl GenKill<Self::Idx>, terminator: &mir::Terminator<'tcx>, location: Location, ) { self.transfer_function(trans).visit_terminator(terminator, location); } fn call_return_effect( &self, trans: &mut impl GenKill<Self::Idx>, _block: mir::BasicBlock, return_places: CallReturnPlaces<'_, 'tcx>, ) { return_places.for_each(|place| { if let Some(local) = place.as_local() { trans.kill(local); } }); } fn yield_resume_effect( &self, trans: &mut impl GenKill<Self::Idx>, _resume_block: mir::BasicBlock, resume_place: mir::Place<'tcx>, ) { if let Some(local) = resume_place.as_local() { trans.kill(local); } } } struct TransferFunction<'a, T>(&'a mut T); impl<'tcx, T> Visitor<'tcx> for TransferFunction<'_, T> where T: GenKill<Local>, { fn visit_place(&mut self, place: &mir::Place<'tcx>, context: PlaceContext, location: Location) { let mir::Place { projection, local } = *place; // We purposefully do not call `super_place` here to avoid calling `visit_local` for this // place with one of the `Projection` variants of `PlaceContext`. self.visit_projection(place.as_ref(), context, location); match DefUse::for_place(context) { // Treat derefs as a use of the base local. `*p = 4` is not a def of `p` but a use. Some(_) if place.is_indirect() => self.0.gen(local), Some(DefUse::Def) if projection.is_empty() => self.0.kill(local), Some(DefUse::Use) => self.0.gen(local), _ => {} } } fn visit_local(&mut self, &local: &Local, context: PlaceContext, _: Location) { // Because we do not call `super_place` above, `visit_local` is only called for locals that // do not appear as part of a `Place` in the MIR. This handles cases like the implicit use // of the return place in a `Return` terminator or the index in an `Index` projection. match DefUse::for_place(context) { Some(DefUse::Def) => self.0.kill(local), Some(DefUse::Use) => self.0.gen(local), _ => {} } } } #[derive(Eq, PartialEq, Clone)] enum DefUse { Def, Use, } impl DefUse { fn for_place(context: PlaceContext) -> Option<DefUse> { match context { PlaceContext::NonUse(_) => None, PlaceContext::MutatingUse(MutatingUseContext::Store) => Some(DefUse::Def), // `MutatingUseContext::Call` and `MutatingUseContext::Yield` indicate that this is the // destination place for a `Call` return or `Yield` resume respectively. Since this is // only a `Def` when the function returns successfully, we handle this case separately // in `call_return_effect` above. 
PlaceContext::MutatingUse( MutatingUseContext::Call | MutatingUseContext::AsmOutput | MutatingUseContext::Yield, ) => None, // All other contexts are uses... PlaceContext::MutatingUse( MutatingUseContext::AddressOf | MutatingUseContext::Borrow | MutatingUseContext::Drop | MutatingUseContext::Retag, ) | PlaceContext::NonMutatingUse( NonMutatingUseContext::AddressOf | NonMutatingUseContext::Copy | NonMutatingUseContext::Inspect | NonMutatingUseContext::Move | NonMutatingUseContext::ShallowBorrow | NonMutatingUseContext::SharedBorrow | NonMutatingUseContext::UniqueBorrow, ) => Some(DefUse::Use), PlaceContext::MutatingUse(MutatingUseContext::Projection) | PlaceContext::NonMutatingUse(NonMutatingUseContext::Projection) => { unreachable!("A projection could be a def or a use and must be handled separately") } } } }
36.925
137
0.625728
22eaa119f2467167e57ed7373b8b9df23e00f145
244
// check-pass enum Nat { S(Box<Nat>), Z } fn test(x: &mut Nat) { let mut p = &mut *x; loop { match p { &mut Nat::Z => break, &mut Nat::S(ref mut n) => p = &mut *n } } } fn main() {}
13.555556
49
0.389344
1d31417fc25e077c2a7b71a2a379a8161c631f9a
733
// Copyright (c) The Libra Core Contributors // SPDX-License-Identifier: Apache-2.0 // <Black magic> // Increase recursion limit to allow for use of select! macro. #![recursion_limit = "1024"] // </Black magic> // Public exports #[macro_use] extern crate prometheus; pub use common::NetworkPublicKeys; pub use interface::NetworkProvider; pub mod common; pub mod connectivity_manager; pub mod error; pub mod interface; pub mod peer_manager; pub mod protocols; pub mod validator_network; mod counters; mod peer; mod sink; mod transport; pub type DisconnectReason = peer::DisconnectReason; pub type ConnectivityRequest = connectivity_manager::ConnectivityRequest; pub type ProtocolId = protocols::wire::handshake::v1::ProtocolId;
22.90625
73
0.774898
ccfa2d58adb20259c3fc160f313ba50c352a6a68
15,126
//!
//! Define types which allow us to serialize/de-serialize
//! messages when interacting with the todoist API.
//!
//! Types manually converted from documentation here:
//! - [Todoist API](https://developer.todoist.com/)
//!
//! Note: These use the v7 definition of the todoist API.
//!
//! We attribute each of the structs with:
//!
//! - `Serialize` and `Deserialize` impls for serialization.
//! - `Debug` impl for ... debugging.
//!
use serde::{Deserialize, Serialize};

/// Struct to be used for objects which we don't
/// yet support. It doesn't deserialize anything.
///
#[derive(Serialize, Deserialize, Debug)]
pub struct NotYetUsedStruct {}

#[derive(Serialize, Deserialize, Debug)]
pub struct LabelStruct {}

// Automatically generate:
// - `Serialize` and `Deserialize` impls for serialization.
// - `Debug` impl for ... debugging.
#[derive(Serialize, Deserialize, Debug)]
pub struct ItemStruct {
    /// The id of the task.
    pub id: u64,
    pub all_day: bool,
    /// The owner of the task.
    pub user_id: u64,
    /// The project that the task resides in.
    pub project_id: u64,
    /// The parent id of the task.
    pub parent_id: Option<u64>,
    /// The text of the task.
    pub content: String,
    /// The date of the task, added in free form text,
    /// for example it can be every day @ 10 (or null or an empty string if not set).
    pub date_string: String,
    /// The language of the date_string.
    /// Valid languages are: en, da, pl, zh, ko, de, pt, ja, it, fr, sv, ru, es, nl
    pub date_lang: String,
    /// The date of the task in the format:
    /// Mon 07 Aug 2006 12:34:56 +0000 (or null if not set).
    ///
    /// For all day task (i.e. task due “Today”),
    /// the time part will be set as xx:xx:59.
    pub due_date_utc: Option<String>,
    /// The priority of the task between: 1..4.
    /// - 1 is natural
    /// - 4 is very urgent
    pub priority: u8,
    /// The indent of the task between: 1..4.
    /// - 1 is top-level
    pub indent: u8,
    /// The order of the task inside the project.
    /// smallest value is placed at the top.
    pub item_order: u64,
    /// The order of the task inside the Today or Next 7 days view
    /// (a number, where the smallest value would place the task at the top).
    pub day_order: i64,
    /// Whether the task's sub-tasks are collapsed.
    pub collapsed: u8,
    /// The task's label ids.
    pub labels: Vec<u64>,
    /// The id of the user who assigned the current task.
    /// Only makes sense for shared projects.
    pub assigned_by_uid: Option<u64>,
    /// The id of user who is responsible for the current task.
    /// Only makes sense for shared projects.
    pub responsible_uid: Option<u64>,
    /// Whether the task is marked as completed.
    pub checked: u8,
    /// Whether the task has been marked as completed and is marked to be moved to history.
    pub in_history: u8,
    /// Whether the task is marked as deleted.
    pub is_deleted: u8,
    /// Whether the task is marked as archived.
    pub is_archived: u8,
    /// A special id for shared tasks. Can be ignored.
    pub sync_id: Option<u64>,
    /// The date when the task was created.
    pub date_added: Option<String>,
    /// The date when the task was completed.
    pub date_completed: Option<String>,
}

#[derive(Serialize, Deserialize, Debug)]
pub struct UserFeaturesStruct {
    karma_disabled: bool,
    restriction: u8,
    karma_vacation: bool,
    beta: u8,
    has_push_reminders: bool,
    dateist_inline_disabled: bool,
}

#[derive(Serialize, Deserialize, Debug)]
pub struct TimeZoneStruct {
    /// Hours difference from GMT.
    hours: i8,
    /// Minutes difference from GMT.
    minutes: i8,
    /// Time difference from GMT as a string.
    gmt_string: String,
    /// Whether daylight saving time applies.
    is_dst: u8,
    /// The timezone name.
    timezone: String,
}

#[derive(Serialize, Deserialize, Debug)]
pub struct UserStruct {
    /// URL to the small size of the user avatar.
    pub avatar_small: String,
    /// The users avatar in a medium size.
    pub avatar_medium: String,
    /// The users avatar in a big size.
    pub avatar_big: String,
    /// The users avatar in yet another resolution.
    pub avatar_s640: String,
    /// The users start page.
    pub start_page: String,
    /// What features the user has set.
    pub features: UserFeaturesStruct,
    /// How many tasks the user has completed today.
    pub completed_today: u32,
    /// Is this a premium user.
    pub is_premium: bool,
    /// If projects should be shown in oldest dates first (value = 0)
    /// or oldest dates last (value = 1)
    pub sort_order: u8,
    /// The users full name.
    pub full_name: String,
    /// The default time in minutes for the automatic reminders.
    pub auto_reminder: u32,
    /// The date the user joined.
    pub join_date: Option<String>,
    /// The users identification number.
    pub id: u64,
    /// The day of the next week, that tasks will be postponed to.
    /// (Between 1 and 7, where Monday is 1 and 7 is Sunday)
    pub next_week: u8,
    /// The total number of tasks the user has completed.
    pub completed_count: u64,
    /// The daily goal of tasks.
    pub daily_goal: u32,
    /// The currently selected Todoist theme (between 0 and 10).
    pub theme: u8,
    /// The users email address.
    pub email: String,
    /// The first day of the week (between 1 (Monday) and 7 (Sunday))
    pub start_day: u8,
    /// Users timezone information.
    pub tz_info: TimeZoneStruct,
    /// Whether to use the DD-MM-YYYY date format (if set to 0),
    /// or the MM-DD-YYYY format (if set to 1).
    pub date_format: u8,
    /// The project the user has selected to use as their inbox.
    pub inbox_project: u64,
    /// Whether to use a 24h format such as 13:00 (if set to 0) when displaying time,
    /// or a 12h format such as 1:00pm (if set to 1).
    pub time_format: u8,
    /// The id of the user's avatar.
    pub image_id: String,
    /// The user's karma trend.
    pub karma_trend: String,
    /// The id of the user's business account.
    pub business_account_id: Option<u64>,
    /// The users mobile phone number.
    pub mobile_number: Option<String>,
    /// The users mobile host number.
    pub mobile_host: Option<String>,
    /// The date when the user's Premium subscription ends.
    pub premium_until: Option<String>,
    /// The users authentication token.
    pub token: String,
    /// The user's karma score.
    pub karma: u64,
    /// Whether the user is a business account admin.
    pub is_biz_admin: bool,
    /// The default reminder for the user. Reminders are only possible for Premium users.
    /// The default reminder can be one of the following: email to send reminders by email,
    /// mobile to send reminders to mobile devices via SMS, push to send reminders to smart
    /// devices using push notifications (one of the Android or iOS official clients must be
    /// installed on the client side to receive these notifications),
    /// no_default to turn off sending default reminders.
    pub default_reminder: Option<String>,
}

#[derive(Serialize, Deserialize, Debug)]
pub struct FilterStruct {
    /// The presentation order of the filter.
    item_order: u8,
    /// The name of the filter.
    name: String,
    /// The query specifier for this filter.
    query: String,
    /// The color to present this filter as.
    color: u8,
    /// Is the filter deleted.
    is_deleted: u8,
    /// The identifier of this filter.
    id: u64,
}

#[derive(Serialize, Deserialize, Debug)]
pub struct ReminderStruct {
    /// The item id that this reminder is for.
    item_id: u64,
    /// The alias name for the location.
    name: String,
    /// The service to get the reminder for.
    service: String,
    /// The type of reminder, 'relative' for time-based reminder in minutes.
    /// 'absolute' for a time-based reminder with a specific time and date in the future.
    /// 'location' for a location-based reminder.
    /// type: String,
    /// The date of the task, added in free form text,
    /// for example it can be every day @ 10 (or null or an empty string if not set).
    date_string: String,
    /// The language of the date_string.
    /// Valid languages are: en, da, pl, zh, ko, de, pt, ja, it, fr, sv, ru, es, nl
    date_lang: String,
    /// Is the reminder deleted (0,1).
    is_deleted: u8,
    /// The identifier of this reminder.
    id: u64,
}

#[derive(Serialize, Deserialize, Debug)]
pub struct ProjectStruct {
    /// The name of the project.
    pub name: String,
    /// The id of the parent project.
    pub parent_id: Option<u64>,
    /// The color to present this project as.
    pub color: u8,
    /// Whether the project's sub-projects are collapsed (0,1)
    pub collapsed: u8,
    /// This project is marked as the inbox project.
    pub inbox_project: Option<bool>,
    /// This project has more notes.
    pub has_more_notes: Option<bool>,
    /// The item order (0,1).
    pub item_order: u8,
    /// The indent of the project (1..4), where 1 is top level.
    pub indent: u8,
    /// The identifier of this project.
    pub id: u64,
    /// Is the project deleted (0,1).
    pub is_deleted: u8,
    /// Whether the project is marked as archived (0,1)
    pub is_archived: u8,
}

// Automatically generate:
// - `Serialize` and `Deserialize` impls for serialization.
// - `Debug` impl for ... debugging.
#[derive(Serialize, Deserialize, Debug)]
pub struct SyncStruct {
    /// Collaborators.
    /// Note: Not yet used, so we don't parse it yet.
    pub collaborators: Option<Vec<NotYetUsedStruct>>,
    /// Collaborator states.
    /// Note: Not yet used, so we don't parse it yet.
    pub collaborator_states: Option<Vec<NotYetUsedStruct>>,
    /// Specifies the order of items in daily agenda.
    pub day_orders: Option<NotYetUsedStruct>,
    /// An array of filters.
    pub filters: Option<Vec<FilterStruct>>,
    /// Whether the response contains all data (full sync), or incremental.
    pub full_sync: bool,
    /// An array of items.
    pub items: Option<Vec<ItemStruct>>,
    /// An array of labels.
    pub labels: Option<Vec<LabelStruct>>,
    /// An array of live notifications.
    pub live_notifications: Option<Vec<NotYetUsedStruct>>,
    /// The last live notification the user saw.
    /// Used for implementing unread notifications.
    pub live_notifications_last_read_id: Option<u64>,
    /// An array of notes.
    /// Note: Not yet used, so we don't parse it yet.
    pub notes: Option<Vec<NotYetUsedStruct>>,
    /// An array of projects.
    pub projects: Option<Vec<ProjectStruct>>,
    /// An array of reminders.
    pub reminders: Option<Vec<ReminderStruct>>,
    /// A new synchronization token.
    /// Used by the client on the next sync request to do incremental sync.
    pub sync_token: String,
    /// The users information.
    pub user: Option<UserStruct>,
    /// No idea what this is used for.
    /// Note: Not yet used, so we don't parse it yet.
pub temp_id_mapping: Option<NotYetUsedStruct>, } #[cfg(test)] use serde_json; #[test] fn item_deserialize_test() { let json_item = r#"{ "all_day": true, "id": 33511505, "user_id": 1855589, "project_id": 128501470, "content": "Task1", "date_string": "", "date_lang": "en", "due_date_utc": null, "indent": 1, "priority": 1, "item_order": 1, "day_order": -1, "collapsed": 0, "labels": [12839231, 18391839], "assigned_by_uid": 1855589, "responsible_uid": null, "checked": 0, "in_history": 0, "is_deleted": 0, "is_archived": 0, "sync_id": null, "date_added": "Fri 26 Sep 2014 08:25:05 +0000" }"#; let item: ItemStruct = serde_json::from_str(&json_item).unwrap(); assert_eq!(item.id, 33_511_505); assert_eq!(item.user_id, 1_855_589); assert_eq!(item.content, "Task1"); assert_eq!(item.date_string, ""); assert_eq!(item.date_lang, "en"); assert_eq!(item.due_date_utc, None); assert_eq!(item.indent, 1); assert_eq!(item.priority, 1); assert_eq!(item.item_order, 1); assert_eq!(item.day_order, -1); assert_eq!(item.collapsed, 0); assert_eq!(item.labels, vec![12_839_231, 18_391_839]); assert_eq!(item.assigned_by_uid.unwrap(), 1_855_589); assert_eq!(item.responsible_uid, None); assert_eq!(item.checked, 0); assert_eq!(item.in_history, 0); assert_eq!(item.is_deleted, 0); assert_eq!(item.is_archived, 0); assert_eq!(item.sync_id, None); assert_eq!(item.date_added.unwrap(), "Fri 26 Sep 2014 08:25:05 +0000"); } #[test] fn item_deserialize_latest_test() { let json_item = r#"{ "all_day": true, "assigned_by_uid": 7261, "checked": 0, "collapsed": 0, "content": "Order bike electric regulator.", "date_added": "Sat 08 Jun 2019 13:00:28 +0000", "date_completed": null, "date_lang": "en", "date_string": "22 Jun", "day_order": 10, "due_date_utc": "Sun 23 Jun 2019 06:59:59 +0000", "id": 3238150997, "in_history": 0, "indent": 1, "is_archived": 0, "is_deleted": 0, "item_order": 5, "labels": [], "parent_id": null, "priority": 1, "project_id": 2211785308, "responsible_uid": null, "sync_id": null, "user_id": 7261 }"#; let _item: ItemStruct = serde_json::from_str(&json_item).unwrap(); } #[test] fn sync_deserialize_test() { let json_item = r#"{ "full_sync": true, "items": [ { "all_day": true, "assigned_by_uid": 7261, "checked": 0, "collapsed": 0, "content": "Order bike electric regulator.", "date_added": "Sat 08 Jun 2019 13:00:28 +0000", "date_completed": null, "date_lang": "en", "date_string": "22 Jun", "day_order": 10, "due_date_utc": "Sun 23 Jun 2019 06:59:59 +0000", "id": 3238150997, "in_history": 0, "indent": 1, "is_archived": 0, "is_deleted": 0, "item_order": 5, "labels": [], "parent_id": null, "priority": 1, "project_id": 2211785308, "responsible_uid": null, "sync_id": null, "user_id": 7261 } ], "sync_token": "PPHBRv43cSLUtndVYZxjN4JENgQ2AALKeESwpEhysYEtGxJBshJTCGVfPbRJLUGaMAqbcEYtrHB05wdO-6p2nHflyDYPzzjrwlf29nEDsfOVn1s", "temp_id_mapping": {} }"#; let _item: SyncStruct = serde_json::from_str(&json_item).unwrap(); }
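// Hedged illustration (added; not part of the original test suite): a minimal
// sketch showing that `ProjectStruct` also deserializes from a hand-written v7
// `projects` entry. The field values below are made up for the example.
#[test]
fn project_deserialize_example() {
    let json_project = r#"{
        "name": "Inbox",
        "parent_id": null,
        "color": 7,
        "collapsed": 0,
        "inbox_project": true,
        "has_more_notes": false,
        "item_order": 0,
        "indent": 1,
        "id": 128501470,
        "is_deleted": 0,
        "is_archived": 0
    }"#;

    let project: ProjectStruct = serde_json::from_str(json_project).unwrap();
    assert_eq!(project.name, "Inbox");
    assert_eq!(project.parent_id, None);
    assert_eq!(project.inbox_project, Some(true));
    assert_eq!(project.id, 128_501_470);
}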
28.593573
134
0.615298
c1b82b14133cae851f95a7f0a38e581a84d0b739
710
use std::io::{Read, Result, Seek, SeekFrom};

pub enum FileType {
    Archive,
    Elf,
    Unknown,
}

pub fn filetype<T>(io: &mut T) -> Result<FileType>
where
    T: Read + Seek,
{
    io.seek(SeekFrom::Start(0))?;
    // A short read leaves the tail zeroed, which cannot match either magic.
    let mut magic = [0; 8];
    io.read(&mut magic)?;
    io.seek(SeekFrom::Start(0))?;

    // ELF files start with 0x7F followed by "ELF".
    if &magic[0..4] == b"\x7FELF" {
        return Ok(FileType::Elf);
    }

    // Unix ar archives start with "!<arch>\n".
    if &magic == b"!<arch>\x0A" {
        return Ok(FileType::Archive);
    }

    Ok(FileType::Unknown)
}
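// Hedged usage sketch (added for illustration; not part of the original file).
// `std::fs::File` implements `Read + Seek`, so it can be handed to `filetype`
// directly. The path below is a placeholder, so the test is ignored by default.
#[cfg(test)]
mod filetype_example {
    use super::{filetype, FileType};
    use std::fs::File;

    #[test]
    #[ignore] // relies on a file existing at the placeholder path
    fn classify_a_binary() {
        let mut f = File::open("/bin/ls").expect("open file");
        match filetype(&mut f).expect("read magic") {
            FileType::Elf => println!("ELF binary"),
            FileType::Archive => println!("ar archive"),
            FileType::Unknown => println!("unrecognized format"),
        }
    }
}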
18.205128
63
0.440845
f9dcbf0b0ff251ac143b41deff882697890729e8
6,932
// Rust Bitcoin Library
// Written in 2014 by
//     Andrew Poelstra <[email protected]>
//
// To the extent possible under law, the author(s) have dedicated all
// copyright and related and neighboring rights to this software to
// the public domain worldwide. This software is distributed without
// any warranty.
//
// You should have received a copy of the CC0 Public Domain Dedication
// along with this software.
// If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
//

//! Bitcoin network addresses
//!
//! This module defines the structures and functions needed to encode
//! network addresses in Bitcoin messages.
//!

use std::io;
use std::fmt;
use std::net::{SocketAddr, Ipv6Addr, SocketAddrV4, SocketAddrV6};

use network::constants::ServiceFlags;
use consensus::encode::{self, Decodable, Encodable};

/// A message which can be sent on the Bitcoin network
#[derive(Clone, PartialEq, Eq, Hash)]
pub struct Address {
    /// Services provided by the peer whose address this is
    pub services: ServiceFlags,
    /// Network byte-order ipv6 address, or ipv4-mapped ipv6 address
    pub address: [u16; 8],
    /// Network port
    pub port: u16
}

const ONION: [u16; 3] = [0xFD87, 0xD87E, 0xEB43];

impl Address {
    /// Create an address message for a socket
    pub fn new(socket: &SocketAddr, services: ServiceFlags) -> Address {
        let (address, port) = match socket {
            &SocketAddr::V4(ref addr) => (addr.ip().to_ipv6_mapped().segments(), addr.port()),
            &SocketAddr::V6(ref addr) => (addr.ip().segments(), addr.port())
        };
        Address { address: address, port: port, services: services }
    }

    /// Extract socket address from an address message.
    /// This will return io::Error ErrorKind::AddrNotAvailable if the message contains a Tor address.
    pub fn socket_addr(&self) -> Result<SocketAddr, io::Error> {
        let addr = &self.address;
        if addr[0..3] == ONION {
            return Err(io::Error::from(io::ErrorKind::AddrNotAvailable));
        }
        let ipv6 = Ipv6Addr::new(
            addr[0], addr[1], addr[2], addr[3],
            addr[4], addr[5], addr[6], addr[7]
        );
        if let Some(ipv4) = ipv6.to_ipv4() {
            Ok(SocketAddr::V4(SocketAddrV4::new(ipv4, self.port)))
        } else {
            Ok(SocketAddr::V6(SocketAddrV6::new(ipv6, self.port, 0, 0)))
        }
    }
}

fn addr_to_be(addr: [u16; 8]) -> [u16; 8] {
    [addr[0].to_be(), addr[1].to_be(), addr[2].to_be(), addr[3].to_be(),
     addr[4].to_be(), addr[5].to_be(), addr[6].to_be(), addr[7].to_be()]
}

impl Encodable for Address {
    #[inline]
    fn consensus_encode<S: io::Write>(
        &self,
        mut s: S,
    ) -> Result<usize, encode::Error> {
        let len = self.services.consensus_encode(&mut s)?
            + addr_to_be(self.address).consensus_encode(&mut s)?
            + self.port.to_be().consensus_encode(s)?;
        Ok(len)
    }
}

impl Decodable for Address {
    #[inline]
    fn consensus_decode<D: io::Read>(mut d: D) -> Result<Self, encode::Error> {
        Ok(Address {
            services: Decodable::consensus_decode(&mut d)?,
            address: addr_to_be(Decodable::consensus_decode(&mut d)?),
            port: u16::from_be(Decodable::consensus_decode(d)?)
        })
    }
}

impl fmt::Debug for Address {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let ipv6 = Ipv6Addr::from(self.address);

        match ipv6.to_ipv4() {
            Some(addr) => write!(f, "Address {{services: {}, address: {}, port: {}}}",
                self.services, addr, self.port),
            None => write!(f, "Address {{services: {}, address: {}, port: {}}}",
                self.services, ipv6, self.port)
        }
    }
}

#[cfg(test)]
mod test {
    use std::str::FromStr;

    use super::Address;
    use network::constants::ServiceFlags;
    use std::net::{SocketAddr, IpAddr, Ipv4Addr, Ipv6Addr};
    use consensus::encode::{deserialize, serialize};

    #[test]
    fn serialize_address_test() {
        assert_eq!(serialize(&Address {
            services: ServiceFlags::NETWORK,
            address: [0, 0, 0, 0, 0, 0xffff, 0x0a00, 0x0001],
            port: 8333
        }),
        vec![1u8, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0x0a, 0, 0, 1,
             0x20, 0x8d]);
    }

    #[test]
    fn debug_format_test() {
        assert_eq!(
            format!("The address is: {:?}", Address {
                services: ServiceFlags::NETWORK.add(ServiceFlags::WITNESS),
                address: [0, 0, 0, 0, 0, 0xffff, 0x0a00, 0x0001],
                port: 8333
            }),
            "The address is: Address {services: ServiceFlags(NETWORK|WITNESS), address: 10.0.0.1, port: 8333}"
        );
        assert_eq!(
            format!("The address is: {:?}", Address {
                services: ServiceFlags::NETWORK_LIMITED,
                address: [0xFD87, 0xD87E, 0xEB43, 0, 0, 0xffff, 0x0a00, 0x0001],
                port: 8333
            }),
            "The address is: Address {services: ServiceFlags(NETWORK_LIMITED), address: fd87:d87e:eb43::ffff:a00:1, port: 8333}"
        );
    }

    #[test]
    fn deserialize_address_test() {
        let mut addr: Result<Address, _> = deserialize(&[1u8, 0, 0, 0, 0, 0, 0, 0,
                                                         0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                                         0xff, 0xff, 0x0a, 0, 0, 1, 0x20, 0x8d]);
        assert!(addr.is_ok());
        let full = addr.unwrap();
        assert!(match full.socket_addr().unwrap() {
            SocketAddr::V4(_) => true,
            _ => false
        });
        assert_eq!(full.services, ServiceFlags::NETWORK);
        assert_eq!(full.address, [0, 0, 0, 0, 0, 0xffff, 0x0a00, 0x0001]);
        assert_eq!(full.port, 8333);

        addr = deserialize(&[1u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                             0, 0, 0, 0xff, 0xff, 0x0a, 0, 0, 1]);
        assert!(addr.is_err());
    }

    #[test]
    fn test_socket_addr() {
        let s4 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(111, 222, 123, 4)), 5555);
        let a4 = Address::new(&s4, ServiceFlags::NETWORK | ServiceFlags::WITNESS);
        assert_eq!(a4.socket_addr().unwrap(), s4);
        let s6 = SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0x1111, 0x2222, 0x3333, 0x4444,
            0x5555, 0x6666, 0x7777, 0x8888)), 9999);
        let a6 = Address::new(&s6, ServiceFlags::NETWORK | ServiceFlags::WITNESS);
        assert_eq!(a6.socket_addr().unwrap(), s6);
    }

    #[test]
    fn onion_test() {
        let onionaddr = SocketAddr::new(
            IpAddr::V6(
                Ipv6Addr::from_str("FD87:D87E:EB43:edb1:8e4:3588:e546:35ca").unwrap()),
            1111);
        let addr = Address::new(&onionaddr, ServiceFlags::NONE);
        assert!(addr.socket_addr().is_err());
    }
}
35.367347
128
0.564772
7a6b4738c8e808ad7ed752e35fd4778bcf6b40ee
4,610
use dispatch::ffi;
use dispatch::{Group, Queue, QueueAttribute, QueuePriority};
use rand::random;
use std::cell::UnsafeCell;
use std::fmt;
use std::mem;
use std::ops::{Deref, DerefMut};
use std::sync::{Arc, LockResult};

pub struct ThreadPool {
    pub group: Group,
    pub system: Queue,       // serial, high priority
    pub process: Vec<Queue>, // process queues, default priority
    pub bg: Queue,           // serial, background priority
}

// recursive lock
#[derive(Debug)]
pub struct RecLock<T: ?Sized> {
    inner: Arc<RecLockInner>,
    data: UnsafeCell<T>,
}

struct RecLockInner {
    queue: Queue,
    semaphore: ffi::dispatch_semaphore_t,
}

#[derive(Debug)]
pub struct RecLockGuard<'a, T: ?Sized + 'a> {
    lock: &'a RecLock<T>,
}

impl ThreadPool {
    pub fn new() -> ThreadPool {
        let group = Group::create();
        let pr_high = Queue::global(QueuePriority::High);
        let pr_default = Queue::global(QueuePriority::Default);
        let pr_bg = Queue::global(QueuePriority::Background);
        let system = Queue::with_target_queue("system", QueueAttribute::Serial, &pr_high);
        let bg = Queue::with_target_queue("background", QueueAttribute::Serial, &pr_bg);
        let mut process = Vec::new();
        for n in 0..12 {
            let queue = Queue::with_target_queue(
                &format!("process{:?}", n),
                QueueAttribute::Concurrent,
                &pr_default,
            );
            process.push(queue)
        }
        ThreadPool {
            group,
            system,
            process,
            bg,
        }
    }

    pub fn wait_all(&self) {
        self.group.wait()
    }

    pub fn process_async<F>(&self, work: F)
    where
        F: 'static + Send + FnOnce(),
    {
        let n = match random::<u8>() {
            0 => 0,
            n => n % self.process.len() as u8,
        };
        let queue = &self.process[n as usize];
        self.group.async(queue, work)
    }
}

impl<T> RecLock<T> {
    pub fn new(data: T) -> RecLock<T> {
        RecLock {
            inner: Arc::new(RecLockInner::new()),
            data: UnsafeCell::new(data),
        }
    }

    pub fn get(&self) -> &T {
        unsafe { &*self.data.get() }
    }

    pub fn get_mut(&self) -> &mut T {
        unsafe { &mut *self.data.get() }
    }

    pub fn lock(&self) -> LockResult<RecLockGuard<T>> {
        self.inner.lock();
        Ok(RecLockGuard::new(self))
    }
}

unsafe impl<T> Send for RecLock<T> {}
unsafe impl<T> Sync for RecLock<T> {}

impl RecLockInner {
    #[inline]
    pub fn new() -> RecLockInner {
        let target = Queue::global(QueuePriority::Default);
        let queue = Queue::with_target_queue("RecLock", QueueAttribute::Serial, &target);
        let semaphore = unsafe { ffi::dispatch_semaphore_create(1) };
        RecLockInner { queue, semaphore }
    }

    #[inline]
    pub fn dispatch_object(&self) -> ffi::dispatch_object_t {
        unsafe { mem::transmute::<ffi::dispatch_semaphore_t, ffi::dispatch_object_t>(self.semaphore) }
    }

    pub fn wait(&self) {
        unsafe { ffi::dispatch_semaphore_wait(self.semaphore, ffi::DISPATCH_TIME_FOREVER) };
    }

    pub fn signal(&self) {
        unsafe { ffi::dispatch_semaphore_signal(self.dispatch_object()) };
    }

    pub fn lock(&self) {
        self.wait();
    }
}

impl Drop for RecLockInner {
    fn drop(&mut self) {
        unsafe {
            let obj =
                mem::transmute::<ffi::dispatch_semaphore_t, ffi::dispatch_object_t>(self.semaphore);
            ffi::dispatch_release(obj)
        };
    }
}

impl fmt::Debug for RecLockInner {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "RecLockInner")
    }
}

unsafe impl Send for RecLockInner {}
unsafe impl Sync for RecLockInner {}

impl<'a, T: ?Sized> RecLockGuard<'a, T> {
    pub fn new(lock: &'a RecLock<T>) -> RecLockGuard<'a, T> {
        RecLockGuard { lock }
    }
}

impl<'a, T: ?Sized> Deref for RecLockGuard<'a, T> {
    type Target = T;
    fn deref(&self) -> &T {
        unsafe { &*self.lock.data.get() }
    }
}

impl<'a, T: ?Sized> DerefMut for RecLockGuard<'a, T> {
    fn deref_mut(&mut self) -> &mut T {
        unsafe { &mut *self.lock.data.get() }
    }
}

impl<'a, T: ?Sized> Drop for RecLockGuard<'a, T> {
    fn drop(&mut self) {
        let inner = self.lock.inner.clone();
        inner.queue.sync(|| {
            inner.signal();
        });
    }
}

// nightly only
// impl<'a, T: ?Sized> !Send for RecLockGuard<'a, T> {}
unsafe impl<'a, T: ?Sized + Sync> Sync for RecLockGuard<'a, T> {}
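// Hedged usage sketch (added for illustration; not part of the original file).
// It assumes the GCD-backed semaphore in `RecLock` behaves like a mutex, which
// is what the types above are written to provide. libdispatch platforms only.
#[cfg(test)]
mod pool_example {
    use super::{RecLock, ThreadPool};
    use std::sync::Arc;

    #[test]
    fn counts_in_parallel() {
        let pool = ThreadPool::new();
        let counter = Arc::new(RecLock::new(0u32));

        for _ in 0..100 {
            let counter = counter.clone();
            pool.process_async(move || {
                // `lock` blocks on the semaphore; the guard signals it on drop.
                let mut guard = counter.lock().unwrap();
                *guard += 1;
            });
        }

        // Block until every task dispatched through the group has finished.
        pool.wait_all();
        assert_eq!(*counter.lock().unwrap(), 100);
    }
}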
25.32967
100
0.564425
e9ab3b5f99baa3666b41cd52d2ac02efe0262ad5
163
// compile-flags: -D while-true

fn main() {
    let mut i = 0;
    while true { //~ ERROR denote infinite loops with `loop
        i += 1;
        if i == 5 { break; }
    }
}
18.111111
58
0.539877
e83b0b168f7511b07ee3c377003186d0b6de7c6d
15,643
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

use std::collections::HashMap;
use std::fmt;

use failure::Error;

use ngram::NgramSet;
use preproc::{apply_aggressive, apply_normalizers};

/// The type of a license entry (typically in a `Store`).
#[derive(Clone, Copy, PartialEq, Debug, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum LicenseType {
    /// The canonical text of the license.
    Original,
    /// A license header. There may be more than one in a `Store`.
    Header,
    /// An alternate form of a license. This is intended to be used for
    /// alternate _formats_ of a license, not for variants where the text has
    /// different meaning. Not currently used in askalono's SPDX dataset.
    Alternate,
}

impl fmt::Display for LicenseType {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(
            f,
            "{}",
            match *self {
                LicenseType::Original => "original text",
                LicenseType::Header => "license header",
                LicenseType::Alternate => "alternate text",
            }
        )
    }
}

/// A structure representing compiled text/matching data.
///
/// This is the key structure used to compare two texts against one another. It
/// handles pre-processing the text to n-grams, scoring, and optimizing the
/// result to try to identify specific details about a match.
///
/// # Examples
///
/// Basic scoring of two texts:
///
/// ```
/// use askalono::TextData;
///
/// let license = TextData::from("My First License");
/// let sample = TextData::from("copyright 20xx me irl\n\n // my first license");
/// assert_eq!(sample.match_score(&license), 1.0);
/// ```
///
/// The above example is a perfect match, as identifiable copyright statements
/// are stripped out during pre-processing.
///
/// Building on that, TextData is able to tell you _where_ in the text a
/// license is located:
///
/// ```
/// # use std::error::Error;
/// # use askalono::TextData;
/// # fn main() -> Result<(), Box<Error>> {
/// # let license = TextData::from("My First License");
/// let sample = TextData::from("copyright 20xx me irl\n// My First License\nfn hello() {\n ...");
/// let (optimized, score) = sample.optimize_bounds(&license)?;
/// assert_eq!((1, 2), optimized.lines_view());
/// assert!(score > 0.99f32, "license within text matches");
/// # Ok(())
/// # }
/// ```
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct TextData {
    match_data: NgramSet,
    lines_view: (usize, usize),
    lines_normalized: Option<Vec<String>>,
    text_processed: Option<String>,
}

impl TextData {
    /// Create a new TextData structure from a string.
    ///
    /// The given text will be normalized, then smashed down into n-grams for
    /// matching. By default, the normalized text is stored inside the
    /// structure for future diagnostics. This is necessary for optimizing a
    /// match and for diffing against other texts. If you don't want this extra
    /// data, you can call `without_text` to throw it out. Generally, as a user
    /// of this library you want to keep the text data, but askalono will throw
    /// it away in its own `Store` as it's not needed.
    pub fn new(text: &str) -> TextData {
        let normalized = apply_normalizers(text);
        let normalized_joined = normalized.join("\n");
        let processed = apply_aggressive(&normalized_joined);
        let match_data = NgramSet::from_str(&processed, 2);

        TextData {
            match_data,
            lines_view: (0, normalized.len()),
            lines_normalized: Some(normalized),
            text_processed: Some(processed),
        }
    }

    // impl specialization might be nice to indicate that this type
    // is lacking stored text; perhaps there's another way to indicate that?
    // maybe an impl on an enum variant if/when that's available:
    // https://github.com/rust-lang/rfcs/pull/1450

    /// Consume this `TextData`, returning one without normalized/processed
    /// text stored.
    ///
    /// Unless you know you don't want the text, you probably don't want to use
    /// this. Other methods on `TextData` require that text is present.
    pub fn without_text(self) -> Self {
        TextData {
            match_data: self.match_data,
            lines_view: (0, 0),
            lines_normalized: None,
            text_processed: None,
        }
    }

    /// Get the bounds of the active line view.
    ///
    /// This represents the "active" region of lines that matches are generated
    /// from. The bounds are a 0-indexed `(start, end)` tuple, with inclusive
    /// start and exclusive end indices. See `optimize_bounds`.
    ///
    /// This is largely for informational purposes; other methods in
    /// `TextData`, such as `lines` and `match_score`, will already account for
    /// the line range. However, it's useful to call it after running
    /// `optimize_bounds` to discover where the input text was discovered.
    pub fn lines_view(&self) -> (usize, usize) {
        self.lines_view
    }

    /// Clone this `TextData`, creating a copy with the given view.
    ///
    /// This will re-generate match data for the given view. It's used in
    /// `optimize_bounds` to shrink/expand the view of the text to discover
    /// bounds.
    ///
    /// Other methods on `TextData` respect this boundary, so it's not needed
    /// outside this struct.
    pub fn with_view(&self, start: usize, end: usize) -> Result<Self, Error> {
        let view = match self.lines_normalized {
            Some(ref lines) => &lines[start..end],
            None => return Err(format_err!("TextData does not have original text")),
        };
        let view_joined = view.join("\n");
        let processed = apply_aggressive(&view_joined);
        Ok(TextData {
            match_data: NgramSet::from_str(&processed, 2),
            lines_view: (start, end),
            lines_normalized: self.lines_normalized.clone(),
            text_processed: Some(processed),
        })
    }

    /// "Erase" the current lines in view and restore the view to its original
    /// bounds.
    ///
    /// For example, consider a file with two licenses in it. One was identified
    /// (and located) with `optimize_bounds`. Now you want to find the other:
    /// white-out the matched lines, and re-run the overall search to find a
    /// new high score.
    pub fn white_out(&self) -> Result<Self, Error> {
        // note that we're not using the view here...
        let lines = self
            .lines_normalized
            .as_ref()
            .ok_or_else(|| format_err!("TextData does not have original text"))?;

        // ...because it's used here to exclude lines
        let new_normalized: Vec<String> = lines
            .iter()
            .enumerate()
            .map(|(i, line)| {
                if i >= self.lines_view.0 && i < self.lines_view.1 {
                    "".to_string()
                } else {
                    line.clone()
                }
            }).collect();

        let processed = apply_aggressive(&new_normalized.join("\n"));

        Ok(TextData {
            match_data: NgramSet::from_str(&processed, 2),
            lines_view: (0, new_normalized.len()),
            lines_normalized: Some(new_normalized),
            text_processed: Some(processed),
        })
    }

    /// Get a slice of the normalized lines in this `TextData`.
    ///
    /// If the text was discarded with `without_text`, this returns `None`.
    pub fn lines(&self) -> Option<&[String]> {
        match self.lines_normalized {
            Some(ref lines) => Some(&lines[self.lines_view.0..self.lines_view.1]),
            None => None,
        }
    }

    #[doc(hidden)]
    pub fn text_processed(&self) -> Option<&str> {
        self.text_processed.as_ref().map(String::as_ref)
    }

    /// Compare this `TextData` with another, returning a similarity score.
    ///
    /// This is what's used during analysis to rank licenses.
    pub fn match_score(&self, other: &TextData) -> f32 {
        self.match_data.dice(&other.match_data)
    }

    pub(crate) fn eq_data(&self, other: &Self) -> bool {
        self.match_data.eq(&other.match_data)
    }

    /// Attempt to optimize a known match to locate possible line ranges.
    ///
    /// Returns a new `TextData` struct and a score. The returned struct is a
    /// clone of `self`, with its view set to the best match against `other`.
    ///
    /// This will respect any views set on the TextData (an optimized result
    /// won't go outside the original view).
    ///
    /// Note that this won't be 100% optimal if there are blank lines
    /// surrounding the actual match, since successive blank lines in a range
    /// will likely have the same score.
    ///
    /// You should check the value of `lines_view` on the returned struct to
    /// find the line ranges.
    pub fn optimize_bounds(&self, other: &TextData) -> Result<(Self, f32), Error> {
        if self.lines_normalized.is_none() {
            return Err(format_err!("TextData does not have original text"));
        };

        let view = self.lines_view;

        // optimize the ending bounds of the text match
        let (end_optimized, _) = self.search_optimize(
            &|end| self.with_view(view.0, end).unwrap().match_score(other),
            &|end| self.with_view(view.0, end).unwrap(),
        );
        let new_end = end_optimized.lines_view.1;

        // then optimize the starting bounds
        let (optimized, score) = end_optimized.search_optimize(
            &|start| {
                end_optimized
                    .with_view(start, new_end)
                    .unwrap()
                    .match_score(other)
            },
            &|start| end_optimized.with_view(start, new_end).unwrap(),
        );
        Ok((optimized, score))
    }

    fn search_optimize(&self, score: &Fn(usize) -> f32, value: &Fn(usize) -> Self) -> (Self, f32) {
        // cache score checks, since they're kinda expensive
        let mut memo: HashMap<usize, f32> = HashMap::new();
        let mut check_score =
            |index: usize| -> f32 { *memo.entry(index).or_insert_with(|| score(index)) };

        fn search(score: &mut FnMut(usize) -> f32, left: usize, right: usize) -> (usize, f32) {
            if right - left <= 3 {
                // find the index of the highest score in the remaining items
                return (left..=right)
                    .map(|x| (x, score(x)))
                    .fold((0usize, 0f32), |acc, x| if x.1 >= acc.1 { x } else { acc });
            }

            let low = (left * 2 + right) / 3;
            let high = (left + right * 2) / 3;
            let score_low = score(low);
            let score_high = score(high);

            if score_low > score_high {
                search(score, left, high - 1)
            } else {
                search(score, low + 1, right)
            }
        }

        let optimal = search(&mut check_score, self.lines_view.0, self.lines_view.1);
        (value(optimal.0), optimal.1)
    }
}

impl<'a> From<&'a str> for TextData {
    fn from(text: &'a str) -> Self {
        Self::new(text)
    }
}

impl<'a> From<String> for TextData {
    fn from(text: String) -> Self {
        Self::new(&text)
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    // psst:
    // cargo test -- --nocapture

    #[test]
    fn optimize_bounds() {
        let license_text = "this is a license text\nor it pretends to be one\nit's just a test";
        let sample_text = "this is a license text\nor it pretends to be one\nit's just a test\nwords\n\nhere is some\ncode\nhello();\n\n//a comment too";

        let license = TextData::from(license_text).without_text();
        let sample = TextData::from(sample_text);

        let (optimized, _) = sample.optimize_bounds(&license).unwrap();

        println!("{:?}", optimized.lines_view);
        println!("{:?}", optimized.lines_normalized.clone().unwrap());
        assert_eq!((0, 3), optimized.lines_view);

        // add more to the string, try again (avoid int trunc screwups)
        let sample_text = format!("{}\none more line", sample_text);
        let sample = TextData::from(sample_text.as_str());
        let (optimized, _) = sample.optimize_bounds(&license).unwrap();

        println!("{:?}", optimized.lines_view);
        println!("{:?}", optimized.lines_normalized.clone().unwrap());
        assert_eq!((0, 3), optimized.lines_view);

        // add to the beginning too
        let sample_text = format!("some content\nat\n\nthe beginning\n{}", sample_text);
        let sample = TextData::from(sample_text.as_str());
        let (optimized, _) = sample.optimize_bounds(&license).unwrap();

        println!("{:?}", optimized.lines_view);
        println!("{:?}", optimized.lines_normalized.clone().unwrap());

        // end bounds at 7 and 8 have the same score, since they're empty lines (not
        // counted). askalono is not smart enough to trim this as close as it
        // can.
        assert!(
            (4, 7) == optimized.lines_view || (4, 8) == optimized.lines_view,
            "bounds are (4, 7) or (4, 8)"
        );
    }

    // if a view is set on the text data, optimize_bounds must not find text
    // outside of that range
    #[test]
    fn optimize_doesnt_grow_view() {
        let sample_text = "0\n1\n2\naaa aaa\naaa\naaa\naaa\n7\n8";
        let license_text = "aaa aaa aaa aaa aaa";
        let sample = TextData::from(sample_text);
        let license = TextData::from(license_text).without_text();

        // sanity: the optimized bounds should be at (3, 7)
        let (optimized, _) = sample.optimize_bounds(&license).unwrap();
        assert_eq!((3, 7), optimized.lines_view);

        // this should still work
        let sample = sample.with_view(3, 7).unwrap();
        let (optimized, _) = sample.optimize_bounds(&license).unwrap();
        assert_eq!((3, 7), optimized.lines_view);

        // but if we shrink the view further, it shouldn't be outside that range
        let sample = sample.with_view(4, 6).unwrap();
        let (optimized, _) = sample.optimize_bounds(&license).unwrap();
        assert_eq!((4, 6), optimized.lines_view);

        // restoring the view should still be OK too
        let sample = sample.with_view(0, 9).unwrap();
        let (optimized, _) = sample.optimize_bounds(&license).unwrap();
        assert_eq!((3, 7), optimized.lines_view);
    }

    // ensure we don't choke on small TextData matches
    #[test]
    fn match_small() {
        let a = TextData::from("a b");
        let b = TextData::from("a\nlong\nlicense\nfile\n\n\n\n\nabcdefg");
        let x = a.match_score(&b);
        let y = b.match_score(&a);
        assert_eq!(x, y);
    }

    // don't choke on empty TextData either
    #[test]
    fn match_empty() {
        let a = TextData::from("");
        let b = TextData::from("a\nlong\nlicense\nfile\n\n\n\n\nabcdefg");
        let x = a.match_score(&b);
        let y = b.match_score(&a);
        assert_eq!(x, y);
    }

    #[test]
    fn view_and_white_out() {
        let a = TextData::from("aaa\nbbb\nccc\nddd");
        assert_eq!(Some("aaa bbb ccc ddd"), a.text_processed());

        let b = a.with_view(1, 3).expect("with_view must be ok");
        assert_eq!(2, b.lines().unwrap().len());
        assert_eq!(Some("bbb ccc"), b.text_processed());

        let c = b.white_out().expect("white_out must be ok");
        assert_eq!(Some("aaa ddd"), c.text_processed());
    }
}
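// Hedged usage sketch (added; not part of the original test suite): locate one
// license, `white_out` the matched lines, then search again, which is the flow
// the `white_out` docs describe for a file containing two licenses. The texts
// below are made up for the example.
#[cfg(test)]
mod white_out_example {
    use super::*;

    #[test]
    fn find_second_license() {
        let license = TextData::from("aaa bbb ccc ddd");
        let sample = TextData::from("aaa bbb ccc ddd\n\nunrelated code here\n\naaa bbb ccc ddd");

        // First pass locates one occurrence...
        let (first, score) = sample.optimize_bounds(&license).unwrap();
        assert!(score > 0.99);

        // ...then erase it and search the remainder for the other one.
        let rest = first.white_out().unwrap();
        let (second, score) = rest.optimize_bounds(&license).unwrap();
        assert!(score > 0.99);
        assert_ne!(first.lines_view(), second.lines_view());
    }
}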
37.513189
153
0.598351
7a918b98ac87a25e5a1df86850e93660d70a39fc
4,597
extern crate serde_json;
extern crate serde;
// Needed for `#[derive(Serialize)]` below; this assumes the file is the crate
// root (pre-2018 style).
#[macro_use]
extern crate serde_derive;
extern crate time;
extern crate sha2;

use sha2::{Sha256, Digest};
use std::fmt::Write;

#[derive(Debug, Clone, Serialize)]
struct Transaction {
    sender: String,
    receiver: String,
    amount: f32,
}

#[derive(Debug, Serialize)]
pub struct Blockheader {
    timestamp: i64,
    nonce: u32,
    pre_hash: String,
    merkle: String,
    difficulty: u32,
}

#[derive(Debug, Serialize)]
pub struct Block {
    header: Blockheader,
    count: u32,
    transactions: Vec<Transaction>,
}

#[derive(Debug, Serialize)]
pub struct Chain {
    chain: Vec<Block>,
    current_transaction: Vec<Transaction>,
    difficulty: u32,
    miner_address: String,
    reward: f32,
}

impl Chain {
    pub fn new(miner_address: String, difficulty: u32) -> Chain {
        let mut chain = Chain {
            chain: Vec::new(),
            current_transaction: Vec::new(),
            difficulty,
            miner_address,
            reward: 100.,
        };

        chain.generate_new_block();
        return chain;
    }

    pub fn new_transaction(&mut self, sender: String, receiver: String, amount: f32) -> bool {
        self.current_transaction.push(Transaction {
            sender,
            receiver,
            amount,
        });

        return true;
    }

    pub fn last_hash(&self) -> String {
        let block = match self.chain.last() {
            Some(block) => block,
            // Genesis case: no blocks yet, use 64 ASCII zeros as the previous hash.
            None => return String::from_utf8(vec![48; 64]).unwrap(),
        };
        Chain::hash(&block.header)
    }

    pub fn update_difficulty(&mut self, difficulty: u32) -> bool {
        self.difficulty = difficulty;
        return true;
    }

    pub fn update_reward(&mut self, reward: f32) -> bool {
        self.reward = reward;
        return true;
    }

    pub fn generate_new_block(&mut self) -> bool {
        let header = Blockheader {
            timestamp: time::now().to_timespec().sec,
            nonce: 0,
            pre_hash: self.last_hash(),
            merkle: String::new(),
            difficulty: self.difficulty,
        };

        // The miner reward is always the first transaction in a block.
        let reward_trans = Transaction {
            sender: String::from("Root"),
            receiver: self.miner_address.clone(),
            amount: self.reward,
        };

        let mut block = Block {
            header,
            count: 0,
            transactions: vec![],
        };

        block.transactions.push(reward_trans);
        block.transactions.append(&mut self.current_transaction);
        block.count = block.transactions.len() as u32;
        block.header.merkle = Chain::get_merkle(block.transactions.clone());
        Chain::proof_of_work(&mut block.header);

        println!("{:#?}", &block);
        self.chain.push(block);
        return true;
    }

    fn get_merkle(current_transaction: Vec<Transaction>) -> String {
        let mut merkle = Vec::new();

        for t in &current_transaction {
            let hash = Chain::hash(t);
            merkle.push(hash);
        }

        // Duplicate the last hash so the tree stays balanced.
        if merkle.len() % 2 == 1 {
            let last = merkle.last().cloned().unwrap();
            merkle.push(last);
        }

        // Repeatedly hash pairs together until a single root remains.
        while merkle.len() > 1 {
            let mut h1 = merkle.remove(0);
            let h2 = merkle.remove(0);
            h1.push_str(&h2);
            let nh = Chain::hash(&h1);
            merkle.push(nh);
        }
        merkle.pop().unwrap()
    }

    pub fn proof_of_work(header: &mut Blockheader) {
        loop {
            let hash = Chain::hash(header);
            let slice = &hash[..header.difficulty as usize];
            // A nonce is accepted when the first `difficulty` characters of
            // the hash parse as the number 0, i.e. they are all '0' digits.
            match slice.parse::<u32>() {
                Ok(val) => {
                    if val != 0 {
                        header.nonce += 1;
                    } else {
                        println!("Block hash: {:?}", hash);
                        break;
                    }
                }
                Err(_) => {
                    header.nonce += 1;
                    continue;
                }
            };
        }
    }

    pub fn hash<T: serde::Serialize>(item: &T) -> String {
        let input = serde_json::to_string(&item).unwrap();
        let mut hasher = Sha256::default();
        hasher.input(input.as_bytes());
        let res = hasher.result();
        let vec_res = res.to_vec();

        Chain::hex_to_string(vec_res.as_slice())
    }

    pub fn hex_to_string(vec_res: &[u8]) -> String {
        let mut s = String::new();
        for b in vec_res {
            // {:02x} keeps leading zeros so every byte renders as exactly two
            // hex characters, giving a fixed-length 64-character hash string.
            write!(&mut s, "{:02x}", b).expect("unable to write");
        }
        return s;
    }
}
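// Hedged usage sketch (added for illustration; not part of the original file).
// Mines a genesis block, queues one transfer, and mines a second block at a
// low difficulty. The addresses are placeholders.
#[cfg(test)]
mod chain_example {
    use super::Chain;

    #[test]
    fn mine_two_blocks() {
        // `Chain::new` mines the genesis block as a side effect.
        let mut chain = Chain::new(String::from("miner-address"), 1);
        chain.new_transaction(String::from("alice"), String::from("bob"), 12.5);
        chain.generate_new_block();

        // Genesis block plus the block holding the transfer and its reward.
        assert_eq!(chain.chain.len(), 2);
    }
}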
23.574359
95
0.513596
0afd14a7e4c99cdbdf79ad25f1c2b2bd3ecb8342
83,761
//! Android-specific definitions for linux-like values pub type clock_t = ::c_long; pub type time_t = ::c_long; pub type suseconds_t = ::c_long; pub type off_t = ::c_long; pub type blkcnt_t = ::c_ulong; pub type blksize_t = ::c_ulong; pub type nlink_t = u32; pub type useconds_t = u32; pub type pthread_t = ::c_long; pub type pthread_mutexattr_t = ::c_long; pub type pthread_rwlockattr_t = ::c_long; pub type pthread_condattr_t = ::c_long; pub type pthread_key_t = ::c_int; pub type fsfilcnt_t = ::c_ulong; pub type fsblkcnt_t = ::c_ulong; pub type nfds_t = ::c_uint; pub type rlim_t = ::c_ulong; pub type dev_t = ::c_ulong; pub type ino_t = ::c_ulong; pub type __CPU_BITTYPE = ::c_ulong; pub type idtype_t = ::c_int; pub type loff_t = ::c_longlong; pub type __kernel_loff_t = ::c_longlong; pub type __kernel_pid_t = ::c_int; s! { pub struct stack_t { pub ss_sp: *mut ::c_void, pub ss_flags: ::c_int, pub ss_size: ::size_t } pub struct __fsid_t { __val: [::c_int; 2], } pub struct msghdr { pub msg_name: *mut ::c_void, pub msg_namelen: ::socklen_t, pub msg_iov: *mut ::iovec, pub msg_iovlen: ::size_t, pub msg_control: *mut ::c_void, pub msg_controllen: ::size_t, pub msg_flags: ::c_int, } pub struct cmsghdr { pub cmsg_len: ::size_t, pub cmsg_level: ::c_int, pub cmsg_type: ::c_int, } pub struct termios { pub c_iflag: ::tcflag_t, pub c_oflag: ::tcflag_t, pub c_cflag: ::tcflag_t, pub c_lflag: ::tcflag_t, pub c_line: ::cc_t, pub c_cc: [::cc_t; ::NCCS], } pub struct termios2 { pub c_iflag: ::tcflag_t, pub c_oflag: ::tcflag_t, pub c_cflag: ::tcflag_t, pub c_lflag: ::tcflag_t, pub c_line: ::cc_t, pub c_cc: [::cc_t; 19], pub c_ispeed: ::speed_t, pub c_ospeed: ::speed_t, } pub struct flock { pub l_type: ::c_short, pub l_whence: ::c_short, pub l_start: ::off_t, pub l_len: ::off_t, pub l_pid: ::pid_t, } pub struct flock64 { pub l_type: ::c_short, pub l_whence: ::c_short, pub l_start: ::__kernel_loff_t, pub l_len: ::__kernel_loff_t, pub l_pid: ::__kernel_pid_t, } pub struct cpu_set_t { #[cfg(target_pointer_width = "64")] __bits: [__CPU_BITTYPE; 16], #[cfg(target_pointer_width = "32")] __bits: [__CPU_BITTYPE; 1], } pub struct sem_t { count: ::c_uint, #[cfg(target_pointer_width = "64")] __reserved: [::c_int; 3], } pub struct exit_status { pub e_termination: ::c_short, pub e_exit: ::c_short, } pub struct statvfs { pub f_bsize: ::c_ulong, pub f_frsize: ::c_ulong, pub f_blocks: ::fsblkcnt_t, pub f_bfree: ::fsblkcnt_t, pub f_bavail: ::fsblkcnt_t, pub f_files: ::fsfilcnt_t, pub f_ffree: ::fsfilcnt_t, pub f_favail: ::fsfilcnt_t, pub f_fsid: ::c_ulong, pub f_flag: ::c_ulong, pub f_namemax: ::c_ulong, #[cfg(target_pointer_width = "64")] __f_reserved: [u32; 6], } pub struct signalfd_siginfo { pub ssi_signo: u32, pub ssi_errno: i32, pub ssi_code: i32, pub ssi_pid: u32, pub ssi_uid: u32, pub ssi_fd: i32, pub ssi_tid: u32, pub ssi_band: u32, pub ssi_overrun: u32, pub ssi_trapno: u32, pub ssi_status: i32, pub ssi_int: i32, pub ssi_ptr: ::c_ulonglong, pub ssi_utime: ::c_ulonglong, pub ssi_stime: ::c_ulonglong, pub ssi_addr: ::c_ulonglong, pub ssi_addr_lsb: u16, _pad2: u16, pub ssi_syscall: i32, pub ssi_call_addr: u64, pub ssi_arch: u32, _pad: [u8; 28], } pub struct ucred { pub pid: ::pid_t, pub uid: ::uid_t, pub gid: ::gid_t, } pub struct genlmsghdr { pub cmd: u8, pub version: u8, pub reserved: u16, } pub struct nlmsghdr { pub nlmsg_len: u32, pub nlmsg_type: u16, pub nlmsg_flags: u16, pub nlmsg_seq: u32, pub nlmsg_pid: u32, } pub struct nlmsgerr { pub error: ::c_int, pub msg: nlmsghdr, } pub struct nl_pktinfo { pub group: u32, } pub 
struct nl_mmap_req { pub nm_block_size: ::c_uint, pub nm_block_nr: ::c_uint, pub nm_frame_size: ::c_uint, pub nm_frame_nr: ::c_uint, } pub struct nl_mmap_hdr { pub nm_status: ::c_uint, pub nm_len: ::c_uint, pub nm_group: u32, pub nm_pid: u32, pub nm_uid: u32, pub nm_gid: u32, } pub struct nlattr { pub nla_len: u16, pub nla_type: u16, } pub struct in6_pktinfo { pub ipi6_addr: ::in6_addr, pub ipi6_ifindex: ::c_int, } pub struct inotify_event { pub wd: ::c_int, pub mask: u32, pub cookie: u32, pub len: u32 } } s_no_extra_traits! { pub struct sockaddr_nl { pub nl_family: ::sa_family_t, nl_pad: ::c_ushort, pub nl_pid: u32, pub nl_groups: u32 } pub struct dirent { pub d_ino: u64, pub d_off: i64, pub d_reclen: ::c_ushort, pub d_type: ::c_uchar, pub d_name: [::c_char; 256], } pub struct dirent64 { pub d_ino: u64, pub d_off: i64, pub d_reclen: ::c_ushort, pub d_type: ::c_uchar, pub d_name: [::c_char; 256], } pub struct siginfo_t { pub si_signo: ::c_int, pub si_errno: ::c_int, pub si_code: ::c_int, pub _pad: [::c_int; 29], _align: [usize; 0], } pub struct lastlog { ll_time: ::time_t, ll_line: [::c_char; UT_LINESIZE], ll_host: [::c_char; UT_HOSTSIZE], } pub struct utmp { pub ut_type: ::c_short, pub ut_pid: ::pid_t, pub ut_line: [::c_char; UT_LINESIZE], pub ut_id: [::c_char; 4], pub ut_user: [::c_char; UT_NAMESIZE], pub ut_host: [::c_char; UT_HOSTSIZE], pub ut_exit: exit_status, pub ut_session: ::c_long, pub ut_tv: ::timeval, pub ut_addr_v6: [i32; 4], unused: [::c_char; 20], } pub struct sockaddr_alg { pub salg_family: ::sa_family_t, pub salg_type: [::c_uchar; 14], pub salg_feat: u32, pub salg_mask: u32, pub salg_name: [::c_uchar; 64], } pub struct af_alg_iv { pub ivlen: u32, pub iv: [::c_uchar; 0], } } cfg_if! { if #[cfg(feature = "extra_traits")] { impl PartialEq for sockaddr_nl { fn eq(&self, other: &sockaddr_nl) -> bool { self.nl_family == other.nl_family && self.nl_pid == other.nl_pid && self.nl_groups == other.nl_groups } } impl Eq for sockaddr_nl {} impl ::fmt::Debug for sockaddr_nl { fn fmt(&self, f: &mut ::fmt::Formatter) -> ::fmt::Result { f.debug_struct("sockaddr_nl") .field("nl_family", &self.nl_family) .field("nl_pid", &self.nl_pid) .field("nl_groups", &self.nl_groups) .finish() } } impl ::hash::Hash for sockaddr_nl { fn hash<H: ::hash::Hasher>(&self, state: &mut H) { self.nl_family.hash(state); self.nl_pid.hash(state); self.nl_groups.hash(state); } } impl PartialEq for dirent { fn eq(&self, other: &dirent) -> bool { self.d_ino == other.d_ino && self.d_off == other.d_off && self.d_reclen == other.d_reclen && self.d_type == other.d_type && self .d_name .iter() .zip(other.d_name.iter()) .all(|(a,b)| a == b) } } impl Eq for dirent {} impl ::fmt::Debug for dirent { fn fmt(&self, f: &mut ::fmt::Formatter) -> ::fmt::Result { f.debug_struct("dirent") .field("d_ino", &self.d_ino) .field("d_off", &self.d_off) .field("d_reclen", &self.d_reclen) .field("d_type", &self.d_type) // FIXME: .field("d_name", &self.d_name) .finish() } } impl ::hash::Hash for dirent { fn hash<H: ::hash::Hasher>(&self, state: &mut H) { self.d_ino.hash(state); self.d_off.hash(state); self.d_reclen.hash(state); self.d_type.hash(state); self.d_name.hash(state); } } impl PartialEq for dirent64 { fn eq(&self, other: &dirent64) -> bool { self.d_ino == other.d_ino && self.d_off == other.d_off && self.d_reclen == other.d_reclen && self.d_type == other.d_type && self .d_name .iter() .zip(other.d_name.iter()) .all(|(a,b)| a == b) } } impl Eq for dirent64 {} impl ::fmt::Debug for dirent64 { fn fmt(&self, f: &mut ::fmt::Formatter) -> 
::fmt::Result { f.debug_struct("dirent64") .field("d_ino", &self.d_ino) .field("d_off", &self.d_off) .field("d_reclen", &self.d_reclen) .field("d_type", &self.d_type) // FIXME: .field("d_name", &self.d_name) .finish() } } impl ::hash::Hash for dirent64 { fn hash<H: ::hash::Hasher>(&self, state: &mut H) { self.d_ino.hash(state); self.d_off.hash(state); self.d_reclen.hash(state); self.d_type.hash(state); self.d_name.hash(state); } } impl PartialEq for siginfo_t { fn eq(&self, other: &siginfo_t) -> bool { self.si_signo == other.si_signo && self.si_errno == other.si_errno && self.si_code == other.si_code // Ignore _pad // Ignore _align } } impl Eq for siginfo_t {} impl ::fmt::Debug for siginfo_t { fn fmt(&self, f: &mut ::fmt::Formatter) -> ::fmt::Result { f.debug_struct("siginfo_t") .field("si_signo", &self.si_signo) .field("si_errno", &self.si_errno) .field("si_code", &self.si_code) // Ignore _pad // Ignore _align .finish() } } impl ::hash::Hash for siginfo_t { fn hash<H: ::hash::Hasher>(&self, state: &mut H) { self.si_signo.hash(state); self.si_errno.hash(state); self.si_code.hash(state); // Ignore _pad // Ignore _align } } impl PartialEq for lastlog { fn eq(&self, other: &lastlog) -> bool { self.ll_time == other.ll_time && self .ll_line .iter() .zip(other.ll_line.iter()) .all(|(a,b)| a == b) && self .ll_host .iter() .zip(other.ll_host.iter()) .all(|(a,b)| a == b) } } impl Eq for lastlog {} impl ::fmt::Debug for lastlog { fn fmt(&self, f: &mut ::fmt::Formatter) -> ::fmt::Result { f.debug_struct("lastlog") .field("ll_time", &self.ll_time) .field("ll_line", &self.ll_line) // FIXME: .field("ll_host", &self.ll_host) .finish() } } impl ::hash::Hash for lastlog { fn hash<H: ::hash::Hasher>(&self, state: &mut H) { self.ll_time.hash(state); self.ll_line.hash(state); self.ll_host.hash(state); } } impl PartialEq for utmp { fn eq(&self, other: &utmp) -> bool { self.ut_type == other.ut_type && self.ut_pid == other.ut_pid && self .ut_line .iter() .zip(other.ut_line.iter()) .all(|(a,b)| a == b) && self.ut_id == other.ut_id && self .ut_user .iter() .zip(other.ut_user.iter()) .all(|(a,b)| a == b) && self .ut_host .iter() .zip(other.ut_host.iter()) .all(|(a,b)| a == b) && self.ut_exit == other.ut_exit && self.ut_session == other.ut_session && self.ut_tv == other.ut_tv && self.ut_addr_v6 == other.ut_addr_v6 && self.unused == other.unused } } impl Eq for utmp {} impl ::fmt::Debug for utmp { fn fmt(&self, f: &mut ::fmt::Formatter) -> ::fmt::Result { f.debug_struct("utmp") .field("ut_type", &self.ut_type) .field("ut_pid", &self.ut_pid) .field("ut_line", &self.ut_line) .field("ut_id", &self.ut_id) .field("ut_user", &self.ut_user) // FIXME: .field("ut_host", &self.ut_host) .field("ut_exit", &self.ut_exit) .field("ut_session", &self.ut_session) .field("ut_tv", &self.ut_tv) .field("ut_addr_v6", &self.ut_addr_v6) .field("unused", &self.unused) .finish() } } impl ::hash::Hash for utmp { fn hash<H: ::hash::Hasher>(&self, state: &mut H) { self.ut_type.hash(state); self.ut_pid.hash(state); self.ut_line.hash(state); self.ut_id.hash(state); self.ut_user.hash(state); self.ut_host.hash(state); self.ut_exit.hash(state); self.ut_session.hash(state); self.ut_tv.hash(state); self.ut_addr_v6.hash(state); self.unused.hash(state); } } impl PartialEq for sockaddr_alg { fn eq(&self, other: &sockaddr_alg) -> bool { self.salg_family == other.salg_family && self .salg_type .iter() .zip(other.salg_type.iter()) .all(|(a, b)| a == b) && self.salg_feat == other.salg_feat && self.salg_mask == other.salg_mask && self .salg_name .iter() 
.zip(other.salg_name.iter()) .all(|(a, b)| a == b) } } impl Eq for sockaddr_alg {} impl ::fmt::Debug for sockaddr_alg { fn fmt(&self, f: &mut ::fmt::Formatter) -> ::fmt::Result { f.debug_struct("sockaddr_alg") .field("salg_family", &self.salg_family) .field("salg_type", &self.salg_type) .field("salg_feat", &self.salg_feat) .field("salg_mask", &self.salg_mask) .field("salg_name", &&self.salg_name[..]) .finish() } } impl ::hash::Hash for sockaddr_alg { fn hash<H: ::hash::Hasher>(&self, state: &mut H) { self.salg_family.hash(state); self.salg_type.hash(state); self.salg_feat.hash(state); self.salg_mask.hash(state); self.salg_name.hash(state); } } impl af_alg_iv { fn as_slice(&self) -> &[u8] { unsafe { ::core::slice::from_raw_parts( self.iv.as_ptr(), self.ivlen as usize ) } } } impl PartialEq for af_alg_iv { fn eq(&self, other: &af_alg_iv) -> bool { *self.as_slice() == *other.as_slice() } } impl Eq for af_alg_iv {} impl ::fmt::Debug for af_alg_iv { fn fmt(&self, f: &mut ::fmt::Formatter) -> ::fmt::Result { f.debug_struct("af_alg_iv") .field("iv", &self.as_slice()) .finish() } } impl ::hash::Hash for af_alg_iv { fn hash<H: ::hash::Hasher>(&self, state: &mut H) { self.as_slice().hash(state); } } } } pub const MADV_SOFT_OFFLINE: ::c_int = 101; pub const MS_NOUSER: ::c_ulong = 0xffffffff80000000; pub const MS_RMT_MASK: ::c_ulong = 0x02800051; pub const O_TRUNC: ::c_int = 512; pub const O_CLOEXEC: ::c_int = 0x80000; pub const O_PATH: ::c_int = 0o10000000; pub const O_NOATIME: ::c_int = 0o1000000; pub const EBFONT: ::c_int = 59; pub const ENOSTR: ::c_int = 60; pub const ENODATA: ::c_int = 61; pub const ETIME: ::c_int = 62; pub const ENOSR: ::c_int = 63; pub const ENONET: ::c_int = 64; pub const ENOPKG: ::c_int = 65; pub const EREMOTE: ::c_int = 66; pub const ENOLINK: ::c_int = 67; pub const EADV: ::c_int = 68; pub const ESRMNT: ::c_int = 69; pub const ECOMM: ::c_int = 70; pub const EPROTO: ::c_int = 71; pub const EDOTDOT: ::c_int = 73; pub const EPOLL_CLOEXEC: ::c_int = 0x80000; pub const EPOLLONESHOT: ::c_int = 0x40000000; pub const EPOLLRDHUP: ::c_int = 0x00002000; pub const EPOLLWAKEUP: ::c_int = 0x20000000; pub const EFD_CLOEXEC: ::c_int = 0x80000; pub const USER_PROCESS: ::c_short = 7; pub const FALLOC_FL_COLLAPSE_RANGE: ::c_int = 0x08; pub const BUFSIZ: ::c_uint = 1024; pub const FILENAME_MAX: ::c_uint = 4096; pub const FOPEN_MAX: ::c_uint = 20; pub const POSIX_FADV_DONTNEED: ::c_int = 4; pub const POSIX_FADV_NOREUSE: ::c_int = 5; pub const L_tmpnam: ::c_uint = 4096; pub const TMP_MAX: ::c_uint = 308915776; pub const _PC_LINK_MAX: ::c_int = 1; pub const _PC_MAX_CANON: ::c_int = 2; pub const _PC_MAX_INPUT: ::c_int = 3; pub const _PC_NAME_MAX: ::c_int = 4; pub const _PC_PATH_MAX: ::c_int = 5; pub const _PC_PIPE_BUF: ::c_int = 6; pub const _PC_2_SYMLINKS: ::c_int = 7; pub const _PC_ALLOC_SIZE_MIN: ::c_int = 8; pub const _PC_REC_INCR_XFER_SIZE: ::c_int = 9; pub const _PC_REC_MAX_XFER_SIZE: ::c_int = 10; pub const _PC_REC_MIN_XFER_SIZE: ::c_int = 11; pub const _PC_REC_XFER_ALIGN: ::c_int = 12; pub const _PC_SYMLINK_MAX: ::c_int = 13; pub const _PC_CHOWN_RESTRICTED: ::c_int = 14; pub const _PC_NO_TRUNC: ::c_int = 15; pub const _PC_VDISABLE: ::c_int = 16; pub const _PC_ASYNC_IO: ::c_int = 17; pub const _PC_PRIO_IO: ::c_int = 18; pub const _PC_SYNC_IO: ::c_int = 19; pub const FIONBIO: ::c_int = 0x5421; pub const _SC_ARG_MAX: ::c_int = 0; pub const _SC_BC_BASE_MAX: ::c_int = 1; pub const _SC_BC_DIM_MAX: ::c_int = 2; pub const _SC_BC_SCALE_MAX: ::c_int = 3; pub const _SC_BC_STRING_MAX: ::c_int = 4; pub 
const _SC_CHILD_MAX: ::c_int = 5; pub const _SC_CLK_TCK: ::c_int = 6; pub const _SC_COLL_WEIGHTS_MAX: ::c_int = 7; pub const _SC_EXPR_NEST_MAX: ::c_int = 8; pub const _SC_LINE_MAX: ::c_int = 9; pub const _SC_NGROUPS_MAX: ::c_int = 10; pub const _SC_OPEN_MAX: ::c_int = 11; pub const _SC_PASS_MAX: ::c_int = 12; pub const _SC_2_C_BIND: ::c_int = 13; pub const _SC_2_C_DEV: ::c_int = 14; pub const _SC_2_C_VERSION: ::c_int = 15; pub const _SC_2_CHAR_TERM: ::c_int = 16; pub const _SC_2_FORT_DEV: ::c_int = 17; pub const _SC_2_FORT_RUN: ::c_int = 18; pub const _SC_2_LOCALEDEF: ::c_int = 19; pub const _SC_2_SW_DEV: ::c_int = 20; pub const _SC_2_UPE: ::c_int = 21; pub const _SC_2_VERSION: ::c_int = 22; pub const _SC_JOB_CONTROL: ::c_int = 23; pub const _SC_SAVED_IDS: ::c_int = 24; pub const _SC_VERSION: ::c_int = 25; pub const _SC_RE_DUP_MAX: ::c_int = 26; pub const _SC_STREAM_MAX: ::c_int = 27; pub const _SC_TZNAME_MAX: ::c_int = 28; pub const _SC_XOPEN_CRYPT: ::c_int = 29; pub const _SC_XOPEN_ENH_I18N: ::c_int = 30; pub const _SC_XOPEN_SHM: ::c_int = 31; pub const _SC_XOPEN_VERSION: ::c_int = 32; pub const _SC_XOPEN_XCU_VERSION: ::c_int = 33; pub const _SC_XOPEN_REALTIME: ::c_int = 34; pub const _SC_XOPEN_REALTIME_THREADS: ::c_int = 35; pub const _SC_XOPEN_LEGACY: ::c_int = 36; pub const _SC_ATEXIT_MAX: ::c_int = 37; pub const _SC_IOV_MAX: ::c_int = 38; pub const _SC_PAGESIZE: ::c_int = 39; pub const _SC_PAGE_SIZE: ::c_int = 40; pub const _SC_XOPEN_UNIX: ::c_int = 41; pub const _SC_XBS5_ILP32_OFF32: ::c_int = 42; pub const _SC_XBS5_ILP32_OFFBIG: ::c_int = 43; pub const _SC_XBS5_LP64_OFF64: ::c_int = 44; pub const _SC_XBS5_LPBIG_OFFBIG: ::c_int = 45; pub const _SC_AIO_LISTIO_MAX: ::c_int = 46; pub const _SC_AIO_MAX: ::c_int = 47; pub const _SC_AIO_PRIO_DELTA_MAX: ::c_int = 48; pub const _SC_DELAYTIMER_MAX: ::c_int = 49; pub const _SC_MQ_OPEN_MAX: ::c_int = 50; pub const _SC_MQ_PRIO_MAX: ::c_int = 51; pub const _SC_RTSIG_MAX: ::c_int = 52; pub const _SC_SEM_NSEMS_MAX: ::c_int = 53; pub const _SC_SEM_VALUE_MAX: ::c_int = 54; pub const _SC_SIGQUEUE_MAX: ::c_int = 55; pub const _SC_TIMER_MAX: ::c_int = 56; pub const _SC_ASYNCHRONOUS_IO: ::c_int = 57; pub const _SC_FSYNC: ::c_int = 58; pub const _SC_MAPPED_FILES: ::c_int = 59; pub const _SC_MEMLOCK: ::c_int = 60; pub const _SC_MEMLOCK_RANGE: ::c_int = 61; pub const _SC_MEMORY_PROTECTION: ::c_int = 62; pub const _SC_MESSAGE_PASSING: ::c_int = 63; pub const _SC_PRIORITIZED_IO: ::c_int = 64; pub const _SC_PRIORITY_SCHEDULING: ::c_int = 65; pub const _SC_REALTIME_SIGNALS: ::c_int = 66; pub const _SC_SEMAPHORES: ::c_int = 67; pub const _SC_SHARED_MEMORY_OBJECTS: ::c_int = 68; pub const _SC_SYNCHRONIZED_IO: ::c_int = 69; pub const _SC_TIMERS: ::c_int = 70; pub const _SC_GETGR_R_SIZE_MAX: ::c_int = 71; pub const _SC_GETPW_R_SIZE_MAX: ::c_int = 72; pub const _SC_LOGIN_NAME_MAX: ::c_int = 73; pub const _SC_THREAD_DESTRUCTOR_ITERATIONS: ::c_int = 74; pub const _SC_THREAD_KEYS_MAX: ::c_int = 75; pub const _SC_THREAD_STACK_MIN: ::c_int = 76; pub const _SC_THREAD_THREADS_MAX: ::c_int = 77; pub const _SC_TTY_NAME_MAX: ::c_int = 78; pub const _SC_THREADS: ::c_int = 79; pub const _SC_THREAD_ATTR_STACKADDR: ::c_int = 80; pub const _SC_THREAD_ATTR_STACKSIZE: ::c_int = 81; pub const _SC_THREAD_PRIORITY_SCHEDULING: ::c_int = 82; pub const _SC_THREAD_PRIO_INHERIT: ::c_int = 83; pub const _SC_THREAD_PRIO_PROTECT: ::c_int = 84; pub const _SC_THREAD_SAFE_FUNCTIONS: ::c_int = 85; pub const _SC_NPROCESSORS_CONF: ::c_int = 96; pub const _SC_NPROCESSORS_ONLN: ::c_int = 97; pub 
const _SC_PHYS_PAGES: ::c_int = 98; pub const _SC_AVPHYS_PAGES: ::c_int = 99; pub const _SC_MONOTONIC_CLOCK: ::c_int = 100; pub const _SC_2_PBS: ::c_int = 101; pub const _SC_2_PBS_ACCOUNTING: ::c_int = 102; pub const _SC_2_PBS_CHECKPOINT: ::c_int = 103; pub const _SC_2_PBS_LOCATE: ::c_int = 104; pub const _SC_2_PBS_MESSAGE: ::c_int = 105; pub const _SC_2_PBS_TRACK: ::c_int = 106; pub const _SC_ADVISORY_INFO: ::c_int = 107; pub const _SC_BARRIERS: ::c_int = 108; pub const _SC_CLOCK_SELECTION: ::c_int = 109; pub const _SC_CPUTIME: ::c_int = 110; pub const _SC_HOST_NAME_MAX: ::c_int = 111; pub const _SC_IPV6: ::c_int = 112; pub const _SC_RAW_SOCKETS: ::c_int = 113; pub const _SC_READER_WRITER_LOCKS: ::c_int = 114; pub const _SC_REGEXP: ::c_int = 115; pub const _SC_SHELL: ::c_int = 116; pub const _SC_SPAWN: ::c_int = 117; pub const _SC_SPIN_LOCKS: ::c_int = 118; pub const _SC_SPORADIC_SERVER: ::c_int = 119; pub const _SC_SS_REPL_MAX: ::c_int = 120; pub const _SC_SYMLOOP_MAX: ::c_int = 121; pub const _SC_THREAD_CPUTIME: ::c_int = 122; pub const _SC_THREAD_PROCESS_SHARED: ::c_int = 123; pub const _SC_THREAD_ROBUST_PRIO_INHERIT: ::c_int = 124; pub const _SC_THREAD_ROBUST_PRIO_PROTECT: ::c_int = 125; pub const _SC_THREAD_SPORADIC_SERVER: ::c_int = 126; pub const _SC_TIMEOUTS: ::c_int = 127; pub const _SC_TRACE: ::c_int = 128; pub const _SC_TRACE_EVENT_FILTER: ::c_int = 129; pub const _SC_TRACE_EVENT_NAME_MAX: ::c_int = 130; pub const _SC_TRACE_INHERIT: ::c_int = 131; pub const _SC_TRACE_LOG: ::c_int = 132; pub const _SC_TRACE_NAME_MAX: ::c_int = 133; pub const _SC_TRACE_SYS_MAX: ::c_int = 134; pub const _SC_TRACE_USER_EVENT_MAX: ::c_int = 135; pub const _SC_TYPED_MEMORY_OBJECTS: ::c_int = 136; pub const _SC_V7_ILP32_OFF32: ::c_int = 137; pub const _SC_V7_ILP32_OFFBIG: ::c_int = 138; pub const _SC_V7_LP64_OFF64: ::c_int = 139; pub const _SC_V7_LPBIG_OFFBIG: ::c_int = 140; pub const _SC_XOPEN_STREAMS: ::c_int = 141; pub const _SC_XOPEN_UUCP: ::c_int = 142; pub const PTHREAD_MUTEX_NORMAL: ::c_int = 0; pub const PTHREAD_MUTEX_RECURSIVE: ::c_int = 1; pub const PTHREAD_MUTEX_ERRORCHECK: ::c_int = 2; pub const PTHREAD_MUTEX_DEFAULT: ::c_int = PTHREAD_MUTEX_NORMAL; pub const FIOCLEX: ::c_int = 0x5451; pub const FIONCLEX: ::c_int = 0x5450; pub const SIGCHLD: ::c_int = 17; pub const SIGBUS: ::c_int = 7; pub const SIGUSR1: ::c_int = 10; pub const SIGUSR2: ::c_int = 12; pub const SIGCONT: ::c_int = 18; pub const SIGSTOP: ::c_int = 19; pub const SIGTSTP: ::c_int = 20; pub const SIGURG: ::c_int = 23; pub const SIGIO: ::c_int = 29; pub const SIGSYS: ::c_int = 31; pub const SIGSTKFLT: ::c_int = 16; #[deprecated(since = "0.2.55", note = "Use SIGSYS instead")] pub const SIGUNUSED: ::c_int = 31; pub const SIGTTIN: ::c_int = 21; pub const SIGTTOU: ::c_int = 22; pub const SIGXCPU: ::c_int = 24; pub const SIGXFSZ: ::c_int = 25; pub const SIGVTALRM: ::c_int = 26; pub const SIGPROF: ::c_int = 27; pub const SIGWINCH: ::c_int = 28; pub const SIGPOLL: ::c_int = 29; pub const SIGPWR: ::c_int = 30; pub const SIG_SETMASK: ::c_int = 2; pub const SIG_BLOCK: ::c_int = 0x000000; pub const SIG_UNBLOCK: ::c_int = 0x01; pub const RUSAGE_CHILDREN: ::c_int = -1; pub const LC_PAPER: ::c_int = 7; pub const LC_NAME: ::c_int = 8; pub const LC_ADDRESS: ::c_int = 9; pub const LC_TELEPHONE: ::c_int = 10; pub const LC_MEASUREMENT: ::c_int = 11; pub const LC_IDENTIFICATION: ::c_int = 12; pub const LC_PAPER_MASK: ::c_int = (1 << LC_PAPER); pub const LC_NAME_MASK: ::c_int = (1 << LC_NAME); pub const LC_ADDRESS_MASK: ::c_int = (1 << LC_ADDRESS); 
pub const LC_TELEPHONE_MASK: ::c_int = (1 << LC_TELEPHONE);
pub const LC_MEASUREMENT_MASK: ::c_int = (1 << LC_MEASUREMENT);
pub const LC_IDENTIFICATION_MASK: ::c_int = (1 << LC_IDENTIFICATION);
pub const LC_ALL_MASK: ::c_int = ::LC_CTYPE_MASK
    | ::LC_NUMERIC_MASK
    | ::LC_TIME_MASK
    | ::LC_COLLATE_MASK
    | ::LC_MONETARY_MASK
    | ::LC_MESSAGES_MASK
    | LC_PAPER_MASK
    | LC_NAME_MASK
    | LC_ADDRESS_MASK
    | LC_TELEPHONE_MASK
    | LC_MEASUREMENT_MASK
    | LC_IDENTIFICATION_MASK;

pub const MAP_ANON: ::c_int = 0x0020;
pub const MAP_ANONYMOUS: ::c_int = 0x0020;
pub const MAP_GROWSDOWN: ::c_int = 0x0100;
pub const MAP_DENYWRITE: ::c_int = 0x0800;
pub const MAP_EXECUTABLE: ::c_int = 0x01000;
pub const MAP_LOCKED: ::c_int = 0x02000;
pub const MAP_NORESERVE: ::c_int = 0x04000;
pub const MAP_POPULATE: ::c_int = 0x08000;
pub const MAP_NONBLOCK: ::c_int = 0x010000;
pub const MAP_STACK: ::c_int = 0x020000;

pub const EDEADLK: ::c_int = 35;
pub const ENAMETOOLONG: ::c_int = 36;
pub const ENOLCK: ::c_int = 37;
pub const ENOSYS: ::c_int = 38;
pub const ENOTEMPTY: ::c_int = 39;
pub const ELOOP: ::c_int = 40;
pub const ENOMSG: ::c_int = 42;
pub const EIDRM: ::c_int = 43;
pub const ECHRNG: ::c_int = 44;
pub const EL2NSYNC: ::c_int = 45;
pub const EL3HLT: ::c_int = 46;
pub const EL3RST: ::c_int = 47;
pub const ELNRNG: ::c_int = 48;
pub const EUNATCH: ::c_int = 49;
pub const ENOCSI: ::c_int = 50;
pub const EL2HLT: ::c_int = 51;
pub const EBADE: ::c_int = 52;
pub const EBADR: ::c_int = 53;
pub const EXFULL: ::c_int = 54;
pub const ENOANO: ::c_int = 55;
pub const EBADRQC: ::c_int = 56;
pub const EBADSLT: ::c_int = 57;
pub const EMULTIHOP: ::c_int = 72;
pub const EBADMSG: ::c_int = 74;
pub const EOVERFLOW: ::c_int = 75;
pub const ENOTUNIQ: ::c_int = 76;
pub const EBADFD: ::c_int = 77;
pub const EREMCHG: ::c_int = 78;
pub const ELIBACC: ::c_int = 79;
pub const ELIBBAD: ::c_int = 80;
pub const ELIBSCN: ::c_int = 81;
pub const ELIBMAX: ::c_int = 82;
pub const ELIBEXEC: ::c_int = 83;
pub const EILSEQ: ::c_int = 84;
pub const ERESTART: ::c_int = 85;
pub const ESTRPIPE: ::c_int = 86;
pub const EUSERS: ::c_int = 87;
pub const ENOTSOCK: ::c_int = 88;
pub const EDESTADDRREQ: ::c_int = 89;
pub const EMSGSIZE: ::c_int = 90;
pub const EPROTOTYPE: ::c_int = 91;
pub const ENOPROTOOPT: ::c_int = 92;
pub const EPROTONOSUPPORT: ::c_int = 93;
pub const ESOCKTNOSUPPORT: ::c_int = 94;
pub const EOPNOTSUPP: ::c_int = 95;
pub const ENOTSUP: ::c_int = EOPNOTSUPP;
pub const EPFNOSUPPORT: ::c_int = 96;
pub const EAFNOSUPPORT: ::c_int = 97;
pub const EADDRINUSE: ::c_int = 98;
pub const EADDRNOTAVAIL: ::c_int = 99;
pub const ENETDOWN: ::c_int = 100;
pub const ENETUNREACH: ::c_int = 101;
pub const ENETRESET: ::c_int = 102;
pub const ECONNABORTED: ::c_int = 103;
pub const ECONNRESET: ::c_int = 104;
pub const ENOBUFS: ::c_int = 105;
pub const EISCONN: ::c_int = 106;
pub const ENOTCONN: ::c_int = 107;
pub const ESHUTDOWN: ::c_int = 108;
pub const ETOOMANYREFS: ::c_int = 109;
pub const ETIMEDOUT: ::c_int = 110;
pub const ECONNREFUSED: ::c_int = 111;
pub const EHOSTDOWN: ::c_int = 112;
pub const EHOSTUNREACH: ::c_int = 113;
pub const EALREADY: ::c_int = 114;
pub const EINPROGRESS: ::c_int = 115;
pub const ESTALE: ::c_int = 116;
pub const EUCLEAN: ::c_int = 117;
pub const ENOTNAM: ::c_int = 118;
pub const ENAVAIL: ::c_int = 119;
pub const EISNAM: ::c_int = 120;
pub const EREMOTEIO: ::c_int = 121;
pub const EDQUOT: ::c_int = 122;
pub const ENOMEDIUM: ::c_int = 123;
pub const EMEDIUMTYPE: ::c_int = 124;
pub const ECANCELED: ::c_int = 125;
pub const ENOKEY: ::c_int = 126;
pub const EKEYEXPIRED: ::c_int = 127;
pub const EKEYREVOKED: ::c_int = 128;
pub const EKEYREJECTED: ::c_int = 129;
pub const EOWNERDEAD: ::c_int = 130;
pub const ENOTRECOVERABLE: ::c_int = 131;

pub const SOCK_STREAM: ::c_int = 1;
pub const SOCK_DGRAM: ::c_int = 2;
pub const SOCK_SEQPACKET: ::c_int = 5;
pub const SOCK_DCCP: ::c_int = 6;
pub const SOCK_PACKET: ::c_int = 10;

pub const SOL_SOCKET: ::c_int = 1;
pub const SOL_SCTP: ::c_int = 132;
pub const SOL_IPX: ::c_int = 256;
pub const SOL_AX25: ::c_int = 257;
pub const SOL_ATALK: ::c_int = 258;
pub const SOL_NETROM: ::c_int = 259;
pub const SOL_ROSE: ::c_int = 260;

/* DCCP socket options */
pub const DCCP_SOCKOPT_PACKET_SIZE: ::c_int = 1;
pub const DCCP_SOCKOPT_SERVICE: ::c_int = 2;
pub const DCCP_SOCKOPT_CHANGE_L: ::c_int = 3;
pub const DCCP_SOCKOPT_CHANGE_R: ::c_int = 4;
pub const DCCP_SOCKOPT_GET_CUR_MPS: ::c_int = 5;
pub const DCCP_SOCKOPT_SERVER_TIMEWAIT: ::c_int = 6;
pub const DCCP_SOCKOPT_SEND_CSCOV: ::c_int = 10;
pub const DCCP_SOCKOPT_RECV_CSCOV: ::c_int = 11;
pub const DCCP_SOCKOPT_AVAILABLE_CCIDS: ::c_int = 12;
pub const DCCP_SOCKOPT_CCID: ::c_int = 13;
pub const DCCP_SOCKOPT_TX_CCID: ::c_int = 14;
pub const DCCP_SOCKOPT_RX_CCID: ::c_int = 15;
pub const DCCP_SOCKOPT_QPOLICY_ID: ::c_int = 16;
pub const DCCP_SOCKOPT_QPOLICY_TXQLEN: ::c_int = 17;
pub const DCCP_SOCKOPT_CCID_RX_INFO: ::c_int = 128;
pub const DCCP_SOCKOPT_CCID_TX_INFO: ::c_int = 192;

/// maximum number of services provided on the same listening port
pub const DCCP_SERVICE_LIST_MAX_LEN: ::c_int = 32;

pub const SO_REUSEADDR: ::c_int = 2;
pub const SO_TYPE: ::c_int = 3;
pub const SO_ERROR: ::c_int = 4;
pub const SO_DONTROUTE: ::c_int = 5;
pub const SO_BROADCAST: ::c_int = 6;
pub const SO_SNDBUF: ::c_int = 7;
pub const SO_RCVBUF: ::c_int = 8;
pub const SO_KEEPALIVE: ::c_int = 9;
pub const SO_OOBINLINE: ::c_int = 10;
pub const SO_PRIORITY: ::c_int = 12;
pub const SO_LINGER: ::c_int = 13;
pub const SO_BSDCOMPAT: ::c_int = 14;
pub const SO_REUSEPORT: ::c_int = 15;
pub const SO_PASSCRED: ::c_int = 16;
pub const SO_PEERCRED: ::c_int = 17;
pub const SO_RCVLOWAT: ::c_int = 18;
pub const SO_SNDLOWAT: ::c_int = 19;
pub const SO_RCVTIMEO: ::c_int = 20;
pub const SO_SNDTIMEO: ::c_int = 21;
pub const SO_BINDTODEVICE: ::c_int = 25;
pub const SO_TIMESTAMP: ::c_int = 29;
pub const SO_ACCEPTCONN: ::c_int = 30;
pub const SO_SNDBUFFORCE: ::c_int = 32;
pub const SO_RCVBUFFORCE: ::c_int = 33;
pub const SO_MARK: ::c_int = 36;
pub const SO_PROTOCOL: ::c_int = 38;
pub const SO_DOMAIN: ::c_int = 39;
pub const SO_RXQ_OVFL: ::c_int = 40;
pub const SO_PEEK_OFF: ::c_int = 42;
pub const SO_BUSY_POLL: ::c_int = 46;

pub const IPTOS_ECN_NOTECT: u8 = 0x00;

pub const O_ACCMODE: ::c_int = 3;
pub const O_APPEND: ::c_int = 1024;
pub const O_CREAT: ::c_int = 64;
pub const O_EXCL: ::c_int = 128;
pub const O_NOCTTY: ::c_int = 256;
pub const O_NONBLOCK: ::c_int = 2048;
pub const O_SYNC: ::c_int = 0x101000;
pub const O_ASYNC: ::c_int = 0x2000;
pub const O_NDELAY: ::c_int = 0x800;
pub const O_DSYNC: ::c_int = 4096;

pub const NI_MAXHOST: ::size_t = 1025;

pub const NCCS: usize = 19;
pub const TCSBRKP: ::c_int = 0x5425;
pub const TCSANOW: ::c_int = 0;
pub const TCSADRAIN: ::c_int = 0x1;
pub const TCSAFLUSH: ::c_int = 0x2;
pub const VEOF: usize = 4;
pub const VEOL: usize = 11;
pub const VEOL2: usize = 16;
pub const VMIN: usize = 6;
pub const IEXTEN: ::tcflag_t = 0x00008000;
pub const TOSTOP: ::tcflag_t = 0x00000100;
pub const FLUSHO: ::tcflag_t = 0x00001000;
pub const EXTPROC: ::tcflag_t = 0o200000;

pub const ADFS_SUPER_MAGIC: ::c_long = 0x0000adf5;
pub const AFFS_SUPER_MAGIC: ::c_long = 0x0000adff;
pub const CODA_SUPER_MAGIC: ::c_long = 0x73757245;
pub const CRAMFS_MAGIC: ::c_long = 0x28cd3d45;
pub const EFS_SUPER_MAGIC: ::c_long = 0x00414a53;
pub const EXT2_SUPER_MAGIC: ::c_long = 0x0000ef53;
pub const EXT3_SUPER_MAGIC: ::c_long = 0x0000ef53;
pub const EXT4_SUPER_MAGIC: ::c_long = 0x0000ef53;
pub const HPFS_SUPER_MAGIC: ::c_long = 0xf995e849;
pub const HUGETLBFS_MAGIC: ::c_long = 0x958458f6;
pub const ISOFS_SUPER_MAGIC: ::c_long = 0x00009660;
pub const JFFS2_SUPER_MAGIC: ::c_long = 0x000072b6;
pub const MINIX_SUPER_MAGIC: ::c_long = 0x0000137f;
pub const MINIX_SUPER_MAGIC2: ::c_long = 0x0000138f;
pub const MINIX2_SUPER_MAGIC: ::c_long = 0x00002468;
pub const MINIX2_SUPER_MAGIC2: ::c_long = 0x00002478;
pub const MSDOS_SUPER_MAGIC: ::c_long = 0x00004d44;
pub const NCP_SUPER_MAGIC: ::c_long = 0x0000564c;
pub const NFS_SUPER_MAGIC: ::c_long = 0x00006969;
pub const OPENPROM_SUPER_MAGIC: ::c_long = 0x00009fa1;
pub const PROC_SUPER_MAGIC: ::c_long = 0x00009fa0;
pub const QNX4_SUPER_MAGIC: ::c_long = 0x0000002f;
pub const REISERFS_SUPER_MAGIC: ::c_long = 0x52654973;
pub const SMB_SUPER_MAGIC: ::c_long = 0x0000517b;
pub const TMPFS_MAGIC: ::c_long = 0x01021994;
pub const USBDEVICE_SUPER_MAGIC: ::c_long = 0x00009fa2;

pub const MAP_HUGETLB: ::c_int = 0x040000;

pub const PTRACE_TRACEME: ::c_int = 0;
pub const PTRACE_PEEKTEXT: ::c_int = 1;
pub const PTRACE_PEEKDATA: ::c_int = 2;
pub const PTRACE_PEEKUSER: ::c_int = 3;
pub const PTRACE_POKETEXT: ::c_int = 4;
pub const PTRACE_POKEDATA: ::c_int = 5;
pub const PTRACE_POKEUSER: ::c_int = 6;
pub const PTRACE_CONT: ::c_int = 7;
pub const PTRACE_KILL: ::c_int = 8;
pub const PTRACE_SINGLESTEP: ::c_int = 9;
pub const PTRACE_ATTACH: ::c_int = 16;
pub const PTRACE_DETACH: ::c_int = 17;
pub const PTRACE_SYSCALL: ::c_int = 24;
pub const PTRACE_SETOPTIONS: ::c_int = 0x4200;
pub const PTRACE_GETEVENTMSG: ::c_int = 0x4201;
pub const PTRACE_GETSIGINFO: ::c_int = 0x4202;
pub const PTRACE_SETSIGINFO: ::c_int = 0x4203;

pub const EFD_NONBLOCK: ::c_int = 0x800;

pub const F_GETLK: ::c_int = 5;
pub const F_GETOWN: ::c_int = 9;
pub const F_SETOWN: ::c_int = 8;
pub const F_SETLK: ::c_int = 6;
pub const F_SETLKW: ::c_int = 7;

pub const F_RDLCK: ::c_int = 0;
pub const F_WRLCK: ::c_int = 1;
pub const F_UNLCK: ::c_int = 2;

pub const RLIMIT_CPU: ::c_int = 0;
pub const RLIMIT_FSIZE: ::c_int = 1;
pub const RLIMIT_DATA: ::c_int = 2;
pub const RLIMIT_STACK: ::c_int = 3;
pub const RLIMIT_CORE: ::c_int = 4;
pub const RLIMIT_LOCKS: ::c_int = 10;
pub const RLIMIT_SIGPENDING: ::c_int = 11;
pub const RLIMIT_MSGQUEUE: ::c_int = 12;
pub const RLIMIT_NICE: ::c_int = 13;
pub const RLIMIT_RTPRIO: ::c_int = 14;
pub const RLIM_INFINITY: ::rlim_t = !0;

pub const TCGETS: ::c_int = 0x5401;
pub const TCSETS: ::c_int = 0x5402;
pub const TCSETSW: ::c_int = 0x5403;
pub const TCSETSF: ::c_int = 0x5404;
pub const TCGETA: ::c_int = 0x5405;
pub const TCSETA: ::c_int = 0x5406;
pub const TCSETAW: ::c_int = 0x5407;
pub const TCSETAF: ::c_int = 0x5408;
pub const TCSBRK: ::c_int = 0x5409;
pub const TCXONC: ::c_int = 0x540A;
pub const TCFLSH: ::c_int = 0x540B;
pub const TIOCGSOFTCAR: ::c_int = 0x5419;
pub const TIOCSSOFTCAR: ::c_int = 0x541A;
pub const TIOCINQ: ::c_int = 0x541B;
pub const TIOCLINUX: ::c_int = 0x541C;
pub const TIOCGSERIAL: ::c_int = 0x541E;
pub const TIOCEXCL: ::c_int = 0x540C;
pub const TIOCNXCL: ::c_int = 0x540D;
pub const TIOCSCTTY: ::c_int = 0x540E;
pub const TIOCGPGRP: ::c_int = 0x540F;
pub const TIOCSPGRP: ::c_int = 0x5410;
pub const TIOCOUTQ: ::c_int = 0x5411;
pub const TIOCSTI: ::c_int = 0x5412;
pub const TIOCGWINSZ: ::c_int = 0x5413;
pub const TIOCSWINSZ: ::c_int = 0x5414;
pub const TIOCMGET: ::c_int = 0x5415;
pub const TIOCMBIS: ::c_int = 0x5416;
pub const TIOCMBIC: ::c_int = 0x5417;
pub const TIOCMSET: ::c_int = 0x5418;
pub const FIONREAD: ::c_int = 0x541B;
pub const TIOCCONS: ::c_int = 0x541D;

pub const ST_RDONLY: ::c_ulong = 1;
pub const ST_NOSUID: ::c_ulong = 2;
pub const ST_NODEV: ::c_ulong = 4;
pub const ST_NOEXEC: ::c_ulong = 8;
pub const ST_SYNCHRONOUS: ::c_ulong = 16;
pub const ST_MANDLOCK: ::c_ulong = 64;
pub const ST_NOATIME: ::c_ulong = 1024;
pub const ST_NODIRATIME: ::c_ulong = 2048;
pub const ST_RELATIME: ::c_ulong = 4096;

pub const RTLD_NOLOAD: ::c_int = 0x4;

pub const SEM_FAILED: *mut sem_t = 0 as *mut sem_t;

pub const AI_PASSIVE: ::c_int = 0x00000001;
pub const AI_CANONNAME: ::c_int = 0x00000002;
pub const AI_NUMERICHOST: ::c_int = 0x00000004;
pub const AI_NUMERICSERV: ::c_int = 0x00000008;
pub const AI_MASK: ::c_int =
    AI_PASSIVE | AI_CANONNAME | AI_NUMERICHOST | AI_NUMERICSERV | AI_ADDRCONFIG;
pub const AI_ALL: ::c_int = 0x00000100;
pub const AI_V4MAPPED_CFG: ::c_int = 0x00000200;
pub const AI_ADDRCONFIG: ::c_int = 0x00000400;
pub const AI_V4MAPPED: ::c_int = 0x00000800;
pub const AI_DEFAULT: ::c_int = AI_V4MAPPED_CFG | AI_ADDRCONFIG;

pub const LINUX_REBOOT_MAGIC1: ::c_int = 0xfee1dead;
pub const LINUX_REBOOT_MAGIC2: ::c_int = 672274793;
pub const LINUX_REBOOT_MAGIC2A: ::c_int = 85072278;
pub const LINUX_REBOOT_MAGIC2B: ::c_int = 369367448;
pub const LINUX_REBOOT_MAGIC2C: ::c_int = 537993216;

pub const LINUX_REBOOT_CMD_RESTART: ::c_int = 0x01234567;
pub const LINUX_REBOOT_CMD_HALT: ::c_int = 0xCDEF0123;
pub const LINUX_REBOOT_CMD_CAD_ON: ::c_int = 0x89ABCDEF;
pub const LINUX_REBOOT_CMD_CAD_OFF: ::c_int = 0x00000000;
pub const LINUX_REBOOT_CMD_POWER_OFF: ::c_int = 0x4321FEDC;
pub const LINUX_REBOOT_CMD_RESTART2: ::c_int = 0xA1B2C3D4;
pub const LINUX_REBOOT_CMD_SW_SUSPEND: ::c_int = 0xD000FCE2;
pub const LINUX_REBOOT_CMD_KEXEC: ::c_int = 0x45584543;

pub const MCL_CURRENT: ::c_int = 0x0001;
pub const MCL_FUTURE: ::c_int = 0x0002;

pub const CBAUD: ::tcflag_t = 0o0010017;
pub const TAB1: ::tcflag_t = 0x00000800;
pub const TAB2: ::tcflag_t = 0x00001000;
pub const TAB3: ::tcflag_t = 0x00001800;
pub const CR1: ::tcflag_t = 0x00000200;
pub const CR2: ::tcflag_t = 0x00000400;
pub const CR3: ::tcflag_t = 0x00000600;
pub const FF1: ::tcflag_t = 0x00008000;
pub const BS1: ::tcflag_t = 0x00002000;
pub const VT1: ::tcflag_t = 0x00004000;
pub const VWERASE: usize = 14;
pub const VREPRINT: usize = 12;
pub const VSUSP: usize = 10;
pub const VSTART: usize = 8;
pub const VSTOP: usize = 9;
pub const VDISCARD: usize = 13;
pub const VTIME: usize = 5;
pub const IXON: ::tcflag_t = 0x00000400;
pub const IXOFF: ::tcflag_t = 0x00001000;
pub const ONLCR: ::tcflag_t = 0x4;
pub const CSIZE: ::tcflag_t = 0x00000030;
pub const CS6: ::tcflag_t = 0x00000010;
pub const CS7: ::tcflag_t = 0x00000020;
pub const CS8: ::tcflag_t = 0x00000030;
pub const CSTOPB: ::tcflag_t = 0x00000040;
pub const CREAD: ::tcflag_t = 0x00000080;
pub const PARENB: ::tcflag_t = 0x00000100;
pub const PARODD: ::tcflag_t = 0x00000200;
pub const HUPCL: ::tcflag_t = 0x00000400;
pub const CLOCAL: ::tcflag_t = 0x00000800;
pub const ECHOKE: ::tcflag_t = 0x00000800;
pub const ECHOE: ::tcflag_t = 0x00000010;
pub const ECHOK: ::tcflag_t = 0x00000020;
pub const ECHONL: ::tcflag_t = 0x00000040;
pub const ECHOPRT: ::tcflag_t = 0x00000400;
pub const ECHOCTL: ::tcflag_t = 0x00000200;
pub const ISIG: ::tcflag_t = 0x00000001;
pub const ICANON: ::tcflag_t = 0x00000002;
pub const PENDIN: ::tcflag_t = 0x00004000;
pub const NOFLSH: ::tcflag_t = 0x00000080;
pub const VSWTC: usize = 7;
pub const OLCUC: ::tcflag_t = 0o000002;
pub const NLDLY: ::tcflag_t = 0o000400;
pub const CRDLY: ::tcflag_t = 0o003000;
pub const TABDLY: ::tcflag_t = 0o014000;
pub const BSDLY: ::tcflag_t = 0o020000;
pub const FFDLY: ::tcflag_t = 0o100000;
pub const VTDLY: ::tcflag_t = 0o040000;
pub const XTABS: ::tcflag_t = 0o014000;

pub const B0: ::speed_t = 0o000000;
pub const B50: ::speed_t = 0o000001;
pub const B75: ::speed_t = 0o000002;
pub const B110: ::speed_t = 0o000003;
pub const B134: ::speed_t = 0o000004;
pub const B150: ::speed_t = 0o000005;
pub const B200: ::speed_t = 0o000006;
pub const B300: ::speed_t = 0o000007;
pub const B600: ::speed_t = 0o000010;
pub const B1200: ::speed_t = 0o000011;
pub const B1800: ::speed_t = 0o000012;
pub const B2400: ::speed_t = 0o000013;
pub const B4800: ::speed_t = 0o000014;
pub const B9600: ::speed_t = 0o000015;
pub const B19200: ::speed_t = 0o000016;
pub const B38400: ::speed_t = 0o000017;
pub const EXTA: ::speed_t = B19200;
pub const EXTB: ::speed_t = B38400;
pub const BOTHER: ::speed_t = 0o010000;
pub const B57600: ::speed_t = 0o010001;
pub const B115200: ::speed_t = 0o010002;
pub const B230400: ::speed_t = 0o010003;
pub const B460800: ::speed_t = 0o010004;
pub const B500000: ::speed_t = 0o010005;
pub const B576000: ::speed_t = 0o010006;
pub const B921600: ::speed_t = 0o010007;
pub const B1000000: ::speed_t = 0o010010;
pub const B1152000: ::speed_t = 0o010011;
pub const B1500000: ::speed_t = 0o010012;
pub const B2000000: ::speed_t = 0o010013;
pub const B2500000: ::speed_t = 0o010014;
pub const B3000000: ::speed_t = 0o010015;
pub const B3500000: ::speed_t = 0o010016;
pub const B4000000: ::speed_t = 0o010017;

pub const EAI_AGAIN: ::c_int = 2;
pub const EAI_BADFLAGS: ::c_int = 3;
pub const EAI_FAIL: ::c_int = 4;
pub const EAI_FAMILY: ::c_int = 5;
pub const EAI_MEMORY: ::c_int = 6;
pub const EAI_NODATA: ::c_int = 7;
pub const EAI_NONAME: ::c_int = 8;
pub const EAI_SERVICE: ::c_int = 9;
pub const EAI_SOCKTYPE: ::c_int = 10;
pub const EAI_SYSTEM: ::c_int = 11;
pub const EAI_OVERFLOW: ::c_int = 14;

pub const NETLINK_ROUTE: ::c_int = 0;
pub const NETLINK_UNUSED: ::c_int = 1;
pub const NETLINK_USERSOCK: ::c_int = 2;
pub const NETLINK_FIREWALL: ::c_int = 3;
pub const NETLINK_SOCK_DIAG: ::c_int = 4;
pub const NETLINK_NFLOG: ::c_int = 5;
pub const NETLINK_XFRM: ::c_int = 6;
pub const NETLINK_SELINUX: ::c_int = 7;
pub const NETLINK_ISCSI: ::c_int = 8;
pub const NETLINK_AUDIT: ::c_int = 9;
pub const NETLINK_FIB_LOOKUP: ::c_int = 10;
pub const NETLINK_CONNECTOR: ::c_int = 11;
pub const NETLINK_NETFILTER: ::c_int = 12;
pub const NETLINK_IP6_FW: ::c_int = 13;
pub const NETLINK_DNRTMSG: ::c_int = 14;
pub const NETLINK_KOBJECT_UEVENT: ::c_int = 15;
pub const NETLINK_GENERIC: ::c_int = 16;
pub const NETLINK_SCSITRANSPORT: ::c_int = 18;
pub const NETLINK_ECRYPTFS: ::c_int = 19;
pub const NETLINK_RDMA: ::c_int = 20;
pub const NETLINK_CRYPTO: ::c_int = 21;
pub const NETLINK_INET_DIAG: ::c_int = NETLINK_SOCK_DIAG;

pub const MAX_LINKS: ::c_int = 32;

pub const NLM_F_REQUEST: ::c_int = 1;
pub const NLM_F_MULTI: ::c_int = 2;
pub const NLM_F_ACK: ::c_int = 4;
pub const NLM_F_ECHO: ::c_int = 8;
pub const NLM_F_DUMP_INTR: ::c_int = 16;

pub const NLM_F_ROOT: ::c_int = 0x100;
pub const NLM_F_MATCH: ::c_int = 0x200;
pub const NLM_F_ATOMIC: ::c_int = 0x400;
pub const NLM_F_DUMP: ::c_int = NLM_F_ROOT | NLM_F_MATCH;

pub const NLM_F_REPLACE: ::c_int = 0x100;
pub const NLM_F_EXCL: ::c_int = 0x200;
pub const NLM_F_CREATE: ::c_int = 0x400;
pub const NLM_F_APPEND: ::c_int = 0x800;

pub const NLMSG_NOOP: ::c_int = 0x1;
pub const NLMSG_ERROR: ::c_int = 0x2;
pub const NLMSG_DONE: ::c_int = 0x3;
pub const NLMSG_OVERRUN: ::c_int = 0x4;
pub const NLMSG_MIN_TYPE: ::c_int = 0x10;

// linux/netfilter/nfnetlink.h
pub const NFNLGRP_NONE: ::c_int = 0;
pub const NFNLGRP_CONNTRACK_NEW: ::c_int = 1;
pub const NFNLGRP_CONNTRACK_UPDATE: ::c_int = 2;
pub const NFNLGRP_CONNTRACK_DESTROY: ::c_int = 3;
pub const NFNLGRP_CONNTRACK_EXP_NEW: ::c_int = 4;
pub const NFNLGRP_CONNTRACK_EXP_UPDATE: ::c_int = 5;
pub const NFNLGRP_CONNTRACK_EXP_DESTROY: ::c_int = 6;
pub const NFNLGRP_NFTABLES: ::c_int = 7;
pub const NFNLGRP_ACCT_QUOTA: ::c_int = 8;

pub const NFNETLINK_V0: ::c_int = 0;

pub const NFNL_SUBSYS_NONE: ::c_int = 0;
pub const NFNL_SUBSYS_CTNETLINK: ::c_int = 1;
pub const NFNL_SUBSYS_CTNETLINK_EXP: ::c_int = 2;
pub const NFNL_SUBSYS_QUEUE: ::c_int = 3;
pub const NFNL_SUBSYS_ULOG: ::c_int = 4;
pub const NFNL_SUBSYS_OSF: ::c_int = 5;
pub const NFNL_SUBSYS_IPSET: ::c_int = 6;
pub const NFNL_SUBSYS_ACCT: ::c_int = 7;
pub const NFNL_SUBSYS_CTNETLINK_TIMEOUT: ::c_int = 8;
pub const NFNL_SUBSYS_CTHELPER: ::c_int = 9;
pub const NFNL_SUBSYS_NFTABLES: ::c_int = 10;
pub const NFNL_SUBSYS_NFT_COMPAT: ::c_int = 11;
pub const NFNL_SUBSYS_COUNT: ::c_int = 12;

pub const NFNL_MSG_BATCH_BEGIN: ::c_int = NLMSG_MIN_TYPE;
pub const NFNL_MSG_BATCH_END: ::c_int = NLMSG_MIN_TYPE + 1;

// linux/netfilter/nfnetlink_log.h
pub const NFULNL_MSG_PACKET: ::c_int = 0;
pub const NFULNL_MSG_CONFIG: ::c_int = 1;

pub const NFULA_UNSPEC: ::c_int = 0;
pub const NFULA_PACKET_HDR: ::c_int = 1;
pub const NFULA_MARK: ::c_int = 2;
pub const NFULA_TIMESTAMP: ::c_int = 3;
pub const NFULA_IFINDEX_INDEV: ::c_int = 4;
pub const NFULA_IFINDEX_OUTDEV: ::c_int = 5;
pub const NFULA_IFINDEX_PHYSINDEV: ::c_int = 6;
pub const NFULA_IFINDEX_PHYSOUTDEV: ::c_int = 7;
pub const NFULA_HWADDR: ::c_int = 8;
pub const NFULA_PAYLOAD: ::c_int = 9;
pub const NFULA_PREFIX: ::c_int = 10;
pub const NFULA_UID: ::c_int = 11;
pub const NFULA_SEQ: ::c_int = 12;
pub const NFULA_SEQ_GLOBAL: ::c_int = 13;
pub const NFULA_GID: ::c_int = 14;
pub const NFULA_HWTYPE: ::c_int = 15;
pub const NFULA_HWHEADER: ::c_int = 16;
pub const NFULA_HWLEN: ::c_int = 17;
pub const NFULA_CT: ::c_int = 18;
pub const NFULA_CT_INFO: ::c_int = 19;

pub const NFULNL_CFG_CMD_NONE: ::c_int = 0;
pub const NFULNL_CFG_CMD_BIND: ::c_int = 1;
pub const NFULNL_CFG_CMD_UNBIND: ::c_int = 2;
pub const NFULNL_CFG_CMD_PF_BIND: ::c_int = 3;
pub const NFULNL_CFG_CMD_PF_UNBIND: ::c_int = 4;

pub const NFULA_CFG_UNSPEC: ::c_int = 0;
pub const NFULA_CFG_CMD: ::c_int = 1;
pub const NFULA_CFG_MODE: ::c_int = 2;
pub const NFULA_CFG_NLBUFSIZ: ::c_int = 3;
pub const NFULA_CFG_TIMEOUT: ::c_int = 4;
pub const NFULA_CFG_QTHRESH: ::c_int = 5;
pub const NFULA_CFG_FLAGS: ::c_int = 6;

pub const NFULNL_COPY_NONE: ::c_int = 0x00;
pub const NFULNL_COPY_META: ::c_int = 0x01;
pub const NFULNL_COPY_PACKET: ::c_int = 0x02;

pub const NFULNL_CFG_F_SEQ: ::c_int = 0x0001;
pub const NFULNL_CFG_F_SEQ_GLOBAL: ::c_int = 0x0002;
pub const NFULNL_CFG_F_CONNTRACK: ::c_int = 0x0004;

pub const GENL_NAMSIZ: ::c_int = 16;

pub const GENL_MIN_ID: ::c_int = NLMSG_MIN_TYPE;
pub const GENL_MAX_ID: ::c_int = 1023;

pub const GENL_ADMIN_PERM: ::c_int = 0x01;
pub const GENL_CMD_CAP_DO: ::c_int = 0x02;
pub const GENL_CMD_CAP_DUMP: ::c_int = 0x04;
pub const GENL_CMD_CAP_HASPOL: ::c_int = 0x08;
pub const GENL_UNS_ADMIN_PERM: ::c_int = 0x10;

pub const GENL_ID_CTRL: ::c_int = NLMSG_MIN_TYPE;
pub const GENL_ID_VFS_DQUOT: ::c_int = NLMSG_MIN_TYPE + 1;
pub const GENL_ID_PMCRAID: ::c_int = NLMSG_MIN_TYPE + 2;

pub const CTRL_CMD_UNSPEC: ::c_int = 0;
pub const CTRL_CMD_NEWFAMILY: ::c_int = 1;
pub const CTRL_CMD_DELFAMILY: ::c_int = 2;
pub const CTRL_CMD_GETFAMILY: ::c_int = 3;
pub const CTRL_CMD_NEWOPS: ::c_int = 4;
pub const CTRL_CMD_DELOPS: ::c_int = 5;
pub const CTRL_CMD_GETOPS: ::c_int = 6;
pub const CTRL_CMD_NEWMCAST_GRP: ::c_int = 7;
pub const CTRL_CMD_DELMCAST_GRP: ::c_int = 8;
pub const CTRL_CMD_GETMCAST_GRP: ::c_int = 9;

pub const CTRL_ATTR_UNSPEC: ::c_int = 0;
pub const CTRL_ATTR_FAMILY_ID: ::c_int = 1;
pub const CTRL_ATTR_FAMILY_NAME: ::c_int = 2;
pub const CTRL_ATTR_VERSION: ::c_int = 3;
pub const CTRL_ATTR_HDRSIZE: ::c_int = 4;
pub const CTRL_ATTR_MAXATTR: ::c_int = 5;
pub const CTRL_ATTR_OPS: ::c_int = 6;
pub const CTRL_ATTR_MCAST_GROUPS: ::c_int = 7;

pub const CTRL_ATTR_OP_UNSPEC: ::c_int = 0;
pub const CTRL_ATTR_OP_ID: ::c_int = 1;
pub const CTRL_ATTR_OP_FLAGS: ::c_int = 2;

pub const CTRL_ATTR_MCAST_GRP_UNSPEC: ::c_int = 0;
pub const CTRL_ATTR_MCAST_GRP_NAME: ::c_int = 1;
pub const CTRL_ATTR_MCAST_GRP_ID: ::c_int = 2;

pub const NETLINK_ADD_MEMBERSHIP: ::c_int = 1;
pub const NETLINK_DROP_MEMBERSHIP: ::c_int = 2;
pub const NETLINK_PKTINFO: ::c_int = 3;
pub const NETLINK_BROADCAST_ERROR: ::c_int = 4;
pub const NETLINK_NO_ENOBUFS: ::c_int = 5;
pub const NETLINK_RX_RING: ::c_int = 6;
pub const NETLINK_TX_RING: ::c_int = 7;

pub const GRND_NONBLOCK: ::c_uint = 0x0001;
pub const GRND_RANDOM: ::c_uint = 0x0002;

pub const SECCOMP_MODE_DISABLED: ::c_uint = 0;
pub const SECCOMP_MODE_STRICT: ::c_uint = 1;
pub const SECCOMP_MODE_FILTER: ::c_uint = 2;

pub const NLA_F_NESTED: ::c_int = 1 << 15;
pub const NLA_F_NET_BYTEORDER: ::c_int = 1 << 14;
pub const NLA_TYPE_MASK: ::c_int = !(NLA_F_NESTED | NLA_F_NET_BYTEORDER);

pub const NLA_ALIGNTO: ::c_int = 4;

pub const SIGEV_THREAD_ID: ::c_int = 4;

pub const CIBAUD: ::tcflag_t = 0o02003600000;
pub const CBAUDEX: ::tcflag_t = 0o010000;

pub const TIOCM_LE: ::c_int = 0x001;
pub const TIOCM_DTR: ::c_int = 0x002;
pub const TIOCM_RTS: ::c_int = 0x004;
pub const TIOCM_ST: ::c_int = 0x008;
pub const TIOCM_SR: ::c_int = 0x010;
pub const TIOCM_CTS: ::c_int = 0x020;
pub const TIOCM_CAR: ::c_int = 0x040;
pub const TIOCM_RNG: ::c_int = 0x080;
pub const TIOCM_DSR: ::c_int = 0x100;
pub const TIOCM_CD: ::c_int = TIOCM_CAR;
pub const TIOCM_RI: ::c_int = TIOCM_RNG;

pub const POLLWRNORM: ::c_short = 0x100;
pub const POLLWRBAND: ::c_short = 0x200;

pub const SFD_CLOEXEC: ::c_int = O_CLOEXEC;
pub const SFD_NONBLOCK: ::c_int = O_NONBLOCK;

pub const SOCK_NONBLOCK: ::c_int = O_NONBLOCK;

pub const SO_ORIGINAL_DST: ::c_int = 80;
pub const IP_ORIGDSTADDR: ::c_int = 20;
pub const IP_RECVORIGDSTADDR: ::c_int = IP_ORIGDSTADDR;
pub const IPV6_ORIGDSTADDR: ::c_int = 74;
pub const IPV6_RECVORIGDSTADDR: ::c_int = IPV6_ORIGDSTADDR;
pub const IPV6_FLOWLABEL_MGR: ::c_int = 32;
pub const IPV6_FLOWINFO_SEND: ::c_int = 33;
pub const IPV6_FLOWINFO_FLOWLABEL: ::c_int = 0x000fffff;
pub const IPV6_FLOWINFO_PRIORITY: ::c_int = 0x0ff00000;
pub const IUTF8: ::tcflag_t = 0x00004000;
pub const CMSPAR: ::tcflag_t = 0o10000000000;
pub const O_TMPFILE: ::c_int = 0o20000000 | O_DIRECTORY;

pub const MFD_CLOEXEC: ::c_uint = 0x0001;
pub const MFD_ALLOW_SEALING: ::c_uint = 0x0002;
pub const MFD_HUGETLB: ::c_uint = 0x0004;
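// Illustrative sketch (an assumption, not part of the original bindings):
// the MFD_* flags above form the `flags` argument of memfd_create(2). With
// only these raw declarations available, the call can be reached through
// syscall(2) and the per-arch SYS_memfd_create number, e.g.
//
//     let name = b"demo\0";
//     let fd = unsafe {
//         syscall(
//             SYS_memfd_create,                  // arch-specific syscall number
//             name.as_ptr(),                     // NUL-terminated name
//             MFD_CLOEXEC | MFD_ALLOW_SEALING,   // flags defined above
//         )
//     } as ::c_int;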
// linux/netfilter.h
pub const NF_DROP: ::c_int = 0;
pub const NF_ACCEPT: ::c_int = 1;
pub const NF_STOLEN: ::c_int = 2;
pub const NF_QUEUE: ::c_int = 3;
pub const NF_REPEAT: ::c_int = 4;
pub const NF_STOP: ::c_int = 5;
pub const NF_MAX_VERDICT: ::c_int = NF_STOP;

pub const NF_VERDICT_MASK: ::c_int = 0x000000ff;
pub const NF_VERDICT_FLAG_QUEUE_BYPASS: ::c_int = 0x00008000;

pub const NF_VERDICT_QMASK: ::c_int = 0xffff0000;
pub const NF_VERDICT_QBITS: ::c_int = 16;

pub const NF_VERDICT_BITS: ::c_int = 16;

pub const NF_INET_PRE_ROUTING: ::c_int = 0;
pub const NF_INET_LOCAL_IN: ::c_int = 1;
pub const NF_INET_FORWARD: ::c_int = 2;
pub const NF_INET_LOCAL_OUT: ::c_int = 3;
pub const NF_INET_POST_ROUTING: ::c_int = 4;
pub const NF_INET_NUMHOOKS: ::c_int = 5;

pub const NF_NETDEV_INGRESS: ::c_int = 0;
pub const NF_NETDEV_NUMHOOKS: ::c_int = 1;

pub const NFPROTO_UNSPEC: ::c_int = 0;
pub const NFPROTO_INET: ::c_int = 1;
pub const NFPROTO_IPV4: ::c_int = 2;
pub const NFPROTO_ARP: ::c_int = 3;
pub const NFPROTO_NETDEV: ::c_int = 5;
pub const NFPROTO_BRIDGE: ::c_int = 7;
pub const NFPROTO_IPV6: ::c_int = 10;
pub const NFPROTO_DECNET: ::c_int = 12;
pub const NFPROTO_NUMPROTO: ::c_int = 13;

// linux/netfilter_ipv4.h
pub const NF_IP_PRE_ROUTING: ::c_int = 0;
pub const NF_IP_LOCAL_IN: ::c_int = 1;
pub const NF_IP_FORWARD: ::c_int = 2;
pub const NF_IP_LOCAL_OUT: ::c_int = 3;
pub const NF_IP_POST_ROUTING: ::c_int = 4;
pub const NF_IP_NUMHOOKS: ::c_int = 5;

pub const NF_IP_PRI_FIRST: ::c_int = ::INT_MIN;
pub const NF_IP_PRI_CONNTRACK_DEFRAG: ::c_int = -400;
pub const NF_IP_PRI_RAW: ::c_int = -300;
pub const NF_IP_PRI_SELINUX_FIRST: ::c_int = -225;
pub const NF_IP_PRI_CONNTRACK: ::c_int = -200;
pub const NF_IP_PRI_MANGLE: ::c_int = -150;
pub const NF_IP_PRI_NAT_DST: ::c_int = -100;
pub const NF_IP_PRI_FILTER: ::c_int = 0;
pub const NF_IP_PRI_SECURITY: ::c_int = 50;
pub const NF_IP_PRI_NAT_SRC: ::c_int = 100;
pub const NF_IP_PRI_SELINUX_LAST: ::c_int = 225;
pub const NF_IP_PRI_CONNTRACK_HELPER: ::c_int = 300;
pub const NF_IP_PRI_CONNTRACK_CONFIRM: ::c_int = ::INT_MAX;
pub const NF_IP_PRI_LAST: ::c_int = ::INT_MAX;

// linux/netfilter_ipv6.h
pub const NF_IP6_PRE_ROUTING: ::c_int = 0;
pub const NF_IP6_LOCAL_IN: ::c_int = 1;
pub const NF_IP6_FORWARD: ::c_int = 2;
pub const NF_IP6_LOCAL_OUT: ::c_int = 3;
pub const NF_IP6_POST_ROUTING: ::c_int = 4;
pub const NF_IP6_NUMHOOKS: ::c_int = 5;

pub const NF_IP6_PRI_FIRST: ::c_int = ::INT_MIN;
pub const NF_IP6_PRI_CONNTRACK_DEFRAG: ::c_int = -400;
pub const NF_IP6_PRI_RAW: ::c_int = -300;
pub const NF_IP6_PRI_SELINUX_FIRST: ::c_int = -225;
pub const NF_IP6_PRI_CONNTRACK: ::c_int = -200;
pub const NF_IP6_PRI_MANGLE: ::c_int = -150;
pub const NF_IP6_PRI_NAT_DST: ::c_int = -100;
pub const NF_IP6_PRI_FILTER: ::c_int = 0;
pub const NF_IP6_PRI_SECURITY: ::c_int = 50;
pub const NF_IP6_PRI_NAT_SRC: ::c_int = 100;
pub const NF_IP6_PRI_SELINUX_LAST: ::c_int = 225;
pub const NF_IP6_PRI_CONNTRACK_HELPER: ::c_int = 300;
pub const NF_IP6_PRI_LAST: ::c_int = ::INT_MAX;

// linux/netfilter/nf_tables.h
pub const NFT_TABLE_MAXNAMELEN: ::c_int = 256;
pub const NFT_CHAIN_MAXNAMELEN: ::c_int = 256;
pub const NFT_SET_MAXNAMELEN: ::c_int = 256;
pub const NFT_OBJ_MAXNAMELEN: ::c_int = 256;
pub const NFT_USERDATA_MAXLEN: ::c_int = 256;

pub const NFT_REG_VERDICT: ::c_int = 0;
pub const NFT_REG_1: ::c_int = 1;
pub const NFT_REG_2: ::c_int = 2;
pub const NFT_REG_3: ::c_int = 3;
pub const NFT_REG_4: ::c_int = 4;
pub const __NFT_REG_MAX: ::c_int = 5;
pub const NFT_REG32_00: ::c_int = 8;
pub const NFT_REG32_01: ::c_int = 9;
pub const NFT_REG32_02: ::c_int = 10;
pub const NFT_REG32_03: ::c_int = 11;
pub const NFT_REG32_04: ::c_int = 12;
pub const NFT_REG32_05: ::c_int = 13;
pub const NFT_REG32_06: ::c_int = 14;
pub const NFT_REG32_07: ::c_int = 15;
pub const NFT_REG32_08: ::c_int = 16;
pub const NFT_REG32_09: ::c_int = 17;
pub const NFT_REG32_10: ::c_int = 18;
pub const NFT_REG32_11: ::c_int = 19;
pub const NFT_REG32_12: ::c_int = 20;
pub const NFT_REG32_13: ::c_int = 21;
pub const NFT_REG32_14: ::c_int = 22;
pub const NFT_REG32_15: ::c_int = 23;

pub const NFT_REG_SIZE: ::c_int = 16;
pub const NFT_REG32_SIZE: ::c_int = 4;

pub const NFT_CONTINUE: ::c_int = -1;
pub const NFT_BREAK: ::c_int = -2;
pub const NFT_JUMP: ::c_int = -3;
pub const NFT_GOTO: ::c_int = -4;
pub const NFT_RETURN: ::c_int = -5;

pub const NFT_MSG_NEWTABLE: ::c_int = 0;
pub const NFT_MSG_GETTABLE: ::c_int = 1;
pub const NFT_MSG_DELTABLE: ::c_int = 2;
pub const NFT_MSG_NEWCHAIN: ::c_int = 3;
pub const NFT_MSG_GETCHAIN: ::c_int = 4;
pub const NFT_MSG_DELCHAIN: ::c_int = 5;
pub const NFT_MSG_NEWRULE: ::c_int = 6;
pub const NFT_MSG_GETRULE: ::c_int = 7;
pub const NFT_MSG_DELRULE: ::c_int = 8;
pub const NFT_MSG_NEWSET: ::c_int = 9;
pub const NFT_MSG_GETSET: ::c_int = 10;
pub const NFT_MSG_DELSET: ::c_int = 11;
pub const NFT_MSG_NEWSETELEM: ::c_int = 12;
pub const NFT_MSG_GETSETELEM: ::c_int = 13;
pub const NFT_MSG_DELSETELEM: ::c_int = 14;
pub const NFT_MSG_NEWGEN: ::c_int = 15;
pub const NFT_MSG_GETGEN: ::c_int = 16;
pub const NFT_MSG_TRACE: ::c_int = 17;
pub const NFT_MSG_NEWOBJ: ::c_int = 18;
pub const NFT_MSG_GETOBJ: ::c_int = 19;
pub const NFT_MSG_DELOBJ: ::c_int = 20;
pub const NFT_MSG_GETOBJ_RESET: ::c_int = 21;
pub const NFT_MSG_MAX: ::c_int = 25;

pub const NFT_SET_ANONYMOUS: ::c_int = 0x1;
pub const NFT_SET_CONSTANT: ::c_int = 0x2;
pub const NFT_SET_INTERVAL: ::c_int = 0x4;
pub const NFT_SET_MAP: ::c_int = 0x8;
pub const NFT_SET_TIMEOUT: ::c_int = 0x10;
pub const NFT_SET_EVAL: ::c_int = 0x20;

pub const NFT_SET_POL_PERFORMANCE: ::c_int = 0;
pub const NFT_SET_POL_MEMORY: ::c_int = 1;

pub const NFT_SET_ELEM_INTERVAL_END: ::c_int = 0x1;

pub const NFT_DATA_VALUE: ::c_uint = 0;
pub const NFT_DATA_VERDICT: ::c_uint = 0xffffff00;

pub const NFT_DATA_RESERVED_MASK: ::c_uint = 0xffffff00;

pub const NFT_DATA_VALUE_MAXLEN: ::c_int = 64;

pub const NFT_BYTEORDER_NTOH: ::c_int = 0;
pub const NFT_BYTEORDER_HTON: ::c_int = 1;

pub const NFT_CMP_EQ: ::c_int = 0;
pub const NFT_CMP_NEQ: ::c_int = 1;
pub const NFT_CMP_LT: ::c_int = 2;
pub const NFT_CMP_LTE: ::c_int = 3;
pub const NFT_CMP_GT: ::c_int = 4;
pub const NFT_CMP_GTE: ::c_int = 5;

pub const NFT_RANGE_EQ: ::c_int = 0;
pub const NFT_RANGE_NEQ: ::c_int = 1;

pub const NFT_LOOKUP_F_INV: ::c_int = (1 << 0);

pub const NFT_DYNSET_OP_ADD: ::c_int = 0;
pub const NFT_DYNSET_OP_UPDATE: ::c_int = 1;

pub const NFT_DYNSET_F_INV: ::c_int = (1 << 0);

pub const NFT_PAYLOAD_LL_HEADER: ::c_int = 0;
pub const NFT_PAYLOAD_NETWORK_HEADER: ::c_int = 1;
pub const NFT_PAYLOAD_TRANSPORT_HEADER: ::c_int = 2;

pub const NFT_PAYLOAD_CSUM_NONE: ::c_int = 0;
pub const NFT_PAYLOAD_CSUM_INET: ::c_int = 1;

pub const NFT_META_LEN: ::c_int = 0;
pub const NFT_META_PROTOCOL: ::c_int = 1;
pub const NFT_META_PRIORITY: ::c_int = 2;
pub const NFT_META_MARK: ::c_int = 3;
pub const NFT_META_IIF: ::c_int = 4;
pub const NFT_META_OIF: ::c_int = 5;
pub const NFT_META_IIFNAME: ::c_int = 6;
pub const NFT_META_OIFNAME: ::c_int = 7;
pub const NFT_META_IIFTYPE: ::c_int = 8;
pub const NFT_META_OIFTYPE: ::c_int = 9;
pub const NFT_META_SKUID: ::c_int = 10;
pub const NFT_META_SKGID: ::c_int = 11;
pub const NFT_META_NFTRACE: ::c_int = 12;
pub const NFT_META_RTCLASSID: ::c_int = 13;
pub const NFT_META_SECMARK: ::c_int = 14;
pub const NFT_META_NFPROTO: ::c_int = 15;
pub const NFT_META_L4PROTO: ::c_int = 16;
pub const NFT_META_BRI_IIFNAME: ::c_int = 17;
pub const NFT_META_BRI_OIFNAME: ::c_int = 18;
pub const NFT_META_PKTTYPE: ::c_int = 19;
pub const NFT_META_CPU: ::c_int = 20;
pub const NFT_META_IIFGROUP: ::c_int = 21;
pub const NFT_META_OIFGROUP: ::c_int = 22;
pub const NFT_META_CGROUP: ::c_int = 23;
pub const NFT_META_PRANDOM: ::c_int = 24;

pub const NFT_CT_STATE: ::c_int = 0;
pub const NFT_CT_DIRECTION: ::c_int = 1;
pub const NFT_CT_STATUS: ::c_int = 2;
pub const NFT_CT_MARK: ::c_int = 3;
pub const NFT_CT_SECMARK: ::c_int = 4;
pub const NFT_CT_EXPIRATION: ::c_int = 5;
pub const NFT_CT_HELPER: ::c_int = 6;
pub const NFT_CT_L3PROTOCOL: ::c_int = 7;
pub const NFT_CT_SRC: ::c_int = 8;
pub const NFT_CT_DST: ::c_int = 9;
pub const NFT_CT_PROTOCOL: ::c_int = 10;
pub const NFT_CT_PROTO_SRC: ::c_int = 11;
pub const NFT_CT_PROTO_DST: ::c_int = 12;
pub const NFT_CT_LABELS: ::c_int = 13;
pub const NFT_CT_PKTS: ::c_int = 14;
pub const NFT_CT_BYTES: ::c_int = 15;

pub const NFT_LIMIT_PKTS: ::c_int = 0;
pub const NFT_LIMIT_PKT_BYTES: ::c_int = 1;

pub const NFT_LIMIT_F_INV: ::c_int = (1 << 0);

pub const NFT_QUEUE_FLAG_BYPASS: ::c_int = 0x01;
pub const NFT_QUEUE_FLAG_CPU_FANOUT: ::c_int = 0x02;
pub const NFT_QUEUE_FLAG_MASK: ::c_int = 0x03;

pub const NFT_QUOTA_F_INV: ::c_int = (1 << 0);

pub const NFT_REJECT_ICMP_UNREACH: ::c_int = 0;
pub const NFT_REJECT_TCP_RST: ::c_int = 1;
pub const NFT_REJECT_ICMPX_UNREACH: ::c_int = 2;

pub const NFT_REJECT_ICMPX_NO_ROUTE: ::c_int = 0;
pub const NFT_REJECT_ICMPX_PORT_UNREACH: ::c_int = 1;
pub const NFT_REJECT_ICMPX_HOST_UNREACH: ::c_int = 2;
pub const NFT_REJECT_ICMPX_ADMIN_PROHIBITED: ::c_int = 3;

pub const NFT_NAT_SNAT: ::c_int = 0;
pub const NFT_NAT_DNAT: ::c_int = 1;

pub const NFT_TRACETYPE_UNSPEC: ::c_int = 0;
pub const NFT_TRACETYPE_POLICY: ::c_int = 1;
pub const NFT_TRACETYPE_RETURN: ::c_int = 2;
pub const NFT_TRACETYPE_RULE: ::c_int = 3;

pub const NFT_NG_INCREMENTAL: ::c_int = 0;
pub const NFT_NG_RANDOM: ::c_int = 1;

pub const IFF_TUN: ::c_int = 0x0001;
pub const IFF_TAP: ::c_int = 0x0002;
pub const IFF_NO_PI: ::c_int = 0x1000;

// start android/platform/bionic/libc/kernel/uapi/linux/if_ether.h
// from https://android.googlesource.com/
// platform/bionic/+/master/libc/kernel/uapi/linux/if_ether.h
pub const ETH_ALEN: ::c_int = 6;
pub const ETH_HLEN: ::c_int = 14;
pub const ETH_ZLEN: ::c_int = 60;
pub const ETH_DATA_LEN: ::c_int = 1500;
pub const ETH_FRAME_LEN: ::c_int = 1514;
pub const ETH_FCS_LEN: ::c_int = 4;
pub const ETH_MIN_MTU: ::c_int = 68;
pub const ETH_MAX_MTU: ::c_int = 0xFFFF;
pub const ETH_P_LOOP: ::c_int = 0x0060;
pub const ETH_P_PUP: ::c_int = 0x0200;
pub const ETH_P_PUPAT: ::c_int = 0x0201;
pub const ETH_P_TSN: ::c_int = 0x22F0;
pub const ETH_P_IP: ::c_int = 0x0800;
pub const ETH_P_X25: ::c_int = 0x0805;
pub const ETH_P_ARP: ::c_int = 0x0806;
pub const ETH_P_BPQ: ::c_int = 0x08FF;
pub const ETH_P_IEEEPUP: ::c_int = 0x0a00;
pub const ETH_P_IEEEPUPAT: ::c_int = 0x0a01;
pub const ETH_P_BATMAN: ::c_int = 0x4305;
pub const ETH_P_DEC: ::c_int = 0x6000;
pub const ETH_P_DNA_DL: ::c_int = 0x6001;
pub const ETH_P_DNA_RC: ::c_int = 0x6002;
pub const ETH_P_DNA_RT: ::c_int = 0x6003;
pub const ETH_P_LAT: ::c_int = 0x6004;
pub const ETH_P_DIAG: ::c_int = 0x6005;
pub const ETH_P_CUST: ::c_int = 0x6006;
pub const ETH_P_SCA: ::c_int = 0x6007;
pub const ETH_P_TEB: ::c_int = 0x6558;
pub const ETH_P_RARP: ::c_int = 0x8035;
pub const ETH_P_ATALK: ::c_int = 0x809B;
pub const ETH_P_AARP: ::c_int = 0x80F3;
pub const ETH_P_8021Q: ::c_int = 0x8100;
/* see rust-lang/libc#924 pub const ETH_P_ERSPAN: ::c_int = 0x88BE;*/
pub const ETH_P_IPX: ::c_int = 0x8137;
pub const ETH_P_IPV6: ::c_int = 0x86DD;
pub const ETH_P_PAUSE: ::c_int = 0x8808;
pub const ETH_P_SLOW: ::c_int = 0x8809;
pub const ETH_P_WCCP: ::c_int = 0x883E;
pub const ETH_P_MPLS_UC: ::c_int = 0x8847;
pub const ETH_P_MPLS_MC: ::c_int = 0x8848;
pub const ETH_P_ATMMPOA: ::c_int = 0x884c;
pub const ETH_P_PPP_DISC: ::c_int = 0x8863;
pub const ETH_P_PPP_SES: ::c_int = 0x8864;
pub const ETH_P_LINK_CTL: ::c_int = 0x886c;
pub const ETH_P_ATMFATE: ::c_int = 0x8884;
pub const ETH_P_PAE: ::c_int = 0x888E;
pub const ETH_P_AOE: ::c_int = 0x88A2;
pub const ETH_P_8021AD: ::c_int = 0x88A8;
pub const ETH_P_802_EX1: ::c_int = 0x88B5;
pub const ETH_P_TIPC: ::c_int = 0x88CA;
pub const ETH_P_MACSEC: ::c_int = 0x88E5;
pub const ETH_P_8021AH: ::c_int = 0x88E7;
pub const ETH_P_MVRP: ::c_int = 0x88F5;
pub const ETH_P_1588: ::c_int = 0x88F7;
pub const ETH_P_NCSI: ::c_int = 0x88F8;
pub const ETH_P_PRP: ::c_int = 0x88FB;
pub const ETH_P_FCOE: ::c_int = 0x8906;
/* see rust-lang/libc#924 pub const ETH_P_IBOE: ::c_int = 0x8915;*/
pub const ETH_P_TDLS: ::c_int = 0x890D;
pub const ETH_P_FIP: ::c_int = 0x8914;
pub const ETH_P_80221: ::c_int = 0x8917;
pub const ETH_P_HSR: ::c_int = 0x892F;
/* see rust-lang/libc#924 pub const ETH_P_NSH: ::c_int = 0x894F;*/
pub const ETH_P_LOOPBACK: ::c_int = 0x9000;
pub const ETH_P_QINQ1: ::c_int = 0x9100;
pub const ETH_P_QINQ2: ::c_int = 0x9200;
pub const ETH_P_QINQ3: ::c_int = 0x9300;
pub const ETH_P_EDSA: ::c_int = 0xDADA;
/* see rust-lang/libc#924 pub const ETH_P_IFE: ::c_int = 0xED3E;*/
pub const ETH_P_AF_IUCV: ::c_int = 0xFBFB;
pub const ETH_P_802_3_MIN: ::c_int = 0x0600;
pub const ETH_P_802_3: ::c_int = 0x0001;
pub const ETH_P_AX25: ::c_int = 0x0002;
pub const ETH_P_ALL: ::c_int = 0x0003;
pub const ETH_P_802_2: ::c_int = 0x0004;
pub const ETH_P_SNAP: ::c_int = 0x0005;
pub const ETH_P_DDCMP: ::c_int = 0x0006;
pub const ETH_P_WAN_PPP: ::c_int = 0x0007;
pub const ETH_P_PPP_MP: ::c_int = 0x0008;
pub const ETH_P_LOCALTALK: ::c_int = 0x0009;
pub const ETH_P_CAN: ::c_int = 0x000C;
pub const ETH_P_CANFD: ::c_int = 0x000D;
pub const ETH_P_PPPTALK: ::c_int = 0x0010;
pub const ETH_P_TR_802_2: ::c_int = 0x0011;
pub const ETH_P_MOBITEX: ::c_int = 0x0015;
pub const ETH_P_CONTROL: ::c_int = 0x0016;
pub const ETH_P_IRDA: ::c_int = 0x0017;
pub const ETH_P_ECONET: ::c_int = 0x0018;
pub const ETH_P_HDLC: ::c_int = 0x0019;
pub const ETH_P_ARCNET: ::c_int = 0x001A;
pub const ETH_P_DSA: ::c_int = 0x001B;
pub const ETH_P_TRAILER: ::c_int = 0x001C;
pub const ETH_P_PHONET: ::c_int = 0x00F5;
pub const ETH_P_IEEE802154: ::c_int = 0x00F6;
pub const ETH_P_CAIF: ::c_int = 0x00F7;
pub const ETH_P_XDSA: ::c_int = 0x00F8;
/* see rust-lang/libc#924 pub const ETH_P_MAP: ::c_int = 0x00F9;*/
// end android/platform/bionic/libc/kernel/uapi/linux/if_ether.h

pub const SIOCADDRT: ::c_ulong = 0x0000890B;
pub const SIOCDELRT: ::c_ulong = 0x0000890C;
pub const SIOCGIFNAME: ::c_ulong = 0x00008910;
pub const SIOCSIFLINK: ::c_ulong = 0x00008911;
pub const SIOCGIFCONF: ::c_ulong = 0x00008912;
pub const SIOCGIFFLAGS: ::c_ulong = 0x00008913;
pub const SIOCSIFFLAGS: ::c_ulong = 0x00008914;
pub const SIOCGIFADDR: ::c_ulong = 0x00008915;
pub const SIOCSIFADDR: ::c_ulong = 0x00008916;
pub const SIOCGIFDSTADDR: ::c_ulong = 0x00008917;
pub const SIOCSIFDSTADDR: ::c_ulong = 0x00008918;
pub const SIOCGIFBRDADDR: ::c_ulong = 0x00008919;
pub const SIOCSIFBRDADDR: ::c_ulong = 0x0000891A;
pub const SIOCGIFNETMASK: ::c_ulong = 0x0000891B;
pub const SIOCSIFNETMASK: ::c_ulong = 0x0000891C;
pub const SIOCGIFMETRIC: ::c_ulong = 0x0000891D;
pub const SIOCSIFMETRIC: ::c_ulong = 0x0000891E;
pub const SIOCGIFMEM: ::c_ulong = 0x0000891F;
pub const SIOCSIFMEM: ::c_ulong = 0x00008920;
pub const SIOCGIFMTU: ::c_ulong = 0x00008921;
pub const SIOCSIFMTU: ::c_ulong = 0x00008922;
pub const SIOCSIFHWADDR: ::c_ulong = 0x00008924;
pub const SIOCGIFENCAP: ::c_ulong = 0x00008925;
pub const SIOCSIFENCAP: ::c_ulong = 0x00008926;
pub const SIOCGIFHWADDR: ::c_ulong = 0x00008927;
pub const SIOCGIFSLAVE: ::c_ulong = 0x00008929;
pub const SIOCSIFSLAVE: ::c_ulong = 0x00008930;
pub const SIOCADDMULTI: ::c_ulong = 0x00008931;
pub const SIOCDELMULTI: ::c_ulong = 0x00008932;
pub const SIOCDARP: ::c_ulong = 0x00008953;
pub const SIOCGARP: ::c_ulong = 0x00008954;
pub const SIOCSARP: ::c_ulong = 0x00008955;
pub const SIOCDRARP: ::c_ulong = 0x00008960;
pub const SIOCGRARP: ::c_ulong = 0x00008961;
pub const SIOCSRARP: ::c_ulong = 0x00008962;
pub const SIOCGIFMAP: ::c_ulong = 0x00008970;
pub const SIOCSIFMAP: ::c_ulong = 0x00008971;

// linux/module.h
pub const MODULE_INIT_IGNORE_MODVERSIONS: ::c_uint = 0x0001;
pub const MODULE_INIT_IGNORE_VERMAGIC: ::c_uint = 0x0002;

#[deprecated(
    since = "0.2.55",
    note = "ENOATTR is not available on Android; use ENODATA instead"
)]
pub const ENOATTR: ::c_int = ::ENODATA;

// linux/if_alg.h
pub const ALG_SET_KEY: ::c_int = 1;
pub const ALG_SET_IV: ::c_int = 2;
pub const ALG_SET_OP: ::c_int = 3;
pub const ALG_SET_AEAD_ASSOCLEN: ::c_int = 4;
pub const ALG_SET_AEAD_AUTHSIZE: ::c_int = 5;

pub const ALG_OP_DECRYPT: ::c_int = 0;
pub const ALG_OP_ENCRYPT: ::c_int = 1;

// uapi/linux/inotify.h
pub const IN_ACCESS: u32 = 0x0000_0001;
pub const IN_MODIFY: u32 = 0x0000_0002;
pub const IN_ATTRIB: u32 = 0x0000_0004;
pub const IN_CLOSE_WRITE: u32 = 0x0000_0008;
pub const IN_CLOSE_NOWRITE: u32 = 0x0000_0010;
pub const IN_CLOSE: u32 = (IN_CLOSE_WRITE | IN_CLOSE_NOWRITE);
pub const IN_OPEN: u32 = 0x0000_0020;
pub const IN_MOVED_FROM: u32 = 0x0000_0040;
pub const IN_MOVED_TO: u32 = 0x0000_0080;
pub const IN_MOVE: u32 = (IN_MOVED_FROM | IN_MOVED_TO);
pub const IN_CREATE: u32 = 0x0000_0100;
pub const IN_DELETE: u32 = 0x0000_0200;
pub const IN_DELETE_SELF: u32 = 0x0000_0400;
pub const IN_MOVE_SELF: u32 = 0x0000_0800;
pub const IN_UNMOUNT: u32 = 0x0000_2000;
pub const IN_Q_OVERFLOW: u32 = 0x0000_4000;
pub const IN_IGNORED: u32 = 0x0000_8000;
pub const IN_ONLYDIR: u32 = 0x0100_0000;
pub const IN_DONT_FOLLOW: u32 = 0x0200_0000;
// pub const IN_EXCL_UNLINK:   u32 = 0x0400_0000;
// pub const IN_MASK_CREATE:   u32 = 0x1000_0000;
// pub const IN_MASK_ADD:      u32 = 0x2000_0000;
pub const IN_ISDIR: u32 = 0x4000_0000;
pub const IN_ONESHOT: u32 = 0x8000_0000;
pub const IN_ALL_EVENTS: u32 = (IN_ACCESS
    | IN_MODIFY
    | IN_ATTRIB
    | IN_CLOSE_WRITE
    | IN_CLOSE_NOWRITE
    | IN_OPEN
    | IN_MOVED_FROM
    | IN_MOVED_TO
    | IN_DELETE
    | IN_CREATE
    | IN_DELETE_SELF
    | IN_MOVE_SELF);

pub const IN_CLOEXEC: ::c_int = O_CLOEXEC;
pub const IN_NONBLOCK: ::c_int = O_NONBLOCK;

pub const FUTEX_WAIT: ::c_int = 0;
pub const FUTEX_WAKE: ::c_int = 1;
pub const FUTEX_FD: ::c_int = 2;
pub const FUTEX_REQUEUE: ::c_int = 3;
pub const FUTEX_CMP_REQUEUE: ::c_int = 4;
pub const FUTEX_WAKE_OP: ::c_int = 5;
pub const FUTEX_LOCK_PI: ::c_int = 6;
pub const FUTEX_UNLOCK_PI: ::c_int = 7;
pub const FUTEX_TRYLOCK_PI: ::c_int = 8;
pub const FUTEX_WAIT_BITSET: ::c_int = 9;
pub const FUTEX_WAKE_BITSET: ::c_int = 10;
pub const FUTEX_WAIT_REQUEUE_PI: ::c_int = 11;
pub const FUTEX_CMP_REQUEUE_PI: ::c_int = 12;

pub const FUTEX_PRIVATE_FLAG: ::c_int = 128;
pub const FUTEX_CLOCK_REALTIME: ::c_int = 256;
pub const FUTEX_CMD_MASK: ::c_int = !(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME);

f! {
    pub fn CMSG_NXTHDR(mhdr: *const msghdr, cmsg: *const cmsghdr) -> *mut cmsghdr {
        let next = (cmsg as usize + super::CMSG_ALIGN((*cmsg).cmsg_len as usize)) as *mut cmsghdr;
        let max = (*mhdr).msg_control as usize + (*mhdr).msg_controllen as usize;
        if (next.offset(1)) as usize > max {
            0 as *mut cmsghdr
        } else {
            next as *mut cmsghdr
        }
    }

    pub fn CPU_ZERO(cpuset: &mut cpu_set_t) -> () {
        for slot in cpuset.__bits.iter_mut() {
            *slot = 0;
        }
    }

    pub fn CPU_SET(cpu: usize, cpuset: &mut cpu_set_t) -> () {
        let size_in___bits = 8 * ::mem::size_of_val(&cpuset.__bits[0]);
        let (idx, offset) = (cpu / size_in___bits, cpu % size_in___bits);
        cpuset.__bits[idx] |= 1 << offset;
        ()
    }

    pub fn CPU_CLR(cpu: usize, cpuset: &mut cpu_set_t) -> () {
        let size_in___bits = 8 * ::mem::size_of_val(&cpuset.__bits[0]);
        let (idx, offset) = (cpu / size_in___bits, cpu % size_in___bits);
        cpuset.__bits[idx] &= !(1 << offset);
        ()
    }

    pub fn CPU_ISSET(cpu: usize, cpuset: &cpu_set_t) -> bool {
        let size_in___bits = 8 * ::mem::size_of_val(&cpuset.__bits[0]);
        let (idx, offset) = (cpu / size_in___bits, cpu % size_in___bits);
        0 != (cpuset.__bits[idx] & (1 << offset))
    }

    pub fn CPU_EQUAL(set1: &cpu_set_t, set2: &cpu_set_t) -> bool {
        set1.__bits == set2.__bits
    }

    pub fn major(dev: ::dev_t) -> ::c_int {
        ((dev >> 8) & 0xfff) as ::c_int
    }

    pub fn minor(dev: ::dev_t) -> ::c_int {
        ((dev & 0xff) | ((dev >> 12) & 0xfff00)) as ::c_int
    }

    pub fn makedev(ma: ::c_int, mi: ::c_int) -> ::dev_t {
        let ma = ma as ::dev_t;
        let mi = mi as ::dev_t;
        ((ma & 0xfff) << 8) | (mi & 0xff) | ((mi & 0xfff00) << 12)
    }

    pub fn NLA_ALIGN(len: ::c_int) -> ::c_int {
        return ((len) + NLA_ALIGNTO - 1) & !(NLA_ALIGNTO - 1)
    }
}

extern "C" {
    pub fn getrlimit64(resource: ::c_int, rlim: *mut rlimit64) -> ::c_int;
    pub fn setrlimit64(resource: ::c_int, rlim: *const rlimit64) -> ::c_int;
    pub fn getrlimit(resource: ::c_int, rlim: *mut ::rlimit) -> ::c_int;
    pub fn setrlimit(resource: ::c_int, rlim: *const ::rlimit) -> ::c_int;
    pub fn strerror_r(
        errnum: ::c_int,
        buf: *mut c_char,
        buflen: ::size_t,
    ) -> ::c_int;

    pub fn gettimeofday(tp: *mut ::timeval, tz: *mut ::timezone) -> ::c_int;
    pub fn madvise(
        addr: *mut ::c_void,
        len: ::size_t,
        advice: ::c_int,
    ) -> ::c_int;
    pub fn ioctl(fd: ::c_int, request: ::c_int, ...) -> ::c_int;
    pub fn msync(
        addr: *mut ::c_void,
        len: ::size_t,
        flags: ::c_int,
    ) -> ::c_int;
    pub fn mprotect(
        addr: *mut ::c_void,
        len: ::size_t,
        prot: ::c_int,
    ) -> ::c_int;
    pub fn recvfrom(
        socket: ::c_int,
        buf: *mut ::c_void,
        len: ::size_t,
        flags: ::c_int,
        addr: *mut ::sockaddr,
        addrlen: *mut ::socklen_t,
    ) -> ::ssize_t;
    pub fn getnameinfo(
        sa: *const ::sockaddr,
        salen: ::socklen_t,
        host: *mut ::c_char,
        hostlen: ::size_t,
        serv: *mut ::c_char,
        servlen: ::size_t,
        flags: ::c_int,
    ) -> ::c_int;
    pub fn ptrace(request: ::c_int, ...) -> ::c_long;
    pub fn getpriority(which: ::c_int, who: ::id_t) -> ::c_int;
    pub fn setpriority(which: ::c_int, who: ::id_t, prio: ::c_int) -> ::c_int;
    pub fn __sched_cpualloc(count: ::size_t) -> *mut ::cpu_set_t;
    pub fn __sched_cpufree(set: *mut ::cpu_set_t);
    pub fn __sched_cpucount(
        setsize: ::size_t,
        set: *const cpu_set_t,
    ) -> ::c_int;
    pub fn sched_getcpu() -> ::c_int;

    pub fn utmpname(name: *const ::c_char) -> ::c_int;
    pub fn setutent();
    pub fn getutent() -> *mut utmp;

    pub fn posix_fallocate(
        fd: ::c_int,
        offset: ::off_t,
        len: ::off_t,
    ) -> ::c_int;
    pub fn signalfd(
        fd: ::c_int,
        mask: *const ::sigset_t,
        flags: ::c_int,
    ) -> ::c_int;
    pub fn syscall(num: ::c_long, ...) -> ::c_long;
    pub fn sched_getaffinity(
        pid: ::pid_t,
        cpusetsize: ::size_t,
        cpuset: *mut cpu_set_t,
    ) -> ::c_int;
    pub fn sched_setaffinity(
        pid: ::pid_t,
        cpusetsize: ::size_t,
        cpuset: *const cpu_set_t,
    ) -> ::c_int;
    pub fn epoll_create(size: ::c_int) -> ::c_int;
    pub fn epoll_create1(flags: ::c_int) -> ::c_int;
    pub fn epoll_wait(
        epfd: ::c_int,
        events: *mut ::epoll_event,
        maxevents: ::c_int,
        timeout: ::c_int,
    ) -> ::c_int;
    pub fn epoll_ctl(
        epfd: ::c_int,
        op: ::c_int,
        fd: ::c_int,
        event: *mut ::epoll_event,
    ) -> ::c_int;
    pub fn pthread_getschedparam(
        native: ::pthread_t,
        policy: *mut ::c_int,
        param: *mut ::sched_param,
    ) -> ::c_int;
    pub fn unshare(flags: ::c_int) -> ::c_int;
    pub fn umount(target: *const ::c_char) -> ::c_int;
    pub fn sched_get_priority_max(policy: ::c_int) -> ::c_int;
    pub fn tee(
        fd_in: ::c_int,
        fd_out: ::c_int,
        len: ::size_t,
        flags: ::c_uint,
    ) -> ::ssize_t;
    pub fn settimeofday(
        tv: *const ::timeval,
        tz: *const ::timezone,
    ) -> ::c_int;
    pub fn splice(
        fd_in: ::c_int,
        off_in: *mut ::loff_t,
        fd_out: ::c_int,
        off_out: *mut ::loff_t,
        len: ::size_t,
        flags: ::c_uint,
    ) -> ::ssize_t;
    pub fn eventfd(init: ::c_uint, flags: ::c_int) -> ::c_int;
    pub fn sched_rr_get_interval(pid: ::pid_t, tp: *mut ::timespec) -> ::c_int;
    pub fn sem_timedwait(
        sem: *mut sem_t,
        abstime: *const ::timespec,
    ) -> ::c_int;
    pub fn sem_getvalue(sem: *mut sem_t, sval: *mut ::c_int) -> ::c_int;
    pub fn sched_setparam(
        pid: ::pid_t,
        param: *const ::sched_param,
    ) -> ::c_int;
    pub fn setns(fd: ::c_int, nstype: ::c_int) -> ::c_int;
    pub fn swapoff(path: *const ::c_char) -> ::c_int;
    pub fn vmsplice(
        fd: ::c_int,
        iov: *const ::iovec,
        nr_segs: ::size_t,
        flags: ::c_uint,
    ) -> ::ssize_t;
    pub fn mount(
        src: *const ::c_char,
        target: *const ::c_char,
        fstype: *const ::c_char,
        flags: ::c_ulong,
        data: *const ::c_void,
    ) -> ::c_int;
    pub fn personality(persona: ::c_uint) -> ::c_int;
    pub fn prctl(option: ::c_int, ...) -> ::c_int;
    pub fn sched_getparam(pid: ::pid_t, param: *mut ::sched_param) -> ::c_int;
    pub fn ppoll(
        fds: *mut ::pollfd,
        nfds: nfds_t,
        timeout: *const ::timespec,
        sigmask: *const sigset_t,
    ) -> ::c_int;
    pub fn pthread_mutex_timedlock(
        lock: *mut pthread_mutex_t,
        abstime: *const ::timespec,
    ) -> ::c_int;
    pub fn clone(
        cb: extern "C" fn(*mut ::c_void) -> ::c_int,
        child_stack: *mut ::c_void,
        flags: ::c_int,
        arg: *mut ::c_void,
        ...
    ) -> ::c_int;
    pub fn sched_getscheduler(pid: ::pid_t) -> ::c_int;
    pub fn clock_nanosleep(
        clk_id: ::clockid_t,
        flags: ::c_int,
        rqtp: *const ::timespec,
        rmtp: *mut ::timespec,
    ) -> ::c_int;
    pub fn pthread_attr_getguardsize(
        attr: *const ::pthread_attr_t,
        guardsize: *mut ::size_t,
    ) -> ::c_int;
    pub fn sethostname(name: *const ::c_char, len: ::size_t) -> ::c_int;
    pub fn sched_get_priority_min(policy: ::c_int) -> ::c_int;
    pub fn pthread_condattr_getpshared(
        attr: *const pthread_condattr_t,
        pshared: *mut ::c_int,
    ) -> ::c_int;
    pub fn sysinfo(info: *mut ::sysinfo) -> ::c_int;
    pub fn umount2(target: *const ::c_char, flags: ::c_int) -> ::c_int;
    pub fn pthread_setschedparam(
        native: ::pthread_t,
        policy: ::c_int,
        param: *const ::sched_param,
    ) -> ::c_int;
    pub fn swapon(path: *const ::c_char, swapflags: ::c_int) -> ::c_int;
    pub fn sched_setscheduler(
        pid: ::pid_t,
        policy: ::c_int,
        param: *const ::sched_param,
    ) -> ::c_int;
    pub fn sendfile(
        out_fd: ::c_int,
        in_fd: ::c_int,
        offset: *mut off_t,
        count: ::size_t,
    ) -> ::ssize_t;
    pub fn setfsgid(gid: ::gid_t) -> ::c_int;
    pub fn setfsuid(uid: ::uid_t) -> ::c_int;
    pub fn sigsuspend(mask: *const ::sigset_t) -> ::c_int;
    #[cfg_attr(target_os = "solaris", link_name = "__posix_getgrgid_r")]
    pub fn getgrgid_r(
        gid: ::gid_t,
        grp: *mut ::group,
        buf: *mut ::c_char,
        buflen: ::size_t,
        result: *mut *mut ::group,
    ) -> ::c_int;
    #[cfg_attr(
        all(target_os = "macos", target_arch = "x86"),
        link_name = "sigaltstack$UNIX2003"
    )]
    #[cfg_attr(target_os = "netbsd", link_name = "__sigaltstack14")]
    pub fn sigaltstack(ss: *const stack_t, oss: *mut stack_t) -> ::c_int;
    pub fn sem_close(sem: *mut sem_t) -> ::c_int;
    #[cfg_attr(target_os = "solaris", link_name = "__posix_getgrnam_r")]
    pub fn getgrnam_r(
        name: *const ::c_char,
        grp: *mut ::group,
        buf: *mut ::c_char,
        buflen: ::size_t,
        result: *mut *mut ::group,
    ) -> ::c_int;
    #[cfg_attr(
        all(target_os = "macos", target_arch = "x86"),
        link_name = "pthread_sigmask$UNIX2003"
    )]
    pub fn pthread_sigmask(
        how: ::c_int,
        set: *const sigset_t,
        oldset: *mut sigset_t,
    ) -> ::c_int;
    pub fn sem_open(name: *const ::c_char, oflag: ::c_int, ...) -> *mut sem_t;
    pub fn getgrnam(name: *const ::c_char) -> *mut ::group;
    pub fn pthread_kill(thread: ::pthread_t, sig: ::c_int) -> ::c_int;
    pub fn sem_unlink(name: *const ::c_char) -> ::c_int;
    pub fn daemon(nochdir: ::c_int, noclose: ::c_int) -> ::c_int;
    #[cfg_attr(target_os = "netbsd", link_name = "__getpwnam_r50")]
    #[cfg_attr(target_os = "solaris", link_name = "__posix_getpwnam_r")]
    pub fn getpwnam_r(
        name: *const ::c_char,
        pwd: *mut passwd,
        buf: *mut ::c_char,
        buflen: ::size_t,
        result: *mut *mut passwd,
    ) -> ::c_int;
    #[cfg_attr(target_os = "netbsd", link_name = "__getpwuid_r50")]
    #[cfg_attr(target_os = "solaris", link_name = "__posix_getpwuid_r")]
    pub fn getpwuid_r(
        uid: ::uid_t,
        pwd: *mut passwd,
        buf: *mut ::c_char,
        buflen: ::size_t,
        result: *mut *mut passwd,
    ) -> ::c_int;
    #[cfg_attr(
        all(target_os = "macos", target_arch = "x86"),
        link_name = "sigwait$UNIX2003"
    )]
    #[cfg_attr(target_os = "solaris", link_name = "__posix_sigwait")]
    pub fn sigwait(set: *const sigset_t, sig: *mut ::c_int) -> ::c_int;
    pub fn pthread_atfork(
        prepare: ::Option<unsafe extern "C" fn()>,
        parent: ::Option<unsafe extern "C" fn()>,
        child: ::Option<unsafe extern "C" fn()>,
    ) -> ::c_int;
    pub fn getgrgid(gid: ::gid_t) -> *mut ::group;
    pub fn getgrouplist(
        user: *const ::c_char,
        group: ::gid_t,
        groups: *mut ::gid_t,
        ngroups: *mut ::c_int,
    ) -> ::c_int;
    pub fn initgroups(user: *const ::c_char, group: ::gid_t) -> ::c_int;
    pub fn pthread_mutexattr_getpshared(
        attr: *const pthread_mutexattr_t,
        pshared: *mut ::c_int,
    ) -> ::c_int;
    #[cfg_attr(
        all(target_os = "macos", target_arch = "x86"),
        link_name = "popen$UNIX2003"
    )]
    pub fn popen(command: *const c_char, mode: *const c_char) -> *mut ::FILE;
    pub fn faccessat(
        dirfd: ::c_int,
        pathname: *const ::c_char,
        mode: ::c_int,
        flags: ::c_int,
    ) -> ::c_int;
    pub fn pthread_create(
        native: *mut ::pthread_t,
        attr: *const ::pthread_attr_t,
        f: extern "C" fn(*mut ::c_void) -> *mut ::c_void,
        value: *mut ::c_void,
    ) -> ::c_int;
    pub fn __errno() -> *mut ::c_int;
    pub fn inotify_rm_watch(fd: ::c_int, wd: u32) -> ::c_int;
    pub fn sendmmsg(
        sockfd: ::c_int,
        msgvec: *const ::mmsghdr,
        vlen: ::c_uint,
        flags: ::c_int,
    ) -> ::c_int;
    pub fn recvmmsg(
        sockfd: ::c_int,
        msgvec: *mut ::mmsghdr,
        vlen: ::c_uint,
        flags: ::c_int,
        timeout: *const ::timespec,
    ) -> ::c_int;
    pub fn inotify_init() -> ::c_int;
    pub fn inotify_init1(flags: ::c_int) -> ::c_int;
    pub fn inotify_add_watch(
        fd: ::c_int,
        path: *const ::c_char,
        mask: u32,
    ) -> ::c_int;
}

cfg_if! {
    if #[cfg(target_pointer_width = "32")] {
        mod b32;
        pub use self::b32::*;
    } else if #[cfg(target_pointer_width = "64")] {
        mod b64;
        pub use self::b64::*;
    } else {
        // Unknown target_pointer_width
    }
}

impl siginfo_t {
    pub unsafe fn si_value(&self) -> ::sigval {
        #[repr(C)]
        struct siginfo_timer {
            _si_signo: ::c_int,
            _si_errno: ::c_int,
            _si_code: ::c_int,
            _si_tid: ::c_int,
            _si_overrun: ::c_int,
            si_sigval: ::sigval,
        }
        (*(self as *const siginfo_t as *const siginfo_timer)).si_sigval
    }
}
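// Illustrative sketch (not part of the original bindings): the CPU_* helpers
// from the f! block and sched_setaffinity above combine to pin the calling
// thread (pid 0) to CPU 0:
//
//     let mut set: cpu_set_t = unsafe { ::mem::zeroed() };
//     CPU_ZERO(&mut set);
//     CPU_SET(0, &mut set);
//     let rc = unsafe {
//         sched_setaffinity(0, ::mem::size_of::<cpu_set_t>(), &set)
//     };
//     assert_eq!(rc, 0, "errno: {}", unsafe { *__errno() });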
34.540619
78
0.641719
dbb0fa524ef5180d1489dc9e3663751a6b309c2f
779
// pretty-expanded FIXME #23616
// run-pass

#![allow(dead_code)]
#![feature(box_syntax)]

// Tests for a previous bug that occurred due to an interaction
// between struct field initialization and the auto-coercion
// from a vector to a slice. The drop glue was being invoked on
// the temporary slice with a wrong type, triggering an LLVM assert.

struct Thing1<'a> {
    baz: &'a [Box<isize>],
    bar: Box<u64>,
}

struct Thing2<'a> {
    baz: &'a [Box<isize>],
    bar: u64,
}

pub fn main() {
    let _t1_fixed = Thing1 {
        baz: &[],
        bar: box 32,
    };
    Thing1 {
        baz: &Vec::new(),
        bar: box 32,
    };
    let _t2_fixed = Thing2 {
        baz: &[],
        bar: 32,
    };
    Thing2 {
        baz: &Vec::new(),
        bar: 32,
    };
}
18.547619
68
0.557125
ac03c085b7b6cc6c3d03bbe8f742daa7677f58a2
1092
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

#![feature(rustc_attrs)]

#[rustc_object_lifetime_default]
struct A<T>(T); //~ ERROR None

#[rustc_object_lifetime_default]
struct B<'a,T>(&'a (), T); //~ ERROR None

#[rustc_object_lifetime_default]
struct C<'a,T:'a>(&'a T); //~ ERROR 'a

#[rustc_object_lifetime_default]
struct D<'a,'b,T:'a+'b>(&'a T, &'b T); //~ ERROR Ambiguous

#[rustc_object_lifetime_default]
struct E<'a,'b:'a,T:'b>(&'a T, &'b T); //~ ERROR 'b

#[rustc_object_lifetime_default]
struct F<'a,'b,T:'a,U:'b>(&'a T, &'b U); //~ ERROR 'a,'b

#[rustc_object_lifetime_default]
struct G<'a,'b,T:'a,U:'a+'b>(&'a T, &'b U); //~ ERROR 'a,Ambiguous

fn main() { }
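// Illustrative sketch (not part of the original test): the reported object
// lifetime default is what the compiler substitutes for an elided trait
// object lifetime. For instance, given `struct C<'a, T: 'a>(&'a T)` above,
// a use such as `C<'x, dyn Trait>` is treated as `C<'x, dyn Trait + 'x>`,
// which is why the expected diagnostic for `C` is `'a`.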
31.2
68
0.677656
62996bf4cbef20cbabd6ac505afbde0533ad811b
10944
//! Candidate selection. See the [rustc dev guide] for more information on how this works.
//!
//! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/traits/resolution.html#selection

use self::EvaluationResult::*;

use super::{SelectionError, SelectionResult};

use crate::ty;

use rustc_hir::def_id::DefId;
use rustc_query_system::cache::Cache;

pub type SelectionCache<'tcx> = Cache<
    ty::ConstnessAnd<ty::ParamEnvAnd<'tcx, ty::TraitRef<'tcx>>>,
    SelectionResult<'tcx, SelectionCandidate<'tcx>>,
>;

pub type EvaluationCache<'tcx> =
    Cache<ty::ParamEnvAnd<'tcx, ty::ConstnessAnd<ty::PolyTraitRef<'tcx>>>, EvaluationResult>;

/// The selection process begins by considering all impls, where
/// clauses, and so forth that might resolve an obligation. Sometimes
/// we'll be able to say definitively that (e.g.) an impl does not
/// apply to the obligation: perhaps it is defined for `usize` but the
/// obligation is for `i32`. In that case, we drop the impl out of the
/// list. But the other cases are considered *candidates*.
///
/// For selection to succeed, there must be exactly one matching
/// candidate. If the obligation is fully known, this is guaranteed
/// by coherence. However, if the obligation contains type parameters
/// or variables, there may be multiple such impls.
///
/// It is not a real problem if multiple matching impls exist because
/// of type variables - it just means the obligation isn't sufficiently
/// elaborated. In that case we report an ambiguity, and the caller can
/// try again after more type information has been gathered or report a
/// "type annotations needed" error.
///
/// However, with type parameters, this can be a real problem - type
/// parameters don't unify with regular types, but they *can* unify
/// with variables from blanket impls, and (unless we know its bounds
/// will always be satisfied) picking the blanket impl will be wrong
/// for at least *some* substitutions. To make this concrete, if we have
///
/// ```rust, ignore
/// trait AsDebug { type Out: fmt::Debug; fn debug(self) -> Self::Out; }
/// impl<T: fmt::Debug> AsDebug for T {
///     type Out = T;
///     fn debug(self) -> fmt::Debug { self }
/// }
/// fn foo<T: AsDebug>(t: T) { println!("{:?}", <T as AsDebug>::debug(t)); }
/// ```
///
/// we can't just use the impl to resolve the `<T as AsDebug>` obligation
/// -- a type from another crate (that doesn't implement `fmt::Debug`) could
/// implement `AsDebug`.
///
/// Because where-clauses match the type exactly, multiple clauses can
/// only match if there are unresolved variables, and we can mostly just
/// report this ambiguity in that case. This is still a problem - we can't
/// *do anything* with ambiguities that involve only regions. This is issue
/// #21974.
///
/// If a single where-clause matches and there are no inference
/// variables left, then it definitely matches and we can just select
/// it.
///
/// In fact, we even select the where-clause when the obligation contains
/// inference variables. This can lead to inference making "leaps of logic",
/// for example in this situation:
///
/// ```rust, ignore
/// pub trait Foo<T> { fn foo(&self) -> T; }
/// impl<T> Foo<()> for T { fn foo(&self) { } }
/// impl Foo<bool> for bool { fn foo(&self) -> bool { *self } }
///
/// pub fn foo<T>(t: T) where T: Foo<bool> {
///     println!("{:?}", <T as Foo<_>>::foo(&t));
/// }
/// fn main() { foo(false); }
/// ```
///
/// Here the obligation `<T as Foo<$0>>` can be matched by both the blanket
/// impl and the where-clause. We select the where-clause and unify `$0=bool`,
/// so the program prints "false". However, if the where-clause is omitted,
/// the blanket impl is selected, we unify `$0=()`, and the program prints
/// "()".
///
/// Exactly the same issues apply to projection and object candidates, except
/// that we can have both a projection candidate and a where-clause candidate
/// for the same obligation. In that case either would do (except that
/// different "leaps of logic" would occur if inference variables are
/// present), and we just pick the where-clause. This is, for example,
/// required for associated types to work in default impls, as the bounds
/// are visible both as projection bounds and as where-clauses from the
/// parameter environment.
#[derive(PartialEq, Eq, Debug, Clone, TypeFoldable)]
pub enum SelectionCandidate<'tcx> {
    BuiltinCandidate {
        /// `false` if there are no *further* obligations.
        has_nested: bool,
    },

    ParamCandidate(ty::ConstnessAnd<ty::PolyTraitRef<'tcx>>),
    ImplCandidate(DefId),
    AutoImplCandidate(DefId),

    /// This is a trait matching with a projected type as `Self`, and we found
    /// an applicable bound in the trait definition. The `usize` is an index
    /// into the list returned by `tcx.item_bounds`.
    ProjectionCandidate(usize),

    /// Implementation of a `Fn`-family trait by one of the anonymous types
    /// generated for an `||` expression.
    ClosureCandidate,

    /// Implementation of a `Generator` trait by one of the anonymous types
    /// generated for a generator.
    GeneratorCandidate,

    /// Implementation of a `Fn`-family trait by one of the anonymous
    /// types generated for a fn pointer type (e.g., `fn(int) -> int`)
    FnPointerCandidate,

    /// Builtin implementation of `DiscriminantKind`.
    DiscriminantKindCandidate,

    /// Builtin implementation of `Pointee`.
    PointeeCandidate,

    TraitAliasCandidate(DefId),

    /// Matching `dyn Trait` with a supertrait of `Trait`. The index is the
    /// position in the iterator returned by
    /// `rustc_infer::traits::util::supertraits`.
    ObjectCandidate(usize),

    /// Perform trait upcasting coercion of `dyn Trait` to a supertrait of `Trait`.
    /// The index is the position in the iterator returned by
    /// `rustc_infer::traits::util::supertraits`.
    TraitUpcastingUnsizeCandidate(usize),

    BuiltinObjectCandidate,

    BuiltinUnsizeCandidate,
}

/// The result of trait evaluation. The order is important
/// here as the evaluation of a list is the maximum of the
/// evaluations.
///
/// The evaluation results are ordered:
///     - `EvaluatedToOk` implies `EvaluatedToOkModuloRegions`
///       implies `EvaluatedToAmbig` implies `EvaluatedToUnknown`
///     - `EvaluatedToErr` implies `EvaluatedToRecur`
///     - the "union" of evaluation results is equal to their maximum -
///     all the "potential success" candidates can potentially succeed,
///     so they are noops when unioned with a definite error, and within
///     the categories it's easy to see that the unions are correct.
#[derive(Copy, Clone, Debug, PartialOrd, Ord, PartialEq, Eq, HashStable)]
pub enum EvaluationResult {
    /// Evaluation successful.
    EvaluatedToOk,
    /// Evaluation successful, but there were unevaluated region obligations.
    EvaluatedToOkModuloRegions,
    /// Evaluation is known to be ambiguous -- it *might* hold for some
    /// assignment of inference variables, but it might not.
    ///
    /// While this has the same meaning as `EvaluatedToUnknown` -- we can't
    /// know whether this obligation holds or not -- it is the result we
    /// would get with an empty stack, and therefore is cacheable.
    EvaluatedToAmbig,
    /// Evaluation failed because of recursion involving inference
    /// variables. We are somewhat imprecise there, so we don't actually
    /// know the real result.
    ///
    /// This can't be trivially cached for the same reason as `EvaluatedToRecur`.
    EvaluatedToUnknown,
    /// Evaluation failed because we encountered an obligation we are already
    /// trying to prove on this branch.
    ///
    /// We know this branch can't be a part of a minimal proof-tree for
    /// the "root" of our cycle, because then we could cut out the recursion
    /// and maintain a valid proof tree. However, this does not mean
    /// that all the obligations on this branch do not hold -- it's possible
    /// that we entered this branch "speculatively", and that there
    /// might be some other way to prove this obligation that does not
    /// go through this cycle -- so we can't cache this as a failure.
    ///
    /// For example, suppose we have this:
    ///
    /// ```rust,ignore (pseudo-Rust)
    /// pub trait Trait { fn xyz(); }
    /// // This impl is "useless", but we can still have
    /// // an `impl Trait for SomeUnsizedType` somewhere.
    /// impl<T: Trait + Sized> Trait for T { fn xyz() {} }
    ///
    /// pub fn foo<T: Trait + ?Sized>() {
    ///     <T as Trait>::xyz();
    /// }
    /// ```
    ///
    /// When checking `foo`, we have to prove `T: Trait`. This basically
    /// translates into this:
    ///
    /// ```plain,ignore
    /// (T: Trait + Sized →_\impl T: Trait), T: Trait ⊢ T: Trait
    /// ```
    ///
    /// When we try to prove it, we first try the first option, which
    /// recurses. This shows us that the impl is "useless" -- it won't
    /// tell us that `T: Trait` unless it already implemented `Trait`
    /// by some other means. However, that does not mean that `T: Trait` does
    /// not hold, because of the bound (which can indeed be satisfied
    /// by `SomeUnsizedType` from another crate).
    //
    // FIXME: when an `EvaluatedToRecur` goes past its parent root, we
    // ought to convert it to an `EvaluatedToErr`, because we know
    // there definitely isn't a proof tree for that obligation. Not
    // doing so is still sound -- there isn't any proof tree, so the
    // branch still can't be a part of a minimal one -- but does not re-enable caching.
    EvaluatedToRecur,
    /// Evaluation failed.
    EvaluatedToErr,
}

impl EvaluationResult {
    /// Returns `true` if this evaluation result is known to apply, even
    /// considering outlives constraints.
    pub fn must_apply_considering_regions(self) -> bool {
        self == EvaluatedToOk
    }

    /// Returns `true` if this evaluation result is known to apply, ignoring
    /// outlives constraints.
    pub fn must_apply_modulo_regions(self) -> bool {
        self <= EvaluatedToOkModuloRegions
    }

    pub fn may_apply(self) -> bool {
        match self {
            EvaluatedToOk | EvaluatedToOkModuloRegions | EvaluatedToAmbig | EvaluatedToUnknown => {
                true
            }
            EvaluatedToErr | EvaluatedToRecur => false,
        }
    }

    pub fn is_stack_dependent(self) -> bool {
        match self {
            EvaluatedToUnknown | EvaluatedToRecur => true,
            EvaluatedToOk | EvaluatedToOkModuloRegions | EvaluatedToAmbig | EvaluatedToErr => false,
        }
    }
}

/// Indicates that trait evaluation caused overflow.
#[derive(Copy, Clone, Debug, PartialEq, Eq, HashStable)]
pub struct OverflowError;

impl<'tcx> From<OverflowError> for SelectionError<'tcx> {
    fn from(OverflowError: OverflowError) -> SelectionError<'tcx> {
        SelectionError::Overflow
    }
}
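// Editor's hedged sketch, not part of the compiler source: the doc comment
// on `EvaluationResult` says the evaluation of a list is the maximum of the
// individual evaluations. Assuming only the derived `Ord` above, a
// hypothetical helper could realize that rule as a fold; starting from
// `EvaluatedToOk` (the least variant) leaves an empty list vacuously ok.
fn evaluate_list(results: impl IntoIterator<Item = EvaluationResult>) -> EvaluationResult {
    results
        .into_iter()
        .fold(EvaluationResult::EvaluatedToOk, std::cmp::Ord::max)
}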
40.835821
100
0.673611
d5461bd907a217b6bf449bdfa97bbe97639eaf7b
3,356
use std::pin::Pin;
use std::task::{Context, Poll};

use pin_project_lite::pin_project;

use crate::actor::Actor;
use crate::fut::{ActorFuture, ActorStream, IntoActorFuture};

pin_project! {
    /// A future used to collect all the results of a stream into one generic type.
    ///
    /// This future is returned by the `ActorStream::fold` method.
    #[derive(Debug)]
    #[must_use = "streams do nothing unless polled"]
    pub struct StreamFold<S, F, Fut, T>
    where
        Fut: IntoActorFuture,
    {
        #[pin]
        stream: S,
        f: F,
        #[pin]
        state: State<T, Fut::Future>,
    }
}

pin_project! {
    #[project = FoldStateProj]
    #[derive(Debug)]
    enum State<T, F>
    where
        F: ActorFuture,
    {
        /// Placeholder state when doing work
        Empty,

        /// Ready to process the next stream item; current accumulator is the `T`
        Ready { res: Option<T> },

        /// Working on a future to process the previous stream item
        Processing { #[pin] fut: F },
    }
}

pub fn new<S, F, Fut, T>(stream: S, f: F, t: T) -> StreamFold<S, F, Fut, T>
where
    S: ActorStream,
    F: FnMut(T, S::Item, &mut S::Actor, &mut <S::Actor as Actor>::Context) -> Fut,
    Fut: IntoActorFuture<Output = T, Actor = S::Actor>,
{
    StreamFold {
        stream,
        f,
        state: State::Ready { res: Some(t) },
    }
}

impl<S, F, Fut, T> ActorFuture for StreamFold<S, F, Fut, T>
where
    S: ActorStream,
    F: FnMut(T, S::Item, &mut S::Actor, &mut <S::Actor as Actor>::Context) -> Fut,
    Fut: IntoActorFuture<Output = T, Actor = S::Actor>,
    Fut::Future: ActorFuture,
{
    type Output = T;
    type Actor = S::Actor;

    fn poll(
        mut self: Pin<&mut Self>,
        act: &mut S::Actor,
        ctx: &mut <S::Actor as Actor>::Context,
        task: &mut Context<'_>,
    ) -> Poll<T> {
        loop {
            let this = self.as_mut().project();

            match this.state.project() {
                FoldStateProj::Ready { res } => {
                    match this.stream.poll_next(act, ctx, task) {
                        Poll::Ready(Some(e)) => {
                            let future = (this.f)(res.take().unwrap(), e, act, ctx);
                            let fut = future.into_future();
                            self.as_mut().project().state.set(State::Processing { fut });
                        }
                        Poll::Ready(None) => {
                            return {
                                let res = res.take().unwrap();
                                self.project().state.set(State::Empty);
                                Poll::Ready(res)
                            }
                        }
                        Poll::Pending => return Poll::Pending,
                    }
                }
                FoldStateProj::Processing { fut } => match fut.poll(act, ctx, task) {
                    Poll::Ready(state) => self
                        .as_mut()
                        .project()
                        .state
                        .set(State::Ready { res: Some(state) }),
                    Poll::Pending => return Poll::Pending,
                },
                FoldStateProj::Empty => panic!("cannot poll Fold twice"),
            }
        }
    }
}
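// Editor's hedged sketch, not part of this crate: the `State` machine above
// is the asynchronous, actor-aware version of the ordinary fold below.
// `Ready { res }` corresponds to holding the accumulator between items, and
// `Processing { fut }` corresponds to the step where the future returned by
// `f` is still being driven to completion.
fn fold_blocking<T, I>(
    items: impl IntoIterator<Item = I>,
    init: T,
    mut f: impl FnMut(T, I) -> T,
) -> T {
    let mut acc = init; // the `Ready { res: Some(acc) }` state
    for item in items {
        // one `Processing` round, resolved synchronously here
        acc = f(acc, item);
    }
    acc // returned once the stream yields `Poll::Ready(None)`
}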
29.964286
89
0.466627
2fb7c2c75c0d4cb7cb912ad7ed9bfc80aec84834
324
extern crate diesel; use diesel::expression::AsExpression; #[derive(AsExpression)] #[diesel(sql_type)] struct Lol; #[derive(AsExpression)] #[diesel(sql_type(Foo))] struct Lol2; #[derive(AsExpression)] #[diesel(sql_type = "foo")] struct Lol3; #[derive(AsExpression)] #[diesel(sql_type = 1omg)] struct Lol4; fn main() {}
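// Editor's hedged counterpart, an assumption rather than part of this
// compile-fail test: the rejected spellings above suggest the derive expects
// a bare type path, as in `sql_type = SomeSqlType`. A presumably well-formed
// use might look like the lines below, left as a comment so the error cases
// this test exercises stay unchanged; `Integer` is purely illustrative.
//
// #[derive(AsExpression)]
// #[diesel(sql_type = diesel::sql_types::Integer)]
// struct Ok1(i32);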
15.428571
37
0.712963
9c3d1d04d0faf5a7f99802f67b9607edd3dd49c9
4,991
// Copyright 2018 The HuggingFace Inc. team.
// Copyright 2019 Guillaume Becquin
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//     http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::preprocessing::error::TokenizerError;
use std::collections::HashMap;
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::mem::ManuallyDrop;
use std::ptr;

#[derive(Eq, PartialEq, Hash, Clone, Debug)]
pub struct BpePairRef<'a> {
    pub byte_1: &'a String,
    pub byte_2: &'a String,
}

#[derive(Debug)]
pub struct BpePairVocab {
    pub values: HashMap<(String, String), i64>,
}

impl BpePairVocab {
    pub fn from_file(path: &str) -> Result<BpePairVocab, TokenizerError> {
        let f = File::open(path).map_err(|e| {
            TokenizerError::FileNotFound(format!("{} vocabulary file not found :{}", path, e))
        })?;
        let br = BufReader::new(f);
        let mut data = HashMap::new();
        let mut index = 0;
        for line in br.lines().skip(1) {
            let line = match line {
                Ok(value) => value,
                Err(e) => {
                    return Err(TokenizerError::VocabularyParsingError(e.to_string()));
                }
            };
            let tuple: Vec<String> = line.trim().split(' ').map(|v| v.to_owned()).collect();
            if tuple.len() > 1 {
                data.insert((tuple[0].clone(), tuple[1].clone()), index);
                index += 1;
            }
        }

        Ok(BpePairVocab { values: data })
    }

    /// Looks up the id for a byte pair without allocating a fresh key.
    pub fn byte_pair_to_id(&self, byte_pair: &BpePairRef) -> Option<&i64> {
        unsafe {
            let byte_1 = byte_pair.byte_1;
            let byte_2 = byte_pair.byte_2;
            // SAFETY: `ptr::read` makes bitwise copies of the two `String`s so
            // the map can be queried with a `(String, String)` key without
            // cloning the underlying buffers. The copies alias the originals,
            // so they must never be dropped: wrapping the tuple in
            // `ManuallyDrop` suppresses its destructor, and only a shared
            // reference to it escapes this block.
            let k = (ptr::read(byte_1), ptr::read(byte_2));
            let k = ManuallyDrop::new(k);
            let v = self.values.get(&k);
            v
        }
    }
}

//==============================
// Unit tests
//==============================
#[cfg(test)]
mod tests {
    extern crate anyhow;
    use super::*;
    use std::io::Write;

    #[test]
    fn test_create_pair_vocab() {
        // Given
        let values: HashMap<(String, String), i64> = HashMap::new();

        // When
        let pair_vocab = BpePairVocab {
            values: values.clone(),
        };

        // Then
        assert_eq!(pair_vocab.values, values);
    }

    #[test]
    fn test_create_pair_vocab_from_file() -> anyhow::Result<()> {
        // Given
        let mut merges_file = tempfile::NamedTempFile::new()?;
        write!(merges_file, "#version: 0.1\n t h\na n\ni n\nth e</w>")?;
        let path = merges_file.into_temp_path();
        let target_values: HashMap<(String, String), i64> = [
            (("t".to_owned(), "h".to_owned()), 0),
            (("a".to_owned(), "n".to_owned()), 1),
            (("i".to_owned(), "n".to_owned()), 2),
            (("th".to_owned(), "e</w>".to_owned()), 3),
        ]
        .iter()
        .cloned()
        .collect();

        // When
        let pair_vocab = BpePairVocab::from_file(path.to_path_buf().to_str().unwrap())?;

        // Then
        assert_eq!(pair_vocab.values, target_values);
        drop(path);
        Ok(())
    }

    #[test]
    fn test_encode_byte_pairs() -> anyhow::Result<()> {
        // Given
        let mut merges_file = tempfile::NamedTempFile::new()?;
        write!(merges_file, "#version: 0.1\n t h\na n\ni n\nth e</w>")?;
        let path = merges_file.into_temp_path();
        let pair_vocab = BpePairVocab::from_file(path.to_path_buf().to_str().unwrap())?;

        // Given
        let t = String::from("t");
        let h = String::from("h");
        let a = String::from("a");
        let i = String::from("i");
        let n = String::from("n");
        let th = String::from("th");
        let e_eow = String::from("e</w>");

        let test_tuples = [
            ((t.clone(), h.clone()), Some(&(0 as i64))),
            ((a.clone(), n.clone()), Some(&(1 as i64))),
            ((i.clone(), n.clone()), Some(&(2 as i64))),
            ((th.clone(), e_eow.clone()), Some(&(3 as i64))),
            ((a.clone(), e_eow.clone()), None),
        ];

        // When & Then
        for (input, expected_output) in &test_tuples {
            assert_eq!(
                pair_vocab.byte_pair_to_id(&BpePairRef {
                    byte_1: &input.0,
                    byte_2: &input.1
                }),
                *expected_output
            );
        }

        drop(path);
        Ok(())
    }
}
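// Editor's hedged sketch, not part of the crate: the `unsafe` lookup in
// `byte_pair_to_id` exists to avoid allocating a `(String, String)` key on
// every query. The straightforward safe equivalent below clones both halves
// of the pair per call, which is exactly what the `ManuallyDrop`/`ptr::read`
// trick sidesteps.
fn byte_pair_to_id_safe<'a>(
    values: &'a std::collections::HashMap<(String, String), i64>,
    byte_1: &str,
    byte_2: &str,
) -> Option<&'a i64> {
    // Two heap allocations per lookup, traded for being trivially sound.
    values.get(&(byte_1.to_owned(), byte_2.to_owned()))
}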
31.99359
94
0.521138
f519c710a85f55140bef52308f17db424997ce0a
1,389
//! [POST /_matrix/media/r0/upload](https://matrix.org/docs/spec/client_server/r0.6.0#post-matrix-media-r0-upload) use ruma_api::ruma_api; ruma_api! { metadata: { description: "Upload content to the media store.", method: POST, name: "create_media_content", path: "/_matrix/media/r0/upload", rate_limited: true, authentication: AccessToken, } request: { /// The name of the file being uploaded. #[ruma_api(query)] #[serde(skip_serializing_if = "Option::is_none")] pub filename: Option<&'a str>, /// The content type of the file being uploaded. // TODO: This should be optional. #[ruma_api(header = CONTENT_TYPE)] pub content_type: &'a str, /// The file contents to upload. #[ruma_api(raw_body)] pub file: Vec<u8>, } response: { /// The MXC URI for the uploaded content. pub content_uri: String, } error: crate::Error } impl<'a> Request<'a> { /// Creates a new `Request` with the given content type and file contents. pub fn new(content_type: &'a str, file: Vec<u8>) -> Self { Self { filename: None, content_type, file } } } impl Response { /// Creates a new `Response` with the given MXC URI. pub fn new(content_uri: String) -> Self { Self { content_uri } } }
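// Editor's hedged usage sketch, assuming the `Request` and `Response` types
// generated by the `ruma_api!` macro above; the content type, bytes, and
// filename are placeholders.
fn example_upload_request() -> Request<'static> {
    let mut request = Request::new("text/plain", b"hello world".to_vec());
    request.filename = Some("hello.txt");
    request
}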
26.711538
114
0.594672