//! HAL abstractions for USART/Serial
//!
//! Check the documentation of [`Usart`] for details.

use core::cmp::Ordering;
use core::marker;

use void::ResultVoidExt;

use crate::port;

/// Representation of a USART baudrate
///
/// Precalculated parameters for configuring a certain USART baudrate.
#[derive(Debug, Clone, Copy)]
pub struct Baudrate<CLOCK> {
    /// Value of the `UBRR#` register
    pub ubrr: u16,
    /// Value of the `U2X#` bit
    pub u2x: bool,
    /// The baudrate calculation depends on the configured clock rate, thus a `CLOCK` generic
    /// parameter is needed.
    pub _clock: marker::PhantomData<CLOCK>,
}

impl<CLOCK: crate::clock::Clock> PartialEq for Baudrate<CLOCK> {
    fn eq(&self, other: &Self) -> bool {
        self.compare_value() == other.compare_value()
    }
}

impl<CLOCK: crate::clock::Clock> Eq for Baudrate<CLOCK> {}

impl<CLOCK: crate::clock::Clock> PartialOrd for Baudrate<CLOCK> {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.compare_value().cmp(&other.compare_value()))
    }
}

impl<CLOCK: crate::clock::Clock> Ord for Baudrate<CLOCK> {
    fn cmp(&self, other: &Self) -> Ordering {
        self.compare_value().cmp(&other.compare_value())
    }
}

impl<CLOCK: crate::clock::Clock> From<u32> for Baudrate<CLOCK> {
    fn from(baud: u32) -> Self {
        Baudrate::new(baud)
    }
}

impl<CLOCK: crate::clock::Clock> Baudrate<CLOCK> {
    /// Calculate parameters for a certain baudrate at a certain `CLOCK` speed.
    pub fn new(baud: u32) -> Baudrate<CLOCK> {
        let mut ubrr = (CLOCK::FREQ / 4 / baud - 1) / 2;
        let mut u2x = true;
        debug_assert!(ubrr <= u16::MAX as u32);
        if ubrr > 4095 {
            u2x = false;
            ubrr = (CLOCK::FREQ / 8 / baud - 1) / 2;
        }

        Baudrate {
            ubrr: ubrr as u16,
            u2x,
            _clock: ::core::marker::PhantomData,
        }
    }

    /// Construct a `Baudrate` from given `UBRR#` and `U2X#` values.
    ///
    /// This provides exact control over the resulting clock speed.
    pub fn with_exact(u2x: bool, ubrr: u16) -> Baudrate<CLOCK> {
        Baudrate {
            ubrr,
            u2x,
            _clock: ::core::marker::PhantomData,
        }
    }

    fn compare_value(&self) -> u32 {
        if self.u2x {
            8 * (self.ubrr as u32 + 1)
        } else {
            16 * (self.ubrr as u32 + 1)
        }
    }
}

/// Provide an `into_baudrate()` method for integers.
///
/// This extension trait allows conveniently initializing a baudrate by using
///
/// ```
/// let mut serial = arduino_uno::Serial::new(
///     dp.USART0,
///     pins.d0,
///     pins.d1.into_output(&mut pins.ddr),
///     57600.into_baudrate(),
/// );
/// ```
///
/// instead of having to call [`Baudrate::new(57600)`](Baudrate::new).
pub trait BaudrateExt {
    /// Calculate baudrate parameters from this number.
    fn into_baudrate<CLOCK: crate::clock::Clock>(self) -> Baudrate<CLOCK>;
}

impl BaudrateExt for u32 {
    fn into_baudrate<CLOCK: crate::clock::Clock>(self) -> Baudrate<CLOCK> {
        Baudrate::new(self)
    }
}

/// Same as [`BaudrateExt`] but accounts for an erratum of certain Arduino boards.
///
/// The affected boards where this trait should be used instead are:
///
/// - Duemilanove
/// - Uno
/// - Mega 2560
pub trait BaudrateArduinoExt {
    /// Calculate baudrate parameters from this number (with Arduino errata).
    fn into_baudrate<CLOCK: crate::clock::Clock>(self) -> Baudrate<CLOCK>;
}

impl BaudrateArduinoExt for u32 {
    fn into_baudrate<CLOCK: crate::clock::Clock>(self) -> Baudrate<CLOCK> {
        let br = Baudrate::new(self);

        // hardcoded exception for 57600 for compatibility with the bootloader
        // shipped with the Duemilanove and previous boards and the firmware
        // on the 8U2 on the Uno and Mega 2560.
        //
        // https://github.com/arduino/ArduinoCore-avr/blob/3055c1efa3c6980c864f661e6c8cc5d5ac773af4/cores/arduino/HardwareSerial.cpp#L123-L132
        if CLOCK::FREQ == 16_000_000 && br.ubrr == 34 && br.u2x {
            // (CLOCK::FREQ / 8 / 57600 - 1) / 2 == 16
            Baudrate::with_exact(false, 16)
        } else {
            br
        }
    }
}

/// Events/Interrupts for USART peripherals
#[repr(u8)]
pub enum Event {
    /// A complete byte was received.
    ///
    /// Corresponds to the `USART_RX` or `USART#_RX` interrupt. Please refer to the datasheet for
    /// your MCU for details.
    RxComplete,

    /// All data from the USART data register was transmitted.
    ///
    /// Corresponds to the `USART_UDRE` or `USART#_UDRE` interrupt. Please refer to the datasheet
    /// for your MCU for details.
    DataRegisterEmpty,
}

/// Internal trait for low-level USART peripherals.
///
/// This trait defines the common interface for all USART peripheral variants. It is used as an
/// intermediate abstraction on top of which the [`Usart`] API is built. **Prefer using the
/// [`Usart`] API instead of this trait.**
pub trait UsartOps<H, RX, TX> {
    /// Enable & initialize this USART peripheral to the given baudrate.
    ///
    /// **Warning**: This is a low-level method and should not be called directly from user code.
    fn raw_init<CLOCK>(&mut self, baudrate: Baudrate<CLOCK>);
    /// Disable this USART peripheral such that the pins can be used for other purposes again.
    ///
    /// **Warning**: This is a low-level method and should not be called directly from user code.
    fn raw_deinit(&mut self);

    /// Flush all remaining data in the TX buffer.
    ///
    /// This operation must be non-blocking and return [`nb::Error::WouldBlock`] if not all data
    /// was flushed yet.
    ///
    /// **Warning**: This is a low-level method and should not be called directly from user code.
    fn raw_flush(&mut self) -> nb::Result<(), void::Void>;
    /// Write a byte to the TX buffer.
    ///
    /// This operation must be non-blocking and return [`nb::Error::WouldBlock`] until the byte is
    /// enqueued. The operation should not wait for the byte to have actually been sent.
    ///
    /// **Warning**: This is a low-level method and should not be called directly from user code.
    fn raw_write(&mut self, byte: u8) -> nb::Result<(), void::Void>;
    /// Read a byte from the RX buffer.
    ///
    /// This operation must be non-blocking and return [`nb::Error::WouldBlock`] if no incoming
    /// byte is available.
    ///
    /// **Warning**: This is a low-level method and should not be called directly from user code.
    fn raw_read(&mut self) -> nb::Result<u8, void::Void>;
    /// Enable/Disable a certain interrupt.
    ///
    /// **Warning**: This is a low-level method and should not be called directly from user code.
    fn raw_interrupt(&mut self, event: Event, state: bool);
}

/// USART/Serial driver
///
/// # Example
/// (This example is taken from Arduino Uno)
/// ```
/// let dp = arduino_uno::Peripherals::take().unwrap();
/// let mut pins = arduino_uno::Pins::new(dp.PORTB, dp.PORTC, dp.PORTD);
/// let mut serial = arduino_uno::Serial::new(
///     dp.USART0,
///     pins.d0,
///     pins.d1.into_output(&mut pins.ddr),
///     57600.into_baudrate(),
/// );
///
/// ufmt::uwriteln!(&mut serial, "Hello from Arduino!\r").void_unwrap();
///
/// loop {
///     let b = nb::block!(serial.read()).void_unwrap();
///     ufmt::uwriteln!(&mut serial, "Got {}!\r", b).void_unwrap();
/// }
/// ```
pub struct Usart<H, USART: UsartOps<H, RX, TX>, RX, TX, CLOCK> {
    p: USART,
    rx: RX,
    tx: TX,
    _clock: marker::PhantomData<CLOCK>,
    _h: marker::PhantomData<H>,
}

impl<H, USART, RXPIN, TXPIN, CLOCK>
    Usart<
        H,
        USART,
        port::Pin<port::mode::Input, RXPIN>,
        port::Pin<port::mode::Output, TXPIN>,
        CLOCK,
    >
where
    USART: UsartOps<H, port::Pin<port::mode::Input, RXPIN>, port::Pin<port::mode::Output, TXPIN>>,
    RXPIN: port::PinOps,
    TXPIN: port::PinOps,
{
    /// Initialize a USART peripheral on the given pins.
    ///
    /// Note that the RX and TX pins are hardwired for each USART peripheral and you *must* pass
    /// the correct ones. This is enforced at compile time.
    pub fn new<IMODE: port::mode::InputMode>(
        p: USART,
        rx: port::Pin<port::mode::Input<IMODE>, RXPIN>,
        tx: port::Pin<port::mode::Output, TXPIN>,
        baudrate: Baudrate<CLOCK>,
    ) -> Self {
        let mut usart = Self {
            p,
            rx: rx.forget_imode(),
            tx,
            _clock: marker::PhantomData,
            _h: marker::PhantomData,
        };
        usart.p.raw_init(baudrate);
        usart
    }
}

impl<H, USART: UsartOps<H, RX, TX>, RX, TX, CLOCK> Usart<H, USART, RX, TX, CLOCK> {
    /// Deinitialize/disable this peripheral and release the pins.
    pub fn release(mut self) -> (USART, RX, TX) {
        self.p.raw_deinit();
        (self.p, self.rx, self.tx)
    }

    /// Block until all remaining data has been transmitted.
    pub fn flush(&mut self) {
        nb::block!(self.p.raw_flush()).void_unwrap()
    }

    /// Transmit a byte.
    ///
    /// This method will block until the byte has been enqueued for transmission but **not** until
    /// it was entirely sent.
    pub fn write_byte(&mut self, byte: u8) {
        nb::block!(self.p.raw_write(byte)).void_unwrap()
    }

    /// Receive a byte.
    ///
    /// This method will block until a byte could be received.
    pub fn read_byte(&mut self) -> u8 {
        nb::block!(self.p.raw_read()).void_unwrap()
    }

    /// Enable the interrupt for [`Event`].
    pub fn listen(&mut self, event: Event) {
        self.p.raw_interrupt(event, true);
    }

    /// Disable the interrupt for [`Event`].
    pub fn unlisten(&mut self, event: Event) {
        self.p.raw_interrupt(event, false);
    }

    /// Split this USART into a [`UsartReader`] and a [`UsartWriter`].
    ///
    /// This allows concurrently receiving and transmitting data from different contexts.
    pub fn split(
        self,
    ) -> (
        UsartReader<H, USART, RX, TX, CLOCK>,
        UsartWriter<H, USART, RX, TX, CLOCK>,
    ) {
        (
            UsartReader {
                p: unsafe { core::ptr::read(&self.p) },
                rx: self.rx,
                _tx: marker::PhantomData,
                _clock: marker::PhantomData,
                _h: marker::PhantomData,
            },
            UsartWriter {
                p: self.p,
                tx: self.tx,
                _rx: marker::PhantomData,
                _clock: marker::PhantomData,
                _h: marker::PhantomData,
            },
        )
    }
}

impl<H, USART: UsartOps<H, RX, TX>, RX, TX, CLOCK> ufmt::uWrite for Usart<H, USART, RX, TX, CLOCK> {
    type Error = void::Void;

    fn write_str(&mut self, s: &str) -> Result<(), Self::Error> {
        for b in s.as_bytes().iter() {
            self.write_byte(*b);
        }
        Ok(())
    }
}

impl<H, USART: UsartOps<H, RX, TX>, RX, TX, CLOCK> hal::serial::Write<u8>
    for Usart<H, USART, RX, TX, CLOCK>
{
    type Error = void::Void;

    fn write(&mut self, byte: u8) -> nb::Result<(), Self::Error> {
        self.p.raw_write(byte)
    }

    fn flush(&mut self) -> nb::Result<(), Self::Error> {
        self.p.raw_flush()
    }
}

impl<H, USART: UsartOps<H, RX, TX>, RX, TX, CLOCK> hal::serial::Read<u8>
    for Usart<H, USART, RX, TX, CLOCK>
{
    type Error = void::Void;

    fn read(&mut self) -> nb::Result<u8, Self::Error> {
        self.p.raw_read()
    }
}

/// Writer half of a [`Usart`] peripheral.
///
/// Created by calling [`Usart::split`]. Splitting a peripheral into reader and writer allows
/// concurrently receiving and transmitting data from different contexts.
///
/// The writer half most notably implements [`embedded_hal::serial::Write`] and [`ufmt::uWrite`]
/// for transmitting data.
pub struct UsartWriter<H, USART: UsartOps<H, RX, TX>, RX, TX, CLOCK> {
    p: USART,
    tx: TX,
    _rx: marker::PhantomData<RX>,
    _clock: marker::PhantomData<CLOCK>,
    _h: marker::PhantomData<H>,
}

/// Reader half of a [`Usart`] peripheral.
///
/// Created by calling [`Usart::split`]. Splitting a peripheral into reader and writer allows
/// concurrently receiving and transmitting data from different contexts.
///
/// The reader half most notably implements [`embedded_hal::serial::Read`] for receiving data.
pub struct UsartReader<H, USART: UsartOps<H, RX, TX>, RX, TX, CLOCK> {
    p: USART,
    rx: RX,
    _tx: marker::PhantomData<TX>,
    _clock: marker::PhantomData<CLOCK>,
    _h: marker::PhantomData<H>,
}

impl<H, USART: UsartOps<H, RX, TX>, RX, TX, CLOCK> UsartWriter<H, USART, RX, TX, CLOCK> {
    /// Merge this `UsartWriter` with a [`UsartReader`] back into a single [`Usart`] peripheral.
    pub fn reunite(
        self,
        other: UsartReader<H, USART, RX, TX, CLOCK>,
    ) -> Usart<H, USART, RX, TX, CLOCK> {
        Usart {
            p: self.p,
            rx: other.rx,
            tx: self.tx,
            _clock: marker::PhantomData,
            _h: marker::PhantomData,
        }
    }
}

impl<H, USART: UsartOps<H, RX, TX>, RX, TX, CLOCK> UsartReader<H, USART, RX, TX, CLOCK> {
    /// Merge this `UsartReader` with a [`UsartWriter`] back into a single [`Usart`] peripheral.
    pub fn reunite(
        self,
        other: UsartWriter<H, USART, RX, TX, CLOCK>,
    ) -> Usart<H, USART, RX, TX, CLOCK> {
        Usart {
            p: self.p,
            rx: self.rx,
            tx: other.tx,
            _clock: marker::PhantomData,
            _h: marker::PhantomData,
        }
    }
}

impl<H, USART: UsartOps<H, RX, TX>, RX, TX, CLOCK> ufmt::uWrite
    for UsartWriter<H, USART, RX, TX, CLOCK>
{
    type Error = void::Void;

    fn write_str(&mut self, s: &str) -> Result<(), Self::Error> {
        for b in s.as_bytes().iter() {
            nb::block!(self.p.raw_write(*b)).void_unwrap()
        }
        Ok(())
    }
}

impl<H, USART: UsartOps<H, RX, TX>, RX, TX, CLOCK> hal::serial::Write<u8>
    for UsartWriter<H, USART, RX, TX, CLOCK>
{
    type Error = void::Void;

    fn write(&mut self, byte: u8) -> nb::Result<(), Self::Error> {
        self.p.raw_write(byte)
    }

    fn flush(&mut self) -> nb::Result<(), Self::Error> {
        self.p.raw_flush()
    }
}

impl<H, USART: UsartOps<H, RX, TX>, RX, TX, CLOCK> hal::serial::Read<u8>
    for UsartReader<H, USART, RX, TX, CLOCK>
{
    type Error = void::Void;

    fn read(&mut self) -> nb::Result<u8, Self::Error> {
        self.p.raw_read()
    }
}

#[macro_export]
macro_rules! impl_usart_traditional {
    (
        hal: $HAL:ty,
        peripheral: $USART:ty,
        register_suffix: $n:expr,
        rx: $rxpin:ty,
        tx: $txpin:ty,
    ) => {
        $crate::paste::paste! {
            impl $crate::usart::UsartOps<
                $HAL,
                $crate::port::Pin<$crate::port::mode::Input, $rxpin>,
                $crate::port::Pin<$crate::port::mode::Output, $txpin>,
            > for $USART {
                fn raw_init<CLOCK>(&mut self, baudrate: $crate::usart::Baudrate<CLOCK>) {
                    self.[<ubrr $n>].write(|w| unsafe { w.bits(baudrate.ubrr) });
                    self.[<ucsr $n a>].write(|w| w.[<u2x $n>]().bit(baudrate.u2x));

                    // Enable receiver and transmitter but leave interrupts disabled.
                    self.[<ucsr $n b>].write(|w| w
                        .[<txen $n>]().set_bit()
                        .[<rxen $n>]().set_bit()
                    );

                    // Set frame format to 8n1 for now. At some point, this should be made
                    // configurable, similar to what is done in other HALs.
                    self.[<ucsr $n c>].write(|w| w
                        .[<umsel $n>]().usart_async()
                        .[<ucsz $n>]().chr8()
                        .[<usbs $n>]().stop1()
                        .[<upm $n>]().disabled()
                    );
                }

                fn raw_deinit(&mut self) {
                    // Wait for any ongoing transfer to finish.
                    $crate::nb::block!(self.raw_flush()).ok();
                    self.[<ucsr $n b>].reset();
                }

                fn raw_flush(&mut self) -> $crate::nb::Result<(), $crate::void::Void> {
                    if self.[<ucsr $n a>].read().[<udre $n>]().bit_is_clear() {
                        Err($crate::nb::Error::WouldBlock)
                    } else {
                        Ok(())
                    }
                }

                fn raw_write(&mut self, byte: u8) -> $crate::nb::Result<(), $crate::void::Void> {
                    // Call flush to make sure the data-register is empty
                    self.raw_flush()?;

                    self.[<udr $n>].write(|w| unsafe { w.bits(byte) });
                    Ok(())
                }

                fn raw_read(&mut self) -> $crate::nb::Result<u8, $crate::void::Void> {
                    if self.[<ucsr $n a>].read().[<rxc $n>]().bit_is_clear() {
                        return Err($crate::nb::Error::WouldBlock);
                    }

                    Ok(self.[<udr $n>].read().bits())
                }

                fn raw_interrupt(&mut self, event: $crate::usart::Event, state: bool) {
                    match event {
                        $crate::usart::Event::RxComplete =>
                            self.[<ucsr $n b>].modify(|_, w| w.[<rxcie $n>]().bit(state)),
                        // `DataRegisterEmpty` corresponds to the UDRE interrupt, so it
                        // toggles the UDRIE bit.
                        $crate::usart::Event::DataRegisterEmpty =>
                            self.[<ucsr $n b>].modify(|_, w| w.[<udrie $n>]().bit(state)),
                    }
                }
            }
        }
    };
}
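// A minimal, self-contained sketch of the UBRR/U2X arithmetic performed by
// `Baudrate::new` above, written as a plain function for illustration.
// `calc_ubrr` is hypothetical and not part of this HAL's API.
fn calc_ubrr(freq: u32, baud: u32) -> (u16, bool) {
    // Try double-speed mode (U2X = 1) first; the halving trick keeps the
    // integer division rounding-friendly.
    let mut ubrr = (freq / 4 / baud - 1) / 2;
    let mut u2x = true;
    if ubrr > 4095 {
        // Fall back to normal speed (U2X = 0) when UBRR would overflow 12 bits.
        u2x = false;
        ubrr = (freq / 8 / baud - 1) / 2;
    }
    (ubrr as u16, u2x)
}

fn main() {
    // 57600 baud at 16 MHz yields UBRR = 34 with U2X set: exactly the case
    // that the Arduino errata workaround above rewrites to (false, 16).
    assert_eq!(calc_ubrr(16_000_000, 57_600), (34, true));
    // 9600 baud at 16 MHz stays in double-speed mode with UBRR = 207.
    assert_eq!(calc_ubrr(16_000_000, 9_600), (207, true));
}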
//! # Row
//!
//! Utilities for rows. A `Row` owns the underlying characters, the rendered string and the syntax
//! highlighting information.

use std::iter::repeat;

use unicode_width::UnicodeWidthChar;

use crate::ansi_escape::{RESET_FMT, REVERSE_VIDEO};
use crate::syntax::{Conf as SyntaxConf, HlType};

/// The "Highlight State" of the row
#[derive(Clone, Copy, PartialEq)]
pub enum HlState {
    /// Normal state.
    Normal,
    /// A multi-line comment has been open, but not yet closed.
    MultiLineComment,
    /// A string has been open with the given quote character (for instance b'\'' or b'"'), but
    /// not yet closed.
    String(u8),
    /// A multi-line string has been open, but not yet closed.
    MultiLineString,
}

impl Default for HlState {
    fn default() -> Self { Self::Normal }
}

/// Represents a row of characters and how it is rendered.
#[derive(Default)]
pub struct Row {
    /// The characters of the row.
    pub chars: Vec<u8>,
    /// How the characters are rendered. In particular, tabs are converted into several spaces,
    /// and bytes may be combined into single UTF-8 characters.
    render: String,
    /// Mapping from indices in `self.chars` to the corresponding indices in `self.render`.
    pub cx2rx: Vec<usize>,
    /// Mapping from indices in `self.render` to the corresponding indices in `self.chars`.
    pub rx2cx: Vec<usize>,
    /// The vector of `HlType` for each rendered character.
    hl: Vec<HlType>,
    /// The final state of the row.
    pub hl_state: HlState,
    /// If not `None`, the range that is currently matched during a FIND operation.
    pub match_segment: Option<std::ops::Range<usize>>,
}

impl Row {
    /// Create a new row, containing characters `chars`.
    pub fn new(chars: Vec<u8>) -> Self {
        Self { chars, cx2rx: vec![0], ..Self::default() }
    }

    // TODO: Combine update and update_syntax
    /// Update the row: convert tabs into spaces and compute highlight symbols.
    /// The `hl_state` argument is the `HlState` for the previous row.
    pub fn update(&mut self, syntax: &SyntaxConf, hl_state: HlState, tab: usize) -> HlState {
        self.render.clear();
        self.cx2rx.clear();
        self.rx2cx.clear();
        let (mut cx, mut rx) = (0, 0);
        for c in String::from_utf8_lossy(&self.chars).chars() {
            // The number of bytes used to store the character
            let n_bytes = c.len_utf8();
            // The number of rendered characters
            let n_rend_chars = if c == '\t' { tab - (rx % tab) } else { c.width().unwrap_or(1) };
            self.render.push_str(&(if c == '\t' { " ".repeat(n_rend_chars) } else { c.into() }));
            self.cx2rx.extend(std::iter::repeat(rx).take(n_bytes));
            self.rx2cx.extend(std::iter::repeat(cx).take(n_rend_chars));
            rx += n_rend_chars;
            cx += n_bytes;
        }
        self.cx2rx.push(rx);
        self.rx2cx.push(cx);
        self.update_syntax(syntax, hl_state)
    }

    /// Obtain the character size, in bytes, given its position in `self.render`. This is done by
    /// using the difference between `self.rx2cx[rx]` and the cx for the next character.
    pub fn get_char_size(&self, rx: usize) -> usize {
        let cx0 = self.rx2cx[rx];
        self.rx2cx.iter().skip(rx + 1).map(|cx| cx - cx0).find(|d| *d > 0).unwrap_or(1)
    }

    /// Update the syntax highlighting types of the row.
    fn update_syntax(&mut self, syntax: &SyntaxConf, mut hl_state: HlState) -> HlState {
        self.hl.clear();
        let line = self.render.as_bytes();

        // Delimiters for multi-line comments and multi-line strings, as
        // `Option<(&String, &String)>`
        let ml_comment_delims =
            syntax.ml_comment_delims.as_ref().map(|(start, end)| (start, end));
        let ml_string_delims = syntax.ml_string_delim.as_ref().map(|x| (x, x));

        'syntax_loop: while self.hl.len() < line.len() {
            let i = self.hl.len();
            let find_str =
                |s: &str| line.get(i..(i + s.len())).map_or(false, |r| r.eq(s.as_bytes()));

            if hl_state == HlState::Normal && syntax.sl_comment_start.iter().any(|s| find_str(s)) {
                self.hl.extend(repeat(HlType::Comment).take(line.len() - i));
                continue;
            }

            // Multi-line strings and multi-line comments have the same behavior; the only
            // differences are: the start/end delimiters, the `HlState`, the `HlType`.
            for (delims, mstate, mtype) in &[
                (ml_comment_delims, HlState::MultiLineComment, HlType::MlComment),
                (ml_string_delims, HlState::MultiLineString, HlType::MlString),
            ] {
                if let Some((start, end)) = delims {
                    if hl_state == *mstate {
                        if find_str(end) {
                            // Highlight the remaining symbols of the multi line comment end
                            self.hl.extend(repeat(mtype).take(end.len()));
                            hl_state = HlState::Normal;
                        } else {
                            self.hl.push(*mtype);
                        }
                        continue 'syntax_loop;
                    } else if hl_state == HlState::Normal && find_str(start) {
                        // Highlight the remaining symbols of the multi line comment start
                        self.hl.extend(repeat(mtype).take(start.len()));
                        hl_state = *mstate;
                        continue 'syntax_loop;
                    }
                }
            }

            let c = line[i];

            // At this point, hl_state is Normal or String
            if let HlState::String(quote) = hl_state {
                self.hl.push(HlType::String);
                if c == quote {
                    hl_state = HlState::Normal;
                } else if c == b'\\' && i != line.len() - 1 {
                    self.hl.push(HlType::String);
                }
                continue;
            } else if syntax.sl_string_quotes.contains(&(c as char)) {
                hl_state = HlState::String(c);
                self.hl.push(HlType::String);
                continue;
            }

            let prev_sep = (i == 0) || is_sep(line[i - 1]);

            if syntax.highlight_numbers
                && ((c.is_ascii_digit() && prev_sep)
                    || (i != 0 && self.hl[i - 1] == HlType::Number && !prev_sep && !is_sep(c)))
            {
                self.hl.push(HlType::Number);
                continue;
            }

            if prev_sep {
                // This filter makes sure that names such as "in_comment" are not partially
                // highlighted (even though "in" is a keyword in rust).
                // The argument is the keyword that is matched at `i`.
                let s_filter = |kw: &str| line.get(i + kw.len()).map_or(true, |c| is_sep(*c));
                for (keyword_highlight_type, kws) in &syntax.keywords {
                    for keyword in kws.iter().filter(|kw| find_str(kw) && s_filter(kw)) {
                        self.hl.extend(repeat(*keyword_highlight_type).take(keyword.len()));
                        continue 'syntax_loop;
                    }
                }
            }

            self.hl.push(HlType::Normal);
        }

        // String state doesn't propagate to the next row
        self.hl_state =
            if matches!(hl_state, HlState::String(_)) { HlState::Normal } else { hl_state };
        self.hl_state
    }

    /// Draw the row and write the result to a buffer. An `offset` can be given, as well as a
    /// limit on the length of the row (`max_len`). After writing the characters, clear the rest
    /// of the line and move the cursor to the start of the next line.
    pub fn draw(&self, offset: usize, max_len: usize, buffer: &mut String) {
        let mut current_hl_type = HlType::Normal;
        let chars = self.render.chars().skip(offset).take(max_len);
        let mut rx = self.render.chars().take(offset).map(|c| c.width().unwrap_or(1)).sum();
        for (c, mut hl_type) in chars.zip(self.hl.iter().skip(offset)) {
            if c.is_ascii_control() {
                let rendered_char = if (c as u8) <= 26 { (b'@' + c as u8) as char } else { '?' };
                buffer.push_str(&format!("{}{}{}", REVERSE_VIDEO, rendered_char, RESET_FMT));
                // Restore previous color
                if current_hl_type != HlType::Normal {
                    buffer.push_str(&current_hl_type.to_string());
                }
            } else {
                if let Some(match_segment) = &self.match_segment {
                    if match_segment.contains(&rx) {
                        // Set the highlight type to Match, i.e. set the background to cyan
                        hl_type = &HlType::Match;
                    } else if rx == match_segment.end {
                        // Reset the formatting, in particular the background
                        buffer.push_str(RESET_FMT);
                    }
                }
                if current_hl_type != *hl_type {
                    buffer.push_str(&hl_type.to_string());
                    current_hl_type = *hl_type;
                }
                buffer.push(c);
            }
            rx += c.width().unwrap_or(1);
        }
        buffer.push_str(RESET_FMT);
    }
}

/// Return whether `c` is an ASCII separator.
fn is_sep(c: u8) -> bool {
    c.is_ascii_whitespace() || c == b'\0' || (c.is_ascii_punctuation() && c != b'_')
}
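// A standalone sketch of the index-mapping scheme used by `Row::update`
// above: a tab expands to the next tab stop, and cx2rx / rx2cx translate
// between byte indices in `chars` and column indices in `render`. For
// simplicity this sketch treats every non-tab character as one column wide
// (the real code consults unicode-width) and omits the trailing sentinel
// entries that `Row::update` appends.
fn render_with_maps(chars: &str, tab: usize) -> (String, Vec<usize>, Vec<usize>) {
    let (mut render, mut cx2rx, mut rx2cx) = (String::new(), Vec::new(), Vec::new());
    let (mut cx, mut rx) = (0, 0);
    for c in chars.chars() {
        let n_bytes = c.len_utf8();
        // A tab advances to the next multiple of `tab`.
        let n_rend = if c == '\t' { tab - (rx % tab) } else { 1 };
        render.push_str(&if c == '\t' { " ".repeat(n_rend) } else { c.to_string() });
        cx2rx.extend(std::iter::repeat(rx).take(n_bytes));
        rx2cx.extend(std::iter::repeat(cx).take(n_rend));
        rx += n_rend;
        cx += n_bytes;
    }
    (render, cx2rx, rx2cx)
}

fn main() {
    let (render, cx2rx, rx2cx) = render_with_maps("a\tb", 4);
    assert_eq!(render, "a   b"); // the tab becomes 3 spaces (columns 1..4)
    assert_eq!(cx2rx, vec![0, 1, 4]); // 'a' -> col 0, '\t' -> col 1, 'b' -> col 4
    assert_eq!(rx2cx, vec![0, 1, 1, 1, 2]); // columns 1-3 all map back to the tab
}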
use crate::ast::Expr;

pub fn is_ident(expr: &Expr) -> bool {
    matches!(expr, Expr::Ident(_))
}

pub fn is_string(expr: &Expr) -> bool {
    matches!(expr, Expr::String(_))
}
//! Search part of API implementation.

// Built-in uses

// External uses
use actix_web::{
    web::{self, Json},
    Scope,
};
use serde::{Deserialize, Serialize};

// Workspace uses
use zksync_crypto::{convert::FeConvert, Fr};
use zksync_storage::{ConnectionPool, QueryResult};
use zksync_types::{tx::TxHash, BlockNumber};

// Local uses
use super::{
    blocks::BlockInfo,
    client::{self, Client},
    Error as ApiError,
    JsonResult,
};

/// Shared data between `api/v1/search` endpoints.
#[derive(Clone)]
struct ApiSearchData {
    pool: ConnectionPool,
}

impl ApiSearchData {
    fn new(pool: ConnectionPool) -> Self {
        Self { pool }
    }

    async fn search_block(&self, query: String) -> QueryResult<Option<BlockInfo>> {
        let mut storage = self.pool.access_storage().await?;

        let block = storage
            .chain()
            .block_schema()
            .find_block_by_height_or_hash(query)
            .await;

        Ok(block.map(BlockInfo::from))
    }
}

// Data transfer objects.

#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
pub struct BlockSearchQuery {
    query: String,
}

// Client implementation

impl From<BlockNumber> for BlockSearchQuery {
    /// Converts the block number into the search query.
    fn from(inner: BlockNumber) -> Self {
        Self {
            query: inner.to_string(),
        }
    }
}

impl From<Fr> for BlockSearchQuery {
    /// Converts the state root hash of the block into the search query.
    fn from(inner: Fr) -> Self {
        Self {
            query: inner.to_hex(),
        }
    }
}

impl From<TxHash> for BlockSearchQuery {
    /// Converts the commit/verify Ethereum transaction hash into the search query.
    fn from(inner: TxHash) -> Self {
        Self {
            // Serialize without prefix.
            query: hex::encode(inner),
        }
    }
}

/// Search API part.
impl Client {
    /// Performs a block search with an uncertain query, which can be any of:
    ///
    /// - Hash of commit/verify Ethereum transaction for the block.
    /// - The state root hash of the block.
    /// - The number of the block.
    pub async fn search_block(
        &self,
        query: impl Into<BlockSearchQuery>,
    ) -> client::Result<Option<BlockInfo>> {
        self.get("search").query(&query.into()).send().await
    }
}

// Server implementation

async fn block_search(
    data: web::Data<ApiSearchData>,
    web::Query(query): web::Query<BlockSearchQuery>,
) -> JsonResult<Option<BlockInfo>> {
    let block_info = data
        .search_block(query.query)
        .await
        .map_err(ApiError::internal)?;

    Ok(Json(block_info))
}

pub fn api_scope(pool: ConnectionPool) -> Scope {
    let data = ApiSearchData::new(pool);

    web::scope("search")
        .data(data)
        .route("", web::get().to(block_search))
}

#[cfg(test)]
mod tests {
    use super::{super::test_utils::TestServerConfig, *};

    #[actix_rt::test]
    async fn search_scope() -> anyhow::Result<()> {
        let cfg = TestServerConfig::default();
        cfg.fill_database().await?;

        let (client, server) = cfg.start_server(move |cfg| api_scope(cfg.pool.clone()));

        // Search for the existing block by number.
        let block_info = client
            .search_block(1)
            .await?
            .expect("block should exist");

        // Search for the existing block by root hash.
        assert_eq!(
            client
                .search_block(block_info.new_state_root)
                .await?
                .unwrap(),
            block_info
        );

        // Search for the existing block by committed tx hash.
        assert_eq!(
            client
                .search_block(block_info.commit_tx_hash.unwrap())
                .await?
                .unwrap(),
            block_info
        );

        // Search for the existing block by verified tx hash.
        assert_eq!(
            client
                .search_block(block_info.verify_tx_hash.unwrap())
                .await?
                .unwrap(),
            block_info
        );

        server.stop().await;
        Ok(())
    }
}
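// A standalone sketch of the "uncertain query" idea documented on
// `Client::search_block` above: a block number, a state root hash, and an
// Ethereum tx hash all funnel into the same string-typed query through
// `From` impls. The stub `Query` type here stands in for `BlockSearchQuery`,
// and plain `u32` / `[u8; 32]` stand in for the zksync number and hash types.
struct Query(String);

impl From<u32> for Query {
    // Block numbers are searched by their decimal representation.
    fn from(block_number: u32) -> Self {
        Query(block_number.to_string())
    }
}

impl From<[u8; 32]> for Query {
    // Hashes are hex-encoded without a "0x" prefix, mirroring the
    // `From<TxHash>` impl above.
    fn from(hash: [u8; 32]) -> Self {
        Query(hash.iter().map(|b| format!("{:02x}", b)).collect())
    }
}

fn main() {
    assert_eq!(Query::from(1u32).0, "1");
    assert_eq!(Query::from([0u8; 32]).0, "0".repeat(64));
}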
//! This module exports blending-related types and functions.
//!
//! Given two pixels *src* and *dst* – source and destination, we associate with each pixel a
//! blending factor – respectively, *srcK* and *dstK*. *src* is the pixel being computed, and
//! *dst* is the pixel that is already stored in the framebuffer.
//!
//! The pixels can be blended in several ways. See the documentation of [`Equation`] for further
//! details.
//!
//! The factors are encoded with [`Factor`].
//!
//! [`Equation`]: crate::blending::Equation
//! [`Factor`]: crate::blending::Factor

/// Whether or not to enable blending.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub(crate) enum BlendingState {
    /// Enable blending.
    On,
    /// Disable blending.
    Off,
}

/// Blending equation. Used to state how blending factors and pixel data should be blended.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum Equation {
    /// `Additive` represents the following blending equation:
    ///
    /// > `blended = src * srcK + dst * dstK`
    Additive,
    /// `Subtract` represents the following blending equation:
    ///
    /// > `blended = src * srcK - dst * dstK`
    Subtract,
    /// Because subtracting is not commutative, `ReverseSubtract` represents the following
    /// additional blending equation:
    ///
    /// > `blended = dst * dstK - src * srcK`
    ReverseSubtract,
    /// `Min` represents the following blending equation:
    ///
    /// > `blended = min(src, dst)`
    Min,
    /// `Max` represents the following blending equation:
    ///
    /// > `blended = max(src, dst)`
    Max,
}

/// Blending factors. Pixel data are multiplied by these factors to achieve several effects driven
/// by *blending equations*.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum Factor {
    /// `1 * color = color`
    One,
    /// `0 * color = 0`
    Zero,
    /// `src * color`
    SrcColor,
    /// `(1 - src) * color`
    SrcColorComplement,
    /// `dst * color`
    DestColor,
    /// `(1 - dst) * color`
    DestColorComplement,
    /// `srcA * color`
    SrcAlpha,
    /// `(1 - srcA) * color`
    SrcAlphaComplement,
    /// `dstA * color`
    DstAlpha,
    /// `(1 - dstA) * color`
    DstAlphaComplement,
    /// `min(srcA, 1 - dstA) * color`, the *source alpha saturate* factor.
    SrcAlphaSaturate,
}
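// A CPU-side sketch of how `Equation::Additive` combines with the
// `SrcAlpha` / `SrcAlphaComplement` factors to produce classic alpha
// blending: blended = src * srcA + dst * (1 - srcA). This illustrates the
// semantics documented above; it is not part of the crate's API.
fn alpha_blend(src: [f32; 4], dst: [f32; 4]) -> [f32; 4] {
    let src_a = src[3];
    let mut out = [0.0; 4];
    for i in 0..4 {
        // srcK = SrcAlpha, dstK = SrcAlphaComplement, equation = Additive.
        out[i] = src[i] * src_a + dst[i] * (1.0 - src_a);
    }
    out
}

fn main() {
    // Half-transparent red over opaque blue averages the two colors.
    let blended = alpha_blend([1.0, 0.0, 0.0, 0.5], [0.0, 0.0, 1.0, 1.0]);
    assert_eq!(blended, [0.5, 0.0, 0.5, 0.75]);
}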
use std::env;
use std::ffi::{OsStr, OsString};
use std::fs::{self, File};
use std::io::{Read, Write};
use std::path::{Path, PathBuf};
use std::time::{Duration, SystemTime};

use cfg_if::cfg_if;
use hyperx::header::HttpDate;
use mockito::{self, mock, Matcher};
use semver::Version;
use test_support::{self, ok_or_panic, paths, paths::PathExt, process::ProcessBuilder};
use volta_core::fs::{set_executable, symlink_file};
use volta_core::tool::{Node, Yarn, NODE_DISTRO_ARCH, NODE_DISTRO_EXTENSION, NODE_DISTRO_OS};

// version cache for node and yarn
#[derive(PartialEq, Clone)]
struct CacheBuilder {
    path: PathBuf,
    expiry_path: PathBuf,
    contents: String,
    expired: bool,
}

impl CacheBuilder {
    #[allow(dead_code)]
    pub fn new(path: PathBuf, expiry_path: PathBuf, contents: &str, expired: bool) -> CacheBuilder {
        CacheBuilder {
            path,
            expiry_path,
            contents: contents.to_string(),
            expired,
        }
    }

    fn build(&self) {
        self.dirname().mkdir_p();

        // write cache file
        let mut cache_file = File::create(&self.path).unwrap_or_else(|e| {
            panic!("could not create cache file {}: {}", self.path.display(), e)
        });
        ok_or_panic! { cache_file.write_all(self.contents.as_bytes()) };

        // write expiry file
        let one_day = Duration::from_secs(24 * 60 * 60);
        let expiry_date = HttpDate::from(if self.expired {
            SystemTime::now() - one_day
        } else {
            SystemTime::now() + one_day
        });
        let mut expiry_file = File::create(&self.expiry_path).unwrap_or_else(|e| {
            panic!(
                "could not create cache expiry file {}: {}",
                self.expiry_path.display(),
                e
            )
        });
        ok_or_panic! { expiry_file.write_all(expiry_date.to_string().as_bytes()) };
    }

    fn dirname(&self) -> &Path {
        self.path.parent().unwrap()
    }
}

// environment variables
pub struct EnvVar {
    name: String,
    value: String,
}

impl EnvVar {
    pub fn new(name: &str, value: &str) -> Self {
        EnvVar {
            name: name.to_string(),
            value: value.to_string(),
        }
    }
}

// used to construct sandboxed files like package.json, platform.json, etc.
#[derive(PartialEq, Clone)]
pub struct FileBuilder {
    path: PathBuf,
    contents: String,
    executable: bool,
}

impl FileBuilder {
    pub fn new(path: PathBuf, contents: &str) -> FileBuilder {
        FileBuilder {
            path,
            contents: contents.to_string(),
            executable: false,
        }
    }

    pub fn make_executable(mut self) -> Self {
        self.executable = true;
        self
    }

    pub fn build(&self) {
        self.dirname().mkdir_p();

        let mut file = File::create(&self.path)
            .unwrap_or_else(|e| panic!("could not create file {}: {}", self.path.display(), e));

        ok_or_panic! { file.write_all(self.contents.as_bytes()) };

        if self.executable {
            ok_or_panic! { set_executable(&self.path) };
        }
    }

    fn dirname(&self) -> &Path {
        self.path.parent().unwrap()
    }
}

struct ShimBuilder {
    name: String,
}

impl ShimBuilder {
    fn new(name: String) -> ShimBuilder {
        ShimBuilder { name }
    }

    fn build(&self) {
        ok_or_panic! { symlink_file(shim_exe(), shim_file(&self.name)) };
    }
}

// used to setup executable binaries in installed packages
pub struct PackageBinInfo {
    pub name: String,
    pub contents: String,
}

#[must_use]
pub struct SandboxBuilder {
    root: Sandbox,
    files: Vec<FileBuilder>,
    caches: Vec<CacheBuilder>,
    path_dirs: Vec<PathBuf>,
    shims: Vec<ShimBuilder>,
    has_exec_path: bool,
}

pub trait DistroFixture: From<DistroMetadata> {
    fn server_path(&self) -> String;
    fn fixture_path(&self) -> String;
    fn metadata(&self) -> &DistroMetadata;
}

#[derive(Clone)]
pub struct DistroMetadata {
    pub version: &'static str,
    pub compressed_size: u32,
    pub uncompressed_size: Option<u32>,
}

pub struct NodeFixture {
    pub metadata: DistroMetadata,
}

pub struct NpmFixture {
    pub metadata: DistroMetadata,
}

pub struct Yarn1Fixture {
    pub metadata: DistroMetadata,
}

pub struct YarnBerryFixture {
    pub metadata: DistroMetadata,
}

impl From<DistroMetadata> for NodeFixture {
    fn from(metadata: DistroMetadata) -> Self {
        Self { metadata }
    }
}

impl From<DistroMetadata> for NpmFixture {
    fn from(metadata: DistroMetadata) -> Self {
        Self { metadata }
    }
}

impl From<DistroMetadata> for Yarn1Fixture {
    fn from(metadata: DistroMetadata) -> Self {
        Self { metadata }
    }
}

impl From<DistroMetadata> for YarnBerryFixture {
    fn from(metadata: DistroMetadata) -> Self {
        Self { metadata }
    }
}

impl DistroFixture for NodeFixture {
    fn server_path(&self) -> String {
        let version = &self.metadata.version;
        format!(
            "/v{}/node-v{}-{}-{}.{}",
            version, version, NODE_DISTRO_OS, NODE_DISTRO_ARCH, NODE_DISTRO_EXTENSION
        )
    }

    fn fixture_path(&self) -> String {
        let version = &self.metadata.version;
        format!(
            "tests/fixtures/node-v{}-{}-{}.{}",
            version, NODE_DISTRO_OS, NODE_DISTRO_ARCH, NODE_DISTRO_EXTENSION
        )
    }

    fn metadata(&self) -> &DistroMetadata {
        &self.metadata
    }
}

impl DistroFixture for NpmFixture {
    fn server_path(&self) -> String {
        format!("/npm/-/npm-{}.tgz", self.metadata.version)
    }

    fn fixture_path(&self) -> String {
        format!("tests/fixtures/npm-{}.tgz", self.metadata.version)
    }

    fn metadata(&self) -> &DistroMetadata {
        &self.metadata
    }
}

impl DistroFixture for Yarn1Fixture {
    fn server_path(&self) -> String {
        format!("/yarn/-/yarn-{}.tgz", self.metadata.version)
    }

    fn fixture_path(&self) -> String {
        format!("tests/fixtures/yarn-{}.tgz", self.metadata.version)
    }

    fn metadata(&self) -> &DistroMetadata {
        &self.metadata
    }
}

impl DistroFixture for YarnBerryFixture {
    fn server_path(&self) -> String {
        format!(
            "/@yarnpkg/cli-dist/-/cli-dist-{}.tgz",
            self.metadata.version
        )
    }

    fn fixture_path(&self) -> String {
        format!("tests/fixtures/cli-dist-{}.tgz", self.metadata.version)
    }

    fn metadata(&self) -> &DistroMetadata {
        &self.metadata
    }
}

impl SandboxBuilder {
    /// Root of the project, ex: `/path/to/cargo/target/integration_test/t0/foo`
    pub fn root(&self) -> PathBuf {
        self.root.root()
    }

    pub fn new(root: PathBuf) -> SandboxBuilder {
        SandboxBuilder {
            root: Sandbox {
                root,
                mocks: vec![],
                env_vars: vec![],
                env_vars_remove: vec![],
                path: OsString::new(),
            },
            files: vec![],
            caches: vec![],
            path_dirs: vec![volta_bin_dir()],
            shims: vec![
                ShimBuilder::new("npm".to_string()),
                ShimBuilder::new("yarn".to_string()),
            ],
            has_exec_path: false,
        }
    }

    #[allow(dead_code)]
    /// Set the Node cache for the sandbox (chainable)
    pub fn node_cache(mut self, cache: &str, expired: bool) -> Self {
        self.caches.push(CacheBuilder::new(
            node_index_file(),
            node_index_expiry_file(),
            cache,
            expired,
        ));
        self
    }

    /// Set the package.json for the sandbox (chainable)
    pub fn package_json(mut self, contents: &str) -> Self {
        let package_file = package_json_file(self.root());
        self.files.push(FileBuilder::new(package_file, contents));
        self
    }

    /// Set the platform.json for the sandbox (chainable)
    pub fn platform(mut self, contents: &str) -> Self {
        self.files
            .push(FileBuilder::new(default_platform_file(), contents));
        self
    }

    /// Set the hooks.json for the sandbox
    pub fn default_hooks(mut self, contents: &str) -> Self {
        self.files
            .push(FileBuilder::new(default_hooks_file(), contents));
        self
    }

    /// Set a layout version file for the sandbox (chainable)
    pub fn layout_file(mut self, version: &str) -> Self {
        self.files.push(FileBuilder::new(layout_file(version), ""));
        self
    }

    /// Set an environment variable for the sandbox (chainable)
    pub fn env(mut self, name: &str, value: &str) -> Self {
        self.root.env_vars.push(EnvVar::new(name, value));
        self
    }

    /// Setup mock to return the available node versions (chainable)
    pub fn node_available_versions(mut self, body: &str) -> Self {
        let mock = mock("GET", "/node-dist/index.json")
            .with_status(200)
            .with_header("content-type", "application/json")
            .with_body(body)
            .create();
        self.root.mocks.push(mock);
        self
    }

    /// Setup mock to return the available Yarn@1 versions (chainable)
    pub fn yarn_1_available_versions(mut self, body: &str) -> Self {
        let mock = mock("GET", "/yarn")
            .with_status(200)
            .with_header("content-type", "application/json")
            .with_body(body)
            .create();
        self.root.mocks.push(mock);
        self
    }

    /// Setup mock to return the available Yarn@2+ versions (chainable)
    pub fn yarn_berry_available_versions(mut self, body: &str) -> Self {
        let mock = mock("GET", "/@yarnpkg/cli-dist")
            .with_status(200)
            .with_header("content-type", "application/json")
            .with_body(body)
            .create();
        self.root.mocks.push(mock);
        self
    }

    /// Setup mock to return the available npm versions (chainable)
    pub fn npm_available_versions(mut self, body: &str) -> Self {
        let mock = mock("GET", "/npm")
            .with_status(200)
            .with_header("content-type", "application/json")
            .with_body(body)
            .create();
        self.root.mocks.push(mock);
        self
    }

    /// Setup mock to return a 404 for any GET request.
    /// Note: Mocks are matched in reverse order, so any mocks created _after_ this one will
    /// work, while those created before it will not.
    pub fn mock_not_found(mut self) -> Self {
        let mock = mock("GET", Matcher::Any).with_status(404).create();
        self.root.mocks.push(mock);
        self
    }

    fn distro_mock<T: DistroFixture>(mut self, fx: &T) -> Self {
        // ISSUE(#145): this should actually use a real http server instead of these mocks
        let server_path = fx.server_path();
        let fixture_path = fx.fixture_path();
        let metadata = fx.metadata();

        if let Some(uncompressed_size) = metadata.uncompressed_size {
            // This can be abstracted when https://github.com/rust-lang/rust/issues/52963 lands.
            let uncompressed_size_bytes: [u8; 4] = [
                ((uncompressed_size & 0xff00_0000) >> 24) as u8,
                ((uncompressed_size & 0x00ff_0000) >> 16) as u8,
                ((uncompressed_size & 0x0000_ff00) >> 8) as u8,
                (uncompressed_size & 0x0000_00ff) as u8,
            ];
            let range_mock = mock("GET", &server_path[..])
                .match_header("Range", Matcher::Any)
                .with_body(&uncompressed_size_bytes)
                .create();
            self.root.mocks.push(range_mock);
        }

        let file_mock = mock("GET", &server_path[..])
            .match_header("Range", Matcher::Missing)
            .with_header("Accept-Ranges", "bytes")
            .with_body_from_file(&fixture_path)
            .create();
        self.root.mocks.push(file_mock);

        self
    }

    pub fn distro_mocks<T: DistroFixture>(self, fixtures: &[DistroMetadata]) -> Self {
        let mut this = self;
        for fixture in fixtures {
            this = this.distro_mock::<T>(&fixture.clone().into());
        }
        this
    }

    /// Add an arbitrary file to the sandbox (chainable)
    pub fn file(mut self, path: &str, contents: &str) -> Self {
        let file_name = sandbox_path(path);
        self.files.push(FileBuilder::new(file_name, contents));
        self
    }

    /// Add an arbitrary file to the test project within the sandbox (chainable)
    pub fn project_file(mut self, path: &str, contents: &str) -> Self {
        let file_name = self.root().join(path);
        self.files.push(FileBuilder::new(file_name, contents));
        self
    }

    /// Add an arbitrary file to the test project within the sandbox,
    /// give it executable permissions,
    /// and add its directory to the PATH
    /// (chainable)
    pub fn executable_file(mut self, path: &str, contents: &str) -> Self {
        let file_name = self.root().join("exec").join(path);
        self.files
            .push(FileBuilder::new(file_name, contents).make_executable());
        self.add_exec_dir_to_path()
    }

    /// Set a package config file for the sandbox (chainable)
    pub fn package_config(mut self, name: &str, contents: &str) -> Self {
        let package_cfg_file = package_config_file(name);
        self.files
            .push(FileBuilder::new(package_cfg_file, contents));
        self
    }

    /// Set a bin config file for the sandbox (chainable)
    pub fn binary_config(mut self, name: &str, contents: &str) -> Self {
        let bin_cfg_file = binary_config_file(name);
        self.files.push(FileBuilder::new(bin_cfg_file, contents));
        self
    }

    /// Set a shim file for the sandbox (chainable)
    pub fn shim(mut self, name: &str) -> Self {
        self.shims.push(ShimBuilder::new(name.to_string()));
        self
    }

    /// Set an unpackaged package for the sandbox (chainable)
    pub fn package_image(
        mut self,
        name: &str,
        version: &str,
        bins: Option<Vec<PackageBinInfo>>,
    ) -> Self {
        let package_img_dir = package_image_dir(name);
        let package_json = package_img_dir.join("package.json");
        self.files.push(FileBuilder::new(
            package_json,
            &format!(r#"{{"name":"{}","version":"{}"}}"#, name, version),
        ));
        if let Some(bin_infos) = bins {
            for bin_info in bin_infos.iter() {
                cfg_if! {
                    if #[cfg(target_os = "windows")] {
                        let bin_path = package_img_dir.join(format!("{}.cmd", &bin_info.name));
                    } else {
                        let bin_path = package_img_dir.join("bin").join(&bin_info.name);
                    }
                }
                self.files
                    .push(FileBuilder::new(bin_path, &bin_info.contents).make_executable());
            }
        }
        self
    }

    /// Write executable project binaries into node_modules/.bin/ (chainable)
    pub fn project_bins(mut self, bins: Vec<PackageBinInfo>) -> Self {
        let project_bin_dir = self.root().join("node_modules").join(".bin");
        for bin_info in bins.iter() {
            cfg_if! {
                if #[cfg(target_os = "windows")] {
                    // in Windows, binaries have an extra file with an executable extension
                    let win_bin_path = project_bin_dir.join(format!("{}.cmd", &bin_info.name));
                    self.files
                        .push(FileBuilder::new(win_bin_path, &bin_info.contents).make_executable());
                }
            }
            // Volta on both Windows and Unix checks for the existence of the binary with no
            // extension
            let bin_path = project_bin_dir.join(&bin_info.name);
            self.files
                .push(FileBuilder::new(bin_path, &bin_info.contents).make_executable());
        }
        self
    }

    /// Write '.pnp.cjs' file in local project to mark as Plug-n-Play (chainable)
    pub fn project_pnp(mut self) -> Self {
        let pnp_path = self.root().join(".pnp.cjs");
        self.files.push(FileBuilder::new(pnp_path, "blegh"));
        self
    }

    /// Write an executable node binary with the input contents (chainable)
    pub fn setup_node_binary(
        mut self,
        node_version: &str,
        npm_version: &str,
        contents: &str,
    ) -> Self {
        cfg_if! {
            if #[cfg(target_os = "windows")] {
                let node_file = "node.cmd";
            } else {
                let node_file = "node";
            }
        }
        let node_bin_file = node_image_dir(node_version).join("bin").join(node_file);
        self.files
            .push(FileBuilder::new(node_bin_file, contents).make_executable());
        self.node_npm_version_file(node_version, npm_version)
    }

    /// Write an executable npm binary with the input contents (chainable)
    pub fn setup_npm_binary(mut self, version: &str, contents: &str) -> Self {
        cfg_if! {
            if #[cfg(target_os = "windows")] {
                let npm_file = "npm.cmd";
            } else {
                let npm_file = "npm";
            }
        }
        let npm_bin_file = npm_image_dir(version).join("bin").join(npm_file);
        self.files
            .push(FileBuilder::new(npm_bin_file, contents).make_executable());
        self
    }

    /// Write an executable yarn binary with the input contents (chainable)
    pub fn setup_yarn_binary(mut self, version: &str, contents: &str) -> Self {
        cfg_if! {
            if #[cfg(target_os = "windows")] {
                let yarn_file = "yarn.cmd";
            } else {
                let yarn_file = "yarn";
            }
        }
        let yarn_bin_file = yarn_image_dir(version).join("bin").join(yarn_file);
        self.files
            .push(FileBuilder::new(yarn_bin_file, contents).make_executable());
        self
    }

    /// Write the "default npm" file for a node version (chainable)
    pub fn node_npm_version_file(mut self, node_version: &str, npm_version: &str) -> Self {
        let npm_file = node_npm_version_file(node_version);
        self.files.push(FileBuilder::new(npm_file, npm_version));
        self
    }

    /// Add directory to the PATH (chainable)
    pub fn add_dir_to_path(mut self, dir: PathBuf) -> Self {
        self.path_dirs.push(dir);
        self
    }

    /// Add executable directory to the PATH (chainable)
    pub fn add_exec_dir_to_path(mut self) -> Self {
        if !self.has_exec_path {
            let exec_path = self.root().join("exec");
            self.path_dirs.push(exec_path);
            self.has_exec_path = true;
        }
        self
    }

    /// Create the project
    pub fn build(mut self) -> Sandbox {
        // First, clean the directory if it already exists
        self.rm_root();

        // Create the empty directory
        self.root.root().mkdir_p();

        // make sure these directories exist
        ok_or_panic! { fs::create_dir_all(volta_bin_dir()) };
        ok_or_panic! { fs::create_dir_all(node_cache_dir()) };
        ok_or_panic! { fs::create_dir_all(node_inventory_dir()) };
        ok_or_panic! { fs::create_dir_all(package_inventory_dir()) };
        ok_or_panic! { fs::create_dir_all(yarn_inventory_dir()) };
        ok_or_panic! { fs::create_dir_all(volta_tmp_dir()) };

        // write node and yarn caches
        for cache in self.caches.iter() {
            cache.build();
        }

        // write files
        for file_builder in self.files {
            file_builder.build();
        }

        // write shims
        for shim_builder in self.shims {
            shim_builder.build();
        }

        // join dirs for the path (volta bin path is already first)
        self.root.path = env::join_paths(self.path_dirs.iter()).unwrap();

        let SandboxBuilder { root, .. } = self;
        root
    }

    fn rm_root(&self) {
        self.root.root().rm_rf()
    }
}

// files and dirs in the sandbox

fn home_dir() -> PathBuf {
    paths::home()
}
fn volta_home() -> PathBuf {
    home_dir().join(".volta")
}
fn volta_tmp_dir() -> PathBuf {
    volta_home().join("tmp")
}
fn volta_bin_dir() -> PathBuf {
    volta_home().join("bin")
}
fn volta_log_dir() -> PathBuf {
    volta_home().join("log")
}
fn volta_postscript() -> PathBuf {
    volta_tmp_dir().join("volta_tmp_1234.sh")
}
fn volta_tools_dir() -> PathBuf {
    volta_home().join("tools")
}
fn inventory_dir() -> PathBuf {
    volta_tools_dir().join("inventory")
}
fn user_dir() -> PathBuf {
    volta_tools_dir().join("user")
}
fn image_dir() -> PathBuf {
    volta_tools_dir().join("image")
}
fn node_inventory_dir() -> PathBuf {
    inventory_dir().join("node")
}
fn yarn_inventory_dir() -> PathBuf {
    inventory_dir().join("yarn")
}
fn package_inventory_dir() -> PathBuf {
    inventory_dir().join("packages")
}
fn cache_dir() -> PathBuf {
    volta_home().join("cache")
}
fn node_cache_dir() -> PathBuf {
    cache_dir().join("node")
}
#[allow(dead_code)]
fn node_index_file() -> PathBuf {
    node_cache_dir().join("index.json")
}
#[allow(dead_code)]
fn node_index_expiry_file() -> PathBuf {
    node_cache_dir().join("index.json.expires")
}
fn package_json_file(mut root: PathBuf) -> PathBuf {
    root.push("package.json");
    root
}
fn package_config_file(name: &str) -> PathBuf {
    user_dir().join("packages").join(format!("{}.json", name))
}
fn binary_config_file(name: &str) -> PathBuf {
    user_dir().join("bins").join(format!("{}.json", name))
}
fn shim_file(name: &str) -> PathBuf {
    volta_bin_dir().join(format!("{}{}", name, env::consts::EXE_SUFFIX))
}
fn package_image_dir(name: &str) -> PathBuf {
    image_dir().join("packages").join(name)
}
fn node_image_dir(version: &str) -> PathBuf {
    image_dir().join("node").join(version)
}
fn npm_image_dir(version: &str) -> PathBuf {
    image_dir().join("npm").join(version)
}
fn yarn_image_dir(version: &str) -> PathBuf {
    image_dir().join("yarn").join(version)
}
fn default_platform_file() -> PathBuf {
    user_dir().join("platform.json")
}
fn default_hooks_file() -> PathBuf {
    volta_home().join("hooks.json")
}
fn layout_file(version: &str) -> PathBuf {
    volta_home().join(format!("layout.{}", version))
}
fn node_npm_version_file(node_version: &str) -> PathBuf {
    node_inventory_dir().join(format!("node-v{}-npm", node_version))
}
fn sandbox_path(path: &str) -> PathBuf {
    home_dir().join(path)
}

pub struct Sandbox {
    root: PathBuf,
    mocks: Vec<mockito::Mock>,
    env_vars: Vec<EnvVar>,
    env_vars_remove: Vec<String>,
    path: OsString,
}

impl Sandbox {
    /// Root of the project, ex: `/path/to/cargo/target/integration_test/t0/foo`
    pub fn root(&self) -> PathBuf {
        self.root.clone()
    }

    /// Create a `ProcessBuilder` to run a program in the project.
    /// Example:
    ///     assert_that(
    ///         p.process(&p.bin("foo")),
    ///         execs().with_stdout("bar\n"),
    ///     );
    pub fn process<T: AsRef<OsStr>>(&self, program: T) -> ProcessBuilder {
        let mut p = test_support::process::process(program);

        p.cwd(self.root())
            // sandbox the Volta environment
            .env("VOLTA_HOME", volta_home())
            .env("VOLTA_INSTALL_DIR", cargo_dir())
            .env("PATH", &self.path)
            .env("VOLTA_POSTSCRIPT", volta_postscript())
            .env_remove("VOLTA_SHELL")
            .env_remove("MSYSTEM"); // assume cmd.exe everywhere on windows

        // overrides for env vars
        for env_var in &self.env_vars {
            p.env(&env_var.name, &env_var.value);
        }
        for env_var_name in &self.env_vars_remove {
            p.env_remove(env_var_name);
        }

        p
    }

    /// Create a `ProcessBuilder` to run volta.
    /// Arguments can be separated by spaces.
    /// Example:
    ///     assert_that(p.volta("use node 9.5"), execs());
    pub fn volta(&self, cmd: &str) -> ProcessBuilder {
        let mut p = self.process(&volta_exe());
        split_and_add_args(&mut p, cmd);
        p
    }

    /// Create a `ProcessBuilder` to run the volta npm shim.
    /// Arguments can be separated by spaces.
    /// Example:
    ///     assert_that(p.npm("install ember-cli"), execs());
    pub fn npm(&self, cmd: &str) -> ProcessBuilder {
        self.exec_shim("npm", cmd)
    }

    /// Create a `ProcessBuilder` to run the volta yarn shim.
    /// Arguments can be separated by spaces.
    /// Example:
    ///     assert_that(p.yarn("add ember-cli"), execs());
    pub fn yarn(&self, cmd: &str) -> ProcessBuilder {
        self.exec_shim("yarn", cmd)
    }

    /// Create a `ProcessBuilder` to run an arbitrary shim.
    /// Arguments can be separated by spaces.
    /// Example:
    ///     assert_that(p.exec_shim("cowsay", "foo bar"), execs());
    pub fn exec_shim(&self, bin: &str, cmd: &str) -> ProcessBuilder {
        let mut p = self.process(shim_file(bin));
        split_and_add_args(&mut p, cmd);
        p
    }

    pub fn read_package_json(&self) -> String {
        let package_file = package_json_file(self.root());
        read_file_to_string(package_file)
    }

    pub fn read_log_dir(&self) -> Option<fs::ReadDir> {
        fs::read_dir(volta_log_dir()).ok()
    }

    pub fn remove_volta_home(&self) {
        volta_home().rm_rf();
    }

    // check that files in the sandbox exist

    pub fn node_inventory_archive_exists(&self, version: &Version) -> bool {
        node_inventory_dir()
            .join(Node::archive_filename(version))
            .exists()
    }

    pub fn yarn_inventory_archive_exists(&self, version: &str) -> bool {
        yarn_inventory_dir()
            .join(Yarn::archive_filename(version))
            .exists()
    }

    pub fn package_config_exists(name: &str) -> bool {
        package_config_file(name).exists()
    }

    pub fn bin_config_exists(name: &str) -> bool {
        binary_config_file(name).exists()
    }

    pub fn shim_exists(name: &str) -> bool {
        shim_file(name).exists()
    }

    pub fn path_exists(path: &str) -> bool {
        sandbox_path(path).exists()
    }

    pub fn package_image_exists(name: &str) -> bool {
        let package_img_dir = package_image_dir(name);
        package_img_dir.join("package.json").exists()
    }

    pub fn read_default_platform() -> String {
        read_file_to_string(default_platform_file())
    }
}

impl Drop for Sandbox {
    fn drop(&mut self) {
        paths::root().rm_rf();
    }
}

// Generates a sandboxed environment
pub fn sandbox() -> SandboxBuilder {
    SandboxBuilder::new(paths::root().join("sandbox"))
}

// Path to compiled executables
pub fn cargo_dir() -> PathBuf {
    env::var_os("CARGO_BIN_PATH")
        .map(PathBuf::from)
        .or_else(|| {
            env::current_exe().ok().map(|mut path| {
                path.pop();
                if path.ends_with("deps") {
                    path.pop();
                }
                path
            })
        })
        .unwrap_or_else(|| panic!("CARGO_BIN_PATH wasn't set. Cannot continue running test"))
}

fn volta_exe() -> PathBuf {
    cargo_dir().join(format!("volta{}", env::consts::EXE_SUFFIX))
}

pub fn shim_exe() -> PathBuf {
    cargo_dir().join(format!("volta-shim{}", env::consts::EXE_SUFFIX))
}

fn split_and_add_args(p: &mut ProcessBuilder, s: &str) {
    for arg in s.split_whitespace() {
        if arg.contains('"') || arg.contains('\'') {
            panic!("shell-style argument parsing is not supported")
        }
        p.arg(arg);
    }
}

fn read_file_to_string(file_path: PathBuf) -> String {
    let mut contents = String::new();
    let mut file = ok_or_panic! { File::open(file_path) };
    ok_or_panic! { file.read_to_string(&mut contents) };
    contents
}
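// A quick illustrative check that the shift-and-mask in `distro_mock` above
// is just the big-endian byte decomposition of the uncompressed size;
// `u32::to_be_bytes` from the standard library computes the same array.
fn main() {
    let uncompressed_size: u32 = 0x0102_0304;
    let manual: [u8; 4] = [
        ((uncompressed_size & 0xff00_0000) >> 24) as u8,
        ((uncompressed_size & 0x00ff_0000) >> 16) as u8,
        ((uncompressed_size & 0x0000_ff00) >> 8) as u8,
        (uncompressed_size & 0x0000_00ff) as u8,
    ];
    assert_eq!(manual, uncompressed_size.to_be_bytes());
}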
//! Wraps the object_store crate with IOx-specific semantics. The main responsibility of this crate //! is to be the single source of truth for the paths of files in object storage. There is a //! specific path type for each IOx-specific reason an object storage file exists. Content of the //! files is managed outside of this crate. #![deny(rustdoc::broken_intra_doc_links, rustdoc::bare_urls, rust_2018_idioms)] #![warn( missing_copy_implementations, missing_debug_implementations, missing_docs, clippy::explicit_iter_loop, clippy::future_not_send, clippy::use_self, clippy::clone_on_ref_ptr )] use bytes::Bytes; use data_types::server_id::ServerId; use futures::{stream::BoxStream, StreamExt, TryStreamExt}; use object_store::{path::Path, DynObjectStore, GetResult, Result}; use observability_deps::tracing::warn; use snafu::{ensure, ResultExt, Snafu}; use std::{ops::Deref, sync::Arc}; use tokio::sync::mpsc::channel; use tokio_stream::wrappers::ReceiverStream; use uuid::Uuid; mod paths; pub use paths::{ parquet_file::{ParquetFilePath, ParquetFilePathParseError}, transaction_file::TransactionFilePath, }; use paths::{DataPath, RootPath, TransactionsPath}; #[derive(Debug, Snafu)] #[allow(missing_docs)] pub enum IoxObjectStoreError { #[snafu(display("{}", source))] UnderlyingObjectStoreError { source: object_store::Error }, #[snafu(display("Cannot create database with UUID `{}`; it already exists", uuid))] DatabaseAlreadyExists { uuid: Uuid }, #[snafu(display("No rules found to load at {}", root_path))] NoRulesFound { root_path: RootPath }, } /// Handles persistence of data for a particular database. Writes within its directory/prefix. /// /// This wrapper on top of an `ObjectStore` maps IOx specific concepts to ObjectStore locations #[derive(Debug)] pub struct IoxObjectStore { inner: Arc<DynObjectStore>, root_path: RootPath, data_path: DataPath, transactions_path: TransactionsPath, } impl IoxObjectStore { /// Get the data for the server config to determine the names and locations of the databases /// that this server owns. /// /// TEMPORARY: Server config used to be at the top level instead of beneath `/nodes/`. Until /// all deployments have transitioned, check both locations before reporting that the server /// config is not found. pub async fn get_server_config_file( inner: &DynObjectStore, server_id: ServerId, ) -> Result<Bytes> { let path = paths::server_config_path(inner, server_id); let result = match inner.get(&path).await { Err(object_store::Error::NotFound { .. }) => { use object_store::path::ObjectStorePath; let mut legacy_path = inner.new_path(); legacy_path.push_dir(server_id.to_string()); legacy_path.set_file_name(paths::SERVER_CONFIG_FILE_NAME); inner.get(&legacy_path).await } other => other, }?; Ok(result.bytes().await?.into()) } /// Store the data for the server config with the names and locations of the databases /// that this server owns. pub async fn put_server_config_file( inner: &DynObjectStore, server_id: ServerId, bytes: Bytes, ) -> Result<()> { let path = paths::server_config_path(inner, server_id); inner.put(&path, bytes).await } /// Return the path to the server config file to be used in database ownership information to /// identify the current server that a database thinks is its owner. pub fn server_config_path(inner: &DynObjectStore, server_id: ServerId) -> Path { paths::server_config_path(inner, server_id) } /// Returns what the root path would be for a given database. Does not check existence or /// validity of the path in object storage. 
pub fn root_path_for(inner: &DynObjectStore, uuid: Uuid) -> RootPath { RootPath::new(inner, uuid) } /// Create a database-specific wrapper. Takes all the information needed to create a new /// root directory of a database. Checks that there isn't already anything in this database's /// directory in object storage. /// /// Caller *MUST* ensure there is at most 1 concurrent call of this function with the same /// parameters; this function does *NOT* do any locking. pub async fn create( inner: Arc<DynObjectStore>, uuid: Uuid, ) -> Result<Self, IoxObjectStoreError> { let root_path = Self::root_path_for(&*inner, uuid); let list_result = inner .list_with_delimiter(&root_path.inner) .await .context(UnderlyingObjectStoreSnafu)?; ensure!( list_result.objects.is_empty(), DatabaseAlreadyExistsSnafu { uuid } ); Ok(Self::existing(inner, root_path)) } /// Look in object storage for an existing, active database with this UUID. pub async fn load(inner: Arc<DynObjectStore>, uuid: Uuid) -> Result<Self, IoxObjectStoreError> { let root_path = Self::root_path_for(&*inner, uuid); Self::find(inner, root_path).await } /// Look in object storage for an existing database with this name and the given root path /// that was retrieved from a server config pub async fn load_at_root_path( inner: Arc<DynObjectStore>, root_path_str: &str, ) -> Result<Self, IoxObjectStoreError> { let root_path = RootPath::from_str(&*inner, root_path_str); Self::find(inner, root_path).await } async fn find( inner: Arc<DynObjectStore>, root_path: RootPath, ) -> Result<Self, IoxObjectStoreError> { let list_result = inner .list_with_delimiter(&root_path.inner) .await .context(UnderlyingObjectStoreSnafu)?; let rules_file = root_path.rules_path(); let rules_exists = list_result .objects .iter() .any(|object| object.location == rules_file.inner); ensure!(rules_exists, NoRulesFoundSnafu { root_path }); Ok(Self::existing(inner, root_path)) } /// Access the database-specific object storage files for an existing database that has /// already been located and verified to be active. Does not check object storage. pub fn existing(inner: Arc<DynObjectStore>, root_path: RootPath) -> Self { let data_path = root_path.data_path(); let transactions_path = root_path.transactions_path(); Self { inner, root_path, data_path, transactions_path, } } /// In the database's root directory, write out a file pointing to the server's config. This /// data can serve as an extra check on which server owns this database. pub async fn put_owner_file(&self, bytes: Bytes) -> Result<()> { let owner_path = self.root_path.owner_path(); self.inner.put(&owner_path, bytes).await } /// Return the contents of the owner file in the database's root directory that provides /// information on the server that owns this database. pub async fn get_owner_file(&self) -> Result<Bytes> { let owner_path = self.root_path.owner_path(); Ok(self.inner.get(&owner_path).await?.bytes().await?.into()) } /// Delete owner file for testing pub async fn delete_owner_file_for_testing(&self) -> Result<()> { let owner_path = self.root_path.owner_path(); self.inner.delete(&owner_path).await } /// The location in object storage for all files for this database, suitable for logging or /// debugging purposes only. Do not parse this, as its format is subject to change! pub fn debug_database_path(&self) -> String { self.root_path.inner.to_string() } /// The possibly valid location in object storage for this database. 
    /// to use during initial database load, but not parsing for semantic meaning, as its format is
    /// subject to change!
    pub fn root_path(&self) -> String {
        self.root_path.to_string()
    }

    // Catalog transaction file methods ===========================================================

    /// List all the catalog transaction files in object storage for this database.
    pub async fn catalog_transaction_files(
        &self,
    ) -> Result<BoxStream<'static, Result<Vec<TransactionFilePath>>>> {
        Ok(self
            .list(Some(&self.transactions_path.inner))
            .await?
            .map_ok(move |list| {
                list.into_iter()
                    // This `flat_map` ignores any filename in the transactions_path we couldn't
                    // parse as a TransactionFilePath
                    .flat_map(TransactionFilePath::from_absolute)
                    .collect()
            })
            .boxed())
    }

    /// Get the catalog transaction data in this relative path in this database's object store.
    pub async fn get_catalog_transaction_file(
        &self,
        location: &TransactionFilePath,
    ) -> Result<GetResult<object_store::Error>> {
        let full_path = self.transactions_path.join(location);

        self.inner.get(&full_path).await
    }

    /// Store the data for this catalog transaction file in this database's object store.
    pub async fn put_catalog_transaction_file(
        &self,
        location: &TransactionFilePath,
        bytes: Bytes,
    ) -> Result<()> {
        let full_path = self.transactions_path.join(location);

        self.inner.put(&full_path, bytes).await
    }

    /// Delete all catalog transaction files for this database.
    pub async fn wipe_catalog(&self) -> Result<()> {
        let mut stream = self.catalog_transaction_files().await?;

        while let Some(transaction_file_list) = stream.try_next().await? {
            for transaction_file_path in &transaction_file_list {
                self.delete_catalog_transaction_file(transaction_file_path)
                    .await?;
            }
        }

        Ok(())
    }

    /// Remove the data for this catalog transaction file from this database's object store
    pub async fn delete_catalog_transaction_file(
        &self,
        location: &TransactionFilePath,
    ) -> Result<()> {
        let full_path = self.transactions_path.join(location);

        self.inner.delete(&full_path).await
    }

    // Parquet file methods =======================================================================

    /// List all parquet file paths in object storage for this database.
    pub async fn parquet_files(&self) -> Result<BoxStream<'static, Result<Vec<ParquetFilePath>>>> {
        Ok(self
            .list(Some(&self.data_path.inner))
            .await?
            .map_ok(move |list| {
                list.into_iter()
                    // This `flat_map` ignores any filename in the data_path we couldn't parse as
                    // a ParquetFilePath
                    .flat_map(ParquetFilePath::from_absolute)
                    .collect()
            })
            .boxed())
    }

    /// Get the parquet file data in this relative path in this database's object store.
    pub async fn get_parquet_file(
        &self,
        location: &ParquetFilePath,
    ) -> Result<GetResult<object_store::Error>> {
        self.inner.get(&self.full_parquet_path(location)).await
    }

    /// Store the data for this parquet file in this database's object store.
pub async fn put_parquet_file(&self, location: &ParquetFilePath, bytes: Bytes) -> Result<()> { self.inner .put(&self.full_parquet_path(location), bytes) .await } /// Remove the data for this parquet file from this database's object store pub async fn delete_parquet_file(&self, location: &ParquetFilePath) -> Result<()> { self.inner.delete(&self.full_parquet_path(location)).await } fn full_parquet_path(&self, location: &ParquetFilePath) -> Path { if location.is_new_gen() { self.inner .deref() .path_from_dirs_and_filename(location.absolute_dirs_and_file_name()) } else { self.data_path.join(location) } } // Database rule file methods ================================================================= // Deliberately private; this should not leak outside this crate // so assumptions about the object store organization are confined // (and can be changed) in this crate fn db_rules_path(&self) -> Path { self.root_path.rules_path().inner } /// Get the data for the database rules pub async fn get_database_rules_file(&self) -> Result<Bytes> { let path = &self.db_rules_path(); Ok(self.inner.get(path).await?.bytes().await?.into()) } /// Return the database rules file content without creating an IoxObjectStore instance. Useful /// when restoring a database given a UUID to check existence of the specified database and /// get information such as the database name from the rules before proceeding with restoring /// and initializing the database. pub async fn load_database_rules(inner: Arc<DynObjectStore>, uuid: Uuid) -> Result<Bytes> { let root_path = Self::root_path_for(&*inner, uuid); let db_rules_path = root_path.rules_path().inner; Ok(inner.get(&db_rules_path).await?.bytes().await?.into()) } /// Store the data for the database rules pub async fn put_database_rules_file(&self, bytes: Bytes) -> Result<()> { self.inner.put(&self.db_rules_path(), bytes).await } /// Delete the data for the database rules pub async fn delete_database_rules_file(&self) -> Result<()> { self.inner.delete(&self.db_rules_path()).await } /// List the relative paths in this database's object store. /// // Deliberately private; this should not leak outside this crate // so assumptions about the object store organization are confined // (and can be changed) in this crate /// All outside calls should go to one of the more specific listing methods. async fn list(&self, prefix: Option<&Path>) -> Result<BoxStream<'static, Result<Vec<Path>>>> { let (tx, rx) = channel(4); let inner = Arc::clone(&self.inner); let prefix = prefix.cloned(); // This is necessary because of the lifetime restrictions on the ObjectStoreApi trait's // methods, which might not actually be necessary but fixing it involves changes to the // cloud_storage crate that are longer term. 
tokio::spawn(async move { match inner.list(prefix.as_ref()).await { Err(e) => { let _ = tx.send(Err(e)).await; } Ok(mut stream) => { while let Some(list) = stream.next().await { let _ = tx.send(list).await; } } } }); Ok(ReceiverStream::new(rx).boxed()) } } #[cfg(test)] mod tests { use super::*; use crate::paths::ALL_DATABASES_DIRECTORY; use data_types::chunk_metadata::{ChunkAddr, ChunkId}; use data_types2::{NamespaceId, PartitionId, SequencerId, TableId}; use object_store::{parsed_path, path::ObjectStorePath, ObjectStoreImpl}; use test_helpers::assert_error; use uuid::Uuid; /// Creates a new in-memory object store fn make_object_store() -> Arc<DynObjectStore> { Arc::new(ObjectStoreImpl::new_in_memory()) } async fn add_file(object_store: &DynObjectStore, location: &Path) { let data = Bytes::from("arbitrary data"); object_store.put(location, data).await.unwrap(); } async fn parquet_files(iox_object_store: &IoxObjectStore) -> Vec<ParquetFilePath> { iox_object_store .parquet_files() .await .unwrap() .try_collect::<Vec<_>>() .await .unwrap() .into_iter() .flatten() .collect() } async fn add_parquet_file(iox_object_store: &IoxObjectStore, location: &ParquetFilePath) { let data = Bytes::from("arbitrary data"); iox_object_store .put_parquet_file(location, data) .await .unwrap(); } #[tokio::test] async fn only_lists_relevant_parquet_files() { let object_store = make_object_store(); let uuid = Uuid::new_v4(); let uuid_string = uuid.to_string(); let uuid_str = uuid_string.as_str(); let iox_object_store = IoxObjectStore::create(Arc::clone(&object_store), uuid) .await .unwrap(); let parquet_uuid = Uuid::new_v4(); let good_filename = format!("111.{}.parquet", parquet_uuid); let good_filename_str = good_filename.as_str(); // Put a non-database file in let path = object_store.path_from_dirs_and_filename(parsed_path!(["foo"])); add_file(&*object_store, &path).await; // Put a file for some other server in let path = object_store.path_from_dirs_and_filename(parsed_path!(["12345"])); add_file(&*object_store, &path).await; // Put a file for some other database in let other_db_uuid = Uuid::new_v4().to_string(); let path = object_store.path_from_dirs_and_filename(parsed_path!([ ALL_DATABASES_DIRECTORY, other_db_uuid.as_str() ])); add_file(&*object_store, &path).await; // Put a file in the database dir but not the data dir let path = object_store.path_from_dirs_and_filename(parsed_path!( [ALL_DATABASES_DIRECTORY, uuid_str], good_filename_str )); add_file(&*object_store, &path).await; // Put files in the data dir whose names are in the wrong format let mut path = object_store.path_from_dirs_and_filename(parsed_path!( [ALL_DATABASES_DIRECTORY, uuid_str, "data"], "111.parquet" )); add_file(&*object_store, &path).await; path.set_file_name(&format!("111.{}.xls", parquet_uuid)); add_file(&*object_store, &path).await; // Parquet files should be empty let pf = parquet_files(&iox_object_store).await; assert!(pf.is_empty(), "{:?}", pf); // Add a real parquet file let chunk_addr = ChunkAddr { db_name: "clouds".into(), table_name: "my_table".into(), partition_key: "my_partition".into(), chunk_id: ChunkId::new_test(13), }; let p1 = ParquetFilePath::new_old_gen(&chunk_addr); add_parquet_file(&iox_object_store, &p1).await; // Only the real file should be returned let pf = parquet_files(&iox_object_store).await; assert_eq!(&pf, &[p1]); } async fn catalog_transaction_files( iox_object_store: &IoxObjectStore, ) -> Vec<TransactionFilePath> { iox_object_store .catalog_transaction_files() .await .unwrap() .try_collect::<Vec<_>>() 
.await .unwrap() .into_iter() .flatten() .collect() } async fn add_catalog_transaction_file( iox_object_store: &IoxObjectStore, location: &TransactionFilePath, ) { let data = Bytes::from("arbitrary data"); iox_object_store .put_catalog_transaction_file(location, data) .await .unwrap(); } #[tokio::test] async fn only_lists_relevant_catalog_transaction_files() { let object_store = make_object_store(); let uuid = Uuid::new_v4(); let uuid_string = uuid.to_string(); let uuid_str = uuid_string.as_str(); let iox_object_store = IoxObjectStore::create(Arc::clone(&object_store), uuid) .await .unwrap(); let txn_uuid = Uuid::new_v4(); let good_txn_filename = format!("{}.txn", txn_uuid); let good_txn_filename_str = good_txn_filename.as_str(); // Put a non-database file in let path = object_store.path_from_dirs_and_filename(parsed_path!(["foo"])); add_file(&*object_store, &path).await; // Put a file in a directory other than the databases directory let path = object_store.path_from_dirs_and_filename(parsed_path!(["12345"])); add_file(&*object_store, &path).await; // Put a file for some other database in let other_db_uuid = Uuid::new_v4().to_string(); let path = object_store.path_from_dirs_and_filename(parsed_path!([ ALL_DATABASES_DIRECTORY, other_db_uuid.as_str() ])); add_file(&*object_store, &path).await; // Put a file in the database dir but not the transactions dir let path = object_store.path_from_dirs_and_filename(parsed_path!( [ALL_DATABASES_DIRECTORY, uuid_str], good_txn_filename_str )); add_file(&*object_store, &path).await; // Put files in the transactions dir whose names are in the wrong format let mut path = object_store.path_from_dirs_and_filename(parsed_path!( [ALL_DATABASES_DIRECTORY, uuid_str], "111.parquet" )); add_file(&*object_store, &path).await; path.set_file_name(&format!("{}.xls", txn_uuid)); add_file(&*object_store, &path).await; // Catalog transaction files should be empty let ctf = catalog_transaction_files(&iox_object_store).await; assert!(ctf.is_empty(), "{:?}", ctf); // Add a real transaction file let t1 = TransactionFilePath::new_transaction(123, txn_uuid); add_catalog_transaction_file(&iox_object_store, &t1).await; // Add a real checkpoint file let t2 = TransactionFilePath::new_checkpoint(123, txn_uuid); add_catalog_transaction_file(&iox_object_store, &t2).await; // Only the real files should be returned let ctf = catalog_transaction_files(&iox_object_store).await; assert_eq!(ctf.len(), 2); assert!(ctf.contains(&t1)); assert!(ctf.contains(&t2)); } fn make_db_rules_path(object_store: &DynObjectStore, uuid: Uuid) -> Path { let mut p = object_store.new_path(); p.push_all_dirs(&[ALL_DATABASES_DIRECTORY, uuid.to_string().as_str()]); p.set_file_name("rules.pb"); p } #[tokio::test] async fn db_rules_should_be_a_file() { let object_store = make_object_store(); let uuid = Uuid::new_v4(); let rules_path = make_db_rules_path(&*object_store, uuid); let iox_object_store = IoxObjectStore::create(Arc::clone(&object_store), uuid) .await .unwrap(); // PUT let original_file_content = Bytes::from("hello world"); iox_object_store .put_database_rules_file(original_file_content.clone()) .await .unwrap(); let actual_content = object_store .get(&rules_path) .await .unwrap() .bytes() .await .unwrap(); assert_eq!(original_file_content, actual_content); // GET let updated_file_content = Bytes::from("goodbye moon"); let expected_content = updated_file_content.clone(); object_store .put(&rules_path, updated_file_content) .await .unwrap(); let actual_content = 
iox_object_store.get_database_rules_file().await.unwrap(); assert_eq!(expected_content, actual_content); // DELETE iox_object_store.delete_database_rules_file().await.unwrap(); let file_count = object_store .list(None) .await .unwrap() .try_fold(0, |a, paths| async move { Ok(a + paths.len()) }) .await .unwrap(); assert_eq!(file_count, 0); } fn make_owner_path(object_store: &DynObjectStore, uuid: Uuid) -> Path { let mut p = object_store.new_path(); p.push_all_dirs(&[ALL_DATABASES_DIRECTORY, uuid.to_string().as_str()]); p.set_file_name("owner.pb"); p } #[tokio::test] async fn owner_should_be_a_file() { let object_store = make_object_store(); let uuid = Uuid::new_v4(); let owner_path = make_owner_path(&*object_store, uuid); let iox_object_store = IoxObjectStore::create(Arc::clone(&object_store), uuid) .await .unwrap(); // PUT let original_file_content = Bytes::from("hello world"); iox_object_store .put_owner_file(original_file_content.clone()) .await .unwrap(); let actual_content = object_store .get(&owner_path) .await .unwrap() .bytes() .await .unwrap(); assert_eq!(original_file_content, actual_content); // GET let updated_file_content = Bytes::from("goodbye moon"); let expected_content = updated_file_content.clone(); object_store .put(&owner_path, updated_file_content) .await .unwrap(); let actual_content = iox_object_store.get_owner_file().await.unwrap(); assert_eq!(expected_content, actual_content); } #[tokio::test] async fn create_new_with_same_uuid_errors() { let object_store = make_object_store(); let uuid = Uuid::new_v4(); let iox_object_store = IoxObjectStore::create(Arc::clone(&object_store), uuid) .await .unwrap(); iox_object_store .put_database_rules_file(Bytes::new()) .await .unwrap(); assert_error!( IoxObjectStore::create(Arc::clone(&object_store), uuid).await, IoxObjectStoreError::DatabaseAlreadyExists { uuid: err_uuid } if err_uuid == uuid, ); } #[tokio::test] async fn create_new_with_any_files_under_uuid_errors() { let object_store = make_object_store(); let uuid = Uuid::new_v4(); let mut not_rules_path = object_store.new_path(); not_rules_path.push_all_dirs(&[ALL_DATABASES_DIRECTORY, uuid.to_string().as_str()]); not_rules_path.set_file_name("not_rules.txt"); object_store .put(&not_rules_path, Bytes::new()) .await .unwrap(); assert_error!( IoxObjectStore::create(Arc::clone(&object_store), uuid).await, IoxObjectStoreError::DatabaseAlreadyExists { uuid: err_uuid } if err_uuid == uuid, ); } async fn create_database(object_store: Arc<DynObjectStore>, uuid: Uuid) -> IoxObjectStore { let iox_object_store = IoxObjectStore::create(Arc::clone(&object_store), uuid) .await .unwrap(); iox_object_store .put_database_rules_file(Bytes::new()) .await .unwrap(); iox_object_store } #[tokio::test] async fn cant_read_rules_if_no_rules_exist() { let object_store = make_object_store(); // Create a uuid but don't create a corresponding database let db = Uuid::new_v4(); // This fails, there are no rules to read assert_error!( IoxObjectStore::load_database_rules(object_store, db).await, object_store::Error::NotFound { .. }, ); } #[tokio::test] async fn test_load() { let object_store = make_object_store(); // Load can't find nonexistent database let nonexistent = Uuid::new_v4(); assert_error!( IoxObjectStore::load(Arc::clone(&object_store), nonexistent).await, IoxObjectStoreError::NoRulesFound { .. 
}, ); // Create a database let db = Uuid::new_v4(); create_database(Arc::clone(&object_store), db).await; // Load should return that database let returned = IoxObjectStore::load(Arc::clone(&object_store), db) .await .unwrap(); assert_eq!( returned.root_path(), format!("{}/{}/", ALL_DATABASES_DIRECTORY, db) ); } #[tokio::test] async fn round_trip_through_object_store_root_path() { let object_store = make_object_store(); // Create a new iox object store that doesn't exist yet let uuid = Uuid::new_v4(); let db_iox_store = create_database(Arc::clone(&object_store), uuid).await; // Save its root path as the server config would let saved_root_path = db_iox_store.root_path(); // Simulate server restarting and reading the server config to construct iox object stores, // the database files in object storage should be found in the same root let restarted_iox_store = IoxObjectStore::load_at_root_path(Arc::clone(&object_store), &saved_root_path) .await .unwrap(); assert_eq!(db_iox_store.root_path(), restarted_iox_store.root_path()); // This should also equal root_path_for, which can be constructed even if a database // hasn't been fully initialized yet let alternate = IoxObjectStore::root_path_for(&*object_store, uuid).to_string(); assert_eq!(alternate, saved_root_path); } #[tokio::test] async fn test_ng_parquet_io() { let object_store = make_object_store(); let iox_object_store = Arc::new(IoxObjectStore::existing( Arc::clone(&object_store), IoxObjectStore::root_path_for(&*object_store, uuid::Uuid::new_v4()), )); let pfp = ParquetFilePath::new_new_gen( NamespaceId::new(1), TableId::new(2), SequencerId::new(3), PartitionId::new(4), Uuid::nil(), ); // file does not exist yet iox_object_store.get_parquet_file(&pfp).await.unwrap_err(); // create file let content = Bytes::from(b"foo".to_vec()); iox_object_store .put_parquet_file(&pfp, content.clone()) .await .unwrap(); let actual = iox_object_store .get_parquet_file(&pfp) .await .unwrap() .bytes() .await .unwrap(); assert_eq!(content.to_vec(), actual); // delete file iox_object_store.delete_parquet_file(&pfp).await.unwrap(); iox_object_store.get_parquet_file(&pfp).await.unwrap_err(); } }
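// --- Editor's usage sketch (not part of the original file) -------------------
// A hedged end-to-end example of the create -> write-rules -> reload-by-UUID
// lifecycle that the tests above exercise piecemeal. `demo_lifecycle` is a
// hypothetical name; it assumes the same in-memory `ObjectStoreImpl` the
// tests use, and collapses error handling to `unwrap` for brevity.
async fn demo_lifecycle() {
    use bytes::Bytes;
    use object_store::ObjectStoreImpl;
    use std::sync::Arc;
    use uuid::Uuid;

    let object_store: Arc<DynObjectStore> = Arc::new(ObjectStoreImpl::new_in_memory());
    let uuid = Uuid::new_v4();

    // `create` refuses to proceed if anything already exists under this UUID's root.
    let iox_store = IoxObjectStore::create(Arc::clone(&object_store), uuid)
        .await
        .unwrap();

    // A database only counts as "findable" once its rules file exists.
    iox_store
        .put_database_rules_file(Bytes::from("rules"))
        .await
        .unwrap();

    // A restarted server can now locate the same database from its UUID alone.
    let reloaded = IoxObjectStore::load(object_store, uuid).await.unwrap();
    assert_eq!(iox_store.root_path(), reloaded.root_path());
}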
35.896512
100
0.616954
e23053de9feb1d2e9b0bf3e73d93ce3cd268e71c
4,972
use json;
use std::io;
use std::process::Command;

/// Hold information about a hard drive obtained from lshw.
pub struct HdInfo {
    /// The product string of the hard drive, usually the human-readable product name.
    pub product: String,
    /// The "filesystem" name of the hard drive.
    pub logical_name: String,
    /// The size of the hard drive, hopefully in bytes.
    pub size: f64,
    /// The actual unit used for `size`. At the time of writing,
    /// lshw will always use bytes, however this is not guaranteed.
    pub units: String,
    /// The serial number of the drive.
    pub serial: String,
}

/// Attempts to get the serial number of the machine running the application.
///
/// # Returns
/// - `Err` if lshw did not run correctly, or
/// - `Ok` with a blank string if lshw's output was valid but did not contain a serial number, or
/// - `Ok` with the parsed serial number
///
/// # Panics
/// - If lshw's output cannot be turned into a string
/// - If lshw's output cannot be parsed as JSON
pub fn get_pc_serial() -> io::Result<String> {
    let output = Command::new("lshw").arg("-quiet").arg("-json").output()?;
    let output = String::from_utf8(output.stdout).expect("Could not parse output of lshw");
    let parsed = json::parse(&output).expect("Could not parse output of lshw");
    // `as_str()` returns `None` for null or non-string values, which covers
    // both of the checks we need in one step.
    Ok(String::from(parsed["serial"].as_str().unwrap_or("")))
}

/// Get a list of all disks in the machine.
///
/// # Returns
/// - `Err` if lshw did not run correctly
/// - `Ok` with an empty vector if no disks were returned by lshw
/// - `Ok` with a vector containing all the disks lshw returned
///
/// # Panics
/// - If lshw's output cannot be turned into a string
/// - If lshw's output cannot be parsed as JSON
/// - If an element in the parsed output is called "children" but is not an array
pub fn get_all_disks() -> io::Result<Vec<HdInfo>> {
    let output = Command::new("lshw").arg("-quiet").arg("-json").output()?;
    let output = String::from_utf8(output.stdout).expect("Could not parse output of lshw");
    let parsed = json::parse(&output).expect("Could not parse output of lshw");
    let mut list = Vec::<HdInfo>::new();
    if parsed["children"].is_null() {
        return Ok(list);
    }
    if let json::JsonValue::Array(ref children) = parsed["children"] {
        parse_children(children, &mut list);
    } else {
        panic!("Could not parse output of lshw: invalid value for children");
    }
    Ok(list)
}

/// Parse a list of children and add any drives found to the list of drives provided. Recursive.
///
/// # Arguments
/// - children: A list of children
/// - list: A mutable reference to the list being compiled
fn parse_children(children: &[json::JsonValue], list: &mut Vec<HdInfo>) {
    for child in children {
        if !child["children"].is_null() {
            if let json::JsonValue::Array(ref grandchildren) = child["children"] {
                parse_children(grandchildren, list);
            } else {
                panic!("Could not parse output of lshw: invalid value for children");
            }
        }
        if child["class"] == "disk" && child["id"] == "disk" {
            // `as_str()`/`as_f64()` yield `None` for null or mistyped fields,
            // so every field falls back to a sensible default.
            list.push(HdInfo {
                product: String::from(child["product"].as_str().unwrap_or("")),
                logical_name: String::from(child["logicalname"].as_str().unwrap_or("")),
                size: child["size"].as_f64().unwrap_or(0_f64),
                units: String::from(child["units"].as_str().unwrap_or("")),
                serial: String::from(child["serial"].as_str().unwrap_or("")),
            });
        }
    }
}
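// --- Editor's usage sketch (not from the original file) ----------------------
// How the two public helpers compose. It assumes `lshw` is installed and on
// PATH, since both helpers shell out to it.
fn main() -> io::Result<()> {
    println!("machine serial: {}", get_pc_serial()?);
    for disk in get_all_disks()? {
        // `size` is expressed in `units` (currently always bytes from lshw).
        println!(
            "{} at {}: {} {} (serial {})",
            disk.product, disk.logical_name, disk.size, disk.units, disk.serial
        );
    }
    Ok(())
}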
34.527778
100
0.569791
f4c1c3f160e84dbfbda9bd24f25b07c6e843f878
2,311
/*
 * Copyright (C) 2021 Aravinth Manivannan <[email protected]>
 *
 * Use of this source code is governed by the Apache 2.0 and/or the MIT
 * License.
 */
#![warn(missing_docs, missing_debug_implementations, rust_2018_idioms)]

//! # What is cache busting?
//!
//! To optimise network load time, browsers cache static files. Caching
//! greatly improves performance but how do you inform browsers to
//! invalidate cache when your files have changed?
//!
//! Cache busting is a simple but effective solution for this issue. There
//! are several ways to achieve this but the way this library does this is
//! by changing file names to include the hash of the files' contents.
//!
//! So if you have `bundle.js`, it will become
//! `bundle.<long-sha256-hash>.js`. This lets you set a super long cache age
//! because, with the file names changing whenever their contents change, the
//! path the browser requests will change too. So as far as the browser is
//! concerned, you are trying to load a file that it doesn't have. Pretty
//! neat, isn't it?
//!
//! ## Example:
//!
//! - `build.rs`
//! ```no_run
//! use cache_buster::BusterBuilder;
//!
//! // note: add error checking yourself.
//! // println!("cargo:rustc-env=GIT_process={}", git_process);
//! let types = vec![
//!     mime::IMAGE_PNG,
//!     mime::IMAGE_SVG,
//!     mime::IMAGE_JPEG,
//!     mime::IMAGE_GIF,
//! ];
//!
//! let config = BusterBuilder::default()
//!     .source("./dist")
//!     .result("./prod")
//!     .mime_types(types)
//!     .follow_links(true)
//!     .build()
//!     .unwrap();
//!
//! config.process().unwrap();
//! ```
//! - `main.rs`:
//!
//! Module describing the runtime component for fetching modified filenames
//!
//! Add the following to your program to load the filemap during compile time:
//!
//! ```no_run
//! use cache_buster::Files;
//! use cache_buster::CACHE_BUSTER_DATA_FILE;
//!
//! let files = Files::new(CACHE_BUSTER_DATA_FILE);
//! // the path to the file before setting up for cache busting
//! files.get("./dist/github.svg");
//! ```

pub mod processor;
pub use processor::BusterBuilder;
pub use processor::NoHashCategory;
pub mod filemap;
pub use filemap::Files;

/// file to which filemap is written during compilation
/// include this in `.gitignore`
pub const CACHE_BUSTER_DATA_FILE: &str = "./src/cache_buster_data.json";
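// --- Editor's illustration (not part of this crate) --------------------------
// The renaming scheme the docs above describe, written out by hand for a
// single file. It assumes the `sha2` and `hex` crates; the crate's own
// processor additionally walks directories, filters by mime type, and records
// the old-name -> new-name filemap.
use sha2::{Digest, Sha256};
use std::path::Path;

fn busted_name(path: &Path, contents: &[u8]) -> String {
    let hash = hex::encode(Sha256::digest(contents));
    let stem = path.file_stem().unwrap().to_string_lossy();
    match path.extension() {
        // bundle.js -> bundle.<long-sha256-hash>.js
        Some(ext) => format!("{}.{}.{}", stem, hash, ext.to_string_lossy()),
        None => format!("{}.{}", stem, hash),
    }
}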
31.657534
78
0.674167
28dc2c902e77593ef0700a9baed019e1defe04ed
1,876
macro_rules! sep_for { ($var:ident in $iter:expr; sep $sep:block; $($rem:tt)* ) => {{ let mut first = true; for $var in $iter { if first { first = false; } else { $sep } $( $rem )* } }} } macro_rules! deref0 { (+mut $name:ident => $tp:ty) => ( deref0!{-mut $name => $tp } impl ::std::ops::DerefMut for $name { fn deref_mut( &mut self ) -> &mut Self::Target { &mut self.0 } } ); (-mut $name:ident => $tp:ty) => ( impl ::std::ops::Deref for $name { type Target = $tp; fn deref( &self ) -> &Self::Target { &self.0 } } ); } #[cfg(test)] macro_rules! assert_ok { ($val:expr) => ({ match $val { Ok( res ) => res, Err( err ) => panic!( "expected Ok(..) got Err({:?})", err) } }); ($val:expr, $ctx:expr) => ({ match $val { Ok( res ) => res, Err( err ) => panic!( "expected Ok(..) got Err({:?}) [ctx: {:?}]", err, $ctx) } }); } #[cfg(test)] macro_rules! assert_err { ($val:expr) => ({ match $val { Ok( val ) => panic!( "expected Err(..) got Ok({:?})", val), Err( err ) => err, } }); ($val:expr, $ctx:expr) => ({ match $val { Ok( val ) => panic!( "expected Err(..) got Ok({:?}) [ctx: {:?}]", val, $ctx), Err( err ) => err, } }); } #[cfg(test)] macro_rules! test { ($name:ident $code:block) => ( #[test] fn $name() { fn inner() -> Result<(), ::failure::Error> { $code; #[allow(unreachable_code)] Ok(()) } inner().unwrap(); } ); }
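// --- Editor's sketch of hypothetical call sites (not from the original) ------
// `sep_for!` runs the separator block between iterations but not before the
// first one; `deref0!` derives `Deref` (and, with `+mut`, `DerefMut`) for a
// newtype over its single field.
fn join_with_commas(items: &[&str]) -> String {
    let mut out = String::new();
    sep_for! { item in items;
        sep { out.push_str(", "); };
        out.push_str(item);
    }
    out
}

struct Meters(f64);
deref0! { +mut Meters => f64 }

fn demo() {
    assert_eq!(join_with_commas(&["a", "b", "c"]), "a, b, c");
    let mut m = Meters(1.5);
    *m += 0.5; // goes through the generated DerefMut
    assert_eq!(*m, 2.0);
}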
23.45
89
0.373134
f7a90333ef239c2098544a716472e05e29617a90
23,677
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use io::prelude::*; use core::convert::TryInto; use cmp; use io::{self, Initializer, SeekFrom, Error, ErrorKind}; /// A `Cursor` wraps an in-memory buffer and provides it with a /// [`Seek`] implementation. /// /// `Cursor`s are used with in-memory buffers, anything implementing /// `AsRef<[u8]>`, to allow them to implement [`Read`] and/or [`Write`], /// allowing these buffers to be used anywhere you might use a reader or writer /// that does actual I/O. /// /// The standard library implements some I/O traits on various types which /// are commonly used as a buffer, like `Cursor<`[`Vec`]`<u8>>` and /// `Cursor<`[`&[u8]`][bytes]`>`. /// /// # Examples /// /// We may want to write bytes to a [`File`] in our production /// code, but use an in-memory buffer in our tests. We can do this with /// `Cursor`: /// /// [`Seek`]: trait.Seek.html /// [`Read`]: ../../std/io/trait.Read.html /// [`Write`]: ../../std/io/trait.Write.html /// [`Vec`]: ../../std/vec/struct.Vec.html /// [bytes]: ../../std/primitive.slice.html /// [`File`]: ../fs/struct.File.html /// /// ```no_run /// use std::io::prelude::*; /// use std::io::{self, SeekFrom}; /// use std::fs::File; /// /// // a library function we've written /// fn write_ten_bytes_at_end<W: Write + Seek>(writer: &mut W) -> io::Result<()> { /// writer.seek(SeekFrom::End(-10))?; /// /// for i in 0..10 { /// writer.write(&[i])?; /// } /// /// // all went well /// Ok(()) /// } /// /// # fn foo() -> io::Result<()> { /// // Here's some code that uses this library function. /// // /// // We might want to use a BufReader here for efficiency, but let's /// // keep this example focused. /// let mut file = File::create("foo.txt")?; /// /// write_ten_bytes_at_end(&mut file)?; /// # Ok(()) /// # } /// /// // now let's write a test /// #[test] /// fn test_writes_bytes() { /// // setting up a real File is much slower than an in-memory buffer, /// // let's use a cursor instead /// use std::io::Cursor; /// let mut buff = Cursor::new(vec![0; 15]); /// /// write_ten_bytes_at_end(&mut buff).unwrap(); /// /// assert_eq!(&buff.get_ref()[5..15], &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]); /// } /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[derive(Clone, Debug)] pub struct Cursor<T> { inner: T, pos: u64, } impl<T> Cursor<T> { /// Creates a new cursor wrapping the provided underlying in-memory buffer. /// /// Cursor initial position is `0` even if underlying buffer (e.g., `Vec`) /// is not empty. So writing to cursor starts with overwriting `Vec` /// content, not with appending to it. /// /// # Examples /// /// ``` /// use std::io::Cursor; /// /// let buff = Cursor::new(Vec::new()); /// # fn force_inference(_: &Cursor<Vec<u8>>) {} /// # force_inference(&buff); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn new(inner: T) -> Cursor<T> { Cursor { pos: 0, inner: inner } } /// Consumes this cursor, returning the underlying value. 
/// /// # Examples /// /// ``` /// use std::io::Cursor; /// /// let buff = Cursor::new(Vec::new()); /// # fn force_inference(_: &Cursor<Vec<u8>>) {} /// # force_inference(&buff); /// /// let vec = buff.into_inner(); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn into_inner(self) -> T { self.inner } /// Gets a reference to the underlying value in this cursor. /// /// # Examples /// /// ``` /// use std::io::Cursor; /// /// let buff = Cursor::new(Vec::new()); /// # fn force_inference(_: &Cursor<Vec<u8>>) {} /// # force_inference(&buff); /// /// let reference = buff.get_ref(); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn get_ref(&self) -> &T { &self.inner } /// Gets a mutable reference to the underlying value in this cursor. /// /// Care should be taken to avoid modifying the internal I/O state of the /// underlying value as it may corrupt this cursor's position. /// /// # Examples /// /// ``` /// use std::io::Cursor; /// /// let mut buff = Cursor::new(Vec::new()); /// # fn force_inference(_: &Cursor<Vec<u8>>) {} /// # force_inference(&buff); /// /// let reference = buff.get_mut(); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn get_mut(&mut self) -> &mut T { &mut self.inner } /// Returns the current position of this cursor. /// /// # Examples /// /// ``` /// use std::io::Cursor; /// use std::io::prelude::*; /// use std::io::SeekFrom; /// /// let mut buff = Cursor::new(vec![1, 2, 3, 4, 5]); /// /// assert_eq!(buff.position(), 0); /// /// buff.seek(SeekFrom::Current(2)).unwrap(); /// assert_eq!(buff.position(), 2); /// /// buff.seek(SeekFrom::Current(-1)).unwrap(); /// assert_eq!(buff.position(), 1); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn position(&self) -> u64 { self.pos } /// Sets the position of this cursor. 
    ///
    /// # Examples
    ///
    /// ```
    /// use std::io::Cursor;
    ///
    /// let mut buff = Cursor::new(vec![1, 2, 3, 4, 5]);
    ///
    /// assert_eq!(buff.position(), 0);
    ///
    /// buff.set_position(2);
    /// assert_eq!(buff.position(), 2);
    ///
    /// buff.set_position(4);
    /// assert_eq!(buff.position(), 4);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn set_position(&mut self, pos: u64) { self.pos = pos; }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T> io::Seek for Cursor<T> where T: AsRef<[u8]> {
    fn seek(&mut self, style: SeekFrom) -> io::Result<u64> {
        let (base_pos, offset) = match style {
            SeekFrom::Start(n) => {
                self.pos = n;
                return Ok(n);
            }
            SeekFrom::End(n) => (self.inner.as_ref().len() as u64, n),
            SeekFrom::Current(n) => (self.pos, n),
        };
        let new_pos = if offset >= 0 {
            base_pos.checked_add(offset as u64)
        } else {
            base_pos.checked_sub((offset.wrapping_neg()) as u64)
        };
        match new_pos {
            Some(n) => {self.pos = n; Ok(self.pos)}
            None => Err(Error::new(ErrorKind::InvalidInput,
                           "invalid seek to a negative or overflowing position"))
        }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Read for Cursor<T> where T: AsRef<[u8]> {
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        let n = Read::read(&mut self.fill_buf()?, buf)?;
        self.pos += n as u64;
        Ok(n)
    }

    fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> {
        let n = buf.len();
        Read::read_exact(&mut self.fill_buf()?, buf)?;
        self.pos += n as u64;
        Ok(())
    }

    #[inline]
    unsafe fn initializer(&self) -> Initializer {
        Initializer::nop()
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T> BufRead for Cursor<T> where T: AsRef<[u8]> {
    fn fill_buf(&mut self) -> io::Result<&[u8]> {
        let amt = cmp::min(self.pos, self.inner.as_ref().len() as u64);
        Ok(&self.inner.as_ref()[(amt as usize)..])
    }
    fn consume(&mut self, amt: usize) { self.pos += amt as u64; }
}

// Non-resizing write implementation
fn slice_write(pos_mut: &mut u64, slice: &mut [u8], buf: &[u8]) -> io::Result<usize> {
    let pos = cmp::min(*pos_mut, slice.len() as u64);
    let amt = (&mut slice[(pos as usize)..]).write(buf)?;
    *pos_mut += amt as u64;
    Ok(amt)
}

// Resizing write implementation
fn vec_write(pos_mut: &mut u64, vec: &mut Vec<u8>, buf: &[u8]) -> io::Result<usize> {
    let pos: usize = (*pos_mut).try_into().map_err(|_| {
        Error::new(ErrorKind::InvalidInput,
                   "cursor position exceeds maximum possible vector length")
    })?;
    // Make sure the internal buffer is at least as big as where we
    // currently are
    let len = vec.len();
    if len < pos {
        // use `resize` so that the zero filling is as efficient as possible
        vec.resize(pos, 0);
    }
    // Figure out what bytes will be used to overwrite what's currently
    // there (left), and what will be appended on the end (right)
    {
        let space = vec.len() - pos;
        let (left, right) = buf.split_at(cmp::min(space, buf.len()));
        vec[pos..pos + left.len()].copy_from_slice(left);
        vec.extend_from_slice(right);
    }
    // Bump us forward
    *pos_mut = (pos + buf.len()) as u64;
    Ok(buf.len())
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a> Write for Cursor<&'a mut [u8]> {
    #[inline]
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        slice_write(&mut self.pos, self.inner, buf)
    }
    fn flush(&mut self) -> io::Result<()> { Ok(()) }
}

#[stable(feature = "cursor_mut_vec", since = "1.25.0")]
impl<'a> Write for Cursor<&'a mut Vec<u8>> {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        vec_write(&mut self.pos, self.inner, buf)
    }
    fn flush(&mut self) -> io::Result<()> { Ok(()) }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl Write for Cursor<Vec<u8>> {
    fn write(&mut
self, buf: &[u8]) -> io::Result<usize> { vec_write(&mut self.pos, &mut self.inner, buf) } fn flush(&mut self) -> io::Result<()> { Ok(()) } } #[stable(feature = "cursor_box_slice", since = "1.5.0")] impl Write for Cursor<Box<[u8]>> { #[inline] fn write(&mut self, buf: &[u8]) -> io::Result<usize> { slice_write(&mut self.pos, &mut self.inner, buf) } fn flush(&mut self) -> io::Result<()> { Ok(()) } } #[cfg(test)] mod tests { use io::prelude::*; use io::{Cursor, SeekFrom}; #[test] fn test_vec_writer() { let mut writer = Vec::new(); assert_eq!(writer.write(&[0]).unwrap(), 1); assert_eq!(writer.write(&[1, 2, 3]).unwrap(), 3); assert_eq!(writer.write(&[4, 5, 6, 7]).unwrap(), 4); let b: &[_] = &[0, 1, 2, 3, 4, 5, 6, 7]; assert_eq!(writer, b); } #[test] fn test_mem_writer() { let mut writer = Cursor::new(Vec::new()); assert_eq!(writer.write(&[0]).unwrap(), 1); assert_eq!(writer.write(&[1, 2, 3]).unwrap(), 3); assert_eq!(writer.write(&[4, 5, 6, 7]).unwrap(), 4); let b: &[_] = &[0, 1, 2, 3, 4, 5, 6, 7]; assert_eq!(&writer.get_ref()[..], b); } #[test] fn test_mem_mut_writer() { let mut vec = Vec::new(); let mut writer = Cursor::new(&mut vec); assert_eq!(writer.write(&[0]).unwrap(), 1); assert_eq!(writer.write(&[1, 2, 3]).unwrap(), 3); assert_eq!(writer.write(&[4, 5, 6, 7]).unwrap(), 4); let b: &[_] = &[0, 1, 2, 3, 4, 5, 6, 7]; assert_eq!(&writer.get_ref()[..], b); } #[test] fn test_box_slice_writer() { let mut writer = Cursor::new(vec![0u8; 9].into_boxed_slice()); assert_eq!(writer.position(), 0); assert_eq!(writer.write(&[0]).unwrap(), 1); assert_eq!(writer.position(), 1); assert_eq!(writer.write(&[1, 2, 3]).unwrap(), 3); assert_eq!(writer.write(&[4, 5, 6, 7]).unwrap(), 4); assert_eq!(writer.position(), 8); assert_eq!(writer.write(&[]).unwrap(), 0); assert_eq!(writer.position(), 8); assert_eq!(writer.write(&[8, 9]).unwrap(), 1); assert_eq!(writer.write(&[10]).unwrap(), 0); let b: &[_] = &[0, 1, 2, 3, 4, 5, 6, 7, 8]; assert_eq!(&**writer.get_ref(), b); } #[test] fn test_buf_writer() { let mut buf = [0 as u8; 9]; { let mut writer = Cursor::new(&mut buf[..]); assert_eq!(writer.position(), 0); assert_eq!(writer.write(&[0]).unwrap(), 1); assert_eq!(writer.position(), 1); assert_eq!(writer.write(&[1, 2, 3]).unwrap(), 3); assert_eq!(writer.write(&[4, 5, 6, 7]).unwrap(), 4); assert_eq!(writer.position(), 8); assert_eq!(writer.write(&[]).unwrap(), 0); assert_eq!(writer.position(), 8); assert_eq!(writer.write(&[8, 9]).unwrap(), 1); assert_eq!(writer.write(&[10]).unwrap(), 0); } let b: &[_] = &[0, 1, 2, 3, 4, 5, 6, 7, 8]; assert_eq!(buf, b); } #[test] fn test_buf_writer_seek() { let mut buf = [0 as u8; 8]; { let mut writer = Cursor::new(&mut buf[..]); assert_eq!(writer.position(), 0); assert_eq!(writer.write(&[1]).unwrap(), 1); assert_eq!(writer.position(), 1); assert_eq!(writer.seek(SeekFrom::Start(2)).unwrap(), 2); assert_eq!(writer.position(), 2); assert_eq!(writer.write(&[2]).unwrap(), 1); assert_eq!(writer.position(), 3); assert_eq!(writer.seek(SeekFrom::Current(-2)).unwrap(), 1); assert_eq!(writer.position(), 1); assert_eq!(writer.write(&[3]).unwrap(), 1); assert_eq!(writer.position(), 2); assert_eq!(writer.seek(SeekFrom::End(-1)).unwrap(), 7); assert_eq!(writer.position(), 7); assert_eq!(writer.write(&[4]).unwrap(), 1); assert_eq!(writer.position(), 8); } let b: &[_] = &[1, 3, 2, 0, 0, 0, 0, 4]; assert_eq!(buf, b); } #[test] fn test_buf_writer_error() { let mut buf = [0 as u8; 2]; let mut writer = Cursor::new(&mut buf[..]); assert_eq!(writer.write(&[0]).unwrap(), 1); assert_eq!(writer.write(&[0, 
0]).unwrap(), 1); assert_eq!(writer.write(&[0, 0]).unwrap(), 0); } #[test] fn test_mem_reader() { let mut reader = Cursor::new(vec![0, 1, 2, 3, 4, 5, 6, 7]); let mut buf = []; assert_eq!(reader.read(&mut buf).unwrap(), 0); assert_eq!(reader.position(), 0); let mut buf = [0]; assert_eq!(reader.read(&mut buf).unwrap(), 1); assert_eq!(reader.position(), 1); let b: &[_] = &[0]; assert_eq!(buf, b); let mut buf = [0; 4]; assert_eq!(reader.read(&mut buf).unwrap(), 4); assert_eq!(reader.position(), 5); let b: &[_] = &[1, 2, 3, 4]; assert_eq!(buf, b); assert_eq!(reader.read(&mut buf).unwrap(), 3); let b: &[_] = &[5, 6, 7]; assert_eq!(&buf[..3], b); assert_eq!(reader.read(&mut buf).unwrap(), 0); } #[test] fn test_boxed_slice_reader() { let mut reader = Cursor::new(vec![0, 1, 2, 3, 4, 5, 6, 7].into_boxed_slice()); let mut buf = []; assert_eq!(reader.read(&mut buf).unwrap(), 0); assert_eq!(reader.position(), 0); let mut buf = [0]; assert_eq!(reader.read(&mut buf).unwrap(), 1); assert_eq!(reader.position(), 1); let b: &[_] = &[0]; assert_eq!(buf, b); let mut buf = [0; 4]; assert_eq!(reader.read(&mut buf).unwrap(), 4); assert_eq!(reader.position(), 5); let b: &[_] = &[1, 2, 3, 4]; assert_eq!(buf, b); assert_eq!(reader.read(&mut buf).unwrap(), 3); let b: &[_] = &[5, 6, 7]; assert_eq!(&buf[..3], b); assert_eq!(reader.read(&mut buf).unwrap(), 0); } #[test] fn read_to_end() { let mut reader = Cursor::new(vec![0, 1, 2, 3, 4, 5, 6, 7]); let mut v = Vec::new(); reader.read_to_end(&mut v).unwrap(); assert_eq!(v, [0, 1, 2, 3, 4, 5, 6, 7]); } #[test] fn test_slice_reader() { let in_buf = vec![0, 1, 2, 3, 4, 5, 6, 7]; let reader = &mut &in_buf[..]; let mut buf = []; assert_eq!(reader.read(&mut buf).unwrap(), 0); let mut buf = [0]; assert_eq!(reader.read(&mut buf).unwrap(), 1); assert_eq!(reader.len(), 7); let b: &[_] = &[0]; assert_eq!(&buf[..], b); let mut buf = [0; 4]; assert_eq!(reader.read(&mut buf).unwrap(), 4); assert_eq!(reader.len(), 3); let b: &[_] = &[1, 2, 3, 4]; assert_eq!(&buf[..], b); assert_eq!(reader.read(&mut buf).unwrap(), 3); let b: &[_] = &[5, 6, 7]; assert_eq!(&buf[..3], b); assert_eq!(reader.read(&mut buf).unwrap(), 0); } #[test] fn test_read_exact() { let in_buf = vec![0, 1, 2, 3, 4, 5, 6, 7]; let reader = &mut &in_buf[..]; let mut buf = []; assert!(reader.read_exact(&mut buf).is_ok()); let mut buf = [8]; assert!(reader.read_exact(&mut buf).is_ok()); assert_eq!(buf[0], 0); assert_eq!(reader.len(), 7); let mut buf = [0, 0, 0, 0, 0, 0, 0]; assert!(reader.read_exact(&mut buf).is_ok()); assert_eq!(buf, [1, 2, 3, 4, 5, 6, 7]); assert_eq!(reader.len(), 0); let mut buf = [0]; assert!(reader.read_exact(&mut buf).is_err()); } #[test] fn test_buf_reader() { let in_buf = vec![0, 1, 2, 3, 4, 5, 6, 7]; let mut reader = Cursor::new(&in_buf[..]); let mut buf = []; assert_eq!(reader.read(&mut buf).unwrap(), 0); assert_eq!(reader.position(), 0); let mut buf = [0]; assert_eq!(reader.read(&mut buf).unwrap(), 1); assert_eq!(reader.position(), 1); let b: &[_] = &[0]; assert_eq!(buf, b); let mut buf = [0; 4]; assert_eq!(reader.read(&mut buf).unwrap(), 4); assert_eq!(reader.position(), 5); let b: &[_] = &[1, 2, 3, 4]; assert_eq!(buf, b); assert_eq!(reader.read(&mut buf).unwrap(), 3); let b: &[_] = &[5, 6, 7]; assert_eq!(&buf[..3], b); assert_eq!(reader.read(&mut buf).unwrap(), 0); } #[test] fn seek_past_end() { let buf = [0xff]; let mut r = Cursor::new(&buf[..]); assert_eq!(r.seek(SeekFrom::Start(10)).unwrap(), 10); assert_eq!(r.read(&mut [0]).unwrap(), 0); let mut r = Cursor::new(vec![10]); 
assert_eq!(r.seek(SeekFrom::Start(10)).unwrap(), 10); assert_eq!(r.read(&mut [0]).unwrap(), 0); let mut buf = [0]; let mut r = Cursor::new(&mut buf[..]); assert_eq!(r.seek(SeekFrom::Start(10)).unwrap(), 10); assert_eq!(r.write(&[3]).unwrap(), 0); let mut r = Cursor::new(vec![10].into_boxed_slice()); assert_eq!(r.seek(SeekFrom::Start(10)).unwrap(), 10); assert_eq!(r.write(&[3]).unwrap(), 0); } #[test] fn seek_past_i64() { let buf = [0xff]; let mut r = Cursor::new(&buf[..]); assert_eq!(r.seek(SeekFrom::Start(6)).unwrap(), 6); assert_eq!(r.seek(SeekFrom::Current(0x7ffffffffffffff0)).unwrap(), 0x7ffffffffffffff6); assert_eq!(r.seek(SeekFrom::Current(0x10)).unwrap(), 0x8000000000000006); assert_eq!(r.seek(SeekFrom::Current(0)).unwrap(), 0x8000000000000006); assert!(r.seek(SeekFrom::Current(0x7ffffffffffffffd)).is_err()); assert_eq!(r.seek(SeekFrom::Current(-0x8000000000000000)).unwrap(), 6); let mut r = Cursor::new(vec![10]); assert_eq!(r.seek(SeekFrom::Start(6)).unwrap(), 6); assert_eq!(r.seek(SeekFrom::Current(0x7ffffffffffffff0)).unwrap(), 0x7ffffffffffffff6); assert_eq!(r.seek(SeekFrom::Current(0x10)).unwrap(), 0x8000000000000006); assert_eq!(r.seek(SeekFrom::Current(0)).unwrap(), 0x8000000000000006); assert!(r.seek(SeekFrom::Current(0x7ffffffffffffffd)).is_err()); assert_eq!(r.seek(SeekFrom::Current(-0x8000000000000000)).unwrap(), 6); let mut buf = [0]; let mut r = Cursor::new(&mut buf[..]); assert_eq!(r.seek(SeekFrom::Start(6)).unwrap(), 6); assert_eq!(r.seek(SeekFrom::Current(0x7ffffffffffffff0)).unwrap(), 0x7ffffffffffffff6); assert_eq!(r.seek(SeekFrom::Current(0x10)).unwrap(), 0x8000000000000006); assert_eq!(r.seek(SeekFrom::Current(0)).unwrap(), 0x8000000000000006); assert!(r.seek(SeekFrom::Current(0x7ffffffffffffffd)).is_err()); assert_eq!(r.seek(SeekFrom::Current(-0x8000000000000000)).unwrap(), 6); let mut r = Cursor::new(vec![10].into_boxed_slice()); assert_eq!(r.seek(SeekFrom::Start(6)).unwrap(), 6); assert_eq!(r.seek(SeekFrom::Current(0x7ffffffffffffff0)).unwrap(), 0x7ffffffffffffff6); assert_eq!(r.seek(SeekFrom::Current(0x10)).unwrap(), 0x8000000000000006); assert_eq!(r.seek(SeekFrom::Current(0)).unwrap(), 0x8000000000000006); assert!(r.seek(SeekFrom::Current(0x7ffffffffffffffd)).is_err()); assert_eq!(r.seek(SeekFrom::Current(-0x8000000000000000)).unwrap(), 6); } #[test] fn seek_before_0() { let buf = [0xff]; let mut r = Cursor::new(&buf[..]); assert!(r.seek(SeekFrom::End(-2)).is_err()); let mut r = Cursor::new(vec![10]); assert!(r.seek(SeekFrom::End(-2)).is_err()); let mut buf = [0]; let mut r = Cursor::new(&mut buf[..]); assert!(r.seek(SeekFrom::End(-2)).is_err()); let mut r = Cursor::new(vec![10].into_boxed_slice()); assert!(r.seek(SeekFrom::End(-2)).is_err()); } #[test] fn test_seekable_mem_writer() { let mut writer = Cursor::new(Vec::<u8>::new()); assert_eq!(writer.position(), 0); assert_eq!(writer.write(&[0]).unwrap(), 1); assert_eq!(writer.position(), 1); assert_eq!(writer.write(&[1, 2, 3]).unwrap(), 3); assert_eq!(writer.write(&[4, 5, 6, 7]).unwrap(), 4); assert_eq!(writer.position(), 8); let b: &[_] = &[0, 1, 2, 3, 4, 5, 6, 7]; assert_eq!(&writer.get_ref()[..], b); assert_eq!(writer.seek(SeekFrom::Start(0)).unwrap(), 0); assert_eq!(writer.position(), 0); assert_eq!(writer.write(&[3, 4]).unwrap(), 2); let b: &[_] = &[3, 4, 2, 3, 4, 5, 6, 7]; assert_eq!(&writer.get_ref()[..], b); assert_eq!(writer.seek(SeekFrom::Current(1)).unwrap(), 3); assert_eq!(writer.write(&[0, 1]).unwrap(), 2); let b: &[_] = &[3, 4, 2, 0, 1, 5, 6, 7]; assert_eq!(&writer.get_ref()[..], b); 
assert_eq!(writer.seek(SeekFrom::End(-1)).unwrap(), 7); assert_eq!(writer.write(&[1, 2]).unwrap(), 2); let b: &[_] = &[3, 4, 2, 0, 1, 5, 6, 1, 2]; assert_eq!(&writer.get_ref()[..], b); assert_eq!(writer.seek(SeekFrom::End(1)).unwrap(), 10); assert_eq!(writer.write(&[1]).unwrap(), 1); let b: &[_] = &[3, 4, 2, 0, 1, 5, 6, 1, 2, 0, 1]; assert_eq!(&writer.get_ref()[..], b); } #[test] fn vec_seek_past_end() { let mut r = Cursor::new(Vec::new()); assert_eq!(r.seek(SeekFrom::Start(10)).unwrap(), 10); assert_eq!(r.write(&[3]).unwrap(), 1); } #[test] fn vec_seek_before_0() { let mut r = Cursor::new(Vec::new()); assert!(r.seek(SeekFrom::End(-2)).is_err()); } #[test] #[cfg(target_pointer_width = "32")] fn vec_seek_and_write_past_usize_max() { let mut c = Cursor::new(Vec::new()); c.set_position(<usize>::max_value() as u64 + 1); assert!(c.write_all(&[1, 2, 3]).is_err()); } }
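// --- Editor's sketch (not from the original file) ----------------------------
// The basic write-rewind-read round trip the docs above describe, using only
// the public Cursor API.
#[test]
fn cursor_round_trip() {
    let mut c = Cursor::new(Vec::new());
    c.write_all(b"hello").unwrap();      // grows the backing Vec
    c.seek(SeekFrom::Start(0)).unwrap(); // rewind before reading back
    let mut buf = String::new();
    c.read_to_string(&mut buf).unwrap();
    assert_eq!(buf, "hello");
    assert_eq!(c.position(), 5);
}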
34.666179
95
0.531993
d7478801da85a79d0fc2a7228031c175c550436d
3,746
#[doc = "Register `EPINUSE` reader"] pub struct R(crate::pac::generic::R<EPINUSE_SPEC>); impl core::ops::Deref for R { type Target = crate::pac::generic::R<EPINUSE_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl From<crate::pac::generic::R<EPINUSE_SPEC>> for R { #[inline(always)] fn from(reader: crate::pac::generic::R<EPINUSE_SPEC>) -> Self { R(reader) } } #[doc = "Register `EPINUSE` writer"] pub struct W(crate::pac::generic::W<EPINUSE_SPEC>); impl core::ops::Deref for W { type Target = crate::pac::generic::W<EPINUSE_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl core::ops::DerefMut for W { #[inline(always)] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl From<crate::pac::generic::W<EPINUSE_SPEC>> for W { #[inline(always)] fn from(writer: crate::pac::generic::W<EPINUSE_SPEC>) -> Self { W(writer) } } #[doc = "Field `BUF` reader - Buffer in use: This register has one bit per physical endpoint. 0: HW is accessing buffer 0. 1: HW is accessing buffer 1."] pub struct BUF_R(crate::pac::generic::FieldReader<u8, u8>); impl BUF_R { #[inline(always)] pub(crate) fn new(bits: u8) -> Self { BUF_R(crate::pac::generic::FieldReader::new(bits)) } } impl core::ops::Deref for BUF_R { type Target = crate::pac::generic::FieldReader<u8, u8>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `BUF` writer - Buffer in use: This register has one bit per physical endpoint. 0: HW is accessing buffer 0. 1: HW is accessing buffer 1."] pub struct BUF_W<'a> { w: &'a mut W, } impl<'a> BUF_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0xff << 2)) | ((value as u32 & 0xff) << 2); self.w } } impl R { #[doc = "Bits 2:9 - Buffer in use: This register has one bit per physical endpoint. 0: HW is accessing buffer 0. 1: HW is accessing buffer 1."] #[inline(always)] pub fn buf(&self) -> BUF_R { BUF_R::new(((self.bits >> 2) & 0xff) as u8) } } impl W { #[doc = "Bits 2:9 - Buffer in use: This register has one bit per physical endpoint. 0: HW is accessing buffer 0. 1: HW is accessing buffer 1."] #[inline(always)] pub fn buf(&mut self) -> BUF_W { BUF_W { w: self } } #[doc = "Writes raw bits to the register."] #[inline(always)] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.0.bits(bits); self } } #[doc = "USB Endpoint Buffer in use\n\nThis register you can [`read`](crate::pac::generic::generic::Reg::read), [`write_with_zero`](crate::pac::generic::generic::Reg::write_with_zero), [`reset`](crate::pac::generic::generic::Reg::reset), [`write`](crate::pac::generic::generic::Reg::write), [`modify`](crate::pac::generic::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [epinuse](index.html) module"] pub struct EPINUSE_SPEC; impl crate::pac::generic::RegisterSpec for EPINUSE_SPEC { type Ux = u32; } #[doc = "`read()` method returns [epinuse::R](R) reader structure"] impl crate::pac::generic::Readable for EPINUSE_SPEC { type Reader = R; } #[doc = "`write(|w| ..)` method takes [epinuse::W](W) writer structure"] impl crate::pac::generic::Writable for EPINUSE_SPEC { type Writer = W; } #[doc = "`reset()` method sets EPINUSE to value 0"] impl crate::pac::generic::Resettable for EPINUSE_SPEC { #[inline(always)] fn reset_value() -> Self::Ux { 0 } }
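// --- Editor's sketch (not generated code) ------------------------------------
// Typical svd2rust-style access to this register. How you reach the register
// depends on the surrounding PAC; here `epinuse` stands in for a
// `Reg<EPINUSE_SPEC>` field on the peripheral's RegisterBlock, which is an
// assumption about the generated crate layout.
fn snapshot_and_clear(epinuse: &crate::pac::generic::Reg<EPINUSE_SPEC>) -> u8 {
    // Bits 2:9 - one bit per physical endpoint; 0 = HW on buffer 0, 1 = buffer 1.
    let in_use = epinuse.read().buf().bits();
    // `bits` is unsafe because svd2rust cannot check arbitrary raw values
    // against the field's constraints.
    epinuse.modify(|_r, w| unsafe { w.buf().bits(0) });
    in_use
}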
36.019231
484
0.618259
0ecb85ed78b999ffed340609f222a005a81fcce9
8,960
// // mtpng - a multithreaded parallel PNG encoder in Rust // mtpng.rs - CLI utility for testing and Rust API example // // Copyright (c) 2018 Brion Vibber // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. // use std::convert::TryFrom; use std::fs::File; use std::io; use std::io::{Error, ErrorKind, Write}; // CLI options extern crate clap; use clap::{Arg, ArgMatches, Command}; // For reading an existing file extern crate png; extern crate rayon; use rayon::{ThreadPool, ThreadPoolBuilder}; // For timing! extern crate time; use time::OffsetDateTime; // Hey that's us! extern crate mtpng; use mtpng::{ColorType, CompressionLevel, Header}; use mtpng::Mode::{Adaptive, Fixed}; use mtpng::encoder::{Encoder, Options}; use mtpng::Strategy; use mtpng::Filter; pub fn err(payload: &str) -> Error { Error::new(ErrorKind::Other, payload) } fn expand(src: &[u8]) -> io::Result<Vec<u8>> { let mut v = Vec::new(); v.write_all(src)?; Ok(v) } fn read_png(filename: &str) -> io::Result<(Header, Vec<u8>, Option<Vec<u8>>, Option<Vec<u8>>)> { use png::Decoder; use png::Transformations; let mut decoder = Decoder::new(File::open(filename)?); decoder.set_transformations(Transformations::IDENTITY); let mut reader = decoder.read_info()?; let info = reader.info(); let mut header = Header::new(); header.set_size(info.width, info.height)?; header.set_color(ColorType::try_from(info.color_type as u8)?, info.bit_depth as u8)?; let palette = match info.palette { Some(ref cow) => Some(expand(&cow[..])?), None => None, }; let transparency = match info.trns { Some(ref cow) => Some(expand(&cow[..])?), None => None, }; let mut data = vec![0u8; reader.output_buffer_size()]; reader.next_frame(&mut data)?; Ok((header, data, palette, transparency)) } fn write_png(pool: &ThreadPool, args: &ArgMatches, filename: &str, header: &Header, data: &[u8], palette: &Option<Vec<u8>>, transparency: &Option<Vec<u8>>) -> io::Result<()> { let writer = File::create(filename)?; let mut options = Options::new(); // Encoding options options.set_thread_pool(pool)?; match args.value_of("chunk-size") { None => {}, Some(s) => { let n = s.parse::<usize>().map_err(|_e| err("Invalid chunk size"))?; options.set_chunk_size(n)?; }, } match args.value_of("filter") { None => {}, Some("adaptive") => options.set_filter_mode(Adaptive)?, Some("none") => options.set_filter_mode(Fixed(Filter::None))?, Some("up") => options.set_filter_mode(Fixed(Filter::Up))?, Some("sub") => options.set_filter_mode(Fixed(Filter::Sub))?, Some("average") => 
options.set_filter_mode(Fixed(Filter::Average))?,
        Some("paeth") => options.set_filter_mode(Fixed(Filter::Paeth))?,
        _ => return Err(err("Unsupported filter type")),
    }

    match args.value_of("level") {
        None => {},
        Some("default") => options.set_compression_level(CompressionLevel::Default)?,
        Some("1") => options.set_compression_level(CompressionLevel::Fast)?,
        Some("9") => options.set_compression_level(CompressionLevel::High)?,
        _ => return Err(err("Unsupported compression level (try default, 1, or 9)")),
    }

    match args.value_of("strategy") {
        None => {},
        Some("auto") => options.set_strategy_mode(Adaptive)?,
        Some("default") => options.set_strategy_mode(Fixed(Strategy::Default))?,
        Some("filtered") => options.set_strategy_mode(Fixed(Strategy::Filtered))?,
        Some("huffman") => options.set_strategy_mode(Fixed(Strategy::HuffmanOnly))?,
        Some("rle") => options.set_strategy_mode(Fixed(Strategy::Rle))?,
        Some("fixed") => options.set_strategy_mode(Fixed(Strategy::Fixed))?,
        _ => return Err(err("Invalid compression strategy mode")),
    }

    match args.value_of("streaming") {
        None => {},
        Some("yes") => options.set_streaming(true)?,
        Some("no") => options.set_streaming(false)?,
        _ => return Err(err("Invalid streaming mode, try yes or no."))
    }

    let mut encoder = Encoder::new(writer, &options);

    // Image data
    encoder.write_header(&header)?;
    match palette {
        Some(v) => encoder.write_palette(&v)?,
        None => {},
    }
    match transparency {
        Some(v) => encoder.write_transparency(&v)?,
        None => {},
    }
    encoder.write_image_rows(&data)?;
    encoder.finish()?;

    Ok(())
}

fn doit(args: ArgMatches) -> io::Result<()> {
    let threads = match args.value_of("threads") {
        None => 0, // Means default
        Some(s) => {
            s.parse::<usize>().map_err(|_e| err("invalid threads"))?
        },
    };

    let pool = ThreadPoolBuilder::new().num_threads(threads)
        .build()
        .map_err(|e| err(&e.to_string()))?;

    eprintln!("Using {} threads", pool.current_num_threads());

    let reps = match args.value_of("repeat") {
        Some(s) => {
            s.parse::<usize>().map_err(|_e| err("invalid repeat"))?
        },
        None => 1,
    };

    // input and output are guaranteed to be present
    let infile = args.value_of("input").unwrap();
    let outfile = args.value_of("output").unwrap();

    println!("{} -> {}", infile, outfile);

    let (header, data, palette, transparency) = read_png(&infile)?;

    for _i in 0 ..
reps { let start_time = OffsetDateTime::now_utc(); write_png(&pool, &args, &outfile, &header, &data, &palette, &transparency)?; let delta = OffsetDateTime::now_utc() - start_time; println!("Done in {} ms", (delta.as_seconds_f64() * 1000.0).round()); } Ok(()) } pub fn main() { let matches = Command::new("mtpng parallel PNG encoder") .version("0.4.0") .author("Brion Vibber <[email protected]>") .about("Re-encodes PNG images using multiple CPU cores to exercise the mtpng library.") .arg(Arg::new("chunk-size") .long("chunk-size") .value_name("bytes") .help("Divide image into chunks of at least this given size.") .takes_value(true)) .arg(Arg::new("filter") .long("filter") .value_name("filter") .help("Set a fixed filter: one of none, sub, up, average, or paeth.")) .arg(Arg::new("level") .long("level") .value_name("level") .help("Set deflate compression level, from 1-9.")) .arg(Arg::new("strategy") .long("strategy") .value_name("strategy") .help("Deflate strategy: one of filtered, huffman, rle, or fixed.")) .arg(Arg::new("streaming") .long("streaming") .value_name("streaming") .help("Use streaming output mode; trades off file size for lower latency and memory usage")) .arg(Arg::new("threads") .long("threads") .value_name("threads") .help("Override default number of threads.")) .arg(Arg::new("repeat") .long("repeat") .value_name("n") .help("Run conversion n times, as load benchmarking helper.")) .arg(Arg::new("input") .help("Input filename, must be another PNG.") .required(true) .index(1)) .arg(Arg::new("output") .help("Output filename.") .required(true) .index(2)) .get_matches(); match doit(matches) { Ok(()) => {}, Err(e) => eprintln!("Error: {}", e), } }
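// --- Editor's note (not part of the original file) ----------------------------
// Typical invocations, using the flags declared above (file names are
// hypothetical):
//
//     mtpng --threads 8 --level 9 --filter adaptive in.png out.png
//     mtpng --repeat 10 --strategy rle --streaming yes in.png out.png
//
// And a library-level sketch of the same fixed configuration, skipping the
// CLI parsing that `write_png` does:
fn fixed_options(pool: &ThreadPool) -> io::Result<Options> {
    let mut options = Options::new();
    options.set_thread_pool(pool)?;
    options.set_compression_level(CompressionLevel::High)?;
    options.set_filter_mode(Fixed(Filter::Paeth))?;
    options.set_strategy_mode(Adaptive)?;
    Ok(options)
}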
34.329502
104
0.586272
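Each CLI option in the encoder above follows the same shape: a recognized string maps to a typed mtpng setter, and anything else becomes an io::Error that propagates with `?`. A minimal standalone sketch of that pattern; the `Mode` enum and `parse_mode` helper here are hypothetical stand-ins, not part of mtpng:

use std::io;

// Hypothetical stand-in for mtpng's typed option values.
#[derive(Debug, PartialEq)]
enum Mode {
    Auto,
    Fixed(u8),
}

// Map an optional CLI string to a typed value; unknown strings become an
// io::Error so the caller can bail out with `?`, as write_png does above.
fn parse_mode(arg: Option<&str>) -> io::Result<Option<Mode>> {
    match arg {
        None => Ok(None), // option omitted: keep the library default
        Some("auto") => Ok(Some(Mode::Auto)),
        Some("0") => Ok(Some(Mode::Fixed(0))),
        _ => Err(io::Error::new(io::ErrorKind::InvalidInput, "Unsupported mode")),
    }
}

fn main() {
    assert_eq!(parse_mode(Some("auto")).unwrap(), Some(Mode::Auto));
    assert!(parse_mode(Some("bogus")).is_err());
}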
d6aa82185658dd7bb2acce394ae62913b14df7ba
495
use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use cosmwasm_std::Addr; use cw_storage_plus::Item; #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] pub struct State { pub pharmacy: Addr, pub patient: Addr, pub source: Addr, pub medication: String, // medication name pub dosage: String, // ml or g pub price: i32, pub count: i32, // count of medication units sold to the patient } pub const STATE: Item<State> = Item::new("state");
24.75
70
0.682828
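`STATE` is a `cw_storage_plus::Item`, so the whole struct is saved and loaded under the single key "state". A sketch of round-tripping it in a unit test; the addresses and values are made up, and depending on the cosmwasm-std version `mock_dependencies` may instead take a slice argument:

#[cfg(test)]
mod tests {
    use super::*;
    use cosmwasm_std::testing::mock_dependencies;

    #[test]
    fn state_round_trip() {
        let mut deps = mock_dependencies();
        let state = State {
            pharmacy: Addr::unchecked("pharmacy"),
            patient: Addr::unchecked("patient"),
            source: Addr::unchecked("source"),
            medication: "ibuprofen".to_string(),
            dosage: "200mg".to_string(),
            price: 5,
            count: 0,
        };
        // Item::save serializes the struct under the "state" key;
        // Item::load reads it back.
        STATE.save(deps.as_mut().storage, &state).unwrap();
        assert_eq!(STATE.load(deps.as_ref().storage).unwrap(), state);
    }
}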
6acd0d8b48a77094336396f9708cf8dea527bf3e
488
bitflags! { pub struct FirmwareFlags: u64 { /// Set if the kernel was booted from a BIOS bootloader, clear if booted from a UEFI bootloader const BIOS_BOOT = 0x1; } } /// The system firmware info tag passed by the bootloader #[repr(packed)] pub struct FirmwareTag { _identifier: u64, _next: u64, flags: FirmwareFlags, } impl FirmwareTag { /// Get the firmware and boot flags pub fn flags(&self) -> FirmwareFlags { self.flags } }
22.181818
104
0.64959
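Callers test individual bits of `FirmwareFlags` with the `contains` method that `bitflags!` generates. A short sketch; obtaining the `FirmwareTag` reference (by walking the bootloader's tag list) is out of scope here:

// Classify the boot path from the flags accessor defined above.
fn boot_kind(tag: &FirmwareTag) -> &'static str {
    if tag.flags().contains(FirmwareFlags::BIOS_BOOT) {
        "BIOS" // bit 0 set: legacy BIOS boot
    } else {
        "UEFI" // bit 0 clear: UEFI boot
    }
}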
01e95f4ee007ab7490cb8a83614017dc80efbf58
8,684
#[macro_use] extern crate lazy_static; #[macro_use] extern crate log; #[macro_use] extern crate serde_derive; use std::{env, fmt}; use std::{ error::Error, time::{Instant, SystemTimeError}, }; use std::{io::ErrorKind, num::ParseIntError}; use std::{ num::ParseFloatError, process::{Command, Output}, }; use std::{ str::Utf8Error, sync::{Arc, Mutex}, }; use std::str; pub mod bridge_tools; mod check_cron; mod counter; mod create_wg_key; mod delete_tunnel; mod dns; pub mod exit_client_tunnel; mod exit_server_tunnel; pub mod file_io; mod fs_sync; mod get_neighbors; pub mod hardware_info; mod interface_tools; mod ip_addr; mod ip_route; mod iptables; mod is_openwrt; mod link_local_tools; mod manipulate_uci; pub mod open_tunnel; mod openwrt_ubus; pub mod opkg_feeds; mod ping_check; mod set_system_password; mod setup_wg_if; mod traffic_control; mod udp_socket_table; pub mod upgrade; pub mod wg_iface_counter; use althea_types::error::AltheaTypesError; use oping::PingError; pub use crate::counter::FilterTarget; pub use crate::create_wg_key::WgKeypair; pub use crate::exit_server_tunnel::ExitClient; pub use crate::ip_route::DefaultRoute; pub use crate::ip_route::IpRoute; pub use crate::ip_route::ToSubnet; use std::fmt::Result as FormatResult; use std::io::Error as IoError; use std::net::AddrParseError; use std::string::FromUtf8Error; type CommandFunction = Box<dyn FnMut(String, Vec<String>) -> Result<Output, KernelInterfaceError> + Send>; #[derive(Clone, Debug)] pub enum KernelInterfaceError { RuntimeError(String), NoInterfaceError(String), AddressNotReadyError(String), WgExistsError, FailedToGetMemoryUsage, FailedToGetMemoryInfo, FailedToGetLoadAverage, NoAltheaReleaseFeedFound, EmptyRouteString, InvalidRouteString(String), TrafficControlError(String), InvalidArchString(String), FailedToGetSystemTime, FailedToGetSystemKernelVersion, } impl fmt::Display for KernelInterfaceError { fn fmt(&self, f: &mut fmt::Formatter) -> FormatResult { match self { KernelInterfaceError::RuntimeError(val) => write!(f, "Runtime Error: {}", val), KernelInterfaceError::NoInterfaceError(val) => { write!(f, "No interface by the name: {}", val) } KernelInterfaceError::AddressNotReadyError(val) => { write!(f, "Address isn't ready yet: {}", val) } KernelInterfaceError::WgExistsError => write!(f, "WireGuard interface already exists"), KernelInterfaceError::FailedToGetMemoryUsage => { write!(f, "Failed to get accurate memory usage!") } KernelInterfaceError::FailedToGetLoadAverage => { write!(f, "Failed to get load average!") } KernelInterfaceError::FailedToGetMemoryInfo => write!(f, "Failed to get memory info!"), KernelInterfaceError::NoAltheaReleaseFeedFound => { write!(f, "Could not parse /etc/opkg/customfeeds.conf") } KernelInterfaceError::TrafficControlError(val) => { write!(f, "TrafficControl error {}", val) } KernelInterfaceError::EmptyRouteString => { write!(f, "Can't parse an empty string into a route!") } KernelInterfaceError::InvalidRouteString(val) => { write!(f, "InvalidRouteString {}", val) } KernelInterfaceError::InvalidArchString(val) => { write!(f, "InvalidArchString {}", val) } KernelInterfaceError::FailedToGetSystemTime => { write!(f, "Failed to get system time!") } KernelInterfaceError::FailedToGetSystemKernelVersion => { write!(f, "Failed to get system kernel version!") } } } } impl Error for KernelInterfaceError {} impl From<FromUtf8Error> for KernelInterfaceError { fn from(e: FromUtf8Error) -> Self { KernelInterfaceError::RuntimeError(format!("{}", e)) } } impl From<IoError> for KernelInterfaceError { fn
from(e: IoError) -> Self { KernelInterfaceError::RuntimeError(format!("{}", e)) } } impl From<AddrParseError> for KernelInterfaceError { fn from(e: AddrParseError) -> Self { KernelInterfaceError::RuntimeError(format!("{}", e)) } } impl From<ParseIntError> for KernelInterfaceError { fn from(e: ParseIntError) -> Self { KernelInterfaceError::RuntimeError(format!("{}", e)) } } impl From<ParseFloatError> for KernelInterfaceError { fn from(e: ParseFloatError) -> Self { KernelInterfaceError::RuntimeError(format!("{}", e)) } } impl From<AltheaTypesError> for KernelInterfaceError { fn from(e: AltheaTypesError) -> Self { KernelInterfaceError::RuntimeError(format!("{}", e)) } } impl From<Utf8Error> for KernelInterfaceError { fn from(e: Utf8Error) -> Self { KernelInterfaceError::RuntimeError(format!("{}", e)) } } impl From<SystemTimeError> for KernelInterfaceError { fn from(e: SystemTimeError) -> Self { KernelInterfaceError::RuntimeError(format!("{}", e)) } } impl From<PingError> for KernelInterfaceError { fn from(e: PingError) -> Self { KernelInterfaceError::RuntimeError(format!("{}", e)) } } #[cfg(test)] lazy_static! { pub static ref KI: Box<dyn KernelInterface> = Box::new(TestCommandRunner { run_command: Arc::new(Mutex::new(Box::new(|_program, _args| { panic!("kernel interface used before initialized"); }))) }); } #[cfg(not(test))] lazy_static! { pub static ref KI: Box<dyn KernelInterface> = Box::new(LinuxCommandRunner {}); } pub trait CommandRunner { fn run_command(&self, program: &str, args: &[&str]) -> Result<Output, KernelInterfaceError>; fn set_mock(&self, mock: CommandFunction); } // a quick throwaway function to print arguments arrays so that they can be copy/pasted from logs fn print_str_array(input: &[&str]) -> String { let mut output = String::new(); for item in input { output = output + " " + item; } output } pub struct LinuxCommandRunner; impl CommandRunner for LinuxCommandRunner { fn run_command(&self, program: &str, args: &[&str]) -> Result<Output, KernelInterfaceError> { let start = Instant::now(); let output = match Command::new(program).args(args).output() { Ok(o) => o, Err(e) => { if e.kind() == ErrorKind::NotFound { error!("The {:?} binary was not found. Please install a package that provides it. 
PATH={:?}", program, env::var("PATH")); } return Err(e.into()); } }; trace!( "Command {} {} returned: {:?}", program, print_str_array(args), output ); if !output.status.success() { trace!( "Command {} {} returned: an error {:?}", program, print_str_array(args), output ); } trace!( "command completed in {}s {}ms", start.elapsed().as_secs(), start.elapsed().subsec_millis() ); if start.elapsed().as_secs() > 5 { error!( "Command {} {} took more than five seconds to complete!", program, print_str_array(args) ); } else if start.elapsed().as_secs() > 1 { warn!( "Command {} {} took more than one second to complete!", program, print_str_array(args) ); } Ok(output) } fn set_mock( &self, _mock: Box<dyn FnMut(String, Vec<String>) -> Result<Output, KernelInterfaceError> + Send>, ) { unimplemented!() } } pub struct TestCommandRunner { pub run_command: Arc<Mutex<CommandFunction>>, } impl CommandRunner for TestCommandRunner { fn run_command(&self, program: &str, args: &[&str]) -> Result<Output, KernelInterfaceError> { let mut args_owned = Vec::new(); for a in args { args_owned.push((*a).to_string()) } (*self.run_command.lock().unwrap())(program.to_string(), args_owned) } fn set_mock(&self, mock: CommandFunction) { *self.run_command.lock().unwrap() = mock } } pub trait KernelInterface: CommandRunner + Sync + Send {} impl KernelInterface for LinuxCommandRunner {} impl KernelInterface for TestCommandRunner {}
28.850498
141
0.614463
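The blanket `From` impls are what make `?` ergonomic throughout this crate: `FromUtf8Error`, `IoError`, and friends all collapse into `KernelInterfaceError::RuntimeError`. A sketch of a hypothetical caller (not a function from the crate) showing the conversions at work:

// Hypothetical helper: run_command already yields KernelInterfaceError,
// and the FromUtf8Error from String::from_utf8 converts via the From impl above.
fn interface_dump() -> Result<String, KernelInterfaceError> {
    let output = KI.run_command("ip", &["addr"])?;
    Ok(String::from_utf8(output.stdout)?)
}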
76613c5cd9c4ad733096ad92128dadda0fd5aee5
1,809
//! Implements vertical (lane-wise) floating-point `sin`. macro_rules! impl_math_float_sin { ([$elem_ty:ident; $elem_count:expr]: $id:ident | $test_tt:tt) => { impl $id { /// Sine. #[inline] pub fn sin(self) -> Self { use crate::codegen::math::float::sin::Sin; Sin::sin(self) } /// Sine of `self * PI`. #[inline] pub fn sin_pi(self) -> Self { use crate::codegen::math::float::sin_pi::SinPi; SinPi::sin_pi(self) } /// Sine and cosine of `self * PI`. #[inline] pub fn sin_cos_pi(self) -> (Self, Self) { use crate::codegen::math::float::sin_cos_pi::SinCosPi; SinCosPi::sin_cos_pi(self) } } test_if!{ $test_tt: paste::item! { pub mod [<$id _math_sin>] { use super::*; #[cfg_attr(not(target_arch = "wasm32"), test)] #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)] fn sin() { use crate::$elem_ty::consts::PI; let z = $id::splat(0 as $elem_ty); let p = $id::splat(PI as $elem_ty); let ph = $id::splat(PI as $elem_ty / 2.); let o_r = $id::splat((PI as $elem_ty / 2.).sin()); let z_r = $id::splat((PI as $elem_ty).sin()); assert_eq!(z, z.sin()); assert_eq!(o_r, ph.sin()); assert_eq!(z_r, p.sin()); } } } } }; }
35.470588
122
0.392482
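The generated `sin_pi` is documented as the sine of `self * PI`, computed lane-wise. A scalar sketch of that identity (plain `f32`, no SIMD types, purely illustrative):

use std::f32::consts::PI;

// Scalar version of the identity the vector API exposes: sin_pi(x) = sin(PI * x).
fn sin_pi_scalar(x: f32) -> f32 {
    (x * PI).sin()
}

fn main() {
    assert!((sin_pi_scalar(0.5) - 1.0).abs() < 1e-6); // sin(PI/2) = 1
    assert!(sin_pi_scalar(1.0).abs() < 1e-6);         // sin(PI) = 0
}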
90cf5371488c883401789fee1c3eeedda7a53fbc
42,519
use super::Class; use super::Status; use super::Rounding; use context::*; use libc::c_char; #[cfg(feature = "ord_subset")] use ord_subset; #[cfg(feature = "rustc-serialize")] use rustc_serialize::{Decodable, Decoder, Encodable, Encoder}; #[cfg(feature = "serde")] use serde; use std::borrow::Borrow; use std::cell::RefCell; use std::default::Default; use std::ffi::{CStr, CString}; use std::fmt; use std::hash::{Hash, Hasher}; use std::iter::Sum; use std::mem::uninitialized; use std::num::FpCategory; use std::ops::{Add, AddAssign, Sub, SubAssign, Mul, MulAssign, Div, DivAssign, Rem, RemAssign, Neg, BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign, Not, Shl, ShlAssign, Shr, ShrAssign}; use std::str::FromStr; use std::str::from_utf8_unchecked; thread_local!(static CTX: RefCell<Context> = RefCell::new(d128::default_context())); #[repr(C)] #[derive(Clone, Copy)] /// A 128-bit decimal floating point type. pub struct d128 { bytes: [u8; 16], } #[repr(C)] #[derive(Clone, Copy)] struct decNumber { digits: i32, exponent: i32, bits: u8, // DECPUN = 3 because this is the fastest for conversion between decNumber and decQuad // DECNUMDIGITS = 34 because we use decQuad only // 12 = ((DECNUMDIGITS+DECDPUN-1)/DECDPUN) lsu: [u16; 12], } impl Default for d128 { fn default() -> Self { d128::zero() } } #[cfg(feature = "ord_subset")] impl ord_subset::OrdSubset for d128 { fn is_outside_order(&self) -> bool { self.is_nan() } } #[cfg(feature = "ord_subset")] impl Into<ord_subset::OrdVar<d128>> for d128 { fn into(self) -> ord_subset::OrdVar<d128> { ord_subset::OrdVar::new(self) } } #[cfg(feature = "rustc-serialize")] impl Decodable for d128 { fn decode<D: Decoder>(d: &mut D) -> Result<Self, D::Error> { let s = try!(d.read_str()); Ok(Self::from_str(&s).expect("unreachable")) } } #[cfg(feature = "rustc-serialize")] impl Encodable for d128 { fn encode<E: Encoder>(&self, e: &mut E) -> Result<(), E::Error> { e.emit_str(&format!("{}", self)) } } impl Hash for d128 { fn hash<H: Hasher>(&self, state: &mut H) { self.bytes.hash(state); } } #[cfg(feature = "serde")] impl serde::ser::Serialize for d128 { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: serde::ser::Serializer { serializer.serialize_str(&self.to_string()) } } #[cfg(feature = "serde")] impl<'de> serde::de::Deserialize<'de> for d128 { fn deserialize<D>(deserializer: D) -> Result<d128, D::Error> where D: serde::de::Deserializer<'de> { deserializer.deserialize_str(d128Visitor) } } #[cfg(feature = "serde")] #[allow(non_camel_case_types)] struct d128Visitor; #[cfg(feature = "serde")] impl<'de> serde::de::Visitor<'de> for d128Visitor { type Value = d128; fn expecting(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "a d128 value") } fn visit_str<E>(self, s: &str) -> Result<d128, E> where E: serde::de::Error { use serde::de::Unexpected; d128::from_str(s).map_err(|_| E::invalid_value(Unexpected::Str(s), &self)) } } /// Converts an i32 to d128. The result is exact and no error is possible. impl From<i32> for d128 { fn from(val: i32) -> d128 { unsafe { let mut res: d128 = uninitialized(); *decQuadFromInt32(&mut res, val) } } } /// Converts an u32 to d128. The result is exact and no error is possible. impl From<u32> for d128 { fn from(val: u32) -> d128 { unsafe { let mut res: d128 = uninitialized(); *decQuadFromUInt32(&mut res, val) } } } /// Converts an u64 to d128. The result is exact and no error is possible. 
impl From<u64> for d128 { fn from(mut val: u64) -> d128 { let mut bcd = [0; 34]; let mut i = 33; while val > 0 { bcd[i] = (val % 10) as u8; val /= 10; i -= 1; } unsafe { let mut res: d128 = uninitialized(); *decQuadFromBCD(&mut res, 0, bcd.as_ptr(), 0) } } } /// Converts an i64 to d128. The result is exact and no error is possible. impl From<i64> for d128 { fn from(val: i64) -> d128 { if val < 0 { -d128::from(!(val as u64) + 1) } else { d128::from(val as u64) } } } impl AsRef<d128> for d128 { fn as_ref(&self) -> &d128 { self } } /// Converts a string to d128. The length of the coefficient and the size of the exponent are /// checked by this routine, so rounding will be applied if necessary and status flags /// (`UNDERFLOW`, `OVERFLOW`) may be set. There is /// no limit to the coefficient length for finite inputs; NaN payloads must be integers with no /// more than 33 digits. Exponents may have up to nine significant digits. The syntax of the string /// is fully checked; if it is not valid, the result will be a quiet NaN and an error flag will be /// set. impl FromStr for d128 { type Err = (); fn from_str(s: &str) -> Result<Self, ()> { let cstr = match CString::new(s) { Err(..) => CString::new("qNaN").unwrap(), Ok(cstr) => cstr, }; d128::with_context(|ctx| { let mut res: d128; unsafe { res = uninitialized(); decQuadFromString(&mut res, cstr.as_ptr(), ctx); } Ok(res) }) } } /// Converts this d128 to an i32. It uses Rounding::HalfEven. impl Into<i32> for d128 { fn into(self) -> i32 { d128::with_context(|ctx| unsafe { decQuadToInt32(&self, ctx, ctx.rounding) }) } } /// Converts this d128 to a u32. It uses Rounding::HalfEven. impl Into<u32> for d128 { fn into(self) -> u32 { d128::with_context(|ctx| unsafe { decQuadToUInt32(&self, ctx, ctx.rounding) }) } } /// Formats a d128. Finite numbers will be converted to a string with exponential notation if the /// exponent is positive or if the magnitude of x is less than 1 and would require more than five /// zeros between the decimal point and the first significant digit. Note that strings which are /// not simply numbers (one of Infinity, –Infinity, NaN, or sNaN) are possible. A NaN string may /// have a leading – sign and/or following payload digits. No digits follow the NaN string if the /// payload is 0. impl fmt::Display for d128 { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { let mut buf = [0; 43]; unsafe { decQuadToString(self, buf.as_mut().as_mut_ptr()); let cstr = CStr::from_ptr(buf.as_ptr()); fmt.pad(from_utf8_unchecked(cstr.to_bytes())) } } } /// Same as `fmt::Display`. impl fmt::Debug for d128 { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { fmt::Display::fmt(self, fmt) } } /// Formats a d128 with engineering notation. This is the same as fmt::Display except that if /// exponential notation is used the exponent will be a multiple of 3. impl fmt::LowerExp for d128 { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { let mut buf = [0; 43]; unsafe { decQuadToEngString(self, buf.as_mut().as_mut_ptr()); let cstr = CStr::from_ptr(buf.as_ptr()); fmt.pad(from_utf8_unchecked(cstr.to_bytes())) } } } /// Formats a d128 to hexadecimal binary representation.
impl fmt::LowerHex for d128 { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { for b in self.bytes.iter().rev() { try!(write!(fmt, "{:02x}", b)); } Ok(()) } } impl PartialEq<d128> for d128 { fn eq(&self, other: &d128) -> bool { self.compare(other).is_zero() } } impl PartialOrd<d128> for d128 { fn partial_cmp(&self, other: &d128) -> Option<::std::cmp::Ordering> { use std::cmp::Ordering; match self.compare(other) { v if v.is_nan() => None, v if v.is_zero() => Some(Ordering::Equal), v if v.is_positive() => Some(Ordering::Greater), v if v.is_negative() => Some(Ordering::Less), _ => unreachable!(), } } } macro_rules! ffi_unary_op { ($(#[$attr:meta])* impl $op:ident, $method:ident, $ffi:ident for $t:ident) => { $(#[$attr])* impl $op for $t { type Output = $t; fn $method(mut self) -> $t { $t::with_context(|ctx| { unsafe { *$ffi(&mut self, &self, ctx)} }) } } impl<'a> $op for &'a $t { type Output = $t; fn $method(self) -> $t { $t::with_context(|ctx| { unsafe { let mut res: $t = uninitialized(); *$ffi(&mut res, self, ctx)} }) } } } } macro_rules! ffi_binary_op { ($(#[$attr:meta])* impl $op:ident, $method:ident, $ffi:ident for $t:ident) => { $(#[$attr])* impl $op<$t> for $t { type Output = $t; fn $method(mut self, other: $t) -> $t { $t::with_context(|ctx| { unsafe { *$ffi(&mut self, &self, &other, ctx)} }) } } impl<'a> $op<$t> for &'a $t { type Output = $t; fn $method(self, mut other: $t) -> $t { $t::with_context(|ctx| { unsafe { *$ffi(&mut other, self, &other, ctx) } }) } } impl<'a> $op<&'a$t> for $t { type Output = $t; fn $method(mut self, other: &'a $t) -> $t { $t::with_context(|ctx| { unsafe { *$ffi(&mut self, &self, other, ctx) } }) } } impl<'a, 'b> $op<&'a $t> for &'b $t { type Output = $t; fn $method(self, other: &'a $t) -> $t { $t::with_context(|ctx| { unsafe { let mut res: $t = uninitialized(); *$ffi(&mut res, self, other, ctx) } }) } } } } macro_rules! ffi_unary_assign_op { ($(#[$attr:meta])* impl $op:ident, $method:ident, $ffi:ident for $t:ident) => { $(#[$attr])* impl $op<$t> for $t { fn $method(&mut self, other: $t) { $t::with_context(|ctx| { unsafe { $ffi(self, self, &other, ctx); } }) } } } } ffi_binary_op!(impl Add, add, decQuadAdd for d128); ffi_binary_op!(impl Sub, sub, decQuadSubtract for d128); ffi_binary_op!(impl Mul, mul, decQuadMultiply for d128); ffi_binary_op!(impl Div, div, decQuadDivide for d128); ffi_binary_op!( /// The operands must be zero or positive, an integer (finite with zero exponent) and comprise /// only zeros and/or ones; if not, INVALID_OPERATION is set. impl BitAnd, bitand, decQuadAnd for d128); ffi_binary_op!( /// The operands must be zero or positive, an integer (finite with zero exponent) and comprise /// only zeros and/or ones; if not, INVALID_OPERATION is set. impl BitOr, bitor, decQuadOr for d128); ffi_binary_op!( /// The operands must be zero or positive, an integer (finite with zero exponent) and comprise /// only zeros and/or ones; if not, INVALID_OPERATION is set. 
impl BitXor, bitxor, decQuadXor for d128); ffi_binary_op!(impl Rem, rem, decQuadRemainder for d128); ffi_unary_assign_op!(impl AddAssign, add_assign, decQuadAdd for d128); ffi_unary_assign_op!(impl SubAssign, sub_assign, decQuadSubtract for d128); ffi_unary_assign_op!(impl MulAssign, mul_assign, decQuadMultiply for d128); ffi_unary_assign_op!(impl DivAssign, div_assign, decQuadDivide for d128); ffi_unary_assign_op!(impl BitAndAssign, bitand_assign, decQuadAnd for d128); ffi_unary_assign_op!(impl BitOrAssign, bitor_assign, decQuadOr for d128); ffi_unary_assign_op!(impl BitXorAssign, bitxor_assign, decQuadXor for d128); ffi_unary_assign_op!(impl RemAssign, rem_assign, decQuadRemainder for d128); ffi_unary_op!(impl Neg, neg, decQuadMinus for d128); ffi_unary_op!( /// The operand must be zero or positive, an integer (finite with zero exponent) and comprise /// only zeros and/or ones; if not, INVALID_OPERATION is set. impl Not, not, decQuadInvert for d128); /// The result is `self` with the digits of the coefficient shifted to the left without adjusting /// the exponent or the sign of `self`. Any digits ‘shifted in’ from the right will be 0. `amount` /// is the count of positions to shift and must be a in the range –34 through +34. NaNs are /// propagated as usual. If `self` is infinite the result is Infinity of the same sign. No status /// is set unless `amount` is invalid or `self` is an sNaN. impl Shl<usize> for d128 { type Output = d128; fn shl(mut self, amount: usize) -> d128 { let shift = d128::from(amount as u32); d128::with_context(|ctx| unsafe { *decQuadShift(&mut self, &self, &shift, ctx) }) } } impl<'a> Shl<usize> for &'a d128 { type Output = d128; fn shl(self, amount: usize) -> d128 { let shift = d128::from(amount as u32); d128::with_context(|ctx| { unsafe { let mut res: d128 = uninitialized(); *decQuadShift(&mut res, self, &shift, ctx) } }) } } impl ShlAssign<usize> for d128 { fn shl_assign(&mut self, amount: usize) { let shift = d128::from(amount as u32); d128::with_context(|ctx| { unsafe { decQuadShift(self, self, &shift, ctx); } }) } } /// The result is `self` with the digits of the coefficient shifted to the right without adjusting /// the exponent or the sign of `self`. Any digits ‘shifted in’ from the left will be 0. `amount` /// is the count of positions to shift and must be a in the range –34 through +34. NaNs are /// propagated as usual. If `self` is infinite the result is Infinity of the same sign. No status /// is set unless `amount` is invalid or `self` is an sNaN. 
impl Shr<usize> for d128 { type Output = d128; fn shr(mut self, amount: usize) -> d128 { let shift = -d128::from(amount as u32); d128::with_context(|ctx| unsafe { *decQuadShift(&mut self, &self, &shift, ctx) }) } } impl<'a> Shr<usize> for &'a d128 { type Output = d128; fn shr(self, amount: usize) -> d128 { let shift = -d128::from(amount as u32); d128::with_context(|ctx| { unsafe { let mut res: d128 = uninitialized(); *decQuadShift(&mut res, self, &shift, ctx) } }) } } impl ShrAssign<usize> for d128 { fn shr_assign(&mut self, amount: usize) { let shift = -d128::from(amount as u32); d128::with_context(|ctx| { unsafe { decQuadShift(self, self, &shift, ctx); } }) } } impl<T> Sum<T> for d128 where T: Borrow<d128> { fn sum<I: IntoIterator<Item = T>>(iter: I) -> d128 { iter.into_iter() .fold(d128::zero(), |acc, val| acc + val.borrow()) } } impl d128 { fn default_context() -> Context { unsafe { let mut res: Context = uninitialized(); *decContextDefault(&mut res, 128) } } fn with_context<F, R>(f: F) -> R where F: FnOnce(&mut Context) -> R { CTX.with(|ctx| f(&mut ctx.borrow_mut())) } /// Creates a d128 from raw bytes. Endianess is host dependent. pub unsafe fn from_raw_bytes(bytes: [u8; 16]) -> d128 { d128 { bytes: bytes } } /// Returns raw bytes for this d128. Endianess is host dependent. pub fn to_raw_bytes(&self) -> [u8; 16] { self.bytes } /// Returns the thread local status. pub fn get_status() -> Status { d128::with_context(|ctx| Status::from_bits_truncate(ctx.status)) } /// Sets the thread local status. pub fn set_status(status: Status) { d128::with_context(|ctx| ctx.status = status.bits()); } /// Reads the hex binary representation from a string. This is the reverse of formatting with /// {:x}. pub fn from_hex(s: &str) -> d128 { if s.len() != 32 { Self::from_str("qNaN").unwrap() } else { unsafe { let mut res: d128 = uninitialized(); for (i, octet) in s.as_bytes().chunks(2).rev().enumerate() { res.bytes[i] = match u8::from_str_radix(from_utf8_unchecked(octet), 16) { Ok(val) => val, Err(..) => return Self::from_str("qNaN").unwrap(), }; } res } } } // Utilities and conversions, extractors, etc. /// Returns the d128 representing +0. pub fn zero() -> d128 { unsafe { let mut res = uninitialized(); *decQuadZero(&mut res) } } /// Returns the d128 representing +Infinity. pub fn infinity() -> d128 { d128!(Infinity) } /// Returns the d128 representing -Infinity. pub fn neg_infinity() -> d128 { d128!(-Infinity) } // Computational. /// Returns the absolute value of `self`. pub fn abs(mut self) -> d128 { d128::with_context(|ctx| unsafe { *decQuadAbs(&mut self, &self, ctx) }) } /// Calculates the fused multiply-add `self` × `a` + `b` and returns the result. The multiply /// is carried out first and is exact, so this operation has only the one, final, rounding. pub fn mul_add<O: AsRef<d128>>(mut self, a: O, b: O) -> d128 { d128::with_context(|ctx| unsafe { *decQuadFMA(&mut self, &self, a.as_ref(), b.as_ref(), ctx) }) } /// Returns the adjusted exponent of `self`, according to IEEE 754 rules. That is, the exponent /// returned is calculated as if the decimal point followed the first significant digit (so, /// for example, if `self` were 123 then the result would be 2). If `self` is infinite, the /// result is +Infinity. If `self` is a zero, the result is –Infinity, and the /// `DIVISION_BY_ZERO` flag is set. If `self` is less than zero, the absolute value of `self` /// is used. If `self` is 1, the result is 0. NaNs are handled (propagated) as for arithmetic /// operations. 
pub fn logb(mut self) -> d128 { d128::with_context(|ctx| unsafe { *decQuadLogB(&mut self, &self, ctx) }) } /// If both `self` and `other` are numeric (not NaNs) this returns the larger of the two /// (compared using total ordering, to give a well-defined result). If either (but not both of) /// is a quiet NaN then the other argument is the result; otherwise NaNs are handled as for /// arithmetic operations. pub fn max<O: AsRef<d128>>(mut self, other: O) -> d128 { d128::with_context(|ctx| unsafe { *decQuadMax(&mut self, &self, other.as_ref(), ctx) }) } /// If both `self` and `other` are numeric (not NaNs) this returns the smaller of the two /// (compared using total ordering, to give a well-defined result). If either (but not both of) /// is a quiet NaN then the other argument is the result; otherwise NaNs are handled as for /// arithmetic operations. pub fn min<O: AsRef<d128>>(mut self, other: O) -> d128 { d128::with_context(|ctx| unsafe { *decQuadMin(&mut self, &self, other.as_ref(), ctx) }) } /// Returns the ‘next’ d128 to `self` in the direction of +Infinity according to IEEE 754 rules /// for nextUp. The only status possible is `INVALID_OPERATION` (from an sNaN). pub fn next(mut self) -> d128 { d128::with_context(|ctx| unsafe { *decQuadNextPlus(&mut self, &self, ctx) }) } /// Returns the ‘next’ d128 to `self` in the direction of –Infinity according to IEEE 754 rules /// for nextDown. The only status possible is `INVALID_OPERATION` (from an sNaN). pub fn previous(mut self) -> d128 { d128::with_context(|ctx| unsafe { *decQuadNextMinus(&mut self, &self, ctx) }) } /// The number is set to the result of raising `self` to the power of `exp`. Results will be /// exact when `exp` has an integral value and the result does not need to be rounded, and also /// will be exact in certain special cases, such as when `self` is a zero (see the arithmetic /// specification for details). Inexact results will always be full precision, and will almost /// always be correctly rounded, but may be up to 1 _ulp_ (unit in last place) in error in rare /// cases. This is a mathematical function; the 10<sup>6</sup> restrictions on precision and /// range apply as described above, except that the normal range of values is allowed if `exp` /// has an integral value in the range –1999999997 through +999999999. pub fn pow<O: AsRef<d128>>(mut self, exp: O) -> d128 { d128::with_context(|ctx| unsafe { let mut num_self: decNumber = uninitialized(); let mut num_exp: decNumber = uninitialized(); decimal128ToNumber(&self, &mut num_self); decimal128ToNumber(exp.as_ref(), &mut num_exp); decNumberPower(&mut num_self, &num_self, &num_exp, ctx); *decimal128FromNumber(&mut self, &num_self, ctx) }) } /// The number is set to _e_ raised to the power of `exp`. Finite results will always be full /// precision and inexact, except when `exp` is a zero or –Infinity (giving 1 or 0 /// respectively). Inexact results will almost always be correctly rounded, but may be up to 1 /// ulp (unit in last place) in error in rare cases. This is a mathematical function; the /// 10<sup>6</sup> restrictions on precision and range apply as described above. pub fn exp(mut self) -> d128 { d128::with_context(|ctx| unsafe { let mut num_self: decNumber = uninitialized(); decimal128ToNumber(&self, &mut num_self); decNumberExp(&mut num_self, &num_self, ctx); *decimal128FromNumber(&mut self, &num_self, ctx) }) } /// The number is set to the natural logarithm (logarithm in base e) of `self`. `self` must be /// positive or a zero. 
Finite results will always be full precision and inexact, except when /// `self` is equal to 1, which gives an exact result of 0. Inexact results will almost always /// be correctly rounded, but may be up to 1 ulp (unit in last place) in error in rare cases. /// This is a mathematical function; the 10<sup>6</sup> restrictions on precision and range /// apply as described above. pub fn ln(mut self) -> d128 { d128::with_context(|ctx| unsafe { let mut num_self: decNumber = uninitialized(); decimal128ToNumber(&self, &mut num_self); decNumberLn(&mut num_self, &num_self, ctx); *decimal128FromNumber(&mut self, &num_self, ctx) }) } /// The number is set to the logarithm in base ten of `self`. `self` must be positive or a /// zero. Finite results will always be full precision and inexact, except when `self` is equal /// to an integral power of ten, in which case the result is the exact integer. Inexact results /// will almost always be correctly rounded, but may be up to 1 ulp (unit in last place) in /// error in rare cases. This is a mathematical function; the 10<sup>6</sup> restrictions on /// precision and range apply as described above. pub fn log10(mut self) -> d128 { d128::with_context(|ctx| unsafe { let mut num_self: decNumber = uninitialized(); decimal128ToNumber(&self, &mut num_self); decNumberLog10(&mut num_self, &num_self, ctx); *decimal128FromNumber(&mut self, &num_self, ctx) }) } /// Returns the ‘next’ d128 to `self` in the direction of `other` according to proposed IEEE /// 754 rules for nextAfter. If `self` == `other` the result is `self`. If either operand is /// a NaN the result is as for arithmetic operations. Otherwise (the operands are numeric and /// different) the result of adding (or subtracting) an infinitesimal positive amount to `self` /// and rounding towards +Infinity (or –Infinity) is returned, depending on whether `other` is /// larger (or smaller) than `self`. The addition will set flags, except that if the result is /// normal (finite, non-zero, and not subnormal) no flags are set. pub fn towards<O: AsRef<d128>>(mut self, other: O) -> d128 { d128::with_context(|ctx| unsafe { *decQuadNextToward(&mut self, &self, other.as_ref(), ctx) }) } /// Returns `self` set to have the same quantum as `other`, if possible (that is, numerically /// the same value but rounded or padded if necessary to have the same exponent as `other`, for /// example to round a monetary quantity to cents). pub fn quantize<O: AsRef<d128>>(mut self, other: O) -> d128 { d128::with_context(|ctx| unsafe { *decQuadQuantize(&mut self, &self, other.as_ref(), ctx) }) } /// Returns a copy of `self` with its coefficient reduced to its shortest possible form without /// changing the value of the result. This removes all possible trailing zeros from the /// coefficient (some may remain when the number is very close to the most positive or most /// negative number). Infinities and NaNs are unchanged and no status is set unless `self` is /// an sNaN. If `self` is a zero the result exponent is 0. pub fn reduce(mut self) -> d128 { d128::with_context(|ctx| unsafe { *decQuadReduce(&mut self, &self, ctx) }) } /// The result is a copy of `self` with the digits of the coefficient rotated to the left (if /// `amount` is positive) or to the right (if `amount` is negative) without adjusting the /// exponent or the sign of `self`. `amount` is the count of positions to rotate and must be a /// finite integer (with exponent=0) in the range -34 through +34. NaNs are propagated as /// usual. 
If `self` is infinite the result is Infinity of the same sign. No status is set /// unless `amount` is invalid or an operand is an sNaN. pub fn rotate<O: AsRef<d128>>(mut self, amount: O) -> d128 { d128::with_context(|ctx| unsafe { *decQuadRotate(&mut self, &self, amount.as_ref(), ctx) }) } /// This calculates `self` × 10<sup>`other`</sup> and returns the result. `other` must be an /// integer (finite with exponent=0) in the range ±2 × (34 + 6144), typically resulting from /// `logb`. Underflow and overflow might occur. NaNs propagate as usual. pub fn scaleb<O: AsRef<d128>>(mut self, other: O) -> d128 { d128::with_context(|ctx| unsafe { *decQuadScaleB(&mut self, &self, other.as_ref(), ctx) }) } // Comparisons. /// Compares `self` and `other` numerically and returns the result. The result may be –1, 0, 1, /// or NaN (unordered); –1 indicates that `self` is less than `other`, 0 indicates that they /// are numerically equal, and 1 indicates that `self` is greater than `other`. NaN is returned /// only if `self` or `other` is a NaN. pub fn compare<O: AsRef<d128>>(&self, other: O) -> d128 { d128::with_context(|ctx| unsafe { let mut res: d128 = uninitialized(); *decQuadCompare(&mut res, self, other.as_ref(), ctx) }) } /// Compares `self` and `other` using the IEEE 754 total ordering (which takes into account the /// exponent) and returns the result. No status is set (a signaling NaN is ordered between /// Infinity and NaN). The result will be –1, 0, or 1. pub fn compare_total<O: AsRef<d128>>(&self, other: O) -> d128 { d128::with_context(|ctx| unsafe { let mut res: d128 = uninitialized(); *decQuadCompareTotal(&mut res, self, other.as_ref(), ctx) }) } // Copies. /// Returns `self` ensuring that the encoding is canonical. pub fn canonical(mut self) -> d128 { unsafe { *decQuadCanonical(&mut self, &self) } } // Non-computational. /// Returns the class of `self`. pub fn class(&self) -> Class { unsafe { decQuadClass(self) } } /// Same as `class()` but returns `std::num::FpCategory`. pub fn classify(&self) -> FpCategory { use std::num::FpCategory::*; use super::Class::*; match self.class() { Qnan | Snan => Nan, PosInf | NegInf => Infinite, PosZero | NegZero => Zero, PosNormal | NegNormal => Normal, PosSubnormal | NegSubnormal => Subnormal, } } /// Returns the number of significant digits in `self`. If `self` is a zero or is infinite, 1 /// is returned. If `self` is a NaN then the number of digits in the payload is returned. pub fn digits(&self) -> u32 { unsafe { decQuadDigits(self) } } /// Returns `true` if the encoding of `self` is canonical, or `false` otherwise. pub fn is_canonical(&self) -> bool { unsafe { decQuadIsCanonical(self) != 0 } } /// Returns `true` if `self` is neither infinite nor a NaN, or `false` otherwise. pub fn is_finite(&self) -> bool { unsafe { decQuadIsFinite(self) != 0 } } /// Returns `true` if `self` is finite and its exponent is zero, or `false` otherwise. pub fn is_integer(&self) -> bool { unsafe { decQuadIsInteger(self) != 0 } } /// Returns `true` if `self` is a valid argument for logical operations (that is, `self` is /// zero or positive, an integer (finite with a zero exponent) and comprises only zeros and/or /// ones), or `false` otherwise. pub fn is_logical(&self) -> bool { unsafe { decQuadIsLogical(self) != 0 } } /// Returns `true` if the encoding of `self` is an infinity, or `false` otherwise. pub fn is_infinite(&self) -> bool { unsafe { decQuadIsInfinite(self) != 0 } } /// Returns `true` if `self` is a NaN (quiet or signaling), or `false` otherwise. 
pub fn is_nan(&self) -> bool { unsafe { decQuadIsNaN(self) != 0 } } /// Returns `true` if `self` is less than zero and not a NaN, or `false` otherwise. pub fn is_negative(&self) -> bool { unsafe { decQuadIsNegative(self) != 0 } } /// Returns `true` if `self` is a normal number (that is, is finite, non-zero, and not /// subnormal), or `false` otherwise. pub fn is_normal(&self) -> bool { unsafe { decQuadIsNormal(self) != 0 } } /// Returns `true` if `self` is greater than zero and not a NaN, or `false` otherwise. pub fn is_positive(&self) -> bool { unsafe { decQuadIsPositive(self) != 0 } } /// Returns `true` if `self` is a signaling NaN, or `false` otherwise. pub fn is_signaling(&self) -> bool { unsafe { decQuadIsSignaling(self) != 0 } } /// Returns `true` if `self` has a minus sign, or `false` otherwise. Note that zeros and NaNs /// may have a minus sign. pub fn is_signed(&self) -> bool { unsafe { decQuadIsSigned(self) != 0 } } /// Returns `true` if `self` is subnormal (that is, finite, non-zero, and with magnitude less /// than 10<sup>-6143</sup>), or `false` otherwise. pub fn is_subnormal(&self) -> bool { unsafe { decQuadIsSubnormal(self) != 0 } } /// Returns `true` if `self` is zero, or `false` otherwise. pub fn is_zero(&self) -> bool { unsafe { decQuadIsZero(self) != 0 } } } extern "C" { // Context. fn decContextDefault(ctx: *mut Context, kind: u32) -> *mut Context; // Utilities and conversions, extractors, etc. fn decQuadFromBCD(res: *mut d128, exp: i32, bcd: *const u8, sign: i32) -> *mut d128; fn decQuadFromInt32(res: *mut d128, src: i32) -> *mut d128; fn decQuadFromString(res: *mut d128, s: *const c_char, ctx: *mut Context) -> *mut d128; fn decQuadFromUInt32(res: *mut d128, src: u32) -> *mut d128; fn decQuadToString(src: *const d128, s: *mut c_char) -> *mut c_char; fn decQuadToInt32(src: *const d128, ctx: *mut Context, round: Rounding) -> i32; fn decQuadToUInt32(src: *const d128, ctx: *mut Context, round: Rounding) -> u32; fn decQuadToEngString(res: *const d128, s: *mut c_char) -> *mut c_char; fn decQuadZero(res: *mut d128) -> *mut d128; // Computational. 
fn decQuadAbs(res: *mut d128, src: *const d128, ctx: *mut Context) -> *mut d128; fn decQuadAdd(res: *mut d128, a: *const d128, b: *const d128, ctx: *mut Context) -> *mut d128; fn decQuadAnd(res: *mut d128, a: *const d128, b: *const d128, ctx: *mut Context) -> *mut d128; fn decQuadDivide(res: *mut d128, a: *const d128, b: *const d128, ctx: *mut Context) -> *mut d128; fn decQuadFMA(res: *mut d128, a: *const d128, b: *const d128, c: *const d128, ctx: *mut Context) -> *mut d128; fn decQuadInvert(res: *mut d128, src: *const d128, ctx: *mut Context) -> *mut d128; fn decQuadLogB(res: *mut d128, src: *const d128, ctx: *mut Context) -> *mut d128; fn decQuadMax(res: *mut d128, a: *const d128, b: *const d128, ctx: *mut Context) -> *mut d128; fn decQuadMin(res: *mut d128, a: *const d128, b: *const d128, ctx: *mut Context) -> *mut d128; fn decQuadMinus(res: *mut d128, src: *const d128, ctx: *mut Context) -> *mut d128; fn decQuadMultiply(res: *mut d128, a: *const d128, b: *const d128, ctx: *mut Context) -> *mut d128; fn decQuadNextMinus(res: *mut d128, src: *const d128, ctx: *mut Context) -> *mut d128; fn decQuadNextPlus(res: *mut d128, src: *const d128, ctx: *mut Context) -> *mut d128; fn decQuadNextToward(res: *mut d128, src: *const d128, other: *const d128, ctx: *mut Context) -> *mut d128; fn decQuadOr(res: *mut d128, a: *const d128, b: *const d128, ctx: *mut Context) -> *mut d128; fn decQuadQuantize(res: *mut d128, a: *const d128, b: *const d128, ctx: *mut Context) -> *mut d128; fn decQuadReduce(res: *mut d128, src: *const d128, ctx: *mut Context) -> *mut d128; fn decQuadRemainder(res: *mut d128, a: *const d128, b: *const d128, ctx: *mut Context) -> *mut d128; fn decQuadRotate(res: *mut d128, a: *const d128, b: *const d128, ctx: *mut Context) -> *mut d128; fn decQuadScaleB(res: *mut d128, a: *const d128, b: *const d128, ctx: *mut Context) -> *mut d128; fn decQuadShift(res: *mut d128, a: *const d128, b: *const d128, ctx: *mut Context) -> *mut d128; fn decQuadSubtract(res: *mut d128, a: *const d128, b: *const d128, ctx: *mut Context) -> *mut d128; fn decQuadXor(res: *mut d128, a: *const d128, b: *const d128, ctx: *mut Context) -> *mut d128; // Comparisons. fn decQuadCompare(res: *mut d128, a: *const d128, b: *const d128, ctx: *mut Context) -> *mut d128; fn decQuadCompareTotal(res: *mut d128, a: *const d128, b: *const d128, ctx: *mut Context) -> *mut d128; // Copies. fn decQuadCanonical(res: *mut d128, src: *const d128) -> *mut d128; // Non-computational. fn decQuadClass(src: *const d128) -> Class; fn decQuadDigits(src: *const d128) -> u32; fn decQuadIsCanonical(src: *const d128) -> u32; fn decQuadIsFinite(src: *const d128) -> u32; fn decQuadIsInteger(src: *const d128) -> u32; fn decQuadIsLogical(src: *const d128) -> u32; fn decQuadIsInfinite(src: *const d128) -> u32; fn decQuadIsNaN(src: *const d128) -> u32; fn decQuadIsNegative(src: *const d128) -> u32; fn decQuadIsNormal(src: *const d128) -> u32; fn decQuadIsPositive(src: *const d128) -> u32; fn decQuadIsSignaling(src: *const d128) -> u32; fn decQuadIsSigned(src: *const d128) -> u32; fn decQuadIsSubnormal(src: *const d128) -> u32; fn decQuadIsZero(src: *const d128) -> u32; // decNumber stuff. 
fn decimal128FromNumber(res: *mut d128, src: *const decNumber, ctx: *mut Context) -> *mut d128; fn decimal128ToNumber(src: *const d128, res: *mut decNumber) -> *mut decNumber; fn decNumberPower(res: *mut decNumber, lhs: *const decNumber, rhs: *const decNumber, ctx: *mut Context) -> *mut decNumber; fn decNumberExp(res: *mut decNumber, rhs: *const decNumber, ctx: *mut Context) -> *mut decNumber; fn decNumberLn(res: *mut decNumber, rhs: *const decNumber, ctx: *mut Context) -> *mut decNumber; fn decNumberLog10(res: *mut decNumber, rhs: *const decNumber, ctx: *mut Context) -> *mut decNumber; } #[cfg(test)] mod tests { #[cfg(any(feature = "ord_subset", feature = "rustc-serialize"))] use super::*; #[cfg(any(feature = "ord_subset", feature = "serde"))] use std::collections::BTreeMap; #[cfg(feature = "ord_subset")] use ord_subset; #[cfg(feature = "rustc-serialize")] use rustc_serialize::json; #[cfg(feature = "serde")] use serde_json::{from_str, to_string}; #[test] fn default() { assert_eq!(d128::zero(), d128::default()); assert_eq!(d128::zero(), Default::default()); } #[test] fn special() { assert!(d128::infinity().is_infinite()); assert!(!d128::infinity().is_negative()); assert!(d128::neg_infinity().is_infinite()); assert!(d128::neg_infinity().is_negative()); assert_eq!(d128::infinity() + d128!(1), d128::infinity()); } #[cfg(feature = "ord_subset")] #[test] #[should_panic] fn test_ord_subset_nan() { ord_subset::OrdVar::new(d128!(NaN)); } #[cfg(feature = "ord_subset")] #[test] #[should_panic] fn test_ord_subset_qnan() { ord_subset::OrdVar::new(d128!(qNaN)); } #[cfg(feature = "ord_subset")] #[test] fn test_ord_subset_zero() { assert_eq!(*ord_subset::OrdVar::new(d128::zero()), d128::zero()); } #[cfg(feature = "ord_subset")] #[test] fn test_into_for_btreemap() { let mut m = BTreeMap::<ord_subset::OrdVar<d128>, i64>::new(); m.insert(d128!(1.1).into(), 1); assert_eq!(m[&d128!(1.1).into()], 1); } #[cfg(feature = "rustc-serialize")] #[test] fn test_rustc_serialize() { #[derive(RustcDecodable, RustcEncodable, PartialEq, Debug)] struct Test { price: d128, }; let a = Test { price: d128!(12.3456) }; assert_eq!(json::encode(&a).unwrap(), "{\"price\":\"12.3456\"}"); let b = json::decode("{\"price\":\"12.3456\"}").unwrap(); assert_eq!(a, b); } #[cfg(feature = "serde")] #[test] fn test_serde() { let mut a = BTreeMap::new(); a.insert("price".to_string(), d128!(432.232)); a.insert("amt".to_string(), d128!(9.9)); assert_eq!(&to_string(&a).unwrap(), "{\"amt\":\"9.9\",\"price\":\"432.232\"}"); let b = from_str("{\"price\":\"432.232\",\"amt\":\"9.9\"}").unwrap(); assert_eq!(a, b); } #[test] fn unary_op() { assert_eq!(d128!(-1.1), -d128!(1.1)); assert_eq!(d128!(-1.1), -&d128!(1.1)); } #[test] fn binary_op() { assert_eq!(d128!(3.33), d128!(1.11) + d128!(2.22)); assert_eq!(d128!(3.33), &d128!(1.11) + d128!(2.22)); assert_eq!(d128!(3.33), d128!(1.11) + &d128!(2.22)); assert_eq!(d128!(3.33), &d128!(1.11) + &d128!(2.22)); assert_eq!(d128!(5) << 2, d128!(500)); assert_eq!(d128!(500) >> 1, d128!(50)); } #[test] fn assign_op() { let mut x = d128!(1); x += d128!(2); assert_eq!(x, d128!(3)); x *= d128!(3); assert_eq!(x, d128!(9)); x -= d128!(1); assert_eq!(x, d128!(8)); x /= d128!(16); assert_eq!(x, d128!(0.5)); x <<= 2; assert_eq!(x, d128!(50)); x >>= 1; assert_eq!(x, d128!(5)); } #[test] fn as_ref_operand() { assert_eq!(d128!(1.1), d128!(1.1).min(d128!(2.2))); assert_eq!(d128!(1.1), d128!(1.1).min(&d128!(2.2))); } #[test] fn from_i64() { assert_eq!(d128::from_str(&::std::i64::MAX.to_string()).unwrap(), 
d128::from(::std::i64::MAX)); assert_eq!(d128::from(0i32), d128::from(0i64)); assert_eq!(d128::from_str(&(::std::i64::MIN).to_string()).unwrap(), d128::from(::std::i64::MIN)); } #[test] fn from_u64() { assert_eq!(d128::from_str(&::std::u64::MAX.to_string()).unwrap(), d128::from(::std::u64::MAX)); assert_eq!(d128::from(0i32), d128::from(0u64)); assert_eq!(d128::from_str(&(::std::u64::MIN).to_string()).unwrap(), d128::from(::std::u64::MIN)); } #[test] fn test_sum() { let decimals = vec![d128!(1), d128!(2), d128!(3), d128!(4)]; assert_eq!(d128!(10), decimals.iter().sum()); assert_eq!(d128!(10), decimals.into_iter().sum()); } #[test] fn math_op() { assert_eq!(d128!(1), d128!(1).ln().exp()); } }
37.330114
100
0.577436
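A short usage sketch of the API above: parse with `FromStr`, multiply through the operator impls, then `quantize` to a fixed exponent to round to cents. The numbers are illustrative:

use std::str::FromStr;

fn demo() {
    let price = d128::from_str("19.995").unwrap();
    let tax = d128::from_str("1.07").unwrap();
    // Mul routes through decQuadMultiply on the thread-local context.
    let total = price * tax;
    // quantize rounds/pads to the exponent of its operand: here, cents.
    let cents = total.quantize(d128::from_str("0.01").unwrap());
    assert!(cents.is_finite());
}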
e6a3e6be3ca0e65c947947333ccedd116067abf5
4,929
//! Mocks for the loans module. #![cfg(test)] use frame_support::{construct_runtime, parameter_types}; use frame_support::{pallet_prelude::*}; use orml_traits::parameter_type_with_key; use sp_core::H256; use sp_runtime::{ModuleId, testing::Header, testing::TestXt, traits::IdentityLookup}; use sp_runtime::FixedPointNumber; use model::Ratio; use super::*; pub type AccountId = u128; pub type BlockNumber = u64; pub const ALICE: AccountId = 1; pub const BOB: AccountId = 2; pub const DOT: CurrencyId = CurrencyId::Token(1); pub const BTC: CurrencyId = CurrencyId::Token(3); mod loans { pub use super::super::*; } parameter_types! { pub const BlockHashCount: u64 = 250; } impl frame_system::Config for Runtime { type Origin = Origin; type Index = u64; type BlockNumber = BlockNumber; type Call = Call; type Hash = H256; type Hashing = ::sp_runtime::traits::BlakeTwo256; type AccountId = AccountId; type Lookup = IdentityLookup<Self::AccountId>; type Header = Header; type Event = Event; type BlockHashCount = BlockHashCount; type BlockWeights = (); type BlockLength = (); type Version = (); type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData<Balance>; type OnNewAccount = (); type OnKilledAccount = (); type DbWeight = (); type BaseCallFilter = (); type SystemWeightInfo = (); type SS58Prefix = (); } parameter_type_with_key! { pub ExistentialDeposits: |_currency_id: CurrencyId| -> Balance { Default::default() }; } impl orml_tokens::Config for Runtime { type Event = Event; type Balance = Balance; type Amount = Amount; type CurrencyId = CurrencyId; type WeightInfo = (); type ExistentialDeposits = ExistentialDeposits; type OnDust = (); } parameter_types! { pub const ExistentialDeposit: Balance = 0; } impl pallet_balances::Config for Runtime { type Balance = Balance; type DustRemoval = (); type Event = Event; type ExistentialDeposit = ExistentialDeposit; type AccountStore = frame_system::Module<Runtime>; type MaxLocks = (); type WeightInfo = (); } parameter_types! 
{ pub const LoansModuleId: ModuleId = ModuleId(*b"mybankdc"); pub AssetPoolIds: Vec<CurrencyId> = vec![DOT]; pub BlockPercentEachYear: Ratio = Ratio::one(); pub const UnsignedPriority: u64 = 1 << 20; } pub type Extrinsic = TestXt<Call, ()>; impl<LocalCall> SendTransactionTypes<LocalCall> for Runtime where Call: From<LocalCall>, { type OverarchingCall = Call; type Extrinsic = Extrinsic; } impl Config for Runtime { type Event = Event; type Currency = Tokens; type ModuleId = LoansModuleId; type AssetPoolIds = AssetPoolIds; type BlockPercentEachYear = BlockPercentEachYear; type UnsignedPriority = UnsignedPriority; type OnUpdateLoan = (); } type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Runtime>; type Block = frame_system::mocking::MockBlock<Runtime>; construct_runtime!( pub enum Runtime where Block = Block, NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { System: frame_system::{Module, Call, Storage, Config, Event<T>}, LoansModule: loans::{Module, Storage, Call, Event<T>}, Tokens: orml_tokens::{Module, Storage, Event<T>, Config<T>}, PalletBalances: pallet_balances::{Module, Call, Storage, Config<T>, Event<T>}, } ); pub struct ExtBuilder { endowed_accounts: Vec<(AccountId, CurrencyId, Balance)>, asset_pool_params: Vec<(CurrencyId, AssetPoolInfo)>, } impl Default for ExtBuilder { fn default() -> Self { Self { endowed_accounts: vec![ (ALICE, DOT, 1000), (ALICE, BTC, 1000), (BOB, DOT, 1000), (BOB, BTC, 1000), ], asset_pool_params: vec![ (DOT, AssetPoolInfo { maximum_total_debit_ratio: Ratio::saturating_from_rational(90, 100), minimum_deposit: 10u128.pow(1), minimum_debit: 10u128.pow(1), health_ratio: Ratio::saturating_from_rational(75, 100), total_deposit: 0, total_debit: 0, deposit_rate: Ratio::saturating_from_rational(100, 100), debit_rate: Ratio::saturating_from_rational(100, 100), deposit_apy: Ratio::saturating_from_rational(0, 100), debit_apy: Ratio::saturating_from_rational(0, 100), reserve_factor: Ratio::saturating_from_rational(90, 100), interest_info: InterestInfo { critical_point: Ratio::saturating_from_rational(90, 100), base: Ratio::saturating_from_rational(0, 100), slope_1: Ratio::saturating_from_rational(4, 100), slope_2: Ratio::saturating_from_rational(100, 100), }, }), ] } } } impl ExtBuilder { pub fn build(self) -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default() .build_storage::<Runtime>() .unwrap(); orml_tokens::GenesisConfig::<Runtime> { endowed_accounts: self.endowed_accounts, }.assimilate_storage(&mut t).unwrap(); loans::GenesisConfig { asset_pool_params: self.asset_pool_params, }.assimilate_storage::<Runtime>(&mut t).unwrap(); t.into() } }
26.218085
85
0.716778
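Tests drive this mock through `ExtBuilder`: build the externalities, then exercise the pallet inside `execute_with`. A minimal sketch, assuming the `MultiCurrency` accessor from orml_traits for the balance check:

use orml_traits::MultiCurrency;

#[test]
fn ext_builder_endows_accounts() {
    ExtBuilder::default().build().execute_with(|| {
        // ALICE was endowed with 1000 DOT by ExtBuilder::default().
        assert_eq!(
            <Tokens as MultiCurrency<AccountId>>::free_balance(DOT, &ALICE),
            1000
        );
    });
}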
5da57ccfd9aea59ac56711ef09a4bc0ac855dbde
3,276
// This file is part of Substrate. // Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Fuzzing for sequential phragmen with potential balancing. mod common; use common::*; use honggfuzz::fuzz; use rand::{self, SeedableRng}; use sp_npos_elections::{ assignment_ratio_to_staked_normalized, is_score_better, seq_phragmen, to_supports, to_without_backing, EvaluateSupport, VoteWeight, }; use sp_runtime::Perbill; fn main() { loop { fuzz!(|data: (usize, usize, usize, usize, u64)| { let (mut target_count, mut voter_count, mut iterations, mut to_elect, seed) = data; let rng = rand::rngs::SmallRng::seed_from_u64(seed); target_count = to_range(target_count, 100, 200); voter_count = to_range(voter_count, 100, 200); iterations = to_range(iterations, 0, 30); to_elect = to_range(to_elect, 25, target_count); println!( "++ [voter_count: {} / target_count:{} / to_elect:{} / iterations:{}]", voter_count, target_count, to_elect, iterations, ); let (unbalanced, candidates, voters, stake_of_tree) = generate_random_npos_result( voter_count as u64, target_count as u64, to_elect, rng, ElectionType::Phragmen(None), ); let stake_of = |who: &AccountId| -> VoteWeight { *stake_of_tree.get(who).unwrap() }; let unbalanced_score = { let staked = assignment_ratio_to_staked_normalized( unbalanced.assignments.clone(), &stake_of, ) .unwrap(); let winners = to_without_backing(unbalanced.winners.clone()); let score = to_supports(winners.as_ref(), staked.as_ref()).unwrap().evaluate(); if score[0] == 0 { // such cases cannot be improved by balancing. return } score }; if iterations > 0 { let balanced = seq_phragmen::<AccountId, sp_runtime::Perbill>( to_elect, candidates, voters, Some((iterations, 0)), ) .unwrap(); let balanced_score = { let staked = assignment_ratio_to_staked_normalized( balanced.assignments.clone(), &stake_of, ) .unwrap(); let winners = to_without_backing(balanced.winners); to_supports(winners.as_ref(), staked.as_ref()).unwrap().evaluate() }; let enhance = is_score_better(balanced_score, unbalanced_score, Perbill::zero()); println!( "iter = {} // {:?} -> {:?} [{}]", iterations, unbalanced_score, balanced_score, enhance, ); // The only guarantee of balancing is such that the first and third element of the // score cannot decrease. assert!( balanced_score[0] >= unbalanced_score[0] && balanced_score[1] == unbalanced_score[1] && balanced_score[2] <= unbalanced_score[2] ); } }); } }
30.333333
87
0.679487
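`to_range` comes from the shared `common` module, which is not shown; from its call sites it folds an arbitrary fuzzer-chosen integer into a bounded interval. A plausible implementation under that assumption (a guess at the helper's shape, not the actual code):

// Assumed shape of common::to_range: map any usize into [min, max).
fn to_range(x: usize, min: usize, max: usize) -> usize {
    min + x % (max - min)
}

fn main() {
    let t = to_range(987_654, 100, 200);
    assert!((100..200).contains(&t));
}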
727fc8ea60a7d2806cc0d0e0278ae1b6f8be9b39
3,005
#[macro_use] extern crate prettytable; extern crate term; use prettytable::Table; use std::io; use std::io::Write; use std::str::FromStr; const CROSS: &'static str = "X"; const EMPTY: &'static str = " "; const ROUND: &'static str = "O"; fn main() { let mut table = table![ [EMPTY, EMPTY, EMPTY], [EMPTY, EMPTY, EMPTY], [EMPTY, EMPTY, EMPTY] ]; let mut height = table.printstd(); let stdin = io::stdin(); let mut stdout = io::stdout(); let mut current = CROSS; let mut terminal = term::stdout().unwrap(); loop { let mut line = String::new(); print!("{} plays > ", current); height += 1; stdout.flush().unwrap(); stdin.read_line(&mut line).expect("Cannot read input"); let i = match usize::from_str(line.trim()) { Ok(i) => i, _ => { println!("Bad input"); height += 1; continue; } }; if i < 1 || i > 9 { println!("Bad input, should be between 1 and 9"); height += 1; continue; } let x = (i - 1) % 3; let y = (i - 1) / 3; { let row = table.get_mut_row(y).unwrap(); if row.get_cell(x).unwrap().to_string() != EMPTY { println!("There's already someone there"); height += 1; continue; } row.set_cell(cell!(current), x).unwrap(); } for _ in 0..height { terminal.cursor_up().unwrap(); terminal.delete_line().unwrap(); } height = table.printstd(); if check(&table) { return; } if current == CROSS { current = ROUND; } else { current = CROSS; } } } fn get(table: &Table, x: usize, y: usize) -> String { match table.get_row(y) { Some(r) => match r.get_cell(x) { Some(c) => c.to_string(), _ => EMPTY.to_string(), }, _ => EMPTY.to_string(), } } fn is(table: &Table, s: &str, x: usize, y: usize) -> bool { get(table, x, y).as_str() == s } fn check(table: &Table) -> bool { let mut full = true; for y in 0..3 { for x in 0..3 { if is(table, EMPTY, x, y) { full = false; continue; } let current = get(table, x, y); let c = current.as_str(); if is(table, c, x + 1, y) && is(table, c, x + 2, y) || is(table, c, x + 1, y + 1) && is(table, c, x + 2, y + 2) || x >= 2 && is(table, c, x - 1, y + 1) && is(table, c, x - 2, y + 2) || is(table, c, x, y + 1) && is(table, c, x, y + 2) { println!("Game is over. {} is the winner", current); return true; } } } if full { println!("Game is over. It's a draw"); } full }
27.072072
85
0.439268
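The keypad-style input above maps digits 1–9 onto row-major cells with `x = (i - 1) % 3` and `y = (i - 1) / 3`. A quick check of that arithmetic:

fn main() {
    // i = 1 is top-left, i = 5 is the center, i = 9 is bottom-right.
    assert_eq!(((1 - 1) % 3, (1 - 1) / 3), (0, 0));
    assert_eq!(((5 - 1) % 3, (5 - 1) / 3), (1, 1));
    assert_eq!(((9 - 1) % 3, (9 - 1) / 3), (2, 2));
}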
ffa47be4a773d170931efdb8e11031179926b07d
1,527
use std::io::Error; use bytes::BytesMut; use super::{framed_write::FramedWriteImpl, fuse::Fuse}; /// Decoding of frames via buffers, for use with `FramedRead`. pub trait Decoder { /// The type of items returned by `decode` type Item; /// The type of decoding errors. type Error: From<Error>; /// Decode an item from the src `BytesMut` into an item fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error>; /// Called when the input stream reaches EOF, signaling a last attempt to decode /// /// # Notes /// /// The default implementation of this method invokes the `Decoder::decode` method. fn decode_eof(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> { self.decode(src) } } impl<T, U: Decoder> Decoder for Fuse<T, U> { type Item = U::Item; type Error = U::Error; fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> { self.codec.decode(src) } fn decode_eof(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> { self.codec.decode_eof(src) } } impl<T: Decoder> Decoder for FramedWriteImpl<T> { type Item = T::Item; type Error = T::Error; fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> { self.inner.decode(src) } fn decode_eof(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> { self.inner.decode_eof(src) } }
29.365385
93
0.629339
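A typical implementor of the `Decoder` trait above splits complete frames off the front of the `BytesMut` and returns `Ok(None)` while a frame is still incomplete. A sketch of a newline-delimited decoder; `LineDecoder` is illustrative and not part of the crate:

use bytes::BytesMut;
use std::io::Error;

struct LineDecoder;

impl Decoder for LineDecoder {
    type Item = String;
    type Error = Error;

    fn decode(&mut self, src: &mut BytesMut) -> Result<Option<String>, Error> {
        match src.iter().position(|&b| b == b'\n') {
            Some(idx) => {
                // Consume the line and its terminator from the buffer.
                let line = src.split_to(idx + 1);
                Ok(Some(String::from_utf8_lossy(&line[..idx]).into_owned()))
            }
            // No full line yet: ask the framed reader for more bytes.
            None => Ok(None),
        }
    }
}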
693455743a2e6340f9c2782dbfa36a5ae68400c1
617
#![feature(inclusive_range_syntax)]
#![feature(test)]

extern crate test;

/// Main module, contains basics `Structures`.
pub mod board;
mod color;
mod functions;
mod axes;
mod directions;

pub use color::*;
pub use functions::*;
pub use axes::*;
pub use directions::*;

/// Represent the `width` and `height` of the `Grid`.
pub const GRID_LEN: usize = 19;

/// Represent a position on the `Grid`.
pub type Position = (usize, usize);

/// Represent a `Grid` tile, `None` indicates no stone.
pub type Tile = Option<color::Color>;

/// Represent the Gomoku `grid` (19x19).
pub type Grid = [[Tile; GRID_LEN]; GRID_LEN];
22.035714
55
0.688817
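A hedged sketch of how these aliases compose; `Color` here is a stand-in for the crate's `color::Color`, which lives in a module not included in this record.

const GRID_LEN: usize = 19;
type Position = (usize, usize);

// Stand-in for the crate's color::Color (not shown in this record).
#[derive(Clone, Copy)]
enum Color { Black, White }

type Tile = Option<Color>;
type Grid = [[Tile; GRID_LEN]; GRID_LEN];

fn main() {
    // An empty board: every Tile is None (no stone).
    let mut grid: Grid = [[None; GRID_LEN]; GRID_LEN];
    let pos: Position = (9, 9);
    grid[pos.0][pos.1] = Some(Color::Black); // place a stone at the center
    grid[0][0] = Some(Color::White);
    assert!(grid[9][9].is_some());
}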
1cc64ea17e79e71cea76cefb6aa216f23f195500
418
#![feature(type_alias_impl_trait)]

type Foo = impl std::fmt::Debug;

fn foo(b: bool) -> Foo {
    if b {
        return vec![];
    }
    let x: Vec<i32> = foo(false);
    std::iter::empty().collect() //~ ERROR `Foo` cannot be built from an iterator
}

fn bar(b: bool) -> impl std::fmt::Debug {
    if b {
        return vec![]
    }
    let x: Vec<i32> = bar(false);
    std::iter::empty().collect()
}

fn main() {}
19
81
0.543062
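The `//~ ERROR` above fires because `collect()` needs a known `FromIterator` target, and the opaque alias `Foo` hides the concrete type at that call site. A hedged sketch (hypothetical `foo_fixed`, not part of the test) of the same body with a concrete return type, which compiles on stable:

fn foo_fixed(b: bool) -> Vec<i32> {
    if b {
        return vec![];
    }
    // With a concrete Vec<i32> return type, collect() has a known
    // FromIterator implementation to resolve against.
    std::iter::empty().collect()
}

fn main() {
    assert!(foo_fixed(true).is_empty());
}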
8f5e531cefc6624c73759105968ccecfee1ccc9a
2,151
use std::io::Write;

use rmp::encode::{write_nil, write_bool, write_uint, write_sint, write_f32, write_f64, write_str,
                  write_bin, write_array_len, write_map_len, write_ext_meta};

use crate::{Integer, IntPriv, Utf8String, Value};

use super::Error;

/// Encodes and attempts to write the most efficient representation of the given Value.
///
/// # Note
///
/// All instances of `ErrorKind::Interrupted` are handled by this function and the underlying
/// operation is retried.
pub fn write_value<W>(wr: &mut W, val: &Value) -> Result<(), Error>
    where W: Write
{
    match *val {
        Value::Nil => {
            write_nil(wr).map_err(|err| Error::InvalidMarkerWrite(err))?;
        }
        Value::Boolean(val) => {
            write_bool(wr, val).map_err(|err| Error::InvalidMarkerWrite(err))?;
        }
        Value::Integer(Integer { n }) => {
            match n {
                IntPriv::PosInt(n) => {
                    write_uint(wr, n)?;
                }
                IntPriv::NegInt(n) => {
                    write_sint(wr, n)?;
                }
            }
        }
        Value::F32(val) => {
            write_f32(wr, val)?;
        }
        Value::F64(val) => {
            write_f64(wr, val)?;
        }
        Value::String(Utf8String { ref s }) => {
            match *s {
                Ok(ref val) => write_str(wr, &val)?,
                Err(ref err) => write_bin(wr, &err.0)?,
            }
        }
        Value::Binary(ref val) => {
            write_bin(wr, &val)?;
        }
        Value::Array(ref vec) => {
            write_array_len(wr, vec.len() as u32)?;
            for v in vec {
                write_value(wr, v)?;
            }
        }
        Value::Map(ref map) => {
            write_map_len(wr, map.len() as u32)?;
            for &(ref key, ref val) in map {
                write_value(wr, key)?;
                write_value(wr, val)?;
            }
        }
        Value::Ext(ty, ref data) => {
            write_ext_meta(wr, data.len() as u32, ty)?;
            wr.write_all(data).map_err(|err| Error::InvalidDataWrite(err))?;
        }
    }

    Ok(())
}
30.295775
97
0.481172
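A hedged usage sketch (assuming this `write_value` is the one exposed by the rmpv crate as `rmpv::encode::write_value`): encode a small value tree into an in-memory byte buffer.

use rmpv::Value;
use rmpv::encode::write_value;

fn main() {
    // Any Write sink works; a Vec<u8> keeps the example self-contained.
    let mut buf = Vec::new();
    let val = Value::Array(vec![Value::from(1), Value::from("two")]);
    write_value(&mut buf, &val).expect("in-memory write cannot fail");
    // MessagePack: 0x92 = fixarray with 2 elements, then the encoded items.
    assert_eq!(buf[0], 0x92);
}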
bfe59e9b9536be59b41ccbe5d6ec13e67a3eab38
3,658
//! Test suite for the Web and headless browsers. #![cfg(target_arch = "wasm32")] extern crate wasm_bindgen_test; use wasm_bindgen_test::*; use music_analyzer_wasm_rs::*; extern crate web_sys; // wasm_bindgen_test_configure!(run_in_browser); // A macro to provide `println!(..)`-style syntax for `console.log` logging. macro_rules! console_log_test { ( $( $t:tt )* ) => { web_sys::console::log_1(&format!( $( $t )* ).into()); } } fn print_detector_state(detector: &pitch_detector::PitchDetector) { console_log_test!( "time_of_first_sample {} time_of_next_unprocessed_sample {} index_of_next_unprocessed_sample {} num_audio_samples {}", detector.time_of_first_sample, detector.time_of_next_unprocessed_sample, detector.index_of_next_unprocessed_sample(), detector.num_audio_samples() ); // let array = js_sys::Array::new(); // array.push( // &format!( // ) // .into(), // ); // web_sys::console::log(&array); } #[wasm_bindgen_test] fn returns_error_if_no_samples() { let processor = audio_samples_processor::AudioSamplesProcessor::new(); let mut detector = processor .create_pitch_detector(String::from("McLeod"), 1024, 44100, 0.7, 0.6) .unwrap(); let result = detector.pitches(); assert_eq!(result.code(), "not_enough_samples"); assert_eq!(result.message(), "pitches() requires at least 1024 samples and there are currently 0. Ensure set_audio_samples() has been called once enough samples are available."); assert_eq!(result.pitches().length(), 0); } #[wasm_bindgen_test] fn adding_data() { let mut processor = audio_samples_processor::AudioSamplesProcessor::new(); const SAMPLE_RATE: usize = 48000; const FREQUENCY: f32 = 440.0; const DURATION: f32 = 0.2; const SAMPLE_SIZE: usize = (SAMPLE_RATE as f32 * DURATION) as usize; let sine_wave_samples = test_utils::sin_signal(FREQUENCY, SAMPLE_SIZE, SAMPLE_RATE); for i in 0..32 { processor.add_samples_chunk( sine_wave_samples[(i * 128)..((i + 1) * 128)] .iter() .cloned() .collect(), ); } let mut detector = processor .create_pitch_detector(String::from("McLeod"), 2048, 44100, 0.7, 0.6) .unwrap(); processor.set_latest_samples_on(&mut detector); print_detector_state(&detector); let result = detector.pitches(); // Generates four pitches (one for each sliding window). assert_eq!(result.pitches().length(), 4); // // Calling again returns nothing. // pitches = detector.pitches(); // assert_eq!(pitches.length(), 0); // print_detector_state(&detector); // // Add more samples // for i in 0..4 { // processor.add_samples_chunk( // sine_wave_samples[(i * 128)..((i + 1) * 128)] // .iter() // .cloned() // .collect(), // ); // } // processor.set_latest_samples_on(&mut detector); // print_detector_state(&detector); // pitches = detector.pitches(); // assert_eq!(pitches.length(), 1); // print_detector_state(&detector); // // Try getting more pitches when we've exhausted the available samples. // pitches = detector.pitches(); // assert_eq!(pitches.length(), 0); // print_detector_state(&detector); // // Add lots more samples (more than the internal buffer size) // for i in 0..48 { // processor.add_samples_chunk( // sine_wave_samples[(i * 128)..((i + 1) * 128)] // .iter() // .cloned() // .collect(), // ); // } // processor.set_latest_samples_on(&mut detector); // print_detector_state(&detector); // pitches = detector.pitches(); // assert_eq!(pitches.length(), 12); // print_detector_state(&detector); }
28.578125
180
0.660744
ed05d65f71a70b2cab89ebfb5ced7bcb59554fdc
1,528
mod graph;
mod levenshtein;
mod dijkstra;

use graph::Node;
use std::collections::HashSet;
use typed_arena::Arena;

fn init<'a>(arena: &'a Arena<Node<'a>>) -> &'a Node<'a> {
    let root = Node::new("A", arena);
    let b = Node::new("B", arena);
    let c = Node::new("C", arena);
    let d = Node::new("D", arena);
    let e = Node::new("E", arena);
    let f = Node::new("F", arena);
    let g = Node::new("G", arena);
    unsafe {
        (*root.edges.get()).push((b, 4));
        (*root.edges.get()).push((c, 7));
        (*root.edges.get()).push((d, 2));
        (*b.edges.get()).push((e, 2));
        (*c.edges.get()).push((e, 2));
        (*c.edges.get()).push((f, 3));
        (*c.edges.get()).push((root, 1));
        (*d.edges.get()).push((f, 3));
        (*e.edges.get()).push((g, 2));
        (*f.edges.get()).push((g, 3));
        (*f.edges.get()).push((c, 1));
    }
    root
}

fn foo<'a>(node: &'a Node<'a>) {
    println!("foo: {}", node.datum);
}

fn main() {
    let arena = Arena::new();
    let g = init(&arena);
    g.traverse(&|d| println!("{}", d), &mut HashSet::new());
    foo(g.first());
    for (node, _) in g.children() {
        println!("{}", node);
    }

    let target = "C";
    if dijkstra::dijkstra(g, target).is_err() {
        println!("Target {} could not be found.", target)
    }

    let str1 = "test";
    let str2 = "fest";
    println!(
        "Levenshtein distance between {} and {}: {}",
        str1,
        str2,
        levenshtein::levenshtein(str1, str2)
    );
}
22.470588
60
0.48233
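The `levenshtein` module itself is not included in this record; as a hedged sketch, a textbook dynamic-programming version that would satisfy the call above ("test" vs "fest" has distance 1):

// Classic edit-distance DP, kept to two rows: O(n*m) time, O(m) space.
fn levenshtein(a: &str, b: &str) -> usize {
    let (a, b): (Vec<char>, Vec<char>) = (a.chars().collect(), b.chars().collect());
    let mut prev: Vec<usize> = (0..=b.len()).collect(); // row 0: delete-only costs
    for (i, ca) in a.iter().enumerate() {
        let mut cur = vec![i + 1];
        for (j, cb) in b.iter().enumerate() {
            let sub = prev[j] + if ca == cb { 0 } else { 1 };
            // min of substitution, deletion, insertion
            cur.push(sub.min(prev[j + 1] + 1).min(cur[j] + 1));
        }
        prev = cur;
    }
    prev[b.len()]
}

fn main() {
    assert_eq!(levenshtein("test", "fest"), 1); // single substitution
}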
e6b3c52a155b59e298f45bd7c5b812890f6455b8
31,183
use std::borrow::BorrowMut; use std::io::{Read, Seek, SeekFrom}; use std::iter::{Iterator, once, repeat, Rev}; use std::slice::ChunksMut; use byteorder::{ReadBytesExt, LittleEndian}; use image::{ DecodingResult, ImageResult, ImageDecoder, ImageError }; use color::ColorType; const BITMAPCOREHEADER_SIZE: u32 = 12; const BITMAPINFOHEADER_SIZE: u32 = 40; const BITMAPV2HEADER_SIZE: u32 = 52; const BITMAPV3HEADER_SIZE: u32 = 56; const BITMAPV4HEADER_SIZE: u32 = 108; const BITMAPV5HEADER_SIZE: u32 = 124; static LOOKUP_TABLE_4_BIT_TO_8_BIT: [u8; 16] = [0, 17, 34, 51, 68, 85, 102, 119, 136, 153, 170, 187, 204, 221, 238, 255]; static LOOKUP_TABLE_5_BIT_TO_8_BIT: [u8; 32] = [0, 8, 16, 25, 33, 41, 49, 58, 66, 74, 82, 90, 99, 107, 115, 123, 132, 140, 148, 156, 165, 173, 181, 189, 197, 206, 214, 222, 230, 239, 247, 255]; static LOOKUP_TABLE_6_BIT_TO_8_BIT: [u8; 64] = [0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 45, 49, 53, 57, 61, 65, 69, 73, 77, 81, 85, 89, 93, 97, 101, 105, 109, 113, 117, 121, 125, 130, 134, 138, 142, 146, 150, 154, 158, 162, 166, 170, 174, 178, 182, 186, 190, 194, 198, 202, 206, 210, 215, 219, 223, 227, 231, 235, 239, 243, 247, 251, 255]; const R4_G4_B4_COLOR_MASK: (u32, u32, u32) = (0xF00, 0xF0, 0xF); const R5_G5_B5_COLOR_MASK: (u32, u32, u32) = (0x7c00, 0x03e0, 0x1f); const R5_G6_B5_COLOR_MASK: (u32, u32, u32) = (0xf800, 0x07e0, 0x1f); const R8_G8_B8_COLOR_MASK: (u32, u32, u32) = (0xff000000, 0xff0000, 0xff00); const RLE_ESCAPE: u8 = 0; const RLE_ESCAPE_EOL: u8 = 0; const RLE_ESCAPE_EOF: u8 = 1; const RLE_ESCAPE_DELTA: u8 = 2; #[derive(PartialEq, Copy, Clone)] enum ImageType { RGB, RLE8, RLE4, Bitfields, } #[derive(PartialEq)] enum BMPHeaderType { CoreHeader, InfoHeader, V2Header, V3Header, V4Header, V5Header, } #[derive(PartialEq)] enum Format16Bit { Format444, Format555, Format565 } #[derive(PartialEq)] enum FormatFullBytes { FormatRGB24, FormatRGB32, FormatRGBA32, Format888 } enum Chunker<'a> { FromTop(ChunksMut<'a, u8>), FromBottom(Rev<ChunksMut<'a, u8>>), } pub struct RowIterator<'a> { chunks: Chunker<'a> } impl<'a> Iterator for RowIterator<'a> { type Item = &'a mut [u8]; #[inline(always)] fn next(&mut self) -> Option<&'a mut [u8]> { match self.chunks { Chunker::FromTop(ref mut chunks) => chunks.next(), Chunker::FromBottom(ref mut chunks) => chunks.next() } } } fn set_8bit_pixel_run<'a, T: Iterator<Item=&'a u8>>(pixel_iter: &mut ChunksMut<u8>, palette: &Vec<(u8, u8, u8)>, indices: T, n_pixels: usize) -> bool { for idx in indices.take(n_pixels) { if let Some(pixel) = pixel_iter.next() { let (r, g, b) = palette[*idx as usize]; pixel[0] = r; pixel[1] = g; pixel[2] = b; } else { return false; } } true } fn set_4bit_pixel_run<'a, T: Iterator<Item=&'a u8>>(pixel_iter: &mut ChunksMut<u8>, palette: &Vec<(u8, u8, u8)>, indices: T, n_pixels: usize) -> bool { for idx in indices.flat_map(|i| once(i >> 4).chain(once(i & 0xf))).take(n_pixels) { if let Some(pixel) = pixel_iter.next() { let (r, g, b) = palette[idx as usize]; pixel[0] = r; pixel[1] = g; pixel[2] = b; } else { return false; } } true } /// A bmp decoder pub struct BMPDecoder<R> { r: R, bmp_header_type: BMPHeaderType, width: i32, height: i32, data_offset: u64, top_down: bool, no_file_header: bool, add_alpha_channel: bool, has_loaded_metadata: bool, image_type: ImageType, bit_count: u16, colors_used: u32, palette: Option<Vec<(u8, u8, u8)>>, bitfields: Option<(u32, u32, u32)>, } enum RLEInsn { EndOfFile, EndOfRow, Delta(u8, u8), Absolute(u8, Vec<u8>), PixelRun(u8, u8), } struct RLEInsnIterator<'a, R: 'a + Read> { r: &'a mut R, 
image_type: ImageType, } impl<'a, R: Read> Iterator for RLEInsnIterator<'a, R> { type Item = RLEInsn; fn next(&mut self) -> Option<RLEInsn> { let control_byte = match self.r.read_u8() { Ok(b) => b, Err(_) => return None }; match control_byte { RLE_ESCAPE => { let op = match self.r.read_u8() { Ok(b) => b, Err(_) => return None }; match op { RLE_ESCAPE_EOL => Some(RLEInsn::EndOfRow), RLE_ESCAPE_EOF => Some(RLEInsn::EndOfFile), RLE_ESCAPE_DELTA => { let xdelta = match self.r.read_u8() { Ok(n) => n, Err(_) => return None }; let ydelta = match self.r.read_u8() { Ok(n) => n, Err(_) => return None }; Some(RLEInsn::Delta(xdelta, ydelta)) }, _ => { let mut length = op as usize; if self.image_type == ImageType::RLE4 { length = (length + 1) / 2; } length += length & 1; let mut buffer : Vec<u8> = Vec::with_capacity(length); // XXX, should use resize() buffer.extend(0..length as u8); match self.r.read(buffer.borrow_mut()) { Ok(n) if n == length as usize => { Some(RLEInsn::Absolute(op, buffer)) }, _ => None } } } }, _ => { match self.r.read_u8() { Ok(palette_index) => { Some(RLEInsn::PixelRun(control_byte, palette_index)) }, Err(_) => None } } } } } impl<R: Read + Seek> BMPDecoder<R> { /// Create a new decoder that decodes from the stream ```r``` pub fn new(r: R) -> BMPDecoder<R> { BMPDecoder { r: r, bmp_header_type: BMPHeaderType::InfoHeader, width: 0, height: 0, data_offset: 0, top_down: false, no_file_header: false, add_alpha_channel: false, has_loaded_metadata: false, image_type: ImageType::RGB, bit_count: 0, colors_used: 0, palette: None, bitfields: None, } } #[cfg(feature = "ico")] #[doc(hidden)] pub fn reader(&mut self) -> &mut R { &mut self.r } fn read_file_header(&mut self) -> ImageResult<()> { if self.no_file_header { return Ok(()) } let mut signature = [0; 2]; if try!(self.r.read(&mut signature)) != 2 { return Err(ImageError::ImageEnd); } if signature != b"BM"[..] 
{ return Err(ImageError::FormatError("BMP signature not found".to_string())); } // The next 8 bytes represent file size, followed the 4 reserved bytes // We're not interesting these values try!(self.r.seek(SeekFrom::Current(8))); self.data_offset = try!(self.r.read_u32::<LittleEndian>()) as u64; Ok(()) } fn read_bitmap_core_header(&mut self) ->ImageResult<()> { self.width = try!(self.r.read_u16::<LittleEndian>()) as i32; self.height = try!(self.r.read_u16::<LittleEndian>()) as i32; // Don't care about number of planes try!(self.r.seek(SeekFrom::Current(2))); self.bit_count = try!(self.r.read_u16::<LittleEndian>()); match self.bit_count { 1 | 4 | 8 | 24 => (), _ => return Err(ImageError::FormatError("Invalid bit count".to_string())), } Ok(()) } fn read_bitmap_info_header(&mut self) -> ImageResult<()> { self.width = try!(self.r.read_i32::<LittleEndian>()); self.height = try!(self.r.read_i32::<LittleEndian>()); if self.width < 0 { return Err(ImageError::FormatError("Negative width".to_string())); } if self.height == i32::min_value() { return Err(ImageError::FormatError("Invalid height".to_string())); } if self.height < 0 { self.height *= -1; self.top_down = true; } // Don't care about number of planes try!(self.r.seek(SeekFrom::Current(2))); self.bit_count = try!(self.r.read_u16::<LittleEndian>()); match self.bit_count { 1 | 4 | 8 | 16 | 24 | 32 => (), _ => return Err(ImageError::FormatError("Invalid bit count".to_string())), } let image_type_u32 = try!(self.r.read_u32::<LittleEndian>()); match image_type_u32 { 0 => self.image_type = ImageType::RGB, 1 => self.image_type = ImageType::RLE8, 2 => self.image_type = ImageType::RLE4, 3 => self.image_type = ImageType::Bitfields, _ => return Err(ImageError::UnsupportedError("Unsupported image type".to_string())), } // The next 12 bytes represent data array size in bytes, // followed the horizontal and vertical printing resolutions // We will calculate the pixel array size using width & height of image // We're not interesting the horz or vert printing resolutions try!(self.r.seek(SeekFrom::Current(12))); self.colors_used = try!(self.r.read_u32::<LittleEndian>()); // The next 4 bytes represent number of "important" colors // We're not interested in this value, so we'll skip it try!(self.r.seek(SeekFrom::Current(4))); Ok(()) } fn read_bitmasks(&mut self) -> ImageResult<()> { let r_mask = try!(self.r.read_u32::<LittleEndian>()); let g_mask = try!(self.r.read_u32::<LittleEndian>()); let b_mask = try!(self.r.read_u32::<LittleEndian>()); self.bitfields = Some((r_mask, g_mask, b_mask)); Ok(()) } fn read_metadata(&mut self) -> ImageResult<()> { if !self.has_loaded_metadata { try!(self.read_file_header()); let bmp_header_size = try!(self.r.read_u32::<LittleEndian>()); self.bmp_header_type = match bmp_header_size { BITMAPCOREHEADER_SIZE => BMPHeaderType::CoreHeader, BITMAPINFOHEADER_SIZE => BMPHeaderType::InfoHeader, BITMAPV2HEADER_SIZE => BMPHeaderType::V2Header, BITMAPV3HEADER_SIZE => BMPHeaderType::V3Header, BITMAPV4HEADER_SIZE => BMPHeaderType::V4Header, BITMAPV5HEADER_SIZE => BMPHeaderType::V5Header, _ => return Err(ImageError::UnsupportedError("Unsupported Bitmap Header".to_string())) }; match self.bmp_header_type { BMPHeaderType::CoreHeader => { try!(self.read_bitmap_core_header()); }, BMPHeaderType::InfoHeader | BMPHeaderType::V2Header | BMPHeaderType::V3Header | BMPHeaderType::V4Header | BMPHeaderType::V5Header => { try!(self.read_bitmap_info_header()); } }; match self.image_type { ImageType::RGB => { match self.bit_count { 1 | 4 | 8 => 
try!(self.read_palette()), 16 | 24 | 32 => (), _ => return Err(ImageError::UnsupportedError(format!("Unsupported bit count: {}", self.bit_count ))), }; }, ImageType::RLE8 => { match self.bit_count { 8 => try!(self.read_palette()), _ => return Err(ImageError::UnsupportedError(format!("Unsupported bit count: {}", self.bit_count))), }; }, ImageType::RLE4 => { match self.bit_count { 4 => try!(self.read_palette()), _ => return Err(ImageError::UnsupportedError(format!("Unsupported bit count: {}", self.bit_count))), }; }, ImageType::Bitfields => { match self.bit_count { 16 | 32 => { if self.bmp_header_type == BMPHeaderType::CoreHeader { return Err(ImageError::FormatError("Cannot use bitfield mode with BITMAPCOREHEADER BMP".to_string())); } try!(self.read_bitmasks()); // Skip past alpha mask if self.bmp_header_type != BMPHeaderType::InfoHeader && self.bmp_header_type != BMPHeaderType::V2Header { try!(self.r.seek(SeekFrom::Current(1))); } }, _ => return Err(ImageError::FormatError("Invalid bit count for bitfield BMP".to_string())), } }, }; self.has_loaded_metadata = true; } Ok(()) } #[cfg(feature = "ico")] #[doc(hidden)] pub fn read_metadata_in_ico_format(&mut self, info_header_offset: u32) -> ImageResult<()> { // Use the offset from the ICO header instead of reading a BMP file header. self.data_offset = (info_header_offset + BITMAPINFOHEADER_SIZE) as u64; self.no_file_header = true; self.add_alpha_channel = true; // The height field in an ICO file is doubled to account for the AND mask // (whether or not an AND mask is actually present). try!(self.read_metadata()); self.height = self.height / 2; Ok(()) } fn get_palette_size(&mut self) -> ImageResult<usize> { match self.colors_used { 0 => match self.bit_count { 8 | 4 | 1 => Ok(1 << self.bit_count), _ => Err(ImageError::FormatError("Invalid bit count for palletized BMP".to_string())) }, _ => { if self.colors_used > 1 << self.bit_count { return Err(ImageError::FormatError(format!( "Palette size {} exceeds maximum size for BMP with bit count of {}", self.colors_used, self.bit_count ))) } Ok(self.colors_used as usize) } } } fn bytes_per_color(&self) -> usize { match self.bmp_header_type { BMPHeaderType::CoreHeader => 3, _ => 4 } } fn read_palette(&mut self) -> ImageResult<()> { const MAX_PALETTE_SIZE: usize = 256; // Palette indices are u8. let bytes_per_color = self.bytes_per_color(); let palette_size = try!(self.get_palette_size()); let length = palette_size * bytes_per_color; let max_length = MAX_PALETTE_SIZE * bytes_per_color; let mut buf = Vec::with_capacity(max_length); try!(self.r.by_ref().take(length as u64).read_to_end(&mut buf)); // Allocate 256 entries even if palette_size is smaller, to prevent corrupt files from // causing an out-of-bounds array access. if length < max_length { // TODO: Use Vec::resize when it become stable. 
buf.extend(repeat(0).take(max_length - length)); } let p: Vec<(u8, u8, u8)> = (0..MAX_PALETTE_SIZE).map(|i| { let b = buf[bytes_per_color * i]; let g = buf[bytes_per_color * i + 1]; let r = buf[bytes_per_color * i + 2]; (r, g, b) }).collect(); self.palette = Some(p); Ok(()) } fn read_color_index_data(&mut self) -> ImageResult<Vec<u8>> { let row_byte_length = ((self.bit_count as u32 * self.width as u32 + 31) / 32 * 4) as usize; let indexes_per_byte = 8 / self.bit_count; let bit_mask = ((1 << self.bit_count as u16) - 1) as u8; let mut result = vec![0; self.width as usize * self.height as usize]; try!(self.r.seek(SeekFrom::Start(self.data_offset))); for h in 0..self.height { let mut line = Vec::with_capacity(row_byte_length); try!(self.r.by_ref().take(row_byte_length as u64).read_to_end(&mut line)); let x = if self.top_down { h } else { self.height - h - 1 }; let mut y = 0; for i in 0..line.len() { let byte = line[i]; for j in 0..indexes_per_byte { if y >= self.width { break; } result[x as usize * self.width as usize + y as usize] = byte >> (8 - self.bit_count * (j + 1)) & bit_mask; y += 1; } } } Ok(result) } fn num_channels(&self) -> usize { if self.add_alpha_channel { 4 } else { 3 } } fn create_pixel_data(&self) -> Vec<u8> { vec![0xFF; self.num_channels() * self.width as usize * self.height as usize] } fn rows<'a>(&self, pixel_data: &'a mut Vec<u8>) -> RowIterator<'a> { let stride = self.width as usize * self.num_channels(); if self.top_down { RowIterator{ chunks: Chunker::FromTop(pixel_data.chunks_mut(stride)) } } else { RowIterator{ chunks: Chunker::FromBottom(pixel_data.chunks_mut(stride).rev()) } } } fn read_palletized_pixel_data(&mut self) -> ImageResult<Vec<u8>> { let mut pixel_data = self.create_pixel_data(); let num_channels = self.num_channels(); let indexes = try!(self.read_color_index_data()); let palette = self.palette.as_mut().unwrap(); for (i, pixel) in pixel_data.chunks_mut(num_channels).enumerate() { let (r, g, b) = palette[indexes[i] as usize]; pixel[0] = r; pixel[1] = g; pixel[2] = b; } Ok(pixel_data) } fn read_16_bit_pixel_data(&mut self, format: Format16Bit) -> ImageResult<Vec<u8>> { let mut pixel_data = self.create_pixel_data(); let num_channels = self.num_channels(); let row_padding = self.width % 2 * 2; try!(self.r.seek(SeekFrom::Start(self.data_offset))); for row in self.rows(&mut pixel_data) { for pixel in row.chunks_mut(num_channels) { let data = try!(self.r.read_u16::<LittleEndian>()); let b = match format { Format16Bit::Format444 => LOOKUP_TABLE_4_BIT_TO_8_BIT[(data & 0b1111) as usize], Format16Bit::Format555 => LOOKUP_TABLE_5_BIT_TO_8_BIT[(data & 0b11111) as usize], Format16Bit::Format565 => LOOKUP_TABLE_5_BIT_TO_8_BIT[(data & 0b11111) as usize] }; let g = match format { Format16Bit::Format444 => LOOKUP_TABLE_4_BIT_TO_8_BIT[(data >> 4 & 0b1111) as usize], Format16Bit::Format555 => LOOKUP_TABLE_5_BIT_TO_8_BIT[(data >> 5 & 0b11111) as usize], Format16Bit::Format565 => LOOKUP_TABLE_6_BIT_TO_8_BIT[(data >> 5 & 0b111111) as usize] }; let r = match format { Format16Bit::Format444 => LOOKUP_TABLE_4_BIT_TO_8_BIT[(data >> 8 & 0b1111) as usize], Format16Bit::Format555 => LOOKUP_TABLE_5_BIT_TO_8_BIT[(data >> 10 & 0b11111) as usize], Format16Bit::Format565 => LOOKUP_TABLE_5_BIT_TO_8_BIT[(data >> 11 & 0b11111) as usize] }; pixel[0] = r; pixel[1] = g; pixel[2] = b; } // Seek past row padding try!(self.r.seek(SeekFrom::Current(row_padding as i64))); } Ok(pixel_data) } fn read_full_byte_pixel_data(&mut self, format: FormatFullBytes) -> ImageResult<Vec<u8>> { let mut pixel_data 
= self.create_pixel_data(); let num_channels = self.num_channels(); let row_padding = match format { FormatFullBytes::FormatRGB24 => (4 - (self.width as i64 * 3) % 4) % 4, _ => 0 }; try!(self.r.seek(SeekFrom::Start(self.data_offset))); for row in self.rows(&mut pixel_data) { for pixel in row.chunks_mut(num_channels) { if format == FormatFullBytes::Format888 { try!(self.r.seek(SeekFrom::Current(1))); } let b = try!(self.r.read_u8()); let g = try!(self.r.read_u8()); let r = try!(self.r.read_u8()); if format == FormatFullBytes::FormatRGB32 { try!(self.r.seek(SeekFrom::Current(1))); } pixel[0] = r; pixel[1] = g; pixel[2] = b; if format == FormatFullBytes::FormatRGBA32 { let a = try!(self.r.read_u8()); pixel[3] = a; } } // Seek past row padding try!(self.r.seek(SeekFrom::Current(row_padding))); } Ok(pixel_data) } fn read_rle_data(&mut self, image_type: ImageType) -> ImageResult<Vec<u8>> { let mut pixel_data = self.create_pixel_data(); let num_channels = self.num_channels(); try!(self.r.seek(SeekFrom::Start(self.data_offset))); // Scope the borrowing of pixel_data by the row iterator. { // Handling deltas in the RLE scheme means that we need to manually // iterate through rows and pixels. Even if we didn't have to handle // deltas, we have to ensure that a single runlength doesn't straddle // two rows. let mut row_iter = self.rows(&mut pixel_data); let mut insns_iter = RLEInsnIterator{ r: &mut self.r, image_type: image_type }; let p = self.palette.as_ref().unwrap(); 'row_loop: while let Some(row) = row_iter.next() { let mut pixel_iter = row.chunks_mut(num_channels); 'rle_loop: loop { if let Some(insn) = insns_iter.next() { match insn { RLEInsn::EndOfFile => { break 'row_loop; }, RLEInsn::EndOfRow => { break 'rle_loop; }, RLEInsn::Delta(x_delta, y_delta) => { for _ in 0..x_delta { if let None = pixel_iter.next() { // We can't go any further in this row. break; } } if y_delta > 0 { for _ in 1..y_delta { if let None = row_iter.next() { // We've reached the end of the image. break 'row_loop; } } } }, RLEInsn::Absolute(length, indices) => { // Absolute mode cannot span rows, so if we run // out of pixels to process, we should stop // processing the image. match image_type { ImageType::RLE8 => { if !set_8bit_pixel_run(&mut pixel_iter, &p, indices.iter(), length as usize) { break 'row_loop; } }, ImageType::RLE4 => { if !set_4bit_pixel_run(&mut pixel_iter, &p, indices.iter(), length as usize) { break 'row_loop; } }, _ => panic!(), } }, RLEInsn::PixelRun(n_pixels, palette_index) => { // A pixel run isn't allowed to span rows, but we // simply continue on to the next row if we run // out of pixels to set. match image_type { ImageType::RLE8 => { if !set_8bit_pixel_run(&mut pixel_iter, &p, repeat(&palette_index), n_pixels as usize) { break 'rle_loop; } }, ImageType::RLE4 => { if !set_4bit_pixel_run(&mut pixel_iter, &p, repeat(&palette_index), n_pixels as usize) { break 'rle_loop; } }, _ => panic!() } } } } else { // We ran out of data while we still had rows to fill in. 
return Err(ImageError::FormatError("Not enough RLE data".to_string())) } } } } Ok(pixel_data) } fn read_image_data(&mut self) -> ImageResult<Vec<u8>> { match self.image_type { ImageType::RGB => { match self.bit_count { 1 | 4 | 8 => { return self.read_palletized_pixel_data(); }, 16 => return self.read_16_bit_pixel_data(Format16Bit::Format555), 24 => return self.read_full_byte_pixel_data(FormatFullBytes::FormatRGB24), 32 => return if self.add_alpha_channel { self.read_full_byte_pixel_data(FormatFullBytes::FormatRGBA32) } else { self.read_full_byte_pixel_data(FormatFullBytes::FormatRGB32) }, _ => return Err(ImageError::FormatError("Invalid bit count for RGB bitmap".to_string())) } }, ImageType::RLE8 => { match self.bit_count { 8 => { return self.read_rle_data(ImageType::RLE8); }, _ => return Err(ImageError::FormatError("Invalid bit count for RLE8 bitmap".to_string())) } }, ImageType::RLE4 => { match self.bit_count { 4 => { return self.read_rle_data(ImageType::RLE4); }, _ => return Err(ImageError::FormatError("Invalid bit count for RLE4 bitmap".to_string())) } }, ImageType::Bitfields => { match self.bit_count{ 16 => { match self.bitfields { Some(R4_G4_B4_COLOR_MASK) => { return self.read_16_bit_pixel_data(Format16Bit::Format444) }, Some(R5_G5_B5_COLOR_MASK) => { return self.read_16_bit_pixel_data(Format16Bit::Format555) }, Some(R5_G6_B5_COLOR_MASK) => { return self.read_16_bit_pixel_data(Format16Bit::Format565) }, _ => return Err(ImageError::UnsupportedError("Unsupported 16-bit bitfield".to_string())) } }, 32 => { match self.bitfields { Some(R8_G8_B8_COLOR_MASK) => { return self.read_full_byte_pixel_data(FormatFullBytes::Format888) }, _ => return Err(ImageError::UnsupportedError("Unsupported 32-bit bitfield".to_string())) } }, _ => return Err(ImageError::FormatError("Invalid bit count for bitfield bitmap".to_string())), } }, } } } impl<R: Read + Seek> ImageDecoder for BMPDecoder<R> { fn dimensions(&mut self) -> ImageResult<(u32, u32)> { try!(self.read_metadata()); Ok((self.width as u32, self.height as u32)) } fn colortype(&mut self) -> ImageResult<ColorType> { if self.add_alpha_channel { Ok(ColorType::RGBA(8)) } else { Ok(ColorType::RGB(8)) } } fn row_len(&mut self) -> ImageResult<usize> { try!(self.read_metadata()); Ok(3 * self.width as usize) } fn read_scanline(&mut self, _buf: &mut [u8]) -> ImageResult<u32> { unimplemented!(); } fn read_image(&mut self) -> ImageResult<DecodingResult> { try!(self.read_metadata()); self.read_image_data().map(|v| DecodingResult::U8(v) ) } }
38.167687
340
0.47202
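A hedged usage sketch for the decoder record above, written against the old piston-image API the file imports (`ImageDecoder`, `ImageResult`, `ImageError`, `DecodingResult`); the `load_bmp` helper is hypothetical, not part of the record.

use std::fs::File;
use std::io::BufReader;

// Assumes the record's own items are in scope: BMPDecoder plus the
// image crate's DecodingResult / ImageResult / ImageError / ImageDecoder.
fn load_bmp(path: &str) -> ImageResult<(u32, u32, Vec<u8>)> {
    let file = BufReader::new(File::open(path).map_err(ImageError::IoError)?);
    let mut decoder = BMPDecoder::new(file);
    let (w, h) = decoder.dimensions()?; // triggers read_metadata() lazily
    match decoder.read_image()? {
        DecodingResult::U8(buf) => Ok((w, h, buf)),
        _ => Err(ImageError::FormatError("unexpected sample type".to_string())),
    }
}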
edfe2c1bfcde90aee57b76b8b247129adfde9114
3,776
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

#![recursion_limit = "128"]

extern crate proc_macro;

use proc_macro::TokenStream;
use quote::quote;
use syn::parse_macro_input;
use syn::spanned::Spanned;

/// Attribute to declare the init function of a plugin
/// ``` no_run
/// #[plugin_init]
/// fn plugin_init() -> Result<()> {}
/// ```
#[proc_macro_attribute]
pub fn plugin_init(_args: TokenStream, input: TokenStream) -> TokenStream {
    let f = parse_macro_input!(input as syn::ItemFn);
    let f_vis = &f.vis;
    let f_block = &f.block;
    let f_decl = &f.decl;
    let f_inputs = &f_decl.inputs;

    // check the function signature
    let valid_signature = f.constness.is_none()
        && match f_vis {
            syn::Visibility::Inherited => true,
            _ => false,
        }
        && f.abi.is_none()
        && f_inputs.len() == 0
        && f.decl.generics.where_clause.is_none()
        && f.decl.variadic.is_none();

    if !valid_signature {
        return syn::parse::Error::new(
            f.span(),
            "`#[plugin_init]` function must have signature `fn()`",
        )
        .to_compile_error()
        .into();
    }

    quote!(
        #[no_mangle]
        pub fn _plugin_init() -> optee_teec::Result<()> {
            #f_block
            Ok(())
        }
    )
    .into()
}

/// Attribute to declare the invoke function of a plugin
/// ``` no_run
/// #[plugin_invoke]
/// fn plugin_invoke(params: &mut PluginParameters) {}
/// ```
#[proc_macro_attribute]
pub fn plugin_invoke(_args: TokenStream, input: TokenStream) -> TokenStream {
    let f = parse_macro_input!(input as syn::ItemFn);
    let f_vis = &f.vis;
    let f_block = &f.block;
    let f_decl = &f.decl;
    let f_inputs = &f_decl.inputs;

    // check the function signature
    let valid_signature = f.constness.is_none()
        && match f_vis {
            syn::Visibility::Inherited => true,
            _ => false,
        }
        && f.abi.is_none()
        && f_inputs.len() == 1
        && f.decl.generics.where_clause.is_none()
        && f.decl.variadic.is_none();

    if !valid_signature {
        return syn::parse::Error::new(
            f.span(),
            "`#[plugin_invoke]` function must have signature `fn(params: &mut PluginParameters)`",
        )
        .to_compile_error()
        .into();
    }

    quote!(
        #[no_mangle]
        pub fn _plugin_invoke(
            cmd: u32,
            sub_cmd: u32,
            data: *mut c_char,
            in_len: u32,
            out_len: *mut u32,
        ) -> optee_teec::Result<()> {
            let mut inbuf = unsafe { std::slice::from_raw_parts_mut(data, in_len as usize) };
            let mut params = PluginParameters::new(cmd, sub_cmd, inbuf);
            #f_block
            let outslice = params.get_out_slice();
            unsafe {
                *out_len = outslice.len() as u32;
                std::ptr::copy(outslice.as_ptr(), data, outslice.len());
            }
            Ok(())
        }
    )
    .into()
}
29.5
97
0.591102
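A hedged sketch of a plugin crate using these attributes, following the signatures shown in the doc comments above; `PluginParameters` and `Result` are assumed to come from the OP-TEE Rust support crates the generated code references.

// Hypothetical plugin crate body; only makes sense post-expansion, since
// the attributes replace these items with the exported shims shown above.
#[plugin_init]
fn plugin_init() -> Result<()> {
    // One-time setup; the macro re-emits this body inside the exported
    // `_plugin_init() -> optee_teec::Result<()>` wrapper.
}

#[plugin_invoke]
fn plugin_invoke(params: &mut PluginParameters) {
    // `params` carries (cmd, sub_cmd, input buffer), as assembled by the
    // generated `_plugin_invoke` wrapper before this body runs.
    let _ = params;
}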
891456d9c53d11baac7bf45cd18f5db29e025743
3,012
// Copyright 2015 blake2-rfc Developers
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.

#[cfg(feature = "simd")]
use crate::simd::simdint;
use crate::simd::simdty::{u32x4, u64x4};

use core::ops::{Add, BitXor, Shl, Shr};

macro_rules! impl_ops {
    ($vec:ident) => {
        impl Add for $vec {
            type Output = Self;

            #[cfg(feature = "simd")]
            #[inline(always)]
            fn add(self, rhs: Self) -> Self::Output {
                unsafe { simdint::simd_add(self, rhs) }
            }

            #[cfg(not(feature = "simd"))]
            #[inline(always)]
            fn add(self, rhs: Self) -> Self::Output {
                $vec::new(
                    self.0.wrapping_add(rhs.0),
                    self.1.wrapping_add(rhs.1),
                    self.2.wrapping_add(rhs.2),
                    self.3.wrapping_add(rhs.3),
                )
            }
        }

        impl BitXor for $vec {
            type Output = Self;

            #[cfg(feature = "simd")]
            #[inline(always)]
            fn bitxor(self, rhs: Self) -> Self::Output {
                unsafe { simdint::simd_xor(self, rhs) }
            }

            #[cfg(not(feature = "simd"))]
            #[inline(always)]
            fn bitxor(self, rhs: Self) -> Self::Output {
                $vec::new(
                    self.0 ^ rhs.0,
                    self.1 ^ rhs.1,
                    self.2 ^ rhs.2,
                    self.3 ^ rhs.3,
                )
            }
        }

        impl Shl<$vec> for $vec {
            type Output = Self;

            #[cfg(feature = "simd")]
            #[inline(always)]
            fn shl(self, rhs: Self) -> Self::Output {
                unsafe { simdint::simd_shl(self, rhs) }
            }

            #[cfg(not(feature = "simd"))]
            #[inline(always)]
            fn shl(self, rhs: Self) -> Self::Output {
                $vec::new(
                    self.0 << rhs.0,
                    self.1 << rhs.1,
                    self.2 << rhs.2,
                    self.3 << rhs.3,
                )
            }
        }

        impl Shr<$vec> for $vec {
            type Output = Self;

            #[cfg(feature = "simd")]
            #[inline(always)]
            fn shr(self, rhs: Self) -> Self::Output {
                unsafe { simdint::simd_shr(self, rhs) }
            }

            #[cfg(not(feature = "simd"))]
            #[inline(always)]
            fn shr(self, rhs: Self) -> Self::Output {
                $vec::new(
                    self.0 >> rhs.0,
                    self.1 >> rhs.1,
                    self.2 >> rhs.2,
                    self.3 >> rhs.3,
                )
            }
        }
    };
}

impl_ops!(u32x4);
impl_ops!(u64x4);
28.961538
77
0.421647
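These lane-wise `Shl`/`Shr`/`BitXor` ops exist so the hash rounds can build word rotations out of shifts; a hedged scalar sketch of the identity involved, on a plain `u64` rather than the `u64x4` lanes above:

// Rotate-right built only from shr/shl/xor: since the two shifted parts do
// not overlap, xor equals or, and the result matches rotate_right for 0 < n < 64.
fn rotr64(x: u64, n: u32) -> u64 {
    (x >> n) ^ (x << (64 - n))
}

fn main() {
    let x = 0x0123_4567_89ab_cdefu64;
    assert_eq!(rotr64(x, 32), x.rotate_right(32));
    assert_eq!(rotr64(x, 24), x.rotate_right(24));
}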
1c343557122e85e605435e7fa74d0331c98a506e
294
mod a {
    #[legacy_exports];
    mod b {
        #[legacy_exports];
        mod a {
            #[legacy_exports];
            fn foo() -> int { return 1; }
        }
    }
}

mod c {
    #[legacy_exports];
    use a::b::a;
    fn bar() { assert (a::foo() == 1); }
}

fn main() { c::bar(); }
15.473684
41
0.404762
edac5898d679282ded5d1cabdd171063841465b1
5,347
use crate::bindings::{ gravity_class_bool, gravity_class_class, gravity_class_closure, gravity_class_fiber, gravity_class_float, gravity_class_function, gravity_class_instance, gravity_class_int, gravity_class_list, gravity_class_map, gravity_class_null, gravity_class_range, gravity_class_string, gravity_value_equals, gravity_value_from_bool, gravity_value_from_float, gravity_value_from_int, gravity_value_from_null, gravity_value_from_undefined, gravity_value_t, }; pub type Value = gravity_value_t; impl Value { #[inline] pub fn null() -> Self { unsafe { gravity_value_from_null() } } #[inline] pub fn undefined() -> Self { unsafe { gravity_value_from_undefined() } } #[inline] pub fn boolean<T>(value: T) -> Self where T: Into<bool>, { unsafe { gravity_value_from_bool(value.into()) } } #[inline] pub fn integer<T>(value: T) -> Self where T: Into<i64>, { unsafe { gravity_value_from_int(value.into()) } } #[inline] pub fn float<T>(value: T) -> Self where T: Into<f64>, { unsafe { gravity_value_from_float(value.into()) } } #[inline] pub fn is_null_class(&self) -> bool { unsafe { std::ptr::eq(self.isa, gravity_class_null) } } #[inline] pub fn is_null(&self) -> bool { unsafe { std::ptr::eq(self.isa, gravity_class_null) && self.__bindgen_anon_1.n == 0 } } #[inline] pub fn is_undefined(&self) -> bool { unsafe { std::ptr::eq(self.isa, gravity_class_null) && self.__bindgen_anon_1.n == 1 } } #[inline] pub fn is_boolean(&self) -> bool { unsafe { std::ptr::eq(self.isa, gravity_class_bool) } } #[inline] pub fn is_integer(&self) -> bool { unsafe { std::ptr::eq(self.isa, gravity_class_int) } } #[inline] pub fn is_float(&self) -> bool { unsafe { std::ptr::eq(self.isa, gravity_class_float) } } #[inline] pub fn is_function(&self) -> bool { unsafe { std::ptr::eq(self.isa, gravity_class_function) } } #[inline] pub fn is_instance(&self) -> bool { unsafe { std::ptr::eq(self.isa, gravity_class_instance) } } #[inline] pub fn is_closure(&self) -> bool { unsafe { std::ptr::eq(self.isa, gravity_class_closure) } } #[inline] pub fn is_class(&self) -> bool { unsafe { std::ptr::eq(self.isa, gravity_class_class) } } #[inline] pub fn is_fiber(&self) -> bool { unsafe { std::ptr::eq(self.isa, gravity_class_fiber) } } #[inline] pub fn is_string(&self) -> bool { unsafe { std::ptr::eq(self.isa, gravity_class_string) } } #[inline] pub fn is_list(&self) -> bool { unsafe { std::ptr::eq(self.isa, gravity_class_list) } } #[inline] pub fn is_map(&self) -> bool { unsafe { std::ptr::eq(self.isa, gravity_class_map) } } #[inline] pub fn is_range(&self) -> bool { unsafe { std::ptr::eq(self.isa, gravity_class_range) } } #[inline] pub fn is_basic_type(&self) -> bool { self.is_string() || self.is_integer() || self.is_float() || self.is_boolean() } #[inline] pub fn is_valid(&self) -> bool { !std::ptr::eq(self.isa, std::ptr::null()) } #[inline] pub fn is_invalid(&self) -> bool { std::ptr::eq(self.isa, std::ptr::null()) } } impl From<bool> for Value { #[inline] fn from(value: bool) -> Self { Self::boolean(value) } } impl From<u8> for Value { #[inline] fn from(value: u8) -> Self { Self::integer(value) } } impl From<u16> for Value { #[inline] fn from(value: u16) -> Self { Self::integer(value) } } impl From<u32> for Value { #[inline] fn from(value: u32) -> Self { Self::integer(value) } } impl From<i8> for Value { #[inline] fn from(value: i8) -> Self { Self::integer(value) } } impl From<i16> for Value { #[inline] fn from(value: i16) -> Self { Self::integer(value) } } impl From<i32> for Value { #[inline] fn from(value: i32) -> Self { Self::integer(value) 
} } impl From<i64> for Value { #[inline] fn from(value: i64) -> Self { Self::integer(value) } } impl From<f64> for Value { #[inline] fn from(value: f64) -> Self { Self::float(value) } } impl From<f32> for Value { #[inline] fn from(value: f32) -> Self { Self::float(value) } } impl PartialEq<Value> for Value { fn eq(&self, other: &Value) -> bool { unsafe { gravity_value_equals(*self, *other) } } } #[cfg(test)] mod tests { use super::Value; #[test] fn value_is_null() { let x = Value::null(); assert!(x.is_null()) } #[test] fn value_is_undefined() { let x = Value::undefined(); assert!(x.is_undefined()) } #[test] fn value_is_integer() { let x = Value::from(10); assert!(x.is_integer()) } #[test] fn value_is_float() { let x = Value::from(10.0); assert!(x.is_float()) } #[test] fn value_is_boolean() { let x = Value::from(false); assert!(x.is_boolean()) } }
21.218254
99
0.565364
90fc4a740b28c727b2a908a9bf7df2649f6c2e01
22,471
use std::cell::{Ref, RefMut}; #[cfg(feature = "async")] use std::future::Future; #[cfg(feature = "serialize")] use { serde::ser::{self, Serialize, Serializer}, std::result::Result as StdResult, }; use crate::error::{Error, Result}; use crate::ffi; use crate::function::Function; use crate::lua::Lua; use crate::table::Table; use crate::types::{LuaRef, MaybeSend, UserDataCell}; use crate::util::{assert_stack, get_destructed_userdata_metatable, get_userdata, StackGuard}; use crate::value::{FromLua, FromLuaMulti, ToLua, ToLuaMulti, Value}; /// Kinds of metamethods that can be overridden. /// /// Currently, this mechanism does not allow overriding the `__gc` metamethod, since there is /// generally no need to do so: [`UserData`] implementors can instead just implement `Drop`. /// /// [`UserData`]: trait.UserData.html #[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)] pub enum MetaMethod { /// The `+` operator. Add, /// The `-` operator. Sub, /// The `*` operator. Mul, /// The `/` operator. Div, /// The `%` operator. Mod, /// The `^` operator. Pow, /// The unary minus (`-`) operator. Unm, /// The floor division (//) operator. /// Requires `feature = "lua54/lua53"` #[cfg(any(feature = "lua54", feature = "lua53", doc))] IDiv, /// The bitwise AND (&) operator. /// Requires `feature = "lua54/lua53"` #[cfg(any(feature = "lua54", feature = "lua53", doc))] BAnd, /// The bitwise OR (|) operator. /// Requires `feature = "lua54/lua53"` #[cfg(any(feature = "lua54", feature = "lua53", doc))] BOr, /// The bitwise XOR (binary ~) operator. /// Requires `feature = "lua54/lua53"` #[cfg(any(feature = "lua54", feature = "lua53", doc))] BXor, /// The bitwise NOT (unary ~) operator. /// Requires `feature = "lua54/lua53"` #[cfg(any(feature = "lua54", feature = "lua53", doc))] BNot, /// The bitwise left shift (<<) operator. #[cfg(any(feature = "lua54", feature = "lua53", doc))] Shl, /// The bitwise right shift (>>) operator. #[cfg(any(feature = "lua54", feature = "lua53", doc))] Shr, /// The string concatenation operator `..`. Concat, /// The length operator `#`. Len, /// The `==` operator. Eq, /// The `<` operator. Lt, /// The `<=` operator. Le, /// Index access `obj[key]`. Index, /// Index write access `obj[key] = value`. NewIndex, /// The call "operator" `obj(arg1, args2, ...)`. Call, /// The `__tostring` metamethod. /// /// This is not an operator, but will be called by methods such as `tostring` and `print`. ToString, /// The `__pairs` metamethod. /// /// This is not an operator, but it will be called by the built-in `pairs` function. /// /// Requires `feature = "lua54/lua53/lua52"` #[cfg(any(feature = "lua54", feature = "lua53", feature = "lua52", doc))] Pairs, /// The `__close` metamethod. /// /// Executed when a variable, that marked as to-be-closed, goes out of scope. /// /// More information about to-be-closed variabled can be found in the Lua 5.4 /// [documentation][lua_doc]. 
/// /// Requires `feature = "lua54"` /// /// [lua_doc]: https://www.lua.org/manual/5.4/manual.html#3.3.8 #[cfg(any(feature = "lua54", doc))] Close, } impl MetaMethod { pub fn name(self) -> &'static [u8] { match self { MetaMethod::Add => b"__add", MetaMethod::Sub => b"__sub", MetaMethod::Mul => b"__mul", MetaMethod::Div => b"__div", MetaMethod::Mod => b"__mod", MetaMethod::Pow => b"__pow", MetaMethod::Unm => b"__unm", #[cfg(any(feature = "lua54", feature = "lua53"))] MetaMethod::IDiv => b"__idiv", #[cfg(any(feature = "lua54", feature = "lua53"))] MetaMethod::BAnd => b"__band", #[cfg(any(feature = "lua54", feature = "lua53"))] MetaMethod::BOr => b"__bor", #[cfg(any(feature = "lua54", feature = "lua53"))] MetaMethod::BXor => b"__bxor", #[cfg(any(feature = "lua54", feature = "lua53"))] MetaMethod::BNot => b"__bnot", #[cfg(any(feature = "lua54", feature = "lua53"))] MetaMethod::Shl => b"__shl", #[cfg(any(feature = "lua54", feature = "lua53"))] MetaMethod::Shr => b"__shr", MetaMethod::Concat => b"__concat", MetaMethod::Len => b"__len", MetaMethod::Eq => b"__eq", MetaMethod::Lt => b"__lt", MetaMethod::Le => b"__le", MetaMethod::Index => b"__index", MetaMethod::NewIndex => b"__newindex", MetaMethod::Call => b"__call", MetaMethod::ToString => b"__tostring", #[cfg(any(feature = "lua54", feature = "lua53", feature = "lua52"))] MetaMethod::Pairs => b"__pairs", #[cfg(feature = "lua54")] MetaMethod::Close => b"__close", } } } /// Method registry for [`UserData`] implementors. /// /// [`UserData`]: trait.UserData.html pub trait UserDataMethods<'lua, T: UserData> { /// Add a method which accepts a `&T` as the first parameter. /// /// Regular methods are implemented by overriding the `__index` metamethod and returning the /// accessed method. This allows them to be used with the expected `userdata:method()` syntax. /// /// If `add_meta_method` is used to set the `__index` metamethod, the `__index` metamethod will /// be used as a fall-back if no regular method is found. fn add_method<S, A, R, M>(&mut self, name: &S, method: M) where S: ?Sized + AsRef<[u8]>, A: FromLuaMulti<'lua>, R: ToLuaMulti<'lua>, M: 'static + MaybeSend + Fn(&'lua Lua, &T, A) -> Result<R>; /// Add a regular method which accepts a `&mut T` as the first parameter. /// /// Refer to [`add_method`] for more information about the implementation. /// /// [`add_method`]: #method.add_method fn add_method_mut<S, A, R, M>(&mut self, name: &S, method: M) where S: ?Sized + AsRef<[u8]>, A: FromLuaMulti<'lua>, R: ToLuaMulti<'lua>, M: 'static + MaybeSend + FnMut(&'lua Lua, &mut T, A) -> Result<R>; /// Add an async method which accepts a `T` as the first parameter and returns Future. /// The passed `T` is cloned from the original value. /// /// Refer to [`add_method`] for more information about the implementation. /// /// Requires `feature = "async"` /// /// [`add_method`]: #method.add_method #[cfg(feature = "async")] #[cfg_attr(docsrs, doc(cfg(feature = "async")))] fn add_async_method<S, A, R, M, MR>(&mut self, name: &S, method: M) where T: Clone, S: ?Sized + AsRef<[u8]>, A: FromLuaMulti<'lua>, R: ToLuaMulti<'lua>, M: 'static + MaybeSend + Fn(&'lua Lua, T, A) -> MR, MR: 'lua + Future<Output = Result<R>>; /// Add a regular method as a function which accepts generic arguments, the first argument will /// be a `UserData` of type T if the method is called with Lua method syntax: /// `my_userdata:my_method(arg1, arg2)`, or it is passed in as the first argument: /// `my_userdata.my_method(my_userdata, arg1, arg2)`. 
/// /// Prefer to use [`add_method`] or [`add_method_mut`] as they are easier to use. /// /// [`add_method`]: #method.add_method /// [`add_method_mut`]: #method.add_method_mut fn add_function<S, A, R, F>(&mut self, name: &S, function: F) where S: ?Sized + AsRef<[u8]>, A: FromLuaMulti<'lua>, R: ToLuaMulti<'lua>, F: 'static + MaybeSend + Fn(&'lua Lua, A) -> Result<R>; /// Add a regular method as a mutable function which accepts generic arguments. /// /// This is a version of [`add_function`] that accepts a FnMut argument. /// /// [`add_function`]: #method.add_function fn add_function_mut<S, A, R, F>(&mut self, name: &S, function: F) where S: ?Sized + AsRef<[u8]>, A: FromLuaMulti<'lua>, R: ToLuaMulti<'lua>, F: 'static + MaybeSend + FnMut(&'lua Lua, A) -> Result<R>; /// Add a regular method as an async function which accepts generic arguments /// and returns Future. /// /// This is an async version of [`add_function`]. /// /// Requires `feature = "async"` /// /// [`add_function`]: #method.add_function #[cfg(feature = "async")] #[cfg_attr(docsrs, doc(cfg(feature = "async")))] fn add_async_function<S, A, R, F, FR>(&mut self, name: &S, function: F) where T: Clone, S: ?Sized + AsRef<[u8]>, A: FromLuaMulti<'lua>, R: ToLuaMulti<'lua>, F: 'static + MaybeSend + Fn(&'lua Lua, A) -> FR, FR: 'lua + Future<Output = Result<R>>; /// Add a metamethod which accepts a `&T` as the first parameter. /// /// # Note /// /// This can cause an error with certain binary metamethods that can trigger if only the right /// side has a metatable. To prevent this, use [`add_meta_function`]. /// /// [`add_meta_function`]: #method.add_meta_function fn add_meta_method<A, R, M>(&mut self, meta: MetaMethod, method: M) where A: FromLuaMulti<'lua>, R: ToLuaMulti<'lua>, M: 'static + MaybeSend + Fn(&'lua Lua, &T, A) -> Result<R>; /// Add a metamethod as a function which accepts a `&mut T` as the first parameter. /// /// # Note /// /// This can cause an error with certain binary metamethods that can trigger if only the right /// side has a metatable. To prevent this, use [`add_meta_function`]. /// /// [`add_meta_function`]: #method.add_meta_function fn add_meta_method_mut<A, R, M>(&mut self, meta: MetaMethod, method: M) where A: FromLuaMulti<'lua>, R: ToLuaMulti<'lua>, M: 'static + MaybeSend + FnMut(&'lua Lua, &mut T, A) -> Result<R>; /// Add a metamethod which accepts generic arguments. /// /// Metamethods for binary operators can be triggered if either the left or right argument to /// the binary operator has a metatable, so the first argument here is not necessarily a /// userdata of type `T`. fn add_meta_function<A, R, F>(&mut self, meta: MetaMethod, function: F) where A: FromLuaMulti<'lua>, R: ToLuaMulti<'lua>, F: 'static + MaybeSend + Fn(&'lua Lua, A) -> Result<R>; /// Add a metamethod as a mutable function which accepts generic arguments. /// /// This is a version of [`add_meta_function`] that accepts a FnMut argument. /// /// [`add_meta_function`]: #method.add_meta_function fn add_meta_function_mut<A, R, F>(&mut self, meta: MetaMethod, function: F) where A: FromLuaMulti<'lua>, R: ToLuaMulti<'lua>, F: 'static + MaybeSend + FnMut(&'lua Lua, A) -> Result<R>; } /// Trait for custom userdata types. /// /// By implementing this trait, a struct becomes eligible for use inside Lua code. Implementations /// of [`ToLua`] and [`FromLua`] are automatically provided. 
/// /// # Examples /// /// ``` /// # use mlua::{Lua, Result, UserData}; /// # fn main() -> Result<()> { /// # let lua = Lua::new(); /// struct MyUserData(i32); /// /// impl UserData for MyUserData {} /// /// // `MyUserData` now implements `ToLua`: /// lua.globals().set("myobject", MyUserData(123))?; /// /// lua.load("assert(type(myobject) == 'userdata')").exec()?; /// # Ok(()) /// # } /// ``` /// /// Custom methods and operators can be provided by implementing `add_methods` (refer to /// [`UserDataMethods`] for more information): /// /// ``` /// # use mlua::{Lua, MetaMethod, Result, UserData, UserDataMethods}; /// # fn main() -> Result<()> { /// # let lua = Lua::new(); /// struct MyUserData(i32); /// /// impl UserData for MyUserData { /// fn add_methods<'lua, M: UserDataMethods<'lua, Self>>(methods: &mut M) { /// methods.add_method("get", |_, this, _: ()| { /// Ok(this.0) /// }); /// /// methods.add_method_mut("add", |_, this, value: i32| { /// this.0 += value; /// Ok(()) /// }); /// /// methods.add_meta_method(MetaMethod::Add, |_, this, value: i32| { /// Ok(this.0 + value) /// }); /// } /// } /// /// lua.globals().set("myobject", MyUserData(123))?; /// /// lua.load(r#" /// assert(myobject:get() == 123) /// myobject:add(7) /// assert(myobject:get() == 130) /// assert(myobject + 10 == 140) /// "#).exec()?; /// # Ok(()) /// # } /// ``` /// /// [`ToLua`]: trait.ToLua.html /// [`FromLua`]: trait.FromLua.html /// [`UserDataMethods`]: trait.UserDataMethods.html pub trait UserData: Sized { /// Adds custom methods and operators specific to this userdata. fn add_methods<'lua, M: UserDataMethods<'lua, Self>>(_methods: &mut M) {} } pub struct UserDataWrapped<T> { pub data: *mut T, #[cfg(feature = "serialize")] ser: *mut dyn erased_serde::Serialize, } impl<T> Drop for UserDataWrapped<T> { fn drop(&mut self) { unsafe { drop(Box::from_raw(self.data)); #[cfg(feature = "serialize")] if self.data as *mut () != self.ser as *mut () { drop(Box::from_raw(self.ser)); } } } } impl<T> UserDataWrapped<T> { pub fn new(data: T) -> Self { UserDataWrapped { data: Box::into_raw(Box::new(data)), #[cfg(feature = "serialize")] ser: Box::into_raw(Box::new(UserDataSerializeError)), } } #[cfg(feature = "serialize")] pub fn new_ser(data: T) -> Self where T: 'static + Serialize, { let data_raw = Box::into_raw(Box::new(data)); UserDataWrapped { data: data_raw, ser: data_raw, } } } impl<T> AsRef<T> for UserDataWrapped<T> { fn as_ref(&self) -> &T { unsafe { &*self.data } } } impl<T> AsMut<T> for UserDataWrapped<T> { fn as_mut(&mut self) -> &mut T { unsafe { &mut *self.data } } } #[cfg(feature = "serialize")] pub struct UserDataSerializeError; #[cfg(feature = "serialize")] impl Serialize for UserDataSerializeError { fn serialize<S>(&self, _serializer: S) -> StdResult<S::Ok, S::Error> where S: Serializer, { Err(ser::Error::custom("cannot serialize <userdata>")) } } /// Handle to an internal Lua userdata for any type that implements [`UserData`]. /// /// Similar to `std::any::Any`, this provides an interface for dynamic type checking via the [`is`] /// and [`borrow`] methods. /// /// Internally, instances are stored in a `RefCell`, to best match the mutable semantics of the Lua /// language. /// /// # Note /// /// This API should only be used when necessary. Implementing [`UserData`] already allows defining /// methods which check the type and acquire a borrow behind the scenes. 
/// /// [`UserData`]: trait.UserData.html /// [`is`]: #method.is /// [`borrow`]: #method.borrow #[derive(Clone, Debug)] pub struct AnyUserData<'lua>(pub LuaRef<'lua>); impl<'lua> AnyUserData<'lua> { /// Checks whether the type of this userdata is `T`. pub fn is<T: 'static + UserData>(&self) -> bool { match self.inspect(|_: &UserDataCell<T>| Ok(())) { Ok(()) => true, Err(Error::UserDataTypeMismatch) => false, Err(_) => unreachable!(), } } /// Borrow this userdata immutably if it is of type `T`. /// /// # Errors /// /// Returns a `UserDataBorrowError` if the userdata is already mutably borrowed. Returns a /// `UserDataTypeMismatch` if the userdata is not of type `T`. pub fn borrow<T: 'static + UserData>(&self) -> Result<Ref<T>> { self.inspect(|cell| { let cell_ref = cell.try_borrow().map_err(|_| Error::UserDataBorrowError)?; Ok(Ref::map(cell_ref, |x| unsafe { &*x.data })) }) } /// Borrow this userdata mutably if it is of type `T`. /// /// # Errors /// /// Returns a `UserDataBorrowMutError` if the userdata is already borrowed. Returns a /// `UserDataTypeMismatch` if the userdata is not of type `T`. pub fn borrow_mut<T: 'static + UserData>(&self) -> Result<RefMut<T>> { self.inspect(|cell| { let cell_ref = cell .try_borrow_mut() .map_err(|_| Error::UserDataBorrowMutError)?; Ok(RefMut::map(cell_ref, |x| unsafe { &mut *x.data })) }) } /// Sets an associated value to this `AnyUserData`. /// /// The value may be any Lua value whatsoever, and can be retrieved with [`get_user_value`]. /// As Lua < 5.3 allows to store only tables, the value will be stored in a table at index 1. /// /// [`get_user_value`]: #method.get_user_value pub fn set_user_value<V: ToLua<'lua>>(&self, v: V) -> Result<()> { let lua = self.0.lua; #[cfg(any(feature = "lua52", feature = "lua51", feature = "luajit"))] let v = { // Lua 5.2/5.1 allows to store only a table. Then we will wrap the value. let t = lua.create_table()?; t.raw_set(1, v)?; crate::Value::Table(t) }; #[cfg(any(feature = "lua54", feature = "lua53"))] let v = v.to_lua(lua)?; unsafe { let _sg = StackGuard::new(lua.state); assert_stack(lua.state, 2); lua.push_ref(&self.0); lua.push_value(v)?; ffi::lua_setuservalue(lua.state, -2); Ok(()) } } /// Returns an associated value set by [`set_user_value`]. /// /// For Lua < 5.3 the value will be automatically extracted from the table wrapper from index 1. /// /// [`set_user_value`]: #method.set_user_value pub fn get_user_value<V: FromLua<'lua>>(&self) -> Result<V> { let lua = self.0.lua; let res = unsafe { let _sg = StackGuard::new(lua.state); assert_stack(lua.state, 3); lua.push_ref(&self.0); ffi::lua_getuservalue(lua.state, -1); lua.pop_value() }; #[cfg(any(feature = "lua52", feature = "lua51", feature = "luajit"))] return crate::Table::from_lua(res, lua)?.get(1); #[cfg(any(feature = "lua54", feature = "lua53"))] V::from_lua(res, lua) } /// Checks for a metamethod in this `AnyUserData` pub fn has_metamethod(&self, method: MetaMethod) -> Result<bool> { match self._get_metatable() { Ok(mt) => { let name = self.0.lua.create_string(method.name())?; if let Value::Nil = mt.raw_get(name)? { Ok(false) } else { Ok(true) } } Err(Error::UserDataTypeMismatch) => Ok(false), Err(e) => Err(e), } } /// Gets the metatable for this `AnyUserData`. This function is considered /// unsafe, as altering the generated metatable can lead to unsaftey. 
pub unsafe fn get_metatable(&self) -> Result<Table<'lua>> { self._get_metatable() } fn _get_metatable(&self) -> Result<Table<'lua>> { unsafe { let lua = self.0.lua; let _sg = StackGuard::new(lua.state); assert_stack(lua.state, 3); lua.push_ref(&self.0); if ffi::lua_getmetatable(lua.state, -1) == 0 { return Err(Error::UserDataTypeMismatch); } Ok(Table(lua.pop_ref())) } } pub fn equals<T: AsRef<Self>>(&self, other: T) -> Result<bool> { let other = other.as_ref(); if self == other { return Ok(true); } let mt = self._get_metatable()?; if mt != other._get_metatable()? { return Ok(false); } if mt.contains_key("__eq")? { return mt .get::<_, Function>("__eq")? .call((self.clone(), other.clone())); } Ok(false) } fn inspect<'a, T, R, F>(&'a self, func: F) -> Result<R> where T: 'static + UserData, F: FnOnce(&'a UserDataCell<T>) -> Result<R>, { unsafe { let lua = self.0.lua; let _sg = StackGuard::new(lua.state); assert_stack(lua.state, 3); lua.push_ref(&self.0); if ffi::lua_getmetatable(lua.state, -1) == 0 { Err(Error::UserDataTypeMismatch) } else { ffi::lua_rawgeti( lua.state, ffi::LUA_REGISTRYINDEX, lua.userdata_metatable::<T>()? as ffi::lua_Integer, ); if ffi::lua_rawequal(lua.state, -1, -2) == 0 { // Maybe UserData destructed? ffi::lua_pop(lua.state, 1); get_destructed_userdata_metatable(lua.state); if ffi::lua_rawequal(lua.state, -1, -2) == 1 { Err(Error::UserDataDestructed) } else { Err(Error::UserDataTypeMismatch) } } else { func(&*get_userdata::<UserDataCell<T>>(lua.state, -3)) } } } } } impl<'lua> PartialEq for AnyUserData<'lua> { fn eq(&self, other: &Self) -> bool { self.0 == other.0 } } impl<'lua> AsRef<AnyUserData<'lua>> for AnyUserData<'lua> { #[inline] fn as_ref(&self) -> &Self { self } } #[cfg(feature = "serialize")] impl<'lua> Serialize for AnyUserData<'lua> { fn serialize<S>(&self, serializer: S) -> StdResult<S::Ok, S::Error> where S: Serializer, { let f = || unsafe { let lua = self.0.lua; let _sg = StackGuard::new(lua.state); assert_stack(lua.state, 2); lua.push_userdata_ref(&self.0)?; let ud = &*get_userdata::<UserDataCell<()>>(lua.state, -1); (*ud.try_borrow().map_err(|_| Error::UserDataBorrowError)?.ser) .serialize(serializer) .map_err(|err| Error::SerializeError(err.to_string())) }; f().map_err(ser::Error::custom) } }
// Copyright 2021 Jacob Alexander // // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or // http://opensource.org/licenses/MIT>, at your option. This file may not be // copied, modified, or distributed except according to those terms. #![no_std] mod descriptor; mod test; pub use crate::descriptor::{ HidioReport, KeyboardNkroReport, MouseReport, SysCtrlConsumerCtrlReport, }; use heapless::spsc::Consumer; use usb_device::bus::{UsbBus, UsbBusAllocator}; use usb_device::class::UsbClass; use usbd_hid::descriptor::generator_prelude::*; use usbd_hid::descriptor::KeyboardReport; use usbd_hid::hid_class::{HIDClass, HidClassSettings, HidProtocol, HidSubClass}; pub use usbd_hid::hid_class::{HidCountryCode, HidProtocolMode, ProtocolModeConfig}; use usbd_hid::UsbError; #[cfg(feature = "hidio")] use heapless::Vec; #[cfg(feature = "hidio")] use kiibohd_hid_io::{CommandInterface, KiibohdCommandInterface}; #[derive(Copy, Clone, Debug, PartialEq, defmt::Format)] pub enum KeyState { /// Press the given USB HID Keyboard code Press(u8), /// Release the given USB HID Keyboard code Release(u8), /// Clear all currently pressed USB HID Keyboard codes Clear, } #[derive(Copy, Clone, Debug, PartialEq, defmt::Format)] pub enum MouseState { /// Press the given mouse button (1->8) Press(u8), /// Release the given mouse button (1->8) Release(u8), /// Position update Position { x: i16, y: i16 }, /// Vertical Wheel Increment VertWheel(i8), /// Horizontal Wheel Increment HorzWheel(i8), /// Clear all mouse state Clear, } #[derive(Copy, Clone, Debug, PartialEq, defmt::Format)] pub enum CtrlState { /// Press the given USB HID System Ctrl code SystemCtrlPress(u8), /// Release the given USB HID System Ctrl code SystemCtrlRelease(u8), /// Press the given USB HID Consumer Ctrl code ConsumerCtrlPress(u16), /// Release the given USB HID Consumer Ctrl code ConsumerCtrlRelease(u16), /// Clear all the currently pressed consumer and system control HID codes Clear, } /// USB HID Combination Interface /// /// Handles creation and management of multiple USB HID interfaces through SPSC queues. /// Maintains state for you so you only need to send state changes and poll events. /// /// Typical Usage /// - Queue up changes using SPSC queues /// - Call push to send the current states of all the queues /// (queues are not processed until push() is called) /// /// HID-IO /// - Queue up changes, or receive changes using hidio_rx and hidio_tx spsc queues /// - Call poll to process queues in both directions (or push, which will call poll for you) /// Will attempt to push and pull as many packets as possible in case the USB device supports /// larger buffers (e.g. 
double buffering) /// /// Example Usage (atsam4s) /// ```rust,ignore /// use heapless::spsc::Queue; /// use usbd_hid::hid_class::{HidCountryCode, HidProtocolMode, ProtocolModeConfig}; /// /// // These define the maximum pending items in each queue /// const KBD_QUEUE_SIZE: usize = 10; // This would limit NKRO mode to 10KRO /// const MOUSE_QUEUE_SIZE: usize = 5; /// const CTRL_QUEUE_SIZE: usize = 2; /// /// type HidInterface = /// kiibohd_usb::HidInterface<'static, UdpBus, KBD_QUEUE_SIZE, MOUSE_QUEUE_SIZE, CTRL_QUEUE_SIZE>; /// /// pub struct HidioInterface<const H: usize> {} /// /// impl<const H: usize> HidioInterface<H> { /// fn new() -> Self { /// Self {} /// } /// } /// /// impl<const H: usize> KiibohdCommandInterface<H> for HidioInterface<H> { /// fn h0001_device_name(&self) -> Option<&str> { /// Some("Input Club Keystone - TKL") /// } /// /// fn h0001_firmware_name(&self) -> Option<&str> { /// Some("kiibohd-firmware") /// } /// } /// /// // Setup the queues used to generate the input reports (ctrl, keyboard and mouse) /// let ctrl_queue: Queue<kiibohd_usb::CtrlState, CTRL_QUEUE_SIZE> = Queue::new(); /// let kbd_queue: Queue<kiibohd_usb::KeyState, KBD_QUEUE_SIZE> = Queue::new(); /// let mouse_queue: Queue<kiibohd_usb::MouseState, MOUSE_QUEUE_SIZE> = Queue::new(); /// let (kbd_producer, kbd_consumer) = kbd_queue.split(); /// let (mouse_producer, mouse_consumer) = mouse_queue.split(); /// let (ctrl_producer, ctrl_consumer) = ctrl_queue.split(); /// /// // Setup the interface /// // NOTE: Ignoring usb_bus setup in this example, use a compliant usb-device UsbBus interface /// let usb_hid = HidInterface::new( /// usb_bus, /// HidCountryCode::NotSupported, /// kbd_consumer, /// mouse_consumer, /// ctrl_consumer, /// ); /// /// // Basic CommandInterface /// let hidio_intf = CommandInterface::< /// HidioInterface<MESSAGE_LEN>, /// TX_BUF, /// RX_BUF, /// BUF_CHUNK, /// MESSAGE_LEN, /// SERIALIZATION_LEN, /// ID_LEN, /// >::new( /// &[ /// HidIoCommandId::SupportedIds, /// HidIoCommandId::GetInfo, /// HidIoCommandId::TestPacket, /// ], /// HidioInterface::<MESSAGE_LEN>::new(), /// ) /// .unwrap(); /// /// // To push keyboard key report, first push to the queue, then process all queues /// kbd_producer.enqueue(kiibohd_usb::KeyState::Press(0x04)); // Press the A key /// usb_hid.push(); /// /// // In the USB interrupt (or similar), usb_hid will also need to be handled (Ctrl EP requests) /// fn usb_irq() { /// let usb_dev = some_global_mechanism.usb_dev; /// let usb_hid = some_global_mechanism.usb_hid; /// let hidio_intf = some_global_mechanism.hidio_intf; /// if usb_dev.poll(&mut usb_hid.interfaces()) { /// // poll is only available with the hidio feature /// usb_hid.poll(hidio_intf); /// } /// } /// ``` pub struct HidInterface< 'a, B: UsbBus, const KBD_SIZE: usize, const MOUSE_SIZE: usize, const CTRL_SIZE: usize, > { kbd_6kro: HIDClass<'a, B>, kbd_6kro_report: KeyboardReport, kbd_nkro: HIDClass<'a, B>, kbd_nkro_report: KeyboardNkroReport, kbd_consumer: Consumer<'a, KeyState, KBD_SIZE>, ctrl: HIDClass<'a, B>, ctrl_consumer: Consumer<'a, CtrlState, CTRL_SIZE>, ctrl_report: SysCtrlConsumerCtrlReport, #[cfg(feature = "mouse")] mouse: HIDClass<'a, B>, #[cfg(feature = "mouse")] mouse_consumer: Consumer<'a, MouseState, MOUSE_SIZE>, #[cfg(feature = "mouse")] mouse_report: MouseReport, #[cfg(feature = "hidio")] hidio: HIDClass<'a, B>, } impl<B: UsbBus, const KBD_SIZE: usize, const MOUSE_SIZE: usize, const CTRL_SIZE: usize> HidInterface<'_, B, KBD_SIZE, MOUSE_SIZE, CTRL_SIZE> { pub fn new<'a>( alloc: &'a 
UsbBusAllocator<B>, locale: HidCountryCode, kbd_consumer: Consumer<'a, KeyState, KBD_SIZE>, #[cfg(feature = "mouse")] mouse_consumer: Consumer<'a, MouseState, MOUSE_SIZE>, ctrl_consumer: Consumer<'a, CtrlState, CTRL_SIZE>, ) -> HidInterface<'a, B, KBD_SIZE, MOUSE_SIZE, CTRL_SIZE> { let kbd_6kro = HIDClass::new_ep_in_with_settings( alloc, KeyboardReport::desc(), 10, HidClassSettings { subclass: HidSubClass::Boot, protocol: HidProtocol::Keyboard, config: ProtocolModeConfig::DefaultBehavior, locale, }, ); let kbd_nkro = HIDClass::new_ep_in_with_settings( alloc, KeyboardNkroReport::desc(), 10, HidClassSettings { subclass: HidSubClass::NoSubClass, protocol: HidProtocol::Keyboard, config: ProtocolModeConfig::DefaultBehavior, locale, }, ); let ctrl = HIDClass::new_ep_in(alloc, SysCtrlConsumerCtrlReport::desc(), 10); #[cfg(feature = "mouse")] let mouse = HIDClass::new_ep_in(alloc, MouseReport::desc(), 10); #[cfg(feature = "hidio")] let hidio = HIDClass::new(alloc, HidioReport::desc(), 10); HidInterface { kbd_6kro, kbd_6kro_report: KeyboardReport { modifier: 0, reserved: 0, leds: 0, keycodes: [0; 6], }, kbd_nkro, kbd_nkro_report: KeyboardNkroReport { leds: 0, keybitmap: [0; 29], }, kbd_consumer, ctrl, ctrl_consumer, ctrl_report: SysCtrlConsumerCtrlReport { consumer_ctrl: 0, system_ctrl: 0, }, #[cfg(feature = "mouse")] mouse, #[cfg(feature = "mouse")] mouse_consumer, #[cfg(feature = "mouse")] mouse_report: MouseReport { buttons: 0, x: 0, y: 0, vert_wheel: 0, horz_wheel: 0, }, #[cfg(feature = "hidio")] hidio, } } /// Dynamically update the keyboard protocol mode (and behavior) /// Used to force NKRO or 6KRO regardless of what the host configures pub fn set_kbd_protocol_mode(&mut self, mode: HidProtocolMode, config: ProtocolModeConfig) { defmt::trace!( "HidInterface::set_kbd_protocol_mode({:?}, {:?})", mode, config ); self.kbd_6kro.set_protocol_mode(mode, config).ok(); self.kbd_nkro.set_protocol_mode(mode, config).ok(); } /// Retrieves the current protocol mode /// Uses the 6kro keyboard (both HID Classes should return the same value) pub fn get_kbd_protocol_mode(&self) -> HidProtocolMode { self.kbd_6kro.get_protocol_mode().unwrap() } /// Used to pass all of the interfaces to usb_dev.poll() #[cfg(all(feature = "mouse", feature = "hidio"))] pub fn interfaces(&mut self) -> [&'_ mut dyn UsbClass<B>; 5] { [ &mut self.kbd_6kro, &mut self.kbd_nkro, &mut self.ctrl, &mut self.mouse, &mut self.hidio, ] } /// Used to pass all of the interfaces to usb_dev.poll() #[cfg(all(feature = "mouse", not(feature = "hidio")))] pub fn interfaces(&mut self) -> [&'_ mut dyn UsbClass<B>; 4] { [ &mut self.kbd_6kro, &mut self.kbd_nkro, &mut self.ctrl, &mut self.mouse, ] } /// Used to pass all of the interfaces to usb_dev.poll() #[cfg(all(not(feature = "mouse"), feature = "hidio"))] pub fn interfaces(&mut self) -> [&'_ mut dyn UsbClass<B>; 4] { [ &mut self.kbd_6kro, &mut self.kbd_nkro, &mut self.ctrl, &mut self.hidio, ] } /// Used to pass all of the interfaces to usb_dev.poll() #[cfg(all(not(feature = "mouse"), not(feature = "hidio")))] pub fn interfaces(&mut self) -> [&'_ mut dyn UsbClass<B>; 3] { [&mut self.kbd_6kro, &mut self.kbd_nkro, &mut self.ctrl] } /// Modifies the nkro report bitmask fn nkro_bit(&mut self, key: u8, press: bool) { // NOTE: The indexing actually starts from 1 (not 0), so position 0 represents 1 // 0 in USB HID represents no keys pressed, so it's meaningless in a bitmask // Ignore any keys over 231/0xE7 if key == 0 || key > 0xE7 { defmt::warn!("Invalid key for nkro_bit({}, {}), ignored.", key, press); 
            return;
        }

        let key = key - 1;

        // Determine position
        let byte: usize = (key / 8).into();
        let bit: usize = (key % 8).into();

        // Set/Unset
        if press {
            self.kbd_nkro_report.keybitmap[byte] |= 1 << bit;
        } else {
            self.kbd_nkro_report.keybitmap[byte] &= !(1 << bit);
        }
    }

    fn update_kbd(&mut self) -> bool {
        let mut updated = false;

        // Empty kbd queue
        loop {
            match self.kbd_consumer.dequeue() {
                Some(state) => {
                    updated = true;
                    match state {
                        KeyState::Press(key) => {
                            // Ignore 0
                            // - 6KRO -
                            // Modifiers
                            if key & 0xE0 == 0xE0 {
                                // Set the modifier bit (key XOR 0xE0 selects the bit)
                                self.kbd_6kro_report.modifier |= 1 << (key ^ 0xE0);
                            }

                            // Keys
                            for pos in self.kbd_6kro_report.keycodes.iter_mut() {
                                // Check to see if key is already pressed
                                if *pos == key {
                                    break;
                                }
                                // Set the key if we encounter a 0 (no key set)
                                if *pos == 0 {
                                    *pos = key;
                                    break;
                                }
                            }

                            // - NKRO -
                            self.nkro_bit(key, true);
                        }
                        KeyState::Release(key) => {
                            // - 6KRO -
                            // Modifiers
                            if key & 0xE0 == 0xE0 {
                                // Clear the modifier bit (key XOR 0xE0 selects the bit)
                                self.kbd_6kro_report.modifier &= !(1 << (key ^ 0xE0));
                            }

                            // Keys
                            if key != 0 {
                                // Check to see if key is pressed
                                if let Some(index) = self
                                    .kbd_6kro_report
                                    .keycodes
                                    .iter()
                                    .position(|&k| k == key)
                                {
                                    // Rotate in all the keys
                                    // OSs will skip all the keys after the first 0 is found in
                                    // the array.
                                    self.kbd_6kro_report.keycodes[index..].rotate_left(1);
                                    // Clear the last index
                                    self.kbd_6kro_report.keycodes
                                        [self.kbd_6kro_report.keycodes.len() - 1] = 0;
                                }
                            }

                            // - NKRO -
                            self.nkro_bit(key, false);
                        }
                        KeyState::Clear => {
                            // - 6KRO -
                            self.kbd_6kro_report.modifier = 0;
                            self.kbd_6kro_report.keycodes = [0; 6];

                            // - NKRO -
                            self.kbd_nkro_report.keybitmap = [0; 29];
                        }
                    }
                }
                None => {
                    return updated;
                }
            }
        }
    }

    fn push_6kro_kbd(&mut self) {
        if let Err(val) = self.kbd_6kro.push_input(&self.kbd_6kro_report) {
            defmt::error!("6KRO Buffer Overflow: {:?}", val);
        }
    }

    fn push_nkro_kbd(&mut self) {
        if let Err(val) = self.kbd_nkro.push_input(&self.kbd_nkro_report) {
            defmt::error!("NKRO Buffer Overflow: {:?}", val);
        }
    }

    #[cfg(feature = "mouse")]
    fn mouse_button_bit(&mut self, button: u8, press: bool) {
        // Ignore buttons outside of 1 to 8
        if let 1..=8 = button {
            let button = button - 1;

            // Determine position
            let bit: usize = (button % 8).into();

            // Set/Unset
            if press {
                self.mouse_report.buttons |= 1 << bit;
            } else {
                self.mouse_report.buttons &= !(1 << bit);
            }
        }
    }

    #[cfg(feature = "mouse")]
    fn push_mouse(&mut self) {
        let mut updated = false;

        // Empty mouse queue
        while let Some(state) = self.mouse_consumer.dequeue() {
            updated = true;
            match state {
                MouseState::Press(key) => {
                    self.mouse_button_bit(key, true);
                }
                MouseState::Release(key) => {
                    self.mouse_button_bit(key, false);
                }
                MouseState::Position { x, y } => {
                    self.mouse_report.x = x;
                    self.mouse_report.y = y;
                }
                MouseState::VertWheel(pos) => {
                    self.mouse_report.vert_wheel = pos;
                }
                MouseState::HorzWheel(pos) => {
                    self.mouse_report.horz_wheel = pos;
                }
                MouseState::Clear => {
                    self.mouse_report.buttons = 0;
                }
            }
        }

        // Push report
        if updated {
            if let Err(val) = self.mouse.push_input(&self.mouse_report) {
                defmt::error!("Mouse Buffer Overflow: {:?}", val);
            }
        }

        // Clear relative fields
        self.mouse_report.x = 0;
        self.mouse_report.y = 0;
        self.mouse_report.vert_wheel = 0;
        self.mouse_report.horz_wheel = 0;
    }

    fn push_ctrl(&mut self) {
        let mut updated = false;

        // Empty ctrl queue
        while let Some(state) = self.ctrl_consumer.dequeue() {
            updated = true;
            match state {
                CtrlState::SystemCtrlPress(key) => {
                    self.ctrl_report.system_ctrl = key;
                }
                CtrlState::SystemCtrlRelease(_key) => {
                    self.ctrl_report.system_ctrl = 0;
                }
                CtrlState::ConsumerCtrlPress(key) => {
self.ctrl_report.consumer_ctrl = key; } CtrlState::ConsumerCtrlRelease(_key) => { self.ctrl_report.consumer_ctrl = 0; } CtrlState::Clear => { self.ctrl_report.consumer_ctrl = 0; self.ctrl_report.system_ctrl = 0; } } } // Push report if updated { if let Err(val) = self.ctrl.push_input(&self.ctrl_report) { defmt::error!("Ctrl Buffer Overflow: {:?}", val); } } } /// Processes each of the spsc queues and pushes data over USB /// This is primarily for keyboard, mouse and ctrl interfaces. /// HID-IO is handled with poll() pub fn push(&mut self) { // Update keyboard if necessary if self.update_kbd() { // Check protocol mode to decide nkro vs. 6kro (boot) match self.get_kbd_protocol_mode() { HidProtocolMode::Report => { self.push_nkro_kbd(); } HidProtocolMode::Boot => { self.push_6kro_kbd(); } } } // Push consumer and system control reports self.push_ctrl(); // Push mouse reports #[cfg(feature = "mouse")] self.push_mouse(); } /// Poll the HID-IO interface #[cfg(feature = "hidio")] pub fn poll< KINTF: KiibohdCommandInterface<H>, const TX: usize, const RX: usize, const N: usize, const H: usize, const S: usize, const ID: usize, >( &mut self, interface: &mut CommandInterface<KINTF, TX, RX, N, H, S, ID>, ) { // Check for any incoming packets while !interface.rx_bytebuf.is_full() { let mut packet = Vec::new(); packet.resize_default(N).unwrap(); match self.hidio.pull_raw_output(&mut packet) { Ok(size) => { packet.truncate(size); defmt::trace!("rx packet: {}", packet); interface.rx_bytebuf.enqueue(packet).unwrap(); } Err(UsbError::WouldBlock) => { // No pending data break; } Err(e) => { defmt::warn!( "Failed to add packet to hidio rx buffer: {} -> {}", e, packet ); break; } } } // Process rx buffer if let Err(e) = interface.process_rx(0) { defmt::warn!("process_rx failed -> {}", e); } // Push as many packets as possible while !interface.tx_bytebuf.is_empty() { // Don't dequeue yet, we might not be able to send let packet = interface.tx_bytebuf.peek().unwrap(); defmt::trace!("tx packet: {}", packet); // Attempt to push match self.hidio.push_raw_input(packet) { Ok(_size) => { // Dequeue interface.tx_bytebuf.dequeue().unwrap(); } Err(UsbError::WouldBlock) => { // USB Endpoint buffer is likely full break; } Err(e) => { defmt::warn!("Failed to push hidio tx packet: {} -> {}", e, packet); break; } } } } }
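// A standalone, host-side sketch (not part of the original crate) of the NKRO
// bitmap mapping that `nkro_bit` implements above: HID usage `key` occupies
// bit `(key - 1) % 8` of byte `(key - 1) / 8`, usage 0 means "no key", and
// usages above 0xE7 are rejected before this computation is reached.
#[cfg(test)]
mod nkro_bitmap_sketch {
    /// Mirrors the byte/bit computation in `nkro_bit`.
    fn position(key: u8) -> (usize, usize) {
        let key = key - 1;
        ((key / 8) as usize, (key % 8) as usize)
    }

    #[test]
    fn mapping() {
        assert_eq!(position(0x01), (0, 0)); // first usage -> byte 0, bit 0
        assert_eq!(position(0x04), (0, 3)); // 'A' (0x04) -> byte 0, bit 3
        assert_eq!(position(0xE7), (28, 6)); // last usage fits the 29-byte bitmap
    }
}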
// Copyright (c) 2021 The Lutino Projects // // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file or at // https://opensource.org/licenses/MIT. //! Contains types related to inserting new entities into a [`World`](super::world::World) use std::marker::PhantomData; use nh_proc_macros::impl_all_tuples; use crate::entity::Entity; use crate::query::filter::{FilterResult, LayoutFilter}; use crate::storage::archetype::{Archetype, ArchetypeIndex, EntityLayout}; use crate::storage::component::{Component, ComponentTypeId}; use crate::storage::{ComponentIndex, ComponentStorage, MultiMut, UnknownComponentStorage}; /// Provides access to writers for writing new entities into an archetype in a world. /// /// Users must claim all components contained in the archetype and write an equal number /// of components to each as the number of entities pushed to the writer. pub struct ArchetypeWriter<'a> { arch_index: ArchetypeIndex, archetype: &'a mut Archetype, components: MultiMut<'a>, claimed: u128, initial_count: usize, } impl<'a> ArchetypeWriter<'a> { /// Constructs a new archetype writer. pub fn new( arch_index: ArchetypeIndex, archetype: &'a mut Archetype, components: MultiMut<'a>, ) -> Self { let initial_count = archetype.entities().len(); Self { arch_index, archetype, components, claimed: 0, initial_count, } } /// Returns the archetype being written to. pub fn archetype(&self) -> &Archetype { &self.archetype } fn mark_claimed(&mut self, type_id: ComponentTypeId) { let component_index = self .archetype .layout() .component_types() .iter() .position(|t| t == &type_id) .expect("archetype does not contain component"); let mask = 1u128 << component_index; assert!(self.claimed & mask == 0, "component type already claimed"); self.claimed |= mask; } /// Claims a component storage for a given component. /// /// # Panics /// Panics if the storage for the requested component type has already been claimed /// or does not exist in the archetype. pub fn claim_components<T: Component>(&mut self) -> ComponentWriter<'a, T> { let type_id = ComponentTypeId::of::<T>(); self.mark_claimed(type_id); ComponentWriter { components: unsafe { self.components.claim::<T>() }.unwrap(), archetype: self.arch_index, } } /// Claims a component storage for a given component. /// /// # Panics /// Panics if the storage for the requested component type has already been claimed /// or does not exist in the archetype. pub fn claim_components_unknown( &mut self, type_id: ComponentTypeId, ) -> UnknownComponentWriter<'a> { self.mark_claimed(type_id); UnknownComponentWriter { components: unsafe { self.components.claim_unknown(type_id) }.unwrap(), archetype: self.arch_index, } } /// Pushes an entity ID into the archetype. pub fn push(&mut self, entity: Entity) { self.archetype.push(entity); } /// Reserves capacity for at least `additional` extra entity IDs in the archetype. pub fn reserve(&mut self, additional: usize) { self.archetype.reserve(additional) } /// Returns a slice of entities inserted by this writer, and the component index of the first /// inserted entity. pub fn inserted(&self) -> (ComponentIndex, &[Entity]) { let start = self.initial_count; (ComponentIndex(start), &self.archetype.entities()[start..]) } } impl<'a> Drop for ArchetypeWriter<'a> { fn drop(&mut self) { assert_eq!( self.claimed.count_ones() as usize, self.archetype.layout().component_types().len() ); } } /// Provides the ability to append new components to the entities in an archetype. 
pub struct ComponentWriter<'a, T: Component> { components: &'a mut T::Storage, archetype: ArchetypeIndex, } impl<'a, T: Component> ComponentWriter<'a, T> { /// Writes the given components into the component storage. /// /// # Safety /// `ptr` must point to a valid array of `T` of length at least as long as `len`. /// The data in this array will be memcopied into the world's internal storage. /// If the component type is not `Copy`, then the caller must ensure that the memory /// copied is not accessed until it is re-initialized. It is recommended to immediately /// `std::mem::forget` the source after calling `extend_memcopy`. pub unsafe fn extend_memcopy(&mut self, ptr: *const T, len: usize) { self.components.extend_memcopy(self.archetype, ptr, len) } /// Ensures that the given spare capacity is available in the target storage location. /// Calling this function before calling `extend_memcopy` is not required, but may /// avoid additional vector resizes. pub fn ensure_capacity(&mut self, space: usize) { self.components.ensure_capacity(self.archetype, space); } } /// Provides the ability to append new components to the entities in an archetype. pub struct UnknownComponentWriter<'a> { components: &'a mut dyn UnknownComponentStorage, archetype: ArchetypeIndex, } impl<'a> UnknownComponentWriter<'a> { /// Writes the given components into the component storage. /// /// # Safety /// `ptr` must point to a valid array of the correct component type of length at least as /// long as `len`. /// The data in this array will be memcopied into the world's internal storage. /// If the component type is not `Copy`, then the caller must ensure that the memory /// copied is not accessed until it is re-initialized. It is recommended to immediately /// `std::mem::forget` the source after calling `extend_memcopy`. pub unsafe fn extend_memcopy_raw(&mut self, ptr: *const u8, len: usize) { self.components.extend_memcopy_raw(self.archetype, ptr, len) } /// Ensures that the given spare capacity is available in the target storage location. /// Calling this function before calling `extend_memcopy` is not required, but may /// avoid additional vector resizes. pub fn ensure_capacity(&mut self, space: usize) { self.components.ensure_capacity(self.archetype, space); } /// Moves all of the components from the given storage location into this writer's storage. pub fn move_archetype_from( &mut self, src_archetype: ArchetypeIndex, src: &mut dyn UnknownComponentStorage, ) { src.transfer_archetype(src_archetype, self.archetype, self.components); } /// Moves a single component from the given storage location into this writer's storage. pub fn move_component_from( &mut self, src_archetype: ArchetypeIndex, src_component: ComponentIndex, src: &mut dyn UnknownComponentStorage, ) { src.transfer_component( src_archetype, src_component, self.archetype, self.components, ); } } /// Defines a type which can describe the layout of an archetype. pub trait ArchetypeSource { /// A filter which finds existing archetypes which match the layout. type Filter: LayoutFilter; /// Returns the archetype source's filter. fn filter(&self) -> Self::Filter; /// Constructs a new entity layout. fn layout(&mut self) -> EntityLayout; } /// Describes a type which can write entity components into a world. pub trait ComponentSource: ArchetypeSource { /// Writes components for new entities into an archetype. fn push_components<'a>( &mut self, writer: &mut ArchetypeWriter<'a>, entities: impl Iterator<Item = Entity>, ); } /// A collection with a known length. 
pub trait KnownLength {
    fn len(&self) -> usize;
}

/// Converts a type into a [`ComponentSource`].
pub trait IntoComponentSource {
    /// The output component source.
    type Source: ComponentSource;

    /// Converts this structure into a component source.
    fn into(self) -> Self::Source;
}

/// A wrapper for a Structure of Arrays used for efficient entity insertion.
pub struct StructOfArray<T> {
    vecs: T,
}

/// A single vector component inside a StructOfArray.
pub struct StructOfArrayItem<T> {
    _phantom: PhantomData<T>,
    ptr: *mut T,
    len: usize,
    capacity: usize,
}

unsafe impl<T> Send for StructOfArrayItem<T> {}
unsafe impl<T> Sync for StructOfArrayItem<T> {}

impl<T> Drop for StructOfArrayItem<T> {
    fn drop(&mut self) {
        unsafe {
            // reconstruct the original vector, but with length set to the remaining elements
            let _ = Vec::from_raw_parts(self.ptr, self.len, self.capacity);
        }
    }
}

/// Describes a type which can convert itself into a StructOfArray representation for entity
/// insertion.
pub trait IntoStructOfArray {
    /// The output entity source.
    type Source;

    /// Converts this into a StructOfArray component source.
    fn into_soa(self) -> Self::Source;
}

/// A wrapper for an Array of Structures used for entity insertion.
pub struct ArrayOfStruct<T, Iter> {
    _phantom: PhantomData<T>,
    iter: Iter,
}

impl<T, Iter> ArrayOfStruct<T, Iter>
where
    Iter: Iterator<Item = T>,
{
    /// Constructs a new ArrayOfStruct.
    fn new(iter: Iter) -> Self {
        Self {
            iter,
            _phantom: PhantomData,
        }
    }
}

impl<I> IntoComponentSource for I
where
    I: IntoIterator,
    ArrayOfStruct<I::Item, I::IntoIter>: ComponentSource,
{
    type Source = ArrayOfStruct<I::Item, I::IntoIter>;

    fn into(self) -> Self::Source {
        <Self::Source>::new(self.into_iter())
    }
}

/// A layout filter used to select the appropriate archetype for inserting
/// entities from a component source into a world.
pub struct ComponentSourceFilter<T>(PhantomData<T>);

impl<T> Default for ComponentSourceFilter<T> {
    fn default() -> Self {
        ComponentSourceFilter(PhantomData)
    }
}

impl LayoutFilter for ComponentSourceFilter<()> {
    fn matches_layout(&self, components: &[ComponentTypeId]) -> FilterResult {
        FilterResult::Match(components.is_empty())
    }
}

impl<Iter> IntoComponentSource for ArrayOfStruct<(), Iter>
where
    Iter: Iterator,
    ArrayOfStruct<(), Iter>: ComponentSource,
{
    type Source = Self;

    fn into(self) -> Self::Source {
        self
    }
}

impl<Iter> ArchetypeSource for ArrayOfStruct<(), Iter>
where
    Iter: Iterator,
{
    type Filter = ComponentSourceFilter<()>;

    fn filter(&self) -> Self::Filter {
        ComponentSourceFilter(PhantomData)
    }

    fn layout(&mut self) -> EntityLayout {
        EntityLayout::new()
    }
}

impl<Iter> ComponentSource for ArrayOfStruct<(), Iter>
where
    Iter: Iterator,
{
    fn push_components<'a>(
        &mut self,
        writer: &mut ArchetypeWriter<'a>,
        mut entities: impl Iterator<Item = Entity>,
    ) {
        for _ in &mut self.iter {
            let entity = entities.next().unwrap();
            writer.push(entity);
        }
    }
}

impl<Iter> KnownLength for ArrayOfStruct<(), Iter>
where
    Iter: ExactSizeIterator,
{
    fn len(&self) -> usize {
        self.iter.len()
    }
}

macro_rules! impl_component_source {
    ( $( $ty: ident ),* ) => {
        impl<$( $ty: Component ),*> LayoutFilter for ComponentSourceFilter<($( $ty, )*)> {
            fn matches_layout(
                &self,
                components: &[ComponentTypeId],
            ) -> FilterResult {
                let types = &[$( ComponentTypeId::of::<$ty>() ),*];
                FilterResult::Match(components.len() == types.len()
                    && types.iter().all(|t| components.contains(t)))
            }
        }

        paste::item!
{ impl<$( $ty: Component ),*> StructOfArray<($( StructOfArrayItem<$ty>, )*)> { fn validate_equal_length(vecs: &($( Vec<$ty>, )*)) -> bool { #![allow(non_snake_case)] let len = vecs.0.len(); let ($( [<$ty _vec>], )*) = vecs; $( if [<$ty _vec>].len() != len { return false; } )* true } } impl<$( $ty: Component ),*> IntoStructOfArray for ($( Vec<$ty>, )*) { type Source = StructOfArray<($( StructOfArrayItem<$ty>, )*)>; fn into_soa(self) -> Self::Source { #![allow(non_snake_case)] if !<Self::Source>::validate_equal_length(&self) { panic!("all component vecs must have equal length"); } let ($([<$ty _vec>], )*) = self; StructOfArray { vecs: ($({ let mut [<$ty _vec>] = std::mem::ManuallyDrop::new([<$ty _vec>]); StructOfArrayItem { _phantom: PhantomData, capacity: [<$ty _vec>].capacity(), len: [<$ty _vec>].len(), ptr: [<$ty _vec>].as_mut_ptr(), } }, )*), } } } } // impl<$( $ty: Component ),*> IntoComponentSource for ($( Vec<$ty>, )*) { // type Source = StructOfArray<($( StructOfArrayItem<$ty>, )*)>; // fn into(self) -> Self::Source { StructOfArray::<($( StructOfArrayItem<$ty>, )*)>::new(self) } // } impl<$( $ty ),*> IntoComponentSource for StructOfArray<($( StructOfArrayItem<$ty>, )*)> where StructOfArray<($( StructOfArrayItem<$ty>, )*)>: ComponentSource { type Source = Self; fn into(self) -> Self::Source { self } } impl<$( $ty: Component ),*> ArchetypeSource for StructOfArray<($( StructOfArrayItem<$ty>, )*)> { type Filter = ComponentSourceFilter<($( $ty, )*)>; fn filter(&self) -> Self::Filter { ComponentSourceFilter(PhantomData) } fn layout(&mut self) -> EntityLayout { let mut layout = EntityLayout::new(); $( layout.register_component::<$ty>(); )* layout } } impl<$( $ty: Component ),*> ComponentSource for StructOfArray<($( StructOfArrayItem<$ty>, )*)> { paste::item! { fn push_components<'a>( &mut self, writer: &mut ArchetypeWriter<'a>, mut entities: impl Iterator<Item = Entity>, ) { #![allow(unused_variables)] #![allow(non_snake_case)] let len = self.vecs.0.len; for _ in 0..len { writer.push(entities.next().unwrap()); } let ($( [<$ty _vec>], )*) = &mut self.vecs; $( let mut target = writer.claim_components::<$ty>(); unsafe { target.extend_memcopy([<$ty _vec>].ptr, len); [<$ty _vec>].len = 0 } )* } } } impl<$( $ty: Component ),*> KnownLength for StructOfArray<($( StructOfArrayItem<$ty>, )*)> { fn len(&self) -> usize { self.vecs.0.len } } impl<Iter, $( $ty: Component ),*> IntoComponentSource for ArrayOfStruct<($( $ty, )*), Iter> where Iter: Iterator<Item = ($( $ty, )*)>, ArrayOfStruct<($( $ty, )*), Iter>: ComponentSource { type Source = Self; fn into(self) -> Self::Source { self } } // impl<Iter, $( $ty: Component ),*> LayoutFilter for ArrayOfStruct<($( $ty, )*), Iter> // where // Iter: Iterator<Item = ($( $ty, )*)> // { // fn matches_layout( // &self, // components: &[ComponentTypeId], // ) -> Option<bool> { // let types = &[$( ComponentTypeId::of::<$ty>() ),*]; // Some(components.len() == types.len() // && types.iter().all(|t| components.contains(t))) // } // } impl<Iter, $( $ty: Component ),*> ArchetypeSource for ArrayOfStruct<($( $ty, )*), Iter> where Iter: Iterator<Item = ($( $ty, )*)> { type Filter = ComponentSourceFilter<($( $ty, )*)>; fn filter(&self) -> Self::Filter { ComponentSourceFilter(PhantomData) } fn layout(&mut self) -> EntityLayout { let mut layout = EntityLayout::new(); $( layout.register_component::<$ty>(); )* layout } } impl<Iter, $( $ty: Component ),*> ComponentSource for ArrayOfStruct<($( $ty, )*), Iter> where Iter: Iterator<Item = ($( $ty, )*)> { paste::item! 
{ fn push_components<'a>( &mut self, writer: &mut ArchetypeWriter<'a>, mut entities: impl Iterator<Item = Entity>, ) { #![allow(non_snake_case)] $( let mut [<$ty _target>] = writer.claim_components::<$ty>(); )* let (min_size, _) = self.iter.size_hint(); $( [<$ty _target>].ensure_capacity(min_size); )* let mut count = 0; for ($( $ty, )*) in &mut self.iter { count += 1; $( unsafe { [<$ty _target>].extend_memcopy(&$ty, 1); std::mem::forget($ty); } )* } for _ in 0..count { let entity = entities.next().unwrap(); writer.push(entity); } } } } impl<Iter, $( $ty: Component ),*> KnownLength for ArrayOfStruct<($( $ty, )*), Iter> where Iter: Iterator<Item = ($( $ty, )*)> + ExactSizeIterator { fn len(&self) -> usize { self.iter.len() } } }; } impl_all_tuples!(impl_component_source, 1, 26, A);
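// A usage sketch for the SoA insertion path generated above (not part of the
// original module). `Position` and `Velocity` are hypothetical components; the
// sketch assumes the crate provides a blanket `Component` impl for plain
// `'static + Send + Sync` types, as legion does.
#[cfg(test)]
mod soa_sketch {
    use super::{IntoStructOfArray, KnownLength};

    #[derive(Clone, Copy)]
    struct Position(f32);
    #[derive(Clone, Copy)]
    struct Velocity(f32);

    #[test]
    fn vecs_to_soa() {
        // Each vec supplies one column; `into_soa` takes ownership of the
        // buffers without copying them (see `StructOfArrayItem`) and panics
        // if the vecs differ in length.
        let soa = (vec![Position(0.0); 3], vec![Velocity(1.0); 3]).into_soa();
        assert_eq!(soa.len(), 3);
    }
}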
// Copyright (c) 2021 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or https://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.

use crate::buffer::BufferAccess;
use crate::device::Device;
use crate::device::DeviceOwned;
use crate::VulkanObject;
use std::error;
use std::fmt;

/// Checks whether an indirect buffer can be bound.
pub fn check_indirect_buffer<Inb>(
    device: &Device,
    buffer: &Inb,
) -> Result<(), CheckIndirectBufferError>
where
    Inb: BufferAccess + Send + Sync + 'static,
{
    assert_eq!(
        buffer.inner().buffer.device().internal_object(),
        device.internal_object()
    );

    if !buffer.inner().buffer.usage().indirect_buffer {
        return Err(CheckIndirectBufferError::BufferMissingUsage);
    }

    Ok(())
}

/// Error that can happen when checking whether binding an indirect buffer is valid.
#[derive(Debug, Copy, Clone)]
pub enum CheckIndirectBufferError {
    /// The "indirect buffer" usage must be enabled on the indirect buffer.
    BufferMissingUsage,
    /// The maximum number of indirect draws has been exceeded.
    MaxDrawIndirectCountLimitExceeded {
        /// The limit that must be fulfilled.
        limit: u32,
        /// What was requested.
        requested: u32,
    },
}

impl error::Error for CheckIndirectBufferError {}

impl fmt::Display for CheckIndirectBufferError {
    #[inline]
    fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        match *self {
            CheckIndirectBufferError::BufferMissingUsage => write!(
                fmt,
                "the indirect buffer usage must be enabled on the indirect buffer"
            ),
            CheckIndirectBufferError::MaxDrawIndirectCountLimitExceeded {
                limit,
                requested,
            } => write!(
                fmt,
                "the maximum number of indirect draws has been exceeded (requested {}, limit {})",
                requested, limit
            ),
        }
    }
}
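// A consumer-side sketch (not from the original file): `device` and `buffer`
// are hypothetical values whose construction is out of scope for this module.
//
// match check_indirect_buffer(&device, &buffer) {
//     Ok(()) => { /* the buffer may be bound for an indirect draw */ }
//     Err(CheckIndirectBufferError::BufferMissingUsage) => {
//         // Recreate the buffer with `indirect_buffer` enabled in its
//         // `BufferUsage` flags.
//     }
//     Err(err) => panic!("indirect buffer check failed: {}", err),
// }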
use pyo3_build_config::{find_interpreter, get_config_from_interpreter}; fn main() -> Result<(), Box<dyn std::error::Error>> { let config = get_config_from_interpreter(&find_interpreter()?)?; println!("implementation: {}", config.implementation); println!("interpreter version: {}", config.version); println!("interpreter path: {:?}", config.executable); println!("libdir: {:?}", config.libdir); println!("shared: {}", config.shared); println!("base prefix: {:?}", config.base_prefix); println!("ld_version: {:?}", config.ld_version); println!("pointer width: {:?}", config.calcsize_pointer); Ok(()) }
#[doc = "Register `IOMMU_PMU_ACCESS_HIGH4_REG` reader"] pub struct R(crate::R<IOMMU_PMU_ACCESS_HIGH4_REG_SPEC>); impl core::ops::Deref for R { type Target = crate::R<IOMMU_PMU_ACCESS_HIGH4_REG_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl From<crate::R<IOMMU_PMU_ACCESS_HIGH4_REG_SPEC>> for R { #[inline(always)] fn from(reader: crate::R<IOMMU_PMU_ACCESS_HIGH4_REG_SPEC>) -> Self { R(reader) } } #[doc = "Register `IOMMU_PMU_ACCESS_HIGH4_REG` writer"] pub struct W(crate::W<IOMMU_PMU_ACCESS_HIGH4_REG_SPEC>); impl core::ops::Deref for W { type Target = crate::W<IOMMU_PMU_ACCESS_HIGH4_REG_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl core::ops::DerefMut for W { #[inline(always)] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl From<crate::W<IOMMU_PMU_ACCESS_HIGH4_REG_SPEC>> for W { #[inline(always)] fn from(writer: crate::W<IOMMU_PMU_ACCESS_HIGH4_REG_SPEC>) -> Self { W(writer) } } impl W { #[doc = "Writes raw bits to the register."] #[inline(always)] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.0.bits(bits); self } } #[doc = "IOMMU PMU Access High 4 Register\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [iommu_pmu_access_high4_reg](index.html) module"] pub struct IOMMU_PMU_ACCESS_HIGH4_REG_SPEC; impl crate::RegisterSpec for IOMMU_PMU_ACCESS_HIGH4_REG_SPEC { type Ux = u32; } #[doc = "`read()` method returns [iommu_pmu_access_high4_reg::R](R) reader structure"] impl crate::Readable for IOMMU_PMU_ACCESS_HIGH4_REG_SPEC { type Reader = R; } #[doc = "`write(|w| ..)` method takes [iommu_pmu_access_high4_reg::W](W) writer structure"] impl crate::Writable for IOMMU_PMU_ACCESS_HIGH4_REG_SPEC { type Writer = W; } #[doc = "`reset()` method sets IOMMU_PMU_ACCESS_HIGH4_REG to value 0"] impl crate::Resettable for IOMMU_PMU_ACCESS_HIGH4_REG_SPEC { #[inline(always)] fn reset_value() -> Self::Ux { 0 } }
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

//! Defines the join plan for executing partitions in parallel and then joining the results
//! into a set of partitions.

use ahash::RandomState;
use arrow::{
    array::{
        ArrayData, ArrayRef, BooleanArray, LargeStringArray, PrimitiveArray,
        UInt32BufferBuilder, UInt32Builder, UInt64BufferBuilder, UInt64Builder,
    },
    compute,
    datatypes::{UInt32Type, UInt64Type},
};
use smallvec::{smallvec, SmallVec};
use std::any::Any;
use std::sync::Arc;
use std::{time::Instant, vec};

use async_trait::async_trait;
use futures::{Stream, StreamExt, TryStreamExt};
use hashbrown::HashMap;
use tokio::sync::Mutex;

use arrow::array::Array;
use arrow::datatypes::DataType;
use arrow::datatypes::{Schema, SchemaRef};
use arrow::error::Result as ArrowResult;
use arrow::record_batch::RecordBatch;

use arrow::array::{
    Int16Array, Int32Array, Int64Array, Int8Array, StringArray, UInt16Array, UInt32Array,
    UInt64Array, UInt8Array,
};

use hashbrown::raw::RawTable;

use super::expressions::Column;
use super::hash_utils::create_hashes;
use super::{
    coalesce_partitions::CoalescePartitionsExec,
    hash_utils::{build_join_schema, check_join_is_valid, JoinOn},
};
use crate::error::{DataFusionError, Result};
use crate::logical_plan::JoinType;

use super::{
    DisplayFormatType, ExecutionPlan, Partitioning, RecordBatchStream,
    SendableRecordBatchStream,
};
use crate::physical_plan::coalesce_batches::concat_batches;
use crate::physical_plan::{PhysicalExpr, SQLMetric};
use log::debug;
use std::fmt;

// Maps a `u64` hash value, computed from the left-side join keys (the "on" values), to a
// list of row indices that share that hash.
//
// The hash is stored as the first tuple element so that the raw table can grow without
// recomputing keys (see the `|(hash, _)| *hash` re-hasher closures below). By allocating
// the table with capacity for *at least* the number of rows on the left side, we make
// sure that it never has to re-hash while it is being built.
// E.g. 1 -> [3, 6, 8] indicates that the join-key values of rows 3, 6 and 8 hash to 1.
// As the key is a hash value, possible hash collisions must be checked in the probe
// stage: a probe row may land in the same bucket even though its values don't match.
// Those rows are checked in [equal_rows].
// TODO: speed up collision check and move away from using a hashbrown HashMap
// https://github.com/apache/arrow-datafusion/issues/50
struct JoinHashMap(RawTable<(u64, SmallVec<[u64; 1]>)>);

impl fmt::Debug for JoinHashMap {
    fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result {
        Ok(())
    }
}

type JoinLeftData = Arc<(JoinHashMap, RecordBatch)>;

/// Join execution plan: executes partitions in parallel and combines them into a set of
/// partitions.
#[derive(Debug)]
pub struct HashJoinExec {
    /// left (build) side which gets hashed
    left: Arc<dyn ExecutionPlan>,
    /// right (probe) side which is filtered by the hash table
    right: Arc<dyn ExecutionPlan>,
    /// Set of common columns used to join on
    on: Vec<(Column, Column)>,
    /// How the join is performed
    join_type: JoinType,
    /// The schema once the join is applied
    schema: SchemaRef,
    /// Build-side
    build_side: Arc<Mutex<Option<JoinLeftData>>>,
    /// Shares the `RandomState` for the hashing algorithm
    random_state: RandomState,
    /// Partitioning mode to use
    mode: PartitionMode,
    /// Metrics
    metrics: Arc<HashJoinMetrics>,
}

/// Metrics for HashJoinExec
#[derive(Debug)]
struct HashJoinMetrics {
    /// Total time for joining probe-side batches to the build-side batches
    join_time: Arc<SQLMetric>,
    /// Number of batches consumed by this operator
    input_batches: Arc<SQLMetric>,
    /// Number of rows consumed by this operator
    input_rows: Arc<SQLMetric>,
    /// Number of batches produced by this operator
    output_batches: Arc<SQLMetric>,
    /// Number of rows produced by this operator
    output_rows: Arc<SQLMetric>,
}

impl HashJoinMetrics {
    fn new() -> Self {
        Self {
            join_time: SQLMetric::time_nanos(),
            input_batches: SQLMetric::counter(),
            input_rows: SQLMetric::counter(),
            output_batches: SQLMetric::counter(),
            output_rows: SQLMetric::counter(),
        }
    }
}

#[derive(Clone, Copy, Debug, PartialEq)]
/// Partitioning mode to use for hash join
pub enum PartitionMode {
    /// Left/right children are partitioned using the left and right keys
    Partitioned,
    /// Left side will be collected into one partition
    CollectLeft,
}

/// Information about the index and placement (left or right) of the columns
struct ColumnIndex {
    /// Index of the column
    index: usize,
    /// Whether the column is at the left or right side
    is_left: bool,
}

impl HashJoinExec {
    /// Tries to create a new [HashJoinExec].
    /// # Error
    /// This function errors when it is not possible to join the left and right sides on keys `on`.
    pub fn try_new(
        left: Arc<dyn ExecutionPlan>,
        right: Arc<dyn ExecutionPlan>,
        on: JoinOn,
        join_type: &JoinType,
        partition_mode: PartitionMode,
    ) -> Result<Self> {
        let left_schema = left.schema();
        let right_schema = right.schema();
        check_join_is_valid(&left_schema, &right_schema, &on)?;

        let schema = Arc::new(build_join_schema(&left_schema, &right_schema, join_type));

        let random_state = RandomState::with_seeds(0, 0, 0, 0);

        Ok(HashJoinExec {
            left,
            right,
            on,
            join_type: *join_type,
            schema,
            build_side: Arc::new(Mutex::new(None)),
            random_state,
            mode: partition_mode,
            metrics: Arc::new(HashJoinMetrics::new()),
        })
    }

    /// left (build) side which gets hashed
    pub fn left(&self) -> &Arc<dyn ExecutionPlan> {
        &self.left
    }

    /// right (probe) side which is filtered by the hash table
    pub fn right(&self) -> &Arc<dyn ExecutionPlan> {
        &self.right
    }

    /// Set of common columns used to join on
    pub fn on(&self) -> &[(Column, Column)] {
        &self.on
    }

    /// How the join is performed
    pub fn join_type(&self) -> &JoinType {
        &self.join_type
    }

    /// The partitioning mode of this hash join
    pub fn partition_mode(&self) -> &PartitionMode {
        &self.mode
    }

    /// Calculates column indices and left/right placement on input / output schemas and join type
    fn column_indices_from_schema(&self) -> ArrowResult<Vec<ColumnIndex>> {
        let (primary_is_left, primary_schema, secondary_schema) = match self.join_type {
            JoinType::Inner
            | JoinType::Left
            | JoinType::Full
            | JoinType::Semi
            | JoinType::Anti => (true, self.left.schema(), self.right.schema()),
            JoinType::Right => (false, self.right.schema(), self.left.schema()),
        };
        let mut column_indices = Vec::with_capacity(self.schema.fields().len());
        for field in self.schema.fields() {
            let (is_primary, index) = match primary_schema.index_of(field.name()) {
                Ok(i) => Ok((true, i)),
                Err(_) => {
                    match secondary_schema.index_of(field.name()) {
                        Ok(i) => Ok((false, i)),
                        _ => Err(DataFusionError::Internal(
                            format!("During execution, the column {} was not found in either the left or the right side of the join", field.name())
                        ))
                    }
                }
            }.map_err(DataFusionError::into_arrow_external_error)?;
            let is_left = is_primary && primary_is_left || !is_primary && !primary_is_left;
            column_indices.push(ColumnIndex { index, is_left });
        }

        Ok(column_indices)
    }
}

#[async_trait]
impl ExecutionPlan for HashJoinExec {
    fn as_any(&self) -> &dyn Any {
        self
    }

    fn schema(&self) -> SchemaRef {
        self.schema.clone()
    }

    fn children(&self) -> Vec<Arc<dyn ExecutionPlan>> {
        vec![self.left.clone(), self.right.clone()]
    }

    fn with_new_children(
        &self,
        children: Vec<Arc<dyn ExecutionPlan>>,
    ) -> Result<Arc<dyn ExecutionPlan>> {
        match children.len() {
            2 => Ok(Arc::new(HashJoinExec::try_new(
                children[0].clone(),
                children[1].clone(),
                self.on.clone(),
                &self.join_type,
                self.mode,
            )?)),
            _ => Err(DataFusionError::Internal(
                "HashJoinExec wrong number of children".to_string(),
            )),
        }
    }

    fn output_partitioning(&self) -> Partitioning {
        self.right.output_partitioning()
    }

    async fn execute(&self, partition: usize) -> Result<SendableRecordBatchStream> {
        let on_left = self.on.iter().map(|on| on.0.clone()).collect::<Vec<_>>();
        // we only want to compute the build side once for PartitionMode::CollectLeft
        let left_data = {
            match self.mode {
                PartitionMode::CollectLeft => {
                    let mut build_side = self.build_side.lock().await;
                    match build_side.as_ref() {
                        Some(stream) => stream.clone(),
                        None => {
                            let start = Instant::now();

                            // merge all left parts into a single stream
                            let merge = CoalescePartitionsExec::new(self.left.clone());
                            let stream = merge.execute(0).await?;

                            // This operation performs two steps at once:
                            // 1. creates a [JoinHashMap] of all batches from the stream
                            // 2. stores the batches in a vector.
                            let initial = (0, Vec::new());
                            let (num_rows, batches) = stream
                                .try_fold(initial, |mut acc, batch| async {
                                    acc.0 += batch.num_rows();
                                    acc.1.push(batch);
                                    Ok(acc)
                                })
                                .await?;
                            let mut hashmap =
                                JoinHashMap(RawTable::with_capacity(num_rows));
                            let mut hashes_buffer = Vec::new();
                            let mut offset = 0;
                            for batch in batches.iter() {
                                hashes_buffer.clear();
                                hashes_buffer.resize(batch.num_rows(), 0);
                                update_hash(
                                    &on_left,
                                    batch,
                                    &mut hashmap,
                                    offset,
                                    &self.random_state,
                                    &mut hashes_buffer,
                                )?;
                                offset += batch.num_rows();
                            }
                            // Merge all batches into a single batch, so we
                            // can directly index into the arrays
                            let single_batch =
                                concat_batches(&self.left.schema(), &batches, num_rows)?;
                            let left_side = Arc::new((hashmap, single_batch));

                            *build_side = Some(left_side.clone());

                            debug!(
                                "Built build-side of hash join containing {} rows in {} ms",
                                num_rows,
                                start.elapsed().as_millis()
                            );

                            left_side
                        }
                    }
                }
                PartitionMode::Partitioned => {
                    let start = Instant::now();

                    // Load 1 partition of left side in memory
                    let stream = self.left.execute(partition).await?;

                    // This operation performs two steps at once:
                    // 1. creates a [JoinHashMap] of all batches from the stream
                    // 2. stores the batches in a vector.
                    let initial = (0, Vec::new());
                    let (num_rows, batches) = stream
                        .try_fold(initial, |mut acc, batch| async {
                            acc.0 += batch.num_rows();
                            acc.1.push(batch);
                            Ok(acc)
                        })
                        .await?;
                    let mut hashmap = JoinHashMap(RawTable::with_capacity(num_rows));
                    let mut hashes_buffer = Vec::new();
                    let mut offset = 0;
                    for batch in batches.iter() {
                        hashes_buffer.clear();
                        hashes_buffer.resize(batch.num_rows(), 0);
                        update_hash(
                            &on_left,
                            batch,
                            &mut hashmap,
                            offset,
                            &self.random_state,
                            &mut hashes_buffer,
                        )?;
                        offset += batch.num_rows();
                    }
                    // Merge all batches into a single batch, so we
                    // can directly index into the arrays
                    let single_batch =
                        concat_batches(&self.left.schema(), &batches, num_rows)?;
                    let left_side = Arc::new((hashmap, single_batch));

                    debug!(
                        "Built build-side {} of hash join containing {} rows in {} ms",
                        partition,
                        num_rows,
                        start.elapsed().as_millis()
                    );

                    left_side
                }
            }
        };

        // we have the batches and the hash map with their keys. We can now create a stream
        // over the right side that uses this information to issue new batches.
let right_stream = self.right.execute(partition).await?; let on_right = self.on.iter().map(|on| on.1.clone()).collect::<Vec<_>>(); let column_indices = self.column_indices_from_schema()?; let num_rows = left_data.1.num_rows(); let visited_left_side = match self.join_type { JoinType::Left | JoinType::Full | JoinType::Semi | JoinType::Anti => { vec![false; num_rows] } JoinType::Inner | JoinType::Right => vec![], }; Ok(Box::pin(HashJoinStream::new( self.schema.clone(), on_left, on_right, self.join_type, left_data, right_stream, column_indices, self.random_state.clone(), visited_left_side, self.metrics.clone(), ))) } fn fmt_as( &self, t: DisplayFormatType, f: &mut std::fmt::Formatter, ) -> std::fmt::Result { match t { DisplayFormatType::Default => { write!( f, "HashJoinExec: mode={:?}, join_type={:?}, on={:?}", self.mode, self.join_type, self.on ) } } } fn metrics(&self) -> HashMap<String, SQLMetric> { let mut metrics = HashMap::new(); metrics.insert("joinTime".to_owned(), (*self.metrics.join_time).clone()); metrics.insert( "inputBatches".to_owned(), (*self.metrics.input_batches).clone(), ); metrics.insert("inputRows".to_owned(), (*self.metrics.input_rows).clone()); metrics.insert( "outputBatches".to_owned(), (*self.metrics.output_batches).clone(), ); metrics.insert("outputRows".to_owned(), (*self.metrics.output_rows).clone()); metrics } } /// Updates `hash` with new entries from [RecordBatch] evaluated against the expressions `on`, /// assuming that the [RecordBatch] corresponds to the `index`th fn update_hash( on: &[Column], batch: &RecordBatch, hash_map: &mut JoinHashMap, offset: usize, random_state: &RandomState, hashes_buffer: &mut Vec<u64>, ) -> Result<()> { // evaluate the keys let keys_values = on .iter() .map(|c| Ok(c.evaluate(batch)?.into_array(batch.num_rows()))) .collect::<Result<Vec<_>>>()?; // calculate the hash values let hash_values = create_hashes(&keys_values, random_state, hashes_buffer)?; // insert hashes to key of the hashmap for (row, hash_value) in hash_values.iter().enumerate() { let item = hash_map .0 .get_mut(*hash_value, |(hash, _)| *hash_value == *hash); if let Some((_, indices)) = item { indices.push((row + offset) as u64); } else { hash_map.0.insert( *hash_value, (*hash_value, smallvec![(row + offset) as u64]), |(hash, _)| *hash, ); } } Ok(()) } /// A stream that issues [RecordBatch]es as they arrive from the right of the join. 
struct HashJoinStream { /// Input schema schema: Arc<Schema>, /// columns from the left on_left: Vec<Column>, /// columns from the right used to compute the hash on_right: Vec<Column>, /// type of the join join_type: JoinType, /// information from the left left_data: JoinLeftData, /// right right: SendableRecordBatchStream, /// Information of index and left / right placement of columns column_indices: Vec<ColumnIndex>, /// Random state used for hashing initialization random_state: RandomState, /// Keeps track of the left side rows whether they are visited visited_left_side: Vec<bool>, // TODO: use a more memory efficient data structure, https://github.com/apache/arrow-datafusion/issues/240 /// There is nothing to process anymore and left side is processed in case of left join is_exhausted: bool, /// Metrics metrics: Arc<HashJoinMetrics>, } #[allow(clippy::too_many_arguments)] impl HashJoinStream { fn new( schema: Arc<Schema>, on_left: Vec<Column>, on_right: Vec<Column>, join_type: JoinType, left_data: JoinLeftData, right: SendableRecordBatchStream, column_indices: Vec<ColumnIndex>, random_state: RandomState, visited_left_side: Vec<bool>, metrics: Arc<HashJoinMetrics>, ) -> Self { HashJoinStream { schema, on_left, on_right, join_type, left_data, right, column_indices, random_state, visited_left_side, is_exhausted: false, metrics, } } } impl RecordBatchStream for HashJoinStream { fn schema(&self) -> SchemaRef { self.schema.clone() } } /// Returns a new [RecordBatch] by combining the `left` and `right` according to `indices`. /// The resulting batch has [Schema] `schema`. /// # Error /// This function errors when: /// * fn build_batch_from_indices( schema: &Schema, left: &RecordBatch, right: &RecordBatch, left_indices: UInt64Array, right_indices: UInt32Array, column_indices: &[ColumnIndex], ) -> ArrowResult<(RecordBatch, UInt64Array)> { // build the columns of the new [RecordBatch]: // 1. pick whether the column is from the left or right // 2. based on the pick, `take` items from the different RecordBatches let mut columns: Vec<Arc<dyn Array>> = Vec::with_capacity(schema.fields().len()); for column_index in column_indices { let array = if column_index.is_left { let array = left.column(column_index.index); compute::take(array.as_ref(), &left_indices, None)? } else { let array = right.column(column_index.index); compute::take(array.as_ref(), &right_indices, None)? }; columns.push(array); } RecordBatch::try_new(Arc::new(schema.clone()), columns).map(|x| (x, left_indices)) } #[allow(clippy::too_many_arguments)] fn build_batch( batch: &RecordBatch, left_data: &JoinLeftData, on_left: &[Column], on_right: &[Column], join_type: JoinType, schema: &Schema, column_indices: &[ColumnIndex], random_state: &RandomState, ) -> ArrowResult<(RecordBatch, UInt64Array)> { let (left_indices, right_indices) = build_join_indexes(left_data, batch, join_type, on_left, on_right, random_state) .unwrap(); if matches!(join_type, JoinType::Semi | JoinType::Anti) { return Ok(( RecordBatch::new_empty(Arc::new(schema.clone())), left_indices, )); } build_batch_from_indices( schema, &left_data.1, batch, left_indices, right_indices, column_indices, ) } /// returns a vector with (index from left, index from right). 
/// The size of this vector corresponds to the total size of a joined batch // For a join on column A: // left right // batch 1 // A B A D // --------------- // 1 a 3 6 // 2 b 1 2 // 3 c 2 4 // batch 2 // A B A D // --------------- // 1 a 5 10 // 2 b 2 2 // 4 d 1 1 // indices (batch, batch_row) // left right // (0, 2) (0, 0) // (0, 0) (0, 1) // (0, 1) (0, 2) // (1, 0) (0, 1) // (1, 1) (0, 2) // (0, 1) (1, 1) // (0, 0) (1, 2) // (1, 1) (1, 1) // (1, 0) (1, 2) fn build_join_indexes( left_data: &JoinLeftData, right: &RecordBatch, join_type: JoinType, left_on: &[Column], right_on: &[Column], random_state: &RandomState, ) -> Result<(UInt64Array, UInt32Array)> { let keys_values = right_on .iter() .map(|c| Ok(c.evaluate(right)?.into_array(right.num_rows()))) .collect::<Result<Vec<_>>>()?; let left_join_values = left_on .iter() .map(|c| Ok(c.evaluate(&left_data.1)?.into_array(left_data.1.num_rows()))) .collect::<Result<Vec<_>>>()?; let hashes_buffer = &mut vec![0; keys_values[0].len()]; let hash_values = create_hashes(&keys_values, random_state, hashes_buffer)?; let left = &left_data.0; match join_type { JoinType::Inner | JoinType::Semi | JoinType::Anti => { // Using a buffer builder to avoid slower normal builder let mut left_indices = UInt64BufferBuilder::new(0); let mut right_indices = UInt32BufferBuilder::new(0); // Visit all of the right rows for (row, hash_value) in hash_values.iter().enumerate() { // Get the hash and find it in the build index // For every item on the left and right we check if it matches // This possibly contains rows with hash collisions, // So we have to check here whether rows are equal or not if let Some((_, indices)) = left.0.get(*hash_value, |(hash, _)| *hash_value == *hash) { for &i in indices { // Check hash collisions if equal_rows(i as usize, row, &left_join_values, &keys_values)? { left_indices.append(i); right_indices.append(row as u32); } } } } let left = ArrayData::builder(DataType::UInt64) .len(left_indices.len()) .add_buffer(left_indices.finish()) .build(); let right = ArrayData::builder(DataType::UInt32) .len(right_indices.len()) .add_buffer(right_indices.finish()) .build(); Ok(( PrimitiveArray::<UInt64Type>::from(left), PrimitiveArray::<UInt32Type>::from(right), )) } JoinType::Left => { let mut left_indices = UInt64Builder::new(0); let mut right_indices = UInt32Builder::new(0); // First visit all of the rows for (row, hash_value) in hash_values.iter().enumerate() { if let Some((_, indices)) = left.0.get(*hash_value, |(hash, _)| *hash_value == *hash) { for &i in indices { // Collision check if equal_rows(i as usize, row, &left_join_values, &keys_values)? { left_indices.append_value(i)?; right_indices.append_value(row as u32)?; } } }; } Ok((left_indices.finish(), right_indices.finish())) } JoinType::Right | JoinType::Full => { let mut left_indices = UInt64Builder::new(0); let mut right_indices = UInt32Builder::new(0); for (row, hash_value) in hash_values.iter().enumerate() { match left.0.get(*hash_value, |(hash, _)| *hash_value == *hash) { Some((_, indices)) => { for &i in indices { if equal_rows( i as usize, row, &left_join_values, &keys_values, )? { left_indices.append_value(i)?; } else { left_indices.append_null()?; } right_indices.append_value(row as u32)?; } } None => { // when no match, add the row with None for the left side left_indices.append_null()?; right_indices.append_value(row as u32)?; } } } Ok((left_indices.finish(), right_indices.finish())) } } } macro_rules! 
equal_rows_elem { ($array_type:ident, $l: ident, $r: ident, $left: ident, $right: ident) => {{ let left_array = $l.as_any().downcast_ref::<$array_type>().unwrap(); let right_array = $r.as_any().downcast_ref::<$array_type>().unwrap(); match (left_array.is_null($left), left_array.is_null($right)) { (false, false) => left_array.value($left) == right_array.value($right), _ => false, } }}; } /// Left and right row have equal values fn equal_rows( left: usize, right: usize, left_arrays: &[ArrayRef], right_arrays: &[ArrayRef], ) -> Result<bool> { let mut err = None; let res = left_arrays .iter() .zip(right_arrays) .all(|(l, r)| match l.data_type() { DataType::Null => true, DataType::Boolean => equal_rows_elem!(BooleanArray, l, r, left, right), DataType::Int8 => equal_rows_elem!(Int8Array, l, r, left, right), DataType::Int16 => equal_rows_elem!(Int16Array, l, r, left, right), DataType::Int32 => equal_rows_elem!(Int32Array, l, r, left, right), DataType::Int64 => equal_rows_elem!(Int64Array, l, r, left, right), DataType::UInt8 => equal_rows_elem!(UInt8Array, l, r, left, right), DataType::UInt16 => equal_rows_elem!(UInt16Array, l, r, left, right), DataType::UInt32 => equal_rows_elem!(UInt32Array, l, r, left, right), DataType::UInt64 => equal_rows_elem!(UInt64Array, l, r, left, right), DataType::Timestamp(_, None) => { equal_rows_elem!(Int64Array, l, r, left, right) } DataType::Utf8 => equal_rows_elem!(StringArray, l, r, left, right), DataType::LargeUtf8 => equal_rows_elem!(LargeStringArray, l, r, left, right), _ => { // This is internal because we should have caught this before. err = Some(Err(DataFusionError::Internal( "Unsupported data type in hasher".to_string(), ))); false } }); err.unwrap_or(Ok(res)) } // Produces a batch for left-side rows that have/have not been matched during the whole join fn produce_from_matched( visited_left_side: &[bool], schema: &SchemaRef, column_indices: &[ColumnIndex], left_data: &JoinLeftData, unmatched: bool, ) -> ArrowResult<RecordBatch> { // Find indices which didn't match any right row (are false) let indices = if unmatched { UInt64Array::from_iter_values( visited_left_side .iter() .enumerate() .filter(|&(_, &value)| !value) .map(|(index, _)| index as u64), ) } else { // produce those that did match UInt64Array::from_iter_values( visited_left_side .iter() .enumerate() .filter(|&(_, &value)| value) .map(|(index, _)| index as u64), ) }; // generate batches by taking values from the left side and generating columns filled with null on the right side let num_rows = indices.len(); let mut columns: Vec<Arc<dyn Array>> = Vec::with_capacity(schema.fields().len()); for (idx, column_index) in column_indices.iter().enumerate() { let array = if column_index.is_left { let array = left_data.1.column(column_index.index); compute::take(array.as_ref(), &indices, None).unwrap() } else { let datatype = schema.field(idx).data_type(); arrow::array::new_null_array(datatype, num_rows) }; columns.push(array); } RecordBatch::try_new(schema.clone(), columns) } impl Stream for HashJoinStream { type Item = ArrowResult<RecordBatch>; fn poll_next( mut self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>, ) -> std::task::Poll<Option<Self::Item>> { self.right .poll_next_unpin(cx) .map(|maybe_batch| match maybe_batch { Some(Ok(batch)) => { let start = Instant::now(); let result = build_batch( &batch, &self.left_data, &self.on_left, &self.on_right, self.join_type, &self.schema, &self.column_indices, &self.random_state, ); self.metrics.input_batches.add(1); 
self.metrics.input_rows.add(batch.num_rows()); if let Ok((ref batch, ref left_side)) = result { self.metrics .join_time .add(start.elapsed().as_millis() as usize); self.metrics.output_batches.add(1); self.metrics.output_rows.add(batch.num_rows()); match self.join_type { JoinType::Left | JoinType::Full | JoinType::Semi | JoinType::Anti => { left_side.iter().flatten().for_each(|x| { self.visited_left_side[x as usize] = true; }); } JoinType::Inner | JoinType::Right => {} } } Some(result.map(|x| x.0)) } other => { let start = Instant::now(); // For the left join, produce rows for unmatched rows match self.join_type { JoinType::Left | JoinType::Full | JoinType::Semi | JoinType::Anti if !self.is_exhausted => { let result = produce_from_matched( &self.visited_left_side, &self.schema, &self.column_indices, &self.left_data, self.join_type != JoinType::Semi, ); if let Ok(ref batch) = result { self.metrics.input_batches.add(1); self.metrics.input_rows.add(batch.num_rows()); if let Ok(ref batch) = result { self.metrics .join_time .add(start.elapsed().as_millis() as usize); self.metrics.output_batches.add(1); self.metrics.output_rows.add(batch.num_rows()); } } self.is_exhausted = true; return Some(result); } JoinType::Left | JoinType::Full | JoinType::Semi | JoinType::Anti | JoinType::Inner | JoinType::Right => {} } other } }) } } #[cfg(test)] mod tests { use crate::{ assert_batches_sorted_eq, physical_plan::{ common, expressions::Column, memory::MemoryExec, repartition::RepartitionExec, }, test::{build_table_i32, columns}, }; use super::*; use std::sync::Arc; fn build_table( a: (&str, &Vec<i32>), b: (&str, &Vec<i32>), c: (&str, &Vec<i32>), ) -> Arc<dyn ExecutionPlan> { let batch = build_table_i32(a, b, c); let schema = batch.schema(); Arc::new(MemoryExec::try_new(&[vec![batch]], schema, None).unwrap()) } fn join( left: Arc<dyn ExecutionPlan>, right: Arc<dyn ExecutionPlan>, on: JoinOn, join_type: &JoinType, ) -> Result<HashJoinExec> { HashJoinExec::try_new(left, right, on, join_type, PartitionMode::CollectLeft) } async fn join_collect( left: Arc<dyn ExecutionPlan>, right: Arc<dyn ExecutionPlan>, on: JoinOn, join_type: &JoinType, ) -> Result<(Vec<String>, Vec<RecordBatch>)> { let join = join(left, right, on, join_type)?; let columns = columns(&join.schema()); let stream = join.execute(0).await?; let batches = common::collect(stream).await?; Ok((columns, batches)) } async fn partitioned_join_collect( left: Arc<dyn ExecutionPlan>, right: Arc<dyn ExecutionPlan>, on: JoinOn, join_type: &JoinType, ) -> Result<(Vec<String>, Vec<RecordBatch>)> { let partition_count = 4; let (left_expr, right_expr) = on .iter() .map(|(l, r)| { ( Arc::new(l.clone()) as Arc<dyn PhysicalExpr>, Arc::new(r.clone()) as Arc<dyn PhysicalExpr>, ) }) .unzip(); let join = HashJoinExec::try_new( Arc::new(RepartitionExec::try_new( left, Partitioning::Hash(left_expr, partition_count), )?), Arc::new(RepartitionExec::try_new( right, Partitioning::Hash(right_expr, partition_count), )?), on, join_type, PartitionMode::Partitioned, )?; let columns = columns(&join.schema()); let mut batches = vec![]; for i in 0..partition_count { let stream = join.execute(i).await?; let more_batches = common::collect(stream).await?; batches.extend( more_batches .into_iter() .filter(|b| b.num_rows() > 0) .collect::<Vec<_>>(), ); } Ok((columns, batches)) } #[tokio::test] async fn join_inner_one() -> Result<()> { let left = build_table( ("a1", &vec![1, 2, 3]), ("b1", &vec![4, 5, 5]), // this has a repetition ("c1", &vec![7, 8, 9]), ); let right = build_table( ("a2", 
&vec![10, 20, 30]), ("b1", &vec![4, 5, 6]), ("c2", &vec![70, 80, 90]), ); let on = vec![( Column::new_with_schema("b1", &left.schema())?, Column::new_with_schema("b1", &right.schema())?, )]; let (columns, batches) = join_collect(left.clone(), right.clone(), on.clone(), &JoinType::Inner) .await?; assert_eq!(columns, vec!["a1", "b1", "c1", "a2", "b1", "c2"]); let expected = vec![ "+----+----+----+----+----+----+", "| a1 | b1 | c1 | a2 | b1 | c2 |", "+----+----+----+----+----+----+", "| 1 | 4 | 7 | 10 | 4 | 70 |", "| 2 | 5 | 8 | 20 | 5 | 80 |", "| 3 | 5 | 9 | 20 | 5 | 80 |", "+----+----+----+----+----+----+", ]; assert_batches_sorted_eq!(expected, &batches); Ok(()) } #[tokio::test] async fn partitioned_join_inner_one() -> Result<()> { let left = build_table( ("a1", &vec![1, 2, 3]), ("b1", &vec![4, 5, 5]), // this has a repetition ("c1", &vec![7, 8, 9]), ); let right = build_table( ("a2", &vec![10, 20, 30]), ("b1", &vec![4, 5, 6]), ("c2", &vec![70, 80, 90]), ); let on = vec![( Column::new_with_schema("b1", &left.schema())?, Column::new_with_schema("b1", &right.schema())?, )]; let (columns, batches) = partitioned_join_collect( left.clone(), right.clone(), on.clone(), &JoinType::Inner, ) .await?; assert_eq!(columns, vec!["a1", "b1", "c1", "a2", "b1", "c2"]); let expected = vec![ "+----+----+----+----+----+----+", "| a1 | b1 | c1 | a2 | b1 | c2 |", "+----+----+----+----+----+----+", "| 1 | 4 | 7 | 10 | 4 | 70 |", "| 2 | 5 | 8 | 20 | 5 | 80 |", "| 3 | 5 | 9 | 20 | 5 | 80 |", "+----+----+----+----+----+----+", ]; assert_batches_sorted_eq!(expected, &batches); Ok(()) } #[tokio::test] async fn join_inner_one_no_shared_column_names() -> Result<()> { let left = build_table( ("a1", &vec![1, 2, 3]), ("b1", &vec![4, 5, 5]), // this has a repetition ("c1", &vec![7, 8, 9]), ); let right = build_table( ("a2", &vec![10, 20, 30]), ("b2", &vec![4, 5, 6]), ("c2", &vec![70, 80, 90]), ); let on = vec![( Column::new_with_schema("b1", &left.schema())?, Column::new_with_schema("b2", &right.schema())?, )]; let (columns, batches) = join_collect(left, right, on, &JoinType::Inner).await?; assert_eq!(columns, vec!["a1", "b1", "c1", "a2", "b2", "c2"]); let expected = vec![ "+----+----+----+----+----+----+", "| a1 | b1 | c1 | a2 | b2 | c2 |", "+----+----+----+----+----+----+", "| 1 | 4 | 7 | 10 | 4 | 70 |", "| 2 | 5 | 8 | 20 | 5 | 80 |", "| 3 | 5 | 9 | 20 | 5 | 80 |", "+----+----+----+----+----+----+", ]; assert_batches_sorted_eq!(expected, &batches); Ok(()) } #[tokio::test] async fn join_inner_two() -> Result<()> { let left = build_table( ("a1", &vec![1, 2, 2]), ("b2", &vec![1, 2, 2]), ("c1", &vec![7, 8, 9]), ); let right = build_table( ("a1", &vec![1, 2, 3]), ("b2", &vec![1, 2, 2]), ("c2", &vec![70, 80, 90]), ); let on = vec![ ( Column::new_with_schema("a1", &left.schema())?, Column::new_with_schema("a1", &right.schema())?, ), ( Column::new_with_schema("b2", &left.schema())?, Column::new_with_schema("b2", &right.schema())?, ), ]; let (columns, batches) = join_collect(left, right, on, &JoinType::Inner).await?; assert_eq!(columns, vec!["a1", "b2", "c1", "a1", "b2", "c2"]); assert_eq!(batches.len(), 1); let expected = vec![ "+----+----+----+----+----+----+", "| a1 | b2 | c1 | a1 | b2 | c2 |", "+----+----+----+----+----+----+", "| 1 | 1 | 7 | 1 | 1 | 70 |", "| 2 | 2 | 8 | 2 | 2 | 80 |", "| 2 | 2 | 9 | 2 | 2 | 80 |", "+----+----+----+----+----+----+", ]; assert_batches_sorted_eq!(expected, &batches); Ok(()) } /// Test where the left has 2 parts, the right with 1 part => 1 part #[tokio::test] async fn 
join_inner_one_two_parts_left() -> Result<()> { let batch1 = build_table_i32( ("a1", &vec![1, 2]), ("b2", &vec![1, 2]), ("c1", &vec![7, 8]), ); let batch2 = build_table_i32(("a1", &vec![2]), ("b2", &vec![2]), ("c1", &vec![9])); let schema = batch1.schema(); let left = Arc::new( MemoryExec::try_new(&[vec![batch1], vec![batch2]], schema, None).unwrap(), ); let right = build_table( ("a1", &vec![1, 2, 3]), ("b2", &vec![1, 2, 2]), ("c2", &vec![70, 80, 90]), ); let on = vec![ ( Column::new_with_schema("a1", &left.schema())?, Column::new_with_schema("a1", &right.schema())?, ), ( Column::new_with_schema("b2", &left.schema())?, Column::new_with_schema("b2", &right.schema())?, ), ]; let (columns, batches) = join_collect(left, right, on, &JoinType::Inner).await?; assert_eq!(columns, vec!["a1", "b2", "c1", "a1", "b2", "c2"]); assert_eq!(batches.len(), 1); let expected = vec![ "+----+----+----+----+----+----+", "| a1 | b2 | c1 | a1 | b2 | c2 |", "+----+----+----+----+----+----+", "| 1 | 1 | 7 | 1 | 1 | 70 |", "| 2 | 2 | 8 | 2 | 2 | 80 |", "| 2 | 2 | 9 | 2 | 2 | 80 |", "+----+----+----+----+----+----+", ]; assert_batches_sorted_eq!(expected, &batches); Ok(()) } /// Test where the left has 1 part, the right has 2 parts => 2 parts #[tokio::test] async fn join_inner_one_two_parts_right() -> Result<()> { let left = build_table( ("a1", &vec![1, 2, 3]), ("b1", &vec![4, 5, 5]), // this has a repetition ("c1", &vec![7, 8, 9]), ); let batch1 = build_table_i32( ("a2", &vec![10, 20]), ("b1", &vec![4, 6]), ("c2", &vec![70, 80]), ); let batch2 = build_table_i32(("a2", &vec![30]), ("b1", &vec![5]), ("c2", &vec![90])); let schema = batch1.schema(); let right = Arc::new( MemoryExec::try_new(&[vec![batch1], vec![batch2]], schema, None).unwrap(), ); let on = vec![( Column::new_with_schema("b1", &left.schema())?, Column::new_with_schema("b1", &right.schema())?, )]; let join = join(left, right, on, &JoinType::Inner)?; let columns = columns(&join.schema()); assert_eq!(columns, vec!["a1", "b1", "c1", "a2", "b1", "c2"]); // first part let stream = join.execute(0).await?; let batches = common::collect(stream).await?; assert_eq!(batches.len(), 1); let expected = vec![ "+----+----+----+----+----+----+", "| a1 | b1 | c1 | a2 | b1 | c2 |", "+----+----+----+----+----+----+", "| 1 | 4 | 7 | 10 | 4 | 70 |", "+----+----+----+----+----+----+", ]; assert_batches_sorted_eq!(expected, &batches); // second part let stream = join.execute(1).await?; let batches = common::collect(stream).await?; assert_eq!(batches.len(), 1); let expected = vec![ "+----+----+----+----+----+----+", "| a1 | b1 | c1 | a2 | b1 | c2 |", "+----+----+----+----+----+----+", "| 2 | 5 | 8 | 30 | 5 | 90 |", "| 3 | 5 | 9 | 30 | 5 | 90 |", "+----+----+----+----+----+----+", ]; assert_batches_sorted_eq!(expected, &batches); Ok(()) } fn build_table_two_batches( a: (&str, &Vec<i32>), b: (&str, &Vec<i32>), c: (&str, &Vec<i32>), ) -> Arc<dyn ExecutionPlan> { let batch = build_table_i32(a, b, c); let schema = batch.schema(); Arc::new( MemoryExec::try_new(&[vec![batch.clone(), batch]], schema, None).unwrap(), ) } #[tokio::test] async fn join_left_multi_batch() { let left = build_table( ("a1", &vec![1, 2, 3]), ("b1", &vec![4, 5, 7]), // 7 does not exist on the right ("c1", &vec![7, 8, 9]), ); let right = build_table_two_batches( ("a2", &vec![10, 20, 30]), ("b1", &vec![4, 5, 6]), ("c2", &vec![70, 80, 90]), ); let on = vec![( Column::new_with_schema("b1", &left.schema()).unwrap(), Column::new_with_schema("b1", &right.schema()).unwrap(), )]; let join = join(left, right, on, 
&JoinType::Left).unwrap(); let columns = columns(&join.schema()); assert_eq!(columns, vec!["a1", "b1", "c1", "a2", "b1", "c2"]); let stream = join.execute(0).await.unwrap(); let batches = common::collect(stream).await.unwrap(); let expected = vec![ "+----+----+----+----+----+----+", "| a1 | b1 | c1 | a2 | b1 | c2 |", "+----+----+----+----+----+----+", "| 1 | 4 | 7 | 10 | 4 | 70 |", "| 1 | 4 | 7 | 10 | 4 | 70 |", "| 2 | 5 | 8 | 20 | 5 | 80 |", "| 2 | 5 | 8 | 20 | 5 | 80 |", "| 3 | 7 | 9 | | 7 | |", "+----+----+----+----+----+----+", ]; assert_batches_sorted_eq!(expected, &batches); } #[tokio::test] async fn join_full_multi_batch() { let left = build_table( ("a1", &vec![1, 2, 3]), ("b1", &vec![4, 5, 7]), // 7 does not exist on the right ("c1", &vec![7, 8, 9]), ); // create two identical batches for the right side let right = build_table_two_batches( ("a2", &vec![10, 20, 30]), ("b2", &vec![4, 5, 6]), ("c2", &vec![70, 80, 90]), ); let on = vec![( Column::new_with_schema("b1", &left.schema()).unwrap(), Column::new_with_schema("b2", &right.schema()).unwrap(), )]; let join = join(left, right, on, &JoinType::Full).unwrap(); let columns = columns(&join.schema()); assert_eq!(columns, vec!["a1", "b1", "c1", "a2", "b2", "c2"]); let stream = join.execute(0).await.unwrap(); let batches = common::collect(stream).await.unwrap(); let expected = vec![ "+----+----+----+----+----+----+", "| a1 | b1 | c1 | a2 | b2 | c2 |", "+----+----+----+----+----+----+", "| | | | 30 | 6 | 90 |", "| | | | 30 | 6 | 90 |", "| 1 | 4 | 7 | 10 | 4 | 70 |", "| 1 | 4 | 7 | 10 | 4 | 70 |", "| 2 | 5 | 8 | 20 | 5 | 80 |", "| 2 | 5 | 8 | 20 | 5 | 80 |", "| 3 | 7 | 9 | | | |", "+----+----+----+----+----+----+", ]; assert_batches_sorted_eq!(expected, &batches); } #[tokio::test] async fn join_left_empty_right() { let left = build_table( ("a1", &vec![1, 2, 3]), ("b1", &vec![4, 5, 7]), ("c1", &vec![7, 8, 9]), ); let right = build_table_i32(("a2", &vec![]), ("b1", &vec![]), ("c2", &vec![])); let on = vec![( Column::new_with_schema("b1", &left.schema()).unwrap(), Column::new_with_schema("b1", &right.schema()).unwrap(), )]; let schema = right.schema(); let right = Arc::new(MemoryExec::try_new(&[vec![right]], schema, None).unwrap()); let join = join(left, right, on, &JoinType::Left).unwrap(); let columns = columns(&join.schema()); assert_eq!(columns, vec!["a1", "b1", "c1", "a2", "b1", "c2"]); let stream = join.execute(0).await.unwrap(); let batches = common::collect(stream).await.unwrap(); let expected = vec![ "+----+----+----+----+----+----+", "| a1 | b1 | c1 | a2 | b1 | c2 |", "+----+----+----+----+----+----+", "| 1 | 4 | 7 | | 4 | |", "| 2 | 5 | 8 | | 5 | |", "| 3 | 7 | 9 | | 7 | |", "+----+----+----+----+----+----+", ]; assert_batches_sorted_eq!(expected, &batches); } #[tokio::test] async fn join_full_empty_right() { let left = build_table( ("a1", &vec![1, 2, 3]), ("b1", &vec![4, 5, 7]), ("c1", &vec![7, 8, 9]), ); let right = build_table_i32(("a2", &vec![]), ("b2", &vec![]), ("c2", &vec![])); let on = vec![( Column::new_with_schema("b1", &left.schema()).unwrap(), Column::new_with_schema("b2", &right.schema()).unwrap(), )]; let schema = right.schema(); let right = Arc::new(MemoryExec::try_new(&[vec![right]], schema, None).unwrap()); let join = join(left, right, on, &JoinType::Full).unwrap(); let columns = columns(&join.schema()); assert_eq!(columns, vec!["a1", "b1", "c1", "a2", "b2", "c2"]); let stream = join.execute(0).await.unwrap(); let batches = common::collect(stream).await.unwrap(); let expected = vec![ 
"+----+----+----+----+----+----+", "| a1 | b1 | c1 | a2 | b2 | c2 |", "+----+----+----+----+----+----+", "| 1 | 4 | 7 | | | |", "| 2 | 5 | 8 | | | |", "| 3 | 7 | 9 | | | |", "+----+----+----+----+----+----+", ]; assert_batches_sorted_eq!(expected, &batches); } #[tokio::test] async fn join_left_one() -> Result<()> { let left = build_table( ("a1", &vec![1, 2, 3]), ("b1", &vec![4, 5, 7]), // 7 does not exist on the right ("c1", &vec![7, 8, 9]), ); let right = build_table( ("a2", &vec![10, 20, 30]), ("b1", &vec![4, 5, 6]), ("c2", &vec![70, 80, 90]), ); let on = vec![( Column::new_with_schema("b1", &left.schema())?, Column::new_with_schema("b1", &right.schema())?, )]; let (columns, batches) = join_collect(left.clone(), right.clone(), on.clone(), &JoinType::Left) .await?; assert_eq!(columns, vec!["a1", "b1", "c1", "a2", "b1", "c2"]); let expected = vec![ "+----+----+----+----+----+----+", "| a1 | b1 | c1 | a2 | b1 | c2 |", "+----+----+----+----+----+----+", "| 1 | 4 | 7 | 10 | 4 | 70 |", "| 2 | 5 | 8 | 20 | 5 | 80 |", "| 3 | 7 | 9 | | 7 | |", "+----+----+----+----+----+----+", ]; assert_batches_sorted_eq!(expected, &batches); Ok(()) } #[tokio::test] async fn partitioned_join_left_one() -> Result<()> { let left = build_table( ("a1", &vec![1, 2, 3]), ("b1", &vec![4, 5, 7]), // 7 does not exist on the right ("c1", &vec![7, 8, 9]), ); let right = build_table( ("a2", &vec![10, 20, 30]), ("b1", &vec![4, 5, 6]), ("c2", &vec![70, 80, 90]), ); let on = vec![( Column::new_with_schema("b1", &left.schema())?, Column::new_with_schema("b1", &right.schema())?, )]; let (columns, batches) = partitioned_join_collect( left.clone(), right.clone(), on.clone(), &JoinType::Left, ) .await?; assert_eq!(columns, vec!["a1", "b1", "c1", "a2", "b1", "c2"]); let expected = vec![ "+----+----+----+----+----+----+", "| a1 | b1 | c1 | a2 | b1 | c2 |", "+----+----+----+----+----+----+", "| 1 | 4 | 7 | 10 | 4 | 70 |", "| 2 | 5 | 8 | 20 | 5 | 80 |", "| 3 | 7 | 9 | | 7 | |", "+----+----+----+----+----+----+", ]; assert_batches_sorted_eq!(expected, &batches); Ok(()) } #[tokio::test] async fn join_semi() -> Result<()> { let left = build_table( ("a1", &vec![1, 2, 2, 3]), ("b1", &vec![4, 5, 5, 7]), // 7 does not exist on the right ("c1", &vec![7, 8, 8, 9]), ); let right = build_table( ("a2", &vec![10, 20, 30, 40]), ("b1", &vec![4, 5, 6, 5]), // 5 is double on the right ("c2", &vec![70, 80, 90, 100]), ); let on = vec![( Column::new_with_schema("b1", &left.schema())?, Column::new_with_schema("b1", &right.schema())?, )]; let join = join(left, right, on, &JoinType::Semi)?; let columns = columns(&join.schema()); assert_eq!(columns, vec!["a1", "b1", "c1"]); let stream = join.execute(0).await?; let batches = common::collect(stream).await?; let expected = vec![ "+----+----+----+", "| a1 | b1 | c1 |", "+----+----+----+", "| 1 | 4 | 7 |", "| 2 | 5 | 8 |", "| 2 | 5 | 8 |", "+----+----+----+", ]; assert_batches_sorted_eq!(expected, &batches); Ok(()) } #[tokio::test] async fn join_anti() -> Result<()> { let left = build_table( ("a1", &vec![1, 2, 2, 3, 5]), ("b1", &vec![4, 5, 5, 7, 7]), // 7 does not exist on the right ("c1", &vec![7, 8, 8, 9, 11]), ); let right = build_table( ("a2", &vec![10, 20, 30, 40]), ("b1", &vec![4, 5, 6, 5]), // 5 is double on the right ("c2", &vec![70, 80, 90, 100]), ); let on = vec![( Column::new_with_schema("b1", &left.schema())?, Column::new_with_schema("b1", &right.schema())?, )]; let join = join(left, right, on, &JoinType::Anti)?; let columns = columns(&join.schema()); assert_eq!(columns, vec!["a1", "b1", "c1"]); let 
stream = join.execute(0).await?; let batches = common::collect(stream).await?; let expected = vec![ "+----+----+----+", "| a1 | b1 | c1 |", "+----+----+----+", "| 3 | 7 | 9 |", "| 5 | 7 | 11 |", "+----+----+----+", ]; assert_batches_sorted_eq!(expected, &batches); Ok(()) } #[tokio::test] async fn join_right_one() -> Result<()> { let left = build_table( ("a1", &vec![1, 2, 3]), ("b1", &vec![4, 5, 7]), ("c1", &vec![7, 8, 9]), ); let right = build_table( ("a2", &vec![10, 20, 30]), ("b1", &vec![4, 5, 6]), // 6 does not exist on the left ("c2", &vec![70, 80, 90]), ); let on = vec![( Column::new_with_schema("b1", &left.schema())?, Column::new_with_schema("b1", &right.schema())?, )]; let (columns, batches) = join_collect(left, right, on, &JoinType::Right).await?; assert_eq!(columns, vec!["a1", "b1", "c1", "a2", "b1", "c2"]); let expected = vec![ "+----+----+----+----+----+----+", "| a1 | b1 | c1 | a2 | b1 | c2 |", "+----+----+----+----+----+----+", "| | 6 | | 30 | 6 | 90 |", "| 1 | 4 | 7 | 10 | 4 | 70 |", "| 2 | 5 | 8 | 20 | 5 | 80 |", "+----+----+----+----+----+----+", ]; assert_batches_sorted_eq!(expected, &batches); Ok(()) } #[tokio::test] async fn partitioned_join_right_one() -> Result<()> { let left = build_table( ("a1", &vec![1, 2, 3]), ("b1", &vec![4, 5, 7]), ("c1", &vec![7, 8, 9]), ); let right = build_table( ("a2", &vec![10, 20, 30]), ("b1", &vec![4, 5, 6]), // 6 does not exist on the left ("c2", &vec![70, 80, 90]), ); let on = vec![( Column::new_with_schema("b1", &left.schema())?, Column::new_with_schema("b1", &right.schema())?, )]; let (columns, batches) = partitioned_join_collect(left, right, on, &JoinType::Right).await?; assert_eq!(columns, vec!["a1", "b1", "c1", "a2", "b1", "c2"]); let expected = vec![ "+----+----+----+----+----+----+", "| a1 | b1 | c1 | a2 | b1 | c2 |", "+----+----+----+----+----+----+", "| | 6 | | 30 | 6 | 90 |", "| 1 | 4 | 7 | 10 | 4 | 70 |", "| 2 | 5 | 8 | 20 | 5 | 80 |", "+----+----+----+----+----+----+", ]; assert_batches_sorted_eq!(expected, &batches); Ok(()) } #[tokio::test] async fn join_full_one() -> Result<()> { let left = build_table( ("a1", &vec![1, 2, 3]), ("b1", &vec![4, 5, 7]), // 7 does not exist on the right ("c1", &vec![7, 8, 9]), ); let right = build_table( ("a2", &vec![10, 20, 30]), ("b2", &vec![4, 5, 6]), ("c2", &vec![70, 80, 90]), ); let on = vec![( Column::new_with_schema("b1", &left.schema()).unwrap(), Column::new_with_schema("b2", &right.schema()).unwrap(), )]; let join = join(left, right, on, &JoinType::Full)?; let columns = columns(&join.schema()); assert_eq!(columns, vec!["a1", "b1", "c1", "a2", "b2", "c2"]); let stream = join.execute(0).await?; let batches = common::collect(stream).await?; let expected = vec![ "+----+----+----+----+----+----+", "| a1 | b1 | c1 | a2 | b2 | c2 |", "+----+----+----+----+----+----+", "| | | | 30 | 6 | 90 |", "| 1 | 4 | 7 | 10 | 4 | 70 |", "| 2 | 5 | 8 | 20 | 5 | 80 |", "| 3 | 7 | 9 | | | |", "+----+----+----+----+----+----+", ]; assert_batches_sorted_eq!(expected, &batches); Ok(()) } #[test] fn join_with_hash_collision() -> Result<()> { let mut hashmap_left = RawTable::with_capacity(2); let left = build_table_i32( ("a", &vec![10, 20]), ("x", &vec![100, 200]), ("y", &vec![200, 300]), ); let random_state = RandomState::with_seeds(0, 0, 0, 0); let hashes_buff = &mut vec![0; left.num_rows()]; let hashes = create_hashes(&[left.columns()[0].clone()], &random_state, hashes_buff)?; // Create hash collisions (same hashes) hashmap_left.insert(hashes[0], (hashes[0], smallvec![0, 1]), |(h, _)| *h); 
hashmap_left.insert(hashes[1], (hashes[1], smallvec![0, 1]), |(h, _)| *h); let right = build_table_i32( ("a", &vec![10, 20]), ("b", &vec![0, 0]), ("c", &vec![30, 40]), ); let left_data = JoinLeftData::new((JoinHashMap(hashmap_left), left)); let (l, r) = build_join_indexes( &left_data, &right, JoinType::Inner, &[Column::new("a", 0)], &[Column::new("a", 0)], &random_state, )?; let mut left_ids = UInt64Builder::new(0); left_ids.append_value(0)?; left_ids.append_value(1)?; let mut right_ids = UInt32Builder::new(0); right_ids.append_value(0)?; right_ids.append_value(1)?; assert_eq!(left_ids.finish(), l); assert_eq!(right_ids.finish(), r); Ok(()) } }
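// A minimal, self-contained sketch of the probe-side logic in `build_join_indexes`
// above: hash values can collide (as the `join_with_hash_collision` test exercises),
// so after the hash-table lookup every candidate row must still be compared by value.
// Plain `std` types stand in for the Arrow arrays and the raw hash table here; this
// illustrates the technique, it is not DataFusion's API.
use std::collections::HashMap;

fn hash_key(key: i32) -> u64 {
    // Deliberately weak hash so that collisions actually occur in the demo
    // (assumes non-negative keys).
    (key % 2) as u64
}

fn probe(left_keys: &[i32], right_keys: &[i32]) -> Vec<(usize, usize)> {
    // Build phase: hash -> indices of all left rows with that hash.
    let mut index: HashMap<u64, Vec<usize>> = HashMap::new();
    for (i, key) in left_keys.iter().enumerate() {
        index.entry(hash_key(*key)).or_default().push(i);
    }

    // Probe phase: for each right row, re-check the candidates for real equality,
    // mirroring the `equal_rows` call guarding against hash collisions above.
    let mut pairs = Vec::new();
    for (row, key) in right_keys.iter().enumerate() {
        if let Some(candidates) = index.get(&hash_key(*key)) {
            for &i in candidates {
                if left_keys[i] == *key {
                    pairs.push((i, row));
                }
            }
        }
    }
    pairs
}

fn main() {
    // Keys 1 and 3 collide under the weak hash; only true matches survive.
    assert_eq!(probe(&[1, 2, 3], &[3, 2]), vec![(2, 0), (1, 1)]);
}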
35.063298
160
0.46205
56ec68ff75deed98b798baca88400b74fad95951
3,802
extern crate pkg_config;

use std::process::Command;
use std::path::PathBuf;
use std::env;
use std::fs;

macro_rules! sio {
    ($expr:expr) => { format!("libsoundio-{}", $expr) }
}

macro_rules! err_exists {
    ($expr:expr, $msg:expr) => {
        match $expr {
            Ok(val) => val,
            Err(err) => {
                match err.kind() {
                    ::std::io::ErrorKind::AlreadyExists => (),
                    _ => panic!("{}: {}", $msg, err),
                }
            }
        }
    }
}

fn lib_available(name: &str) -> bool {
    match pkg_config::find_library(name) {
        Ok(_) => true,
        Err(_) => {
            let res = Command::new("ldconfig").arg("--print-cache").output().unwrap();
            if res.status.success() {
                String::from_utf8(res.stdout).unwrap().contains(name)
            } else {
                false
            }
        }
    }
}

fn windows(_: String) {
    // TODO: Use precompiled binary!
    unimplemented!();
}

fn osx(_: String) {
    // TODO: I don't own any Apple hardware to test this.
    unimplemented!();
}

fn linux(target: String) {
    if !lib_available("soundio") {
        build(target);
    }
}

fn main() {
    let target = env::var("TARGET").unwrap();
    if target.contains("windows") {
        windows(target)
    } else if target.contains("apple") {
        osx(target)
    } else {
        // assume the rest is linux
        linux(target)
    }
}

fn sio_url(ext: &'static str) -> String {
    match ext {
        "tar.gz" | "zip" => format!("http://libsound.io/release/{}.{}", sio!("1.1.0"), ext),
        _ => panic!("No release for format: {}", ext),
    }
}

fn build(target: String) {
    let host = env::var("HOST").unwrap();
    let dst_dir = PathBuf::from(env::var("OUT_DIR").unwrap());
    let lib_dir = dst_dir.join("lib");
    let include_dir = dst_dir.join("include");
    err_exists!(fs::create_dir(&lib_dir), &lib_dir.display());
    err_exists!(fs::create_dir(&include_dir), &include_dir.display());

    // set cargo flags
    println!("cargo:rustc-link-lib=dylib={}", "soundio"); // -l
    println!("cargo:rustc-link-search=native={}", &lib_dir.display()); // -L
    println!("cargo:include={}", &include_dir.display());
    println!("cargo:root={}", &dst_dir.display());

    // download and extract libsoundio source
    Command::new("curl")
        .current_dir(&dst_dir)
        .args(&["--location", "--remote-name"])
        .arg(sio_url("tar.gz"))
        .output()
        .unwrap();
    Command::new("tar")
        .current_dir(&dst_dir)
        .arg("-xvzf")
        .arg(format!("{}.{}", sio!("1.1.0"), "tar.gz"))
        .output()
        .unwrap();

    // create build dir
    let soundio_root = dst_dir.join(sio!("1.1.0"));
    let build_dir = soundio_root.join("build");
    err_exists!(fs::create_dir(&build_dir), &build_dir.display());

    // TODO: set build type to release for env var PROFILE={release,bench}
    // run cmake
    Command::new("cmake")
        .current_dir(&build_dir)
        .arg("-DCMAKE_BUILD_TYPE=Debug")
        .arg("-DCMAKE_INSTALL_LIBDIR:PATH=lib")
        .arg(format!("-DCMAKE_INSTALL_PREFIX:PATH={}", &dst_dir.display()))
        .arg("-DBUILD_EXAMPLE_PROGRAMS:BOOL=OFF")
        .arg("-DBUILD_TESTS:BOOL=OFF")
        .arg("-DBUILD_STATIC_LIBS:BOOL=OFF")
        .arg("-DBUILD_SHARED_LIBS:BOOL=ON")
        .arg("-DENABLE_JACK:BOOL=OFF")
        .arg("-DENABLE_PULSEAUDIO:BOOL=OFF")
        .arg("-DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON")
        .arg("..")
        .output()
        .unwrap();

    // make install
    Command::new("make")
        .current_dir(&build_dir)
        .arg("install")
        .output()
        .unwrap();

    // remove build dir
    fs::remove_dir_all(build_dir).unwrap();
}
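// A hedged sketch of the fallback idea behind `lib_available` above: probing for a
// system library without the `pkg_config` crate by shelling out to the `pkg-config`
// binary itself (`pkg-config --exists <name>` is its standard probe flag). The
// library name "soundio" is just the example from this build script.
use std::process::Command;

fn pkg_config_exists(name: &str) -> bool {
    Command::new("pkg-config")
        .arg("--exists")
        .arg(name)
        .status()
        .map(|s| s.success())
        .unwrap_or(false) // pkg-config not installed -> treat the library as missing
}

fn main() {
    if pkg_config_exists("soundio") {
        // In a real build script this line is what cargo picks up for linking.
        println!("cargo:rustc-link-lib=dylib=soundio");
    }
}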
28.162963
92
0.545502
fbaf38d69df7fc2608e92060fe69bdc50b3bcefd
168
// min-llvm-version 8.0
// compile-flags: -C no-prepopulate-passes -C force-unwind-tables=y

#![crate_type="lib"]

// CHECK: attributes #{{.*}} uwtable
pub fn foo() {}
21
67
0.654762
1854d41fd49eeada82344a80a1430f3a1faa1d5d
8,843
//! Types and utilities for manipulating the Wayland protocol use std::{ffi::CString, os::unix::io::RawFd}; pub use wayland_sys::common::{wl_argument, wl_interface, wl_message}; /// Describes whether an argument may have a null value. #[derive(Clone, Copy, PartialEq, Eq, Debug)] pub enum AllowNull { /// Null values are allowed. Yes, /// Null values are forbidden. No, } /// Enum of possible argument types as recognized by the wire #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub enum ArgumentType { /// An integer argument. Represented by a [`i32`]. Int, /// An unsigned integer argument. Represented by a [`u32`]. Uint, /// A signed fixed point number with 1/256 precision Fixed, /// A string. This is represented as a [`CString`] in a message. Str(AllowNull), /// Id of a wayland object Object(AllowNull), /// Id of a newly created wayland object NewId(AllowNull), /// Vec<u8> Array(AllowNull), /// A file descriptor argument. Represented by a [`RawFd`]. Fd, } impl ArgumentType { /// Returns true if the type of the argument is the same. pub fn same_type(self, other: Self) -> bool { std::mem::discriminant(&self) == std::mem::discriminant(&other) } } /// Enum of possible argument of the protocol #[derive(Clone, PartialEq, Eq, Debug)] #[allow(clippy::box_collection)] pub enum Argument<Id> { /// An integer argument. Represented by a [`i32`]. Int(i32), /// An unsigned integer argument. Represented by a [`u32`]. Uint(u32), /// A signed fixed point number with 1/256 precision Fixed(i32), /// CString /// /// The value is boxed to reduce the stack size of Argument. The performance /// impact is negligible as `string` arguments are pretty rare in the protocol. Str(Box<CString>), /// Id of a wayland object Object(Id), /// Id of a newly created wayland object NewId(Id), /// Vec<u8> /// /// The value is boxed to reduce the stack size of Argument. The performance /// impact is negligible as `array` arguments are pretty rare in the protocol. Array(Box<Vec<u8>>), /// A file descriptor argument. Represented by a [`RawFd`]. Fd(RawFd), } impl<Id> Argument<Id> { /// Retrieve the type of a given argument instance pub fn get_type(&self) -> ArgumentType { match *self { Argument::Int(_) => ArgumentType::Int, Argument::Uint(_) => ArgumentType::Uint, Argument::Fixed(_) => ArgumentType::Fixed, Argument::Str(_) => ArgumentType::Str(AllowNull::Yes), Argument::Object(_) => ArgumentType::Object(AllowNull::Yes), Argument::NewId(_) => ArgumentType::NewId(AllowNull::Yes), Argument::Array(_) => ArgumentType::Array(AllowNull::Yes), Argument::Fd(_) => ArgumentType::Fd, } } } #[cfg(not(tarpaulin_include))] impl<Id: std::fmt::Display> std::fmt::Display for Argument<Id> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Argument::Int(value) => write!(f, "{}", value), Argument::Uint(value) => write!(f, "{}", value), Argument::Fixed(value) => write!(f, "{}", value), Argument::Str(value) => write!(f, "{:?}", value), Argument::Object(value) => write!(f, "{}", value), Argument::NewId(value) => write!(f, "{}", value), Argument::Array(value) => write!(f, "{:?}", value), Argument::Fd(value) => write!(f, "{}", value), } } } /// Description of wayland interface. /// /// An interface describes the possible requests and events that a wayland client and compositor use to /// communicate. #[derive(Debug)] pub struct Interface { /// The name of the interface. pub name: &'static str, /// The maximum supported version of the interface. pub version: u32, /// A list that describes every request this interface supports. 
pub requests: &'static [MessageDesc], /// A list that describes every event this interface supports. pub events: &'static [MessageDesc], /// A C representation of this interface that may be used to interoperate with libwayland. pub c_ptr: Option<&'static wayland_sys::common::wl_interface>, } #[cfg(not(tarpaulin_include))] impl std::fmt::Display for Interface { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.write_str(self.name) } } /// Wire metadata of a given message #[derive(Copy, Clone, Debug)] pub struct MessageDesc { /// Name of this message pub name: &'static str, /// Signature of the message pub signature: &'static [ArgumentType], /// Minimum required version of the interface pub since: u32, /// Whether this message is a destructor pub is_destructor: bool, /// The child interface created from this message. /// /// In the wayland xml format, this corresponds to the `new_id` type. pub child_interface: Option<&'static Interface>, /// The interfaces passed into this message as arguments. pub arg_interfaces: &'static [&'static Interface], } /// Special interface representing an anonymous object pub static ANONYMOUS_INTERFACE: Interface = Interface { name: "<anonymous>", version: 0, requests: &[], events: &[], c_ptr: None }; /// Description of the protocol-level information of an object #[derive(Copy, Clone, Debug)] pub struct ObjectInfo { /// The protocol ID pub id: u32, /// The interface pub interface: &'static Interface, /// The version pub version: u32, } /// A protocol error /// /// This kind of error is generated by the server if your client didn't respect /// the protocol, after which the server will kill your connection. #[derive(Clone, Debug)] pub struct ProtocolError { /// The error code associated with the error /// /// It should be interpreted as an instance of the `Error` enum of the /// associated interface. pub code: u32, /// The id of the object that caused the error pub object_id: u32, /// The interface of the object that caused the error pub object_interface: String, /// The message sent by the server describing the error pub message: String, } /// Number of arguments that are stocked inline in a `Message` before allocating /// /// This is a ad-hoc number trying to reach a good balance between avoiding too many allocations /// and keeping the stack size of `Message` small. pub const INLINE_ARGS: usize = 4; /// Represents a message that has been sent from some object. #[derive(Debug, Clone, PartialEq)] pub struct Message<Id> { /// The id of the object that sent the message. pub sender_id: Id, /// The opcode of the message. pub opcode: u16, /// The arguments of the message. pub args: smallvec::SmallVec<[Argument<Id>; INLINE_ARGS]>, } impl std::error::Error for ProtocolError {} #[cfg(not(tarpaulin_include))] impl std::fmt::Display for ProtocolError { fn fmt(&self, f: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> { write!( f, "Protocol error {} on object {}@{}: {}", self.code, self.object_interface, self.object_id, self.message ) } } /// Returns true if the two interfaces are the same. 
#[inline] pub fn same_interface(a: &'static Interface, b: &'static Interface) -> bool { std::ptr::eq(a, b) || a.name == b.name } pub(crate) fn check_for_signature<Id>(signature: &[ArgumentType], args: &[Argument<Id>]) -> bool { if signature.len() != args.len() { return false; } for (typ, arg) in signature.iter().copied().zip(args.iter()) { if !arg.get_type().same_type(typ) { return false; } } true } #[inline] #[allow(dead_code)] pub(crate) fn same_interface_or_anonymous(a: &'static Interface, b: &'static Interface) -> bool { same_interface(a, b) || same_interface(a, &ANONYMOUS_INTERFACE) } /// An enum value in the protocol. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum WEnum<T> { /// The interpreted value Value(T), /// The stored value does not match one defined by the protocol file Unknown(u32), } impl<T: std::convert::TryFrom<u32>> From<u32> for WEnum<T> { /// Constructs an enum from the integer format used by the wayland protocol. fn from(v: u32) -> WEnum<T> { match T::try_from(v) { Ok(t) => WEnum::Value(t), Err(_) => WEnum::Unknown(v), } } } impl<T: Into<u32>> From<WEnum<T>> for u32 { /// Converts an enum into a numerical form used by the wayland protocol. fn from(enu: WEnum<T>) -> u32 { match enu { WEnum::Unknown(u) => u, WEnum::Value(t) => t.into(), } } }
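// A hedged, self-contained sketch of the `WEnum` conversion pattern defined above:
// a protocol-level u32 either maps onto a known enum variant or is preserved as
// `Unknown` instead of failing. `Mode` and `WEnumDemo` are made-up stand-ins, not
// part of the wayland protocol types.
use std::convert::TryFrom;

#[derive(Debug, PartialEq)]
enum Mode {
    Windowed,
    Fullscreen,
}

impl TryFrom<u32> for Mode {
    type Error = ();
    fn try_from(v: u32) -> Result<Self, ()> {
        match v {
            0 => Ok(Mode::Windowed),
            1 => Ok(Mode::Fullscreen),
            _ => Err(()),
        }
    }
}

#[derive(Debug, PartialEq)]
enum WEnumDemo<T> {
    Value(T),
    Unknown(u32),
}

impl<T: TryFrom<u32>> From<u32> for WEnumDemo<T> {
    fn from(v: u32) -> Self {
        match T::try_from(v) {
            Ok(t) => WEnumDemo::Value(t),
            // Unrecognized wire values survive the round trip instead of erroring.
            Err(_) => WEnumDemo::Unknown(v),
        }
    }
}

fn main() {
    assert_eq!(WEnumDemo::<Mode>::from(1), WEnumDemo::Value(Mode::Fullscreen));
    assert_eq!(WEnumDemo::<Mode>::from(7), WEnumDemo::Unknown(7));
}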
33.496212
103
0.63146
2328485a8dce3ca7435ea4e79ed24add749d8dc8
1,278
use crate::communicator::Communicator;
use async_trait::async_trait;
use rcon::Connection;
use serde::Serialize;
use serde_json::{Value, json};
use log::*;

#[derive(Serialize)]
pub struct CSGORcon {
    #[serde(skip)]
    conn: Option<Connection>,
    #[serde(rename = "password/password")]
    password: String,
}

impl CSGORcon {
    pub fn new() -> CSGORcon {
        CSGORcon {
            conn: None,
            password: String::new(),
        }
    }
}

#[async_trait]
impl Communicator for CSGORcon {
    async fn send_cmd(&mut self, cmd: String) -> String {
        if self.conn.is_some() {
            return self.conn.as_mut().unwrap().cmd(cmd.as_str()).await.unwrap();
        }
        return "Not connected to server.".to_string();
    }

    async fn connect(&mut self, address: &str, password: &str) -> Result<(), rcon::Error> {
        let conn = Connection::builder().connect(address, password).await?;
        self.conn = Some(conn);
        Ok(())
    }

    fn settings(&self) -> Value {
        match serde_json::to_value(self) {
            Ok(json) => json,
            Err(err) => {
                error!("could not obtain settings");
                error!("{}", err);
                Value::Null
            }
        }
    }

    fn update_settings(&mut self, _new: Value) -> Result<(), Box<dyn std::error::Error>> {
        Ok(()) // TODO: implement me
    }
}
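// A minimal sketch of the serde attributes used on `CSGORcon` above: `skip` keeps
// the live connection out of the serialized settings, and `rename` controls the
// JSON key. Assumes the `serde` (with the "derive" feature) and `serde_json`
// crates; the field names here are illustrative.
use serde::Serialize;

#[derive(Serialize)]
struct Settings {
    #[serde(skip)]
    live_handle: Option<u32>, // plays the role of `conn`: never serialized
    #[serde(rename = "password/password")]
    password: String,
}

fn main() {
    let s = Settings { live_handle: Some(7), password: "hunter2".into() };
    let json = serde_json::to_value(&s).unwrap();
    assert_eq!(json["password/password"], "hunter2");
    assert!(json.get("live_handle").is_none());
}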
22.821429
89
0.597809
aceec2aafdfa35bc37d051e1efe2142c595a1f41
937
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

// Test that a class with a non-copyable field can't be
// copied

#[derive(Show)]
struct bar {
    x: int,
}

impl Drop for bar {
    fn drop(&mut self) {}
}

fn bar(x: int) -> bar {
    bar { x: x }
}

#[derive(Show)]
struct foo {
    i: int,
    j: bar,
}

fn foo(i: int) -> foo {
    foo { i: i, j: bar(5) }
}

fn main() {
    let x = foo(10);
    let _y = x.clone(); //~ ERROR does not implement any method in scope
    println!("{}", x);
}
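// A hedged modern-Rust illustration of the property this pre-1.0 test checks
// (the test itself uses long-gone `int` and `derive(Show)`): a type with a `Drop`
// impl cannot be `Copy`, so a struct containing it moves rather than copies.
struct Guard;

impl Drop for Guard {
    fn drop(&mut self) {
        println!("dropped");
    }
}

struct Holder {
    _guard: Guard,
}

fn main() {
    let a = Holder { _guard: Guard };
    let b = a; // a *move*: `Holder` is not `Copy` because `Guard` implements `Drop`
    // let c = a; // would fail to compile: use of moved value `a`
    let _ = b;
}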
19.93617
72
0.621131
ede49e5c3a6e3ff254f75c0c986006980cc07ea9
7,533
use crate::error::Tag; use crate::traits::{ErrorKind, Sealed}; macro_rules! define_errors { ($( (struct $id:ident, $code:expr, $tags: expr), )*) => { $( pub struct $id; impl Sealed for $id { const CODE: u32 = $code; const NAME: &'static str = stringify!($id); const TAGS: u32 = $tags; } impl ErrorKind for $id {} )* pub(crate) fn tag_check(code: u32, bit: u32) -> bool { let tag_mask = match code { $( $code => $tags, )* _ => 0, }; return tag_mask & (1 << bit) != 0; } pub(crate) fn error_name(code: u32) -> &'static str { match code { $( $code => stringify!($id), )* _ => "EdgeDBError", } } } } // AUTOGENERATED BY EdgeDB WITH // $ cargo run --bin edgedb_gen_errors -- errors.txt #[allow(unused_macros)] // fake macro for generator macro_rules! define_tag { ($name: ident, $bit: expr) => { pub static $name: Tag = Tag { bit: $bit }; } } // <define_tag> pub static SHOULD_RECONNECT: Tag = Tag { bit: 0 }; pub static SHOULD_RETRY: Tag = Tag { bit: 1 }; // </define_tag> #[allow(unused_macros)] // fake macro for generator macro_rules! define_error { (struct $name: ident, $code: expr, $tag_bits: expr) => { (struct $name, $code, $tag_bits), } } define_errors![ // <define_error> (struct InternalServerError, 0x01000000u32, 0x00000000), (struct UnsupportedFeatureError, 0x02000000u32, 0x00000000), (struct ProtocolError, 0x03000000u32, 0x00000000), (struct BinaryProtocolError, 0x03010000u32, 0x00000000), (struct UnsupportedProtocolVersionError, 0x03010001u32, 0x00000000), (struct TypeSpecNotFoundError, 0x03010002u32, 0x00000000), (struct UnexpectedMessageError, 0x03010003u32, 0x00000000), (struct InputDataError, 0x03020000u32, 0x00000000), (struct ResultCardinalityMismatchError, 0x03030000u32, 0x00000000), (struct CapabilityError, 0x03040000u32, 0x00000000), (struct UnsupportedCapabilityError, 0x03040100u32, 0x00000000), (struct DisabledCapabilityError, 0x03040200u32, 0x00000000), (struct QueryError, 0x04000000u32, 0x00000000), (struct InvalidSyntaxError, 0x04010000u32, 0x00000000), (struct EdgeQLSyntaxError, 0x04010100u32, 0x00000000), (struct SchemaSyntaxError, 0x04010200u32, 0x00000000), (struct GraphQLSyntaxError, 0x04010300u32, 0x00000000), (struct InvalidTypeError, 0x04020000u32, 0x00000000), (struct InvalidTargetError, 0x04020100u32, 0x00000000), (struct InvalidLinkTargetError, 0x04020101u32, 0x00000000), (struct InvalidPropertyTargetError, 0x04020102u32, 0x00000000), (struct InvalidReferenceError, 0x04030000u32, 0x00000000), (struct UnknownModuleError, 0x04030001u32, 0x00000000), (struct UnknownLinkError, 0x04030002u32, 0x00000000), (struct UnknownPropertyError, 0x04030003u32, 0x00000000), (struct UnknownUserError, 0x04030004u32, 0x00000000), (struct UnknownDatabaseError, 0x04030005u32, 0x00000000), (struct UnknownParameterError, 0x04030006u32, 0x00000000), (struct SchemaError, 0x04040000u32, 0x00000000), (struct SchemaDefinitionError, 0x04050000u32, 0x00000000), (struct InvalidDefinitionError, 0x04050100u32, 0x00000000), (struct InvalidModuleDefinitionError, 0x04050101u32, 0x00000000), (struct InvalidLinkDefinitionError, 0x04050102u32, 0x00000000), (struct InvalidPropertyDefinitionError, 0x04050103u32, 0x00000000), (struct InvalidUserDefinitionError, 0x04050104u32, 0x00000000), (struct InvalidDatabaseDefinitionError, 0x04050105u32, 0x00000000), (struct InvalidOperatorDefinitionError, 0x04050106u32, 0x00000000), (struct InvalidAliasDefinitionError, 0x04050107u32, 0x00000000), (struct InvalidFunctionDefinitionError, 0x04050108u32, 0x00000000), (struct InvalidConstraintDefinitionError, 0x04050109u32, 
0x00000000), (struct InvalidCastDefinitionError, 0x0405010Au32, 0x00000000), (struct DuplicateDefinitionError, 0x04050200u32, 0x00000000), (struct DuplicateModuleDefinitionError, 0x04050201u32, 0x00000000), (struct DuplicateLinkDefinitionError, 0x04050202u32, 0x00000000), (struct DuplicatePropertyDefinitionError, 0x04050203u32, 0x00000000), (struct DuplicateUserDefinitionError, 0x04050204u32, 0x00000000), (struct DuplicateDatabaseDefinitionError, 0x04050205u32, 0x00000000), (struct DuplicateOperatorDefinitionError, 0x04050206u32, 0x00000000), (struct DuplicateViewDefinitionError, 0x04050207u32, 0x00000000), (struct DuplicateFunctionDefinitionError, 0x04050208u32, 0x00000000), (struct DuplicateConstraintDefinitionError, 0x04050209u32, 0x00000000), (struct DuplicateCastDefinitionError, 0x0405020Au32, 0x00000000), (struct QueryTimeoutError, 0x04060000u32, 0x00000000), (struct ExecutionError, 0x05000000u32, 0x00000000), (struct InvalidValueError, 0x05010000u32, 0x00000000), (struct DivisionByZeroError, 0x05010001u32, 0x00000000), (struct NumericOutOfRangeError, 0x05010002u32, 0x00000000), (struct IntegrityError, 0x05020000u32, 0x00000000), (struct ConstraintViolationError, 0x05020001u32, 0x00000000), (struct CardinalityViolationError, 0x05020002u32, 0x00000000), (struct MissingRequiredError, 0x05020003u32, 0x00000000), (struct TransactionError, 0x05030000u32, 0x00000000), (struct TransactionConflictError, 0x05030100u32, 0x00000002), (struct TransactionSerializationError, 0x05030101u32, 0x00000002), (struct TransactionDeadlockError, 0x05030102u32, 0x00000002), (struct ConfigurationError, 0x06000000u32, 0x00000000), (struct AccessError, 0x07000000u32, 0x00000000), (struct AuthenticationError, 0x07010000u32, 0x00000000), (struct AvailabilityError, 0x08000000u32, 0x00000000), (struct BackendUnavailableError, 0x08000001u32, 0x00000002), (struct LogMessage, 0xF0000000u32, 0x00000000), (struct WarningMessage, 0xF0010000u32, 0x00000000), (struct ClientError, 0xFF000000u32, 0x00000000), (struct ClientConnectionError, 0xFF010000u32, 0x00000000), (struct ClientConnectionFailedError, 0xFF010100u32, 0x00000000), (struct ClientConnectionFailedTemporarilyError, 0xFF010101u32, 0x00000003), (struct ClientConnectionTimeoutError, 0xFF010200u32, 0x00000003), (struct ClientConnectionClosedError, 0xFF010300u32, 0x00000003), (struct InterfaceError, 0xFF020000u32, 0x00000000), (struct QueryArgumentError, 0xFF020100u32, 0x00000000), (struct MissingArgumentError, 0xFF020101u32, 0x00000000), (struct UnknownArgumentError, 0xFF020102u32, 0x00000000), (struct InvalidArgumentError, 0xFF020103u32, 0x00000000), (struct NoDataError, 0xFF030000u32, 0x00000000), // </define_error> (struct ProtocolTlsError, 0x03FF0000u32, 0x00000000), (struct ProtocolOutOfOrderError, 0x03FE0000u32, 0x00000000), (struct ProtocolEncodingError, 0x03FD0000u32, 0x00000000), (struct PasswordRequired, 0x0701FF00u32, 0x00000000), (struct ClientInconsistentError, 0xFFFF0000u32, 0x00000000), (struct ClientEncodingError, 0xFFFE0000u32, 0x00000000), (struct ClientConnectionEosError, 0xFF01FF00u32, 0x00000000), (struct NoResultExpected, 0xFF02FF00u32, 0x00000000), (struct DescriptorMismatch, 0xFF02FE00u32, 0x00000000), ];
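// A self-contained miniature of the `define_errors!` pattern above: one macro
// invocation generates both the error marker structs and the code -> name lookup
// table. The codes below are arbitrary demo values, not real EdgeDB error codes,
// and the `$code:literal` fragment is used here (the original uses `$code:expr`)
// so the captured tokens can serve as match patterns in this standalone sketch.
macro_rules! define_demo_errors {
    ($( (struct $id:ident, $code:literal), )*) => {
        $( #[allow(dead_code)] pub struct $id; )*

        pub fn error_name(code: u32) -> &'static str {
            match code {
                $( $code => stringify!($id), )*
                _ => "UnknownError",
            }
        }
    }
}

define_demo_errors![
    (struct DemoTimeoutError, 0x0100_0000u32),
    (struct DemoSyntaxError, 0x0200_0000u32),
];

fn main() {
    assert_eq!(error_name(0x0100_0000), "DemoTimeoutError");
    assert_eq!(error_name(0xDEAD_BEEF), "UnknownError");
}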
48.288462
79
0.728395
62276e10c9223015db5f927e6557b1cf0603f2ef
2,292
// Copyright 2021 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use hyper::StatusCode;
use poem::error::Result as PoemResult;
use poem::post;
use poem::web::Json;
use poem::web::Query;
use poem::Endpoint;
use poem::Route;
use serde::Deserialize;

use super::query::HttpQueryRequest;
use super::query::HttpSession;
use super::query::HttpSessionConf;
use super::query::PaginationConf;
use super::HttpQueryContext;
use super::QueryResponse;

#[derive(Deserialize)]
pub struct StatementHandlerParams {
    db: Option<String>,
}

#[poem::handler]
pub async fn statement_handler(
    ctx: &HttpQueryContext,
    sql: String,
    Query(params): Query<StatementHandlerParams>,
) -> PoemResult<Json<QueryResponse>> {
    let http_query_manager = ctx.session_mgr.get_http_query_manager();
    let query_id = http_query_manager.next_query_id();
    let session = HttpSessionConf {
        database: params.db.filter(|x| !x.is_empty()),
        max_idle_time: None,
    };
    let req = HttpQueryRequest {
        sql,
        session: HttpSession::New(session),
        pagination: PaginationConf { wait_time_secs: -1 },
    };
    let query = http_query_manager
        .try_create_query(&query_id, ctx, req)
        .await;
    match query {
        Ok(query) => {
            let resp = query
                .get_response_page(0)
                .await
                .map_err(|err| poem::Error::from_string(err.message(), StatusCode::NOT_FOUND))?;
            http_query_manager.remove_query(&query_id).await;
            Ok(Json(QueryResponse::from_internal(query_id, resp)))
        }
        Err(e) => Ok(Json(QueryResponse::fail_to_start_sql(query_id, &e))),
    }
}

pub fn statement_router() -> impl Endpoint {
    Route::new().at("/", post(statement_handler))
}
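// A tiny illustration of the `Option::filter` idiom used above for the `db`
// query parameter: an empty string is treated the same as an absent parameter.
fn main() {
    let empty: Option<String> = Some(String::new());
    assert_eq!(empty.filter(|x| !x.is_empty()), None);

    let named = Some("default".to_string());
    assert_eq!(named.filter(|x| !x.is_empty()), Some("default".to_string()));
}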
31.833333
96
0.679756
8fbc363b482d91277d22bf7339a49b2159c9e1c4
4,238
use futures_core::ready; use futures_core::task::{Context, Poll}; #[cfg(feature = "read-initializer")] use futures_io::Initializer; use futures_io::{AsyncBufRead, AsyncRead, IoSliceMut}; use pin_project::pin_project; use std::fmt; use std::io; use std::pin::Pin; /// Reader for the [`chain`](super::AsyncReadExt::chain) method. #[pin_project] #[must_use = "readers do nothing unless polled"] pub struct Chain<T, U> { #[pin] first: T, #[pin] second: U, done_first: bool, } impl<T, U> Chain<T, U> where T: AsyncRead, U: AsyncRead, { pub(super) fn new(first: T, second: U) -> Self { Self { first, second, done_first: false, } } /// Gets references to the underlying readers in this `Chain`. pub fn get_ref(&self) -> (&T, &U) { (&self.first, &self.second) } /// Gets mutable references to the underlying readers in this `Chain`. /// /// Care should be taken to avoid modifying the internal I/O state of the /// underlying readers as doing so may corrupt the internal state of this /// `Chain`. pub fn get_mut(&mut self) -> (&mut T, &mut U) { (&mut self.first, &mut self.second) } /// Gets pinned mutable references to the underlying readers in this `Chain`. /// /// Care should be taken to avoid modifying the internal I/O state of the /// underlying readers as doing so may corrupt the internal state of this /// `Chain`. pub fn get_pin_mut(self: Pin<&mut Self>) -> (Pin<&mut T>, Pin<&mut U>) { let this = self.project(); (this.first, this.second) } /// Consumes the `Chain`, returning the wrapped readers. pub fn into_inner(self) -> (T, U) { (self.first, self.second) } } impl<T, U> fmt::Debug for Chain<T, U> where T: fmt::Debug, U: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Chain") .field("t", &self.first) .field("u", &self.second) .field("done_first", &self.done_first) .finish() } } impl<T, U> AsyncRead for Chain<T, U> where T: AsyncRead, U: AsyncRead, { fn poll_read( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut [u8], ) -> Poll<io::Result<usize>> { let this = self.project(); if !*this.done_first { match ready!(this.first.poll_read(cx, buf)?) { 0 if !buf.is_empty() => *this.done_first = true, n => return Poll::Ready(Ok(n)), } } this.second.poll_read(cx, buf) } fn poll_read_vectored( self: Pin<&mut Self>, cx: &mut Context<'_>, bufs: &mut [IoSliceMut<'_>], ) -> Poll<io::Result<usize>> { let this = self.project(); if !*this.done_first { let n = ready!(this.first.poll_read_vectored(cx, bufs)?); if n == 0 && bufs.iter().any(|b| !b.is_empty()) { *this.done_first = true } else { return Poll::Ready(Ok(n)); } } this.second.poll_read_vectored(cx, bufs) } #[cfg(feature = "read-initializer")] unsafe fn initializer(&self) -> Initializer { let initializer = self.first.initializer(); if initializer.should_initialize() { initializer } else { self.second.initializer() } } } impl<T, U> AsyncBufRead for Chain<T, U> where T: AsyncBufRead, U: AsyncBufRead, { fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<&[u8]>> { let this = self.project(); if !*this.done_first { match ready!(this.first.poll_fill_buf(cx)?) { buf if buf.is_empty() => { *this.done_first = true; } buf => return Poll::Ready(Ok(buf)), } } this.second.poll_fill_buf(cx) } fn consume(self: Pin<&mut Self>, amt: usize) { let this = self.project(); if !*this.done_first { this.first.consume(amt) } else { this.second.consume(amt) } } }
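// A hedged usage sketch for the `Chain` reader defined above, via the public
// `AsyncReadExt::chain` combinator. Assumes the `futures` crate (0.3) with its
// default "executor" feature; `&[u8]` implements `AsyncRead`, so no real I/O is
// needed to see the hand-off from the first reader to the second.
use futures::executor::block_on;
use futures::io::AsyncReadExt;

fn main() {
    block_on(async {
        let first: &[u8] = b"hello ";
        let second: &[u8] = b"world";

        // Reads from `first` until EOF, then transparently continues with `second`,
        // which is exactly what `done_first` tracks in the implementation above.
        let mut chained = first.chain(second);
        let mut out = Vec::new();
        chained.read_to_end(&mut out).await.unwrap();
        assert_eq!(out, b"hello world");
    });
}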
26.822785
93
0.542001
1ae8ca1e0058cbb59a006017fc30f9aa4bb75849
54,091
//! A bunch of useful utilities for building networks of nodes and exchanging messages between //! nodes for functional tests. use chain::chaininterface; use chain::transaction::OutPoint; use chain::keysinterface::KeysInterface; use ln::channelmanager::{ChannelManager,RAACommitmentOrder, PaymentPreimage, PaymentHash}; use ln::router::{Route, Router}; use ln::features::InitFeatures; use ln::msgs; use ln::msgs::{ChannelMessageHandler,RoutingMessageHandler}; use util::enforcing_trait_impls::EnforcingChannelKeys; use util::test_utils; use util::test_utils::TestChannelMonitor; use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider}; use util::errors::APIError; use util::logger::Logger; use util::config::UserConfig; use bitcoin::util::hash::BitcoinHash; use bitcoin::blockdata::block::BlockHeader; use bitcoin::blockdata::transaction::{Transaction, TxOut}; use bitcoin::network::constants::Network; use bitcoin_hashes::sha256::Hash as Sha256; use bitcoin_hashes::sha256d::Hash as Sha256d; use bitcoin_hashes::Hash; use secp256k1::Secp256k1; use secp256k1::key::PublicKey; use rand::{thread_rng,Rng}; use std::cell::RefCell; use std::rc::Rc; use std::sync::{Arc, Mutex}; use std::mem; use std::collections::HashSet; pub const CHAN_CONFIRM_DEPTH: u32 = 100; pub fn confirm_transaction<'a, 'b: 'a>(notifier: &'a chaininterface::BlockNotifierRef<'b>, chain: &chaininterface::ChainWatchInterfaceUtil, tx: &Transaction, chan_id: u32) { assert!(chain.does_match_tx(tx)); let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; notifier.block_connected_checked(&header, 1, &[tx; 1], &[chan_id; 1]); for i in 2..CHAN_CONFIRM_DEPTH { header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; notifier.block_connected_checked(&header, i, &vec![], &[0; 0]); } } pub fn connect_blocks<'a, 'b>(notifier: &'a chaininterface::BlockNotifierRef<'b>, depth: u32, height: u32, parent: bool, prev_blockhash: Sha256d) -> Sha256d { let mut header = BlockHeader { version: 0x2000000, prev_blockhash: if parent { prev_blockhash } else { Default::default() }, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; notifier.block_connected_checked(&header, height + 1, &Vec::new(), &Vec::new()); for i in 2..depth + 1 { header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; notifier.block_connected_checked(&header, height + i, &Vec::new(), &Vec::new()); } header.bitcoin_hash() } pub struct NodeCfg { pub chain_monitor: Arc<chaininterface::ChainWatchInterfaceUtil>, pub tx_broadcaster: Arc<test_utils::TestBroadcaster>, pub fee_estimator: Arc<test_utils::TestFeeEstimator>, pub chan_monitor: test_utils::TestChannelMonitor, pub keys_manager: Arc<test_utils::TestKeysInterface>, pub logger: Arc<test_utils::TestLogger>, pub node_seed: [u8; 32], } pub struct Node<'a, 'b: 'a> { pub block_notifier: chaininterface::BlockNotifierRef<'b>, pub chain_monitor: Arc<chaininterface::ChainWatchInterfaceUtil>, pub tx_broadcaster: Arc<test_utils::TestBroadcaster>, pub chan_monitor: &'b test_utils::TestChannelMonitor, pub keys_manager: Arc<test_utils::TestKeysInterface>, pub node: &'a ChannelManager<EnforcingChannelKeys, &'b TestChannelMonitor>, pub router: Router, pub node_seed: [u8; 32], pub network_payment_count: Rc<RefCell<u8>>, pub network_chan_count: 
Rc<RefCell<u32>>,
	pub logger: Arc<test_utils::TestLogger>
}

impl<'a, 'b> Drop for Node<'a, 'b> {
	fn drop(&mut self) {
		if !::std::thread::panicking() {
			// Check that we processed all pending events
			assert!(self.node.get_and_clear_pending_msg_events().is_empty());
			assert!(self.node.get_and_clear_pending_events().is_empty());
			assert!(self.chan_monitor.added_monitors.lock().unwrap().is_empty());
		}
	}
}

pub fn create_chan_between_nodes<'a, 'b, 'c>(node_a: &'a Node<'b, 'c>, node_b: &'a Node<'b, 'c>, a_flags: InitFeatures, b_flags: InitFeatures) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
	create_chan_between_nodes_with_value(node_a, node_b, 100000, 10001, a_flags, b_flags)
}

pub fn create_chan_between_nodes_with_value<'a, 'b, 'c>(node_a: &'a Node<'b, 'c>, node_b: &'a Node<'b, 'c>, channel_value: u64, push_msat: u64, a_flags: InitFeatures, b_flags: InitFeatures) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
	let (funding_locked, channel_id, tx) = create_chan_between_nodes_with_value_a(node_a, node_b, channel_value, push_msat, a_flags, b_flags);
	let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(node_a, node_b, &funding_locked);
	(announcement, as_update, bs_update, channel_id, tx)
}

macro_rules! get_revoke_commit_msgs {
	($node: expr, $node_id: expr) => {
		{
			let events = $node.node.get_and_clear_pending_msg_events();
			assert_eq!(events.len(), 2);
			(match events[0] {
				MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
					assert_eq!(*node_id, $node_id);
					(*msg).clone()
				},
				_ => panic!("Unexpected event"),
			}, match events[1] {
				MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
					assert_eq!(*node_id, $node_id);
					assert!(updates.update_add_htlcs.is_empty());
					assert!(updates.update_fulfill_htlcs.is_empty());
					assert!(updates.update_fail_htlcs.is_empty());
					assert!(updates.update_fail_malformed_htlcs.is_empty());
					assert!(updates.update_fee.is_none());
					updates.commitment_signed.clone()
				},
				_ => panic!("Unexpected event"),
			})
		}
	}
}

macro_rules! get_event_msg {
	($node: expr, $event_type: path, $node_id: expr) => {
		{
			let events = $node.node.get_and_clear_pending_msg_events();
			assert_eq!(events.len(), 1);
			match events[0] {
				$event_type { ref node_id, ref msg } => {
					assert_eq!(*node_id, $node_id);
					(*msg).clone()
				},
				_ => panic!("Unexpected event"),
			}
		}
	}
}

macro_rules! get_htlc_update_msgs {
	($node: expr, $node_id: expr) => {
		{
			let events = $node.node.get_and_clear_pending_msg_events();
			assert_eq!(events.len(), 1);
			match events[0] {
				MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
					assert_eq!(*node_id, $node_id);
					(*updates).clone()
				},
				_ => panic!("Unexpected event"),
			}
		}
	}
}

macro_rules! get_feerate {
	($node: expr, $channel_id: expr) => {
		{
			let chan_lock = $node.node.channel_state.lock().unwrap();
			let chan = chan_lock.by_id.get(&$channel_id).unwrap();
			chan.get_feerate()
		}
	}
}

pub fn create_funding_transaction<'a, 'b>(node: &Node<'a, 'b>, expected_chan_value: u64, expected_user_chan_id: u64) -> ([u8; 32], Transaction, OutPoint) {
	let chan_id = *node.network_chan_count.borrow();
	let events = node.node.get_and_clear_pending_events();
	assert_eq!(events.len(), 1);
	match events[0] {
		Event::FundingGenerationReady { ref temporary_channel_id, ref channel_value_satoshis, ref output_script, user_channel_id } => {
			assert_eq!(*channel_value_satoshis, expected_chan_value);
			assert_eq!(user_channel_id, expected_user_chan_id);
			let tx = Transaction { version: chan_id as u32, lock_time: 0, input: Vec::new(), output: vec![TxOut {
				value: *channel_value_satoshis, script_pubkey: output_script.clone(),
			}]};
			let funding_outpoint = OutPoint::new(tx.txid(), 0);
			(*temporary_channel_id, tx, funding_outpoint)
		},
		_ => panic!("Unexpected event"),
	}
}

pub fn create_chan_between_nodes_with_value_init<'a, 'b>(node_a: &Node<'a, 'b>, node_b: &Node<'a, 'b>, channel_value: u64, push_msat: u64, a_flags: InitFeatures, b_flags: InitFeatures) -> Transaction {
	node_a.node.create_channel(node_b.node.get_our_node_id(), channel_value, push_msat, 42).unwrap();
	node_b.node.handle_open_channel(&node_a.node.get_our_node_id(), a_flags, &get_event_msg!(node_a, MessageSendEvent::SendOpenChannel, node_b.node.get_our_node_id()));
	node_a.node.handle_accept_channel(&node_b.node.get_our_node_id(), b_flags, &get_event_msg!(node_b, MessageSendEvent::SendAcceptChannel, node_a.node.get_our_node_id()));

	let (temporary_channel_id, tx, funding_output) = create_funding_transaction(node_a, channel_value, 42);

	{
		node_a.node.funding_transaction_generated(&temporary_channel_id, funding_output);
		let mut added_monitors = node_a.chan_monitor.added_monitors.lock().unwrap();
		assert_eq!(added_monitors.len(), 1);
		assert_eq!(added_monitors[0].0, funding_output);
		added_monitors.clear();
	}

	node_b.node.handle_funding_created(&node_a.node.get_our_node_id(), &get_event_msg!(node_a, MessageSendEvent::SendFundingCreated, node_b.node.get_our_node_id()));
	{
		let mut added_monitors = node_b.chan_monitor.added_monitors.lock().unwrap();
		assert_eq!(added_monitors.len(), 1);
		assert_eq!(added_monitors[0].0, funding_output);
		added_monitors.clear();
	}

	node_a.node.handle_funding_signed(&node_b.node.get_our_node_id(), &get_event_msg!(node_b, MessageSendEvent::SendFundingSigned, node_a.node.get_our_node_id()));
	{
		let mut added_monitors = node_a.chan_monitor.added_monitors.lock().unwrap();
		assert_eq!(added_monitors.len(), 1);
		assert_eq!(added_monitors[0].0, funding_output);
		added_monitors.clear();
	}

	let events_4 = node_a.node.get_and_clear_pending_events();
	assert_eq!(events_4.len(), 1);
	match events_4[0] {
		Event::FundingBroadcastSafe { ref funding_txo, user_channel_id } => {
			assert_eq!(user_channel_id, 42);
			assert_eq!(*funding_txo, funding_output);
		},
		_ => panic!("Unexpected event"),
	};

	tx
}

pub fn create_chan_between_nodes_with_value_confirm_first<'a, 'b, 'c>(node_recv: &'a Node<'a, 'b>, node_conf: &'a Node<'a, 'b>, tx: &Transaction) {
	confirm_transaction(&node_conf.block_notifier, &node_conf.chain_monitor, &tx, tx.version);
	node_recv.node.handle_funding_locked(&node_conf.node.get_our_node_id(), &get_event_msg!(node_conf, MessageSendEvent::SendFundingLocked, node_recv.node.get_our_node_id()));
}

pub fn create_chan_between_nodes_with_value_confirm_second<'a, 'b>(node_recv: &Node<'a, 'b>, node_conf: &Node<'a, 'b>) -> ((msgs::FundingLocked, msgs::AnnouncementSignatures), [u8; 32]) {
	let channel_id;
	let events_6 = node_conf.node.get_and_clear_pending_msg_events();
	assert_eq!(events_6.len(), 2);
	((match events_6[0] {
		MessageSendEvent::SendFundingLocked { ref node_id, ref msg } => {
			channel_id = msg.channel_id.clone();
			assert_eq!(*node_id, node_recv.node.get_our_node_id());
			msg.clone()
		},
		_ => panic!("Unexpected event"),
	}, match events_6[1] {
		MessageSendEvent::SendAnnouncementSignatures { ref node_id, ref msg } => {
			assert_eq!(*node_id, node_recv.node.get_our_node_id());
			msg.clone()
		},
		_ => panic!("Unexpected event"),
	}), channel_id)
}

pub fn create_chan_between_nodes_with_value_confirm<'a, 'b, 'c>(node_a: &'a Node<'b, 'c>, node_b: &'a Node<'b, 'c>, tx: &Transaction) -> ((msgs::FundingLocked, msgs::AnnouncementSignatures), [u8; 32]) {
	create_chan_between_nodes_with_value_confirm_first(node_a, node_b, tx);
	confirm_transaction(&node_a.block_notifier, &node_a.chain_monitor, &tx, tx.version);
	create_chan_between_nodes_with_value_confirm_second(node_b, node_a)
}

pub fn create_chan_between_nodes_with_value_a<'a, 'b, 'c>(node_a: &'a Node<'b, 'c>, node_b: &'a Node<'b, 'c>, channel_value: u64, push_msat: u64, a_flags: InitFeatures, b_flags: InitFeatures) -> ((msgs::FundingLocked, msgs::AnnouncementSignatures), [u8; 32], Transaction) {
	let tx = create_chan_between_nodes_with_value_init(node_a, node_b, channel_value, push_msat, a_flags, b_flags);
	let (msgs, chan_id) = create_chan_between_nodes_with_value_confirm(node_a, node_b, &tx);
	(msgs, chan_id, tx)
}

pub fn create_chan_between_nodes_with_value_b<'a, 'b>(node_a: &Node<'a, 'b>, node_b: &Node<'a, 'b>, as_funding_msgs: &(msgs::FundingLocked, msgs::AnnouncementSignatures)) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate) {
	node_b.node.handle_funding_locked(&node_a.node.get_our_node_id(), &as_funding_msgs.0);
	let bs_announcement_sigs = get_event_msg!(node_b, MessageSendEvent::SendAnnouncementSignatures, node_a.node.get_our_node_id());
	node_b.node.handle_announcement_signatures(&node_a.node.get_our_node_id(), &as_funding_msgs.1);

	let events_7 = node_b.node.get_and_clear_pending_msg_events();
	assert_eq!(events_7.len(), 1);
	let (announcement, bs_update) = match events_7[0] {
		MessageSendEvent::BroadcastChannelAnnouncement { ref msg, ref update_msg } => {
			(msg, update_msg)
		},
		_ => panic!("Unexpected event"),
	};

	node_a.node.handle_announcement_signatures(&node_b.node.get_our_node_id(), &bs_announcement_sigs);
	let events_8 = node_a.node.get_and_clear_pending_msg_events();
	assert_eq!(events_8.len(), 1);
	let as_update = match events_8[0] {
		MessageSendEvent::BroadcastChannelAnnouncement { ref msg, ref update_msg } => {
			assert!(*announcement == *msg);
			assert_eq!(update_msg.contents.short_channel_id, announcement.contents.short_channel_id);
			assert_eq!(update_msg.contents.short_channel_id, bs_update.contents.short_channel_id);
			update_msg
		},
		_ => panic!("Unexpected event"),
	};

	*node_a.network_chan_count.borrow_mut() += 1;

	((*announcement).clone(), (*as_update).clone(), (*bs_update).clone())
}

pub fn create_announced_chan_between_nodes<'a, 'b, 'c>(nodes: &'a Vec<Node<'b, 'c>>, a: usize, b: usize, a_flags: InitFeatures, b_flags: InitFeatures) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
	create_announced_chan_between_nodes_with_value(nodes, a, b, 100000, 10001, a_flags, b_flags)
}

pub fn create_announced_chan_between_nodes_with_value<'a, 'b, 'c>(nodes: &'a Vec<Node<'b, 'c>>, a: usize, b: usize, channel_value: u64, push_msat: u64, a_flags: InitFeatures, b_flags: InitFeatures) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
	let chan_announcement = create_chan_between_nodes_with_value(&nodes[a], &nodes[b], channel_value, push_msat, a_flags, b_flags);
	for node in nodes {
		assert!(node.router.handle_channel_announcement(&chan_announcement.0).unwrap());
		node.router.handle_channel_update(&chan_announcement.1).unwrap();
		node.router.handle_channel_update(&chan_announcement.2).unwrap();
	}
	(chan_announcement.1, chan_announcement.2, chan_announcement.3, chan_announcement.4)
}

macro_rules! check_spends {
	($tx: expr, $spends_tx: expr) => {
		{
			$tx.verify(|out_point| {
				if out_point.txid == $spends_tx.txid() {
					$spends_tx.output.get(out_point.vout as usize).cloned()
				} else {
					None
				}
			}).unwrap();
		}
	}
}

macro_rules! get_closing_signed_broadcast {
	($node: expr, $dest_pubkey: expr) => {
		{
			let events = $node.get_and_clear_pending_msg_events();
			assert!(events.len() == 1 || events.len() == 2);
			(match events[events.len() - 1] {
				MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
					assert_eq!(msg.contents.flags & 2, 2);
					msg.clone()
				},
				_ => panic!("Unexpected event"),
			}, if events.len() == 2 {
				match events[0] {
					MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => {
						assert_eq!(*node_id, $dest_pubkey);
						Some(msg.clone())
					},
					_ => panic!("Unexpected event"),
				}
			} else { None })
		}
	}
}

macro_rules! check_closed_broadcast {
	($node: expr, $with_error_msg: expr) => {{
		let events = $node.node.get_and_clear_pending_msg_events();
		assert_eq!(events.len(), if $with_error_msg { 2 } else { 1 });
		match events[0] {
			MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
				assert_eq!(msg.contents.flags & 2, 2);
			},
			_ => panic!("Unexpected event"),
		}
		if $with_error_msg {
			match events[1] {
				MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
					// TODO: Check node_id
					Some(msg.clone())
				},
				_ => panic!("Unexpected event"),
			}
		} else { None }
	}}
}

pub fn close_channel<'a, 'b>(outbound_node: &Node<'a, 'b>, inbound_node: &Node<'a, 'b>, channel_id: &[u8; 32], funding_tx: Transaction, close_inbound_first: bool) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, Transaction) {
	let (node_a, broadcaster_a, struct_a) = if close_inbound_first { (&inbound_node.node, &inbound_node.tx_broadcaster, inbound_node) } else { (&outbound_node.node, &outbound_node.tx_broadcaster, outbound_node) };
	let (node_b, broadcaster_b) = if close_inbound_first { (&outbound_node.node, &outbound_node.tx_broadcaster) } else { (&inbound_node.node, &inbound_node.tx_broadcaster) };
	let (tx_a, tx_b);

	node_a.close_channel(channel_id).unwrap();
	node_b.handle_shutdown(&node_a.get_our_node_id(), &get_event_msg!(struct_a, MessageSendEvent::SendShutdown, node_b.get_our_node_id()));

	let events_1 = node_b.get_and_clear_pending_msg_events();
	assert!(events_1.len() >= 1);
	let shutdown_b = match events_1[0] {
		MessageSendEvent::SendShutdown { ref node_id, ref msg } => {
			assert_eq!(node_id, &node_a.get_our_node_id());
			msg.clone()
		},
		_ => panic!("Unexpected event"),
	};

	let closing_signed_b = if !close_inbound_first {
		assert_eq!(events_1.len(), 1);
		None
	} else {
		Some(match events_1[1] {
			MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => {
				assert_eq!(node_id, &node_a.get_our_node_id());
				msg.clone()
			},
			_ => panic!("Unexpected event"),
		})
	};

	node_a.handle_shutdown(&node_b.get_our_node_id(), &shutdown_b);
	let (as_update, bs_update) = if close_inbound_first {
		assert!(node_a.get_and_clear_pending_msg_events().is_empty());
		node_a.handle_closing_signed(&node_b.get_our_node_id(), &closing_signed_b.unwrap());
		assert_eq!(broadcaster_a.txn_broadcasted.lock().unwrap().len(), 1);
		tx_a = broadcaster_a.txn_broadcasted.lock().unwrap().remove(0);
		let (as_update, closing_signed_a) = get_closing_signed_broadcast!(node_a, node_b.get_our_node_id());

		node_b.handle_closing_signed(&node_a.get_our_node_id(), &closing_signed_a.unwrap());
		let (bs_update, none_b) = get_closing_signed_broadcast!(node_b, node_a.get_our_node_id());
		assert!(none_b.is_none());
		assert_eq!(broadcaster_b.txn_broadcasted.lock().unwrap().len(), 1);
		tx_b = broadcaster_b.txn_broadcasted.lock().unwrap().remove(0);
		(as_update, bs_update)
	} else {
		let closing_signed_a = get_event_msg!(struct_a, MessageSendEvent::SendClosingSigned, node_b.get_our_node_id());

		node_b.handle_closing_signed(&node_a.get_our_node_id(), &closing_signed_a);
		assert_eq!(broadcaster_b.txn_broadcasted.lock().unwrap().len(), 1);
		tx_b = broadcaster_b.txn_broadcasted.lock().unwrap().remove(0);
		let (bs_update, closing_signed_b) = get_closing_signed_broadcast!(node_b, node_a.get_our_node_id());

		node_a.handle_closing_signed(&node_b.get_our_node_id(), &closing_signed_b.unwrap());
		let (as_update, none_a) = get_closing_signed_broadcast!(node_a, node_b.get_our_node_id());
		assert!(none_a.is_none());
		assert_eq!(broadcaster_a.txn_broadcasted.lock().unwrap().len(), 1);
		tx_a = broadcaster_a.txn_broadcasted.lock().unwrap().remove(0);
		(as_update, bs_update)
	};
	assert_eq!(tx_a, tx_b);
	check_spends!(tx_a, funding_tx);
	(as_update, bs_update, tx_a)
}

pub struct SendEvent {
	pub node_id: PublicKey,
	pub msgs: Vec<msgs::UpdateAddHTLC>,
	pub commitment_msg: msgs::CommitmentSigned,
}

impl SendEvent {
	pub fn from_commitment_update(node_id: PublicKey, updates: msgs::CommitmentUpdate) -> SendEvent {
		assert!(updates.update_fulfill_htlcs.is_empty());
		assert!(updates.update_fail_htlcs.is_empty());
		assert!(updates.update_fail_malformed_htlcs.is_empty());
		assert!(updates.update_fee.is_none());
		SendEvent { node_id: node_id, msgs: updates.update_add_htlcs, commitment_msg: updates.commitment_signed }
	}

	pub fn from_event(event: MessageSendEvent) -> SendEvent {
		match event {
			MessageSendEvent::UpdateHTLCs { node_id, updates } => SendEvent::from_commitment_update(node_id, updates),
			_ => panic!("Unexpected event type!"),
		}
	}

	pub fn from_node<'a, 'b>(node: &Node<'a, 'b>) -> SendEvent {
		let mut events = node.node.get_and_clear_pending_msg_events();
		assert_eq!(events.len(), 1);
		SendEvent::from_event(events.pop().unwrap())
	}
}

macro_rules! check_added_monitors {
	($node: expr, $count: expr) => {
		{
			let mut added_monitors = $node.chan_monitor.added_monitors.lock().unwrap();
			assert_eq!(added_monitors.len(), $count);
			added_monitors.clear();
		}
	}
}

macro_rules! commitment_signed_dance {
	($node_a: expr, $node_b: expr, $commitment_signed: expr, $fail_backwards: expr, true /* skip last step */) => {
		{
			check_added_monitors!($node_a, 0);
			assert!($node_a.node.get_and_clear_pending_msg_events().is_empty());
			$node_a.node.handle_commitment_signed(&$node_b.node.get_our_node_id(), &$commitment_signed);
			check_added_monitors!($node_a, 1);
			commitment_signed_dance!($node_a, $node_b, (), $fail_backwards, true, false);
		}
	};
	($node_a: expr, $node_b: expr, (), $fail_backwards: expr, true /* skip last step */, true /* return extra message */, true /* return last RAA */) => {
		{
			let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!($node_a, $node_b.node.get_our_node_id());
			check_added_monitors!($node_b, 0);
			assert!($node_b.node.get_and_clear_pending_msg_events().is_empty());
			$node_b.node.handle_revoke_and_ack(&$node_a.node.get_our_node_id(), &as_revoke_and_ack);
			assert!($node_b.node.get_and_clear_pending_msg_events().is_empty());
			check_added_monitors!($node_b, 1);
			$node_b.node.handle_commitment_signed(&$node_a.node.get_our_node_id(), &as_commitment_signed);
			let (bs_revoke_and_ack, extra_msg_option) = {
				let events = $node_b.node.get_and_clear_pending_msg_events();
				assert!(events.len() <= 2);
				(match events[0] {
					MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
						assert_eq!(*node_id, $node_a.node.get_our_node_id());
						(*msg).clone()
					},
					_ => panic!("Unexpected event"),
				}, events.get(1).map(|e| e.clone()))
			};
			check_added_monitors!($node_b, 1);
			if $fail_backwards {
				assert!($node_a.node.get_and_clear_pending_events().is_empty());
				assert!($node_a.node.get_and_clear_pending_msg_events().is_empty());
			}
			(extra_msg_option, bs_revoke_and_ack)
		}
	};
	($node_a: expr, $node_b: expr, $commitment_signed: expr, $fail_backwards: expr, true /* skip last step */, false /* return extra message */, true /* return last RAA */) => {
		{
			check_added_monitors!($node_a, 0);
			assert!($node_a.node.get_and_clear_pending_msg_events().is_empty());
			$node_a.node.handle_commitment_signed(&$node_b.node.get_our_node_id(), &$commitment_signed);
			check_added_monitors!($node_a, 1);
			let (extra_msg_option, bs_revoke_and_ack) = commitment_signed_dance!($node_a, $node_b, (), $fail_backwards, true, true, true);
			assert!(extra_msg_option.is_none());
			bs_revoke_and_ack
		}
	};
	($node_a: expr, $node_b: expr, (), $fail_backwards: expr, true /* skip last step */, true /* return extra message */) => {
		{
			let (extra_msg_option, bs_revoke_and_ack) = commitment_signed_dance!($node_a, $node_b, (), $fail_backwards, true, true, true);
			$node_a.node.handle_revoke_and_ack(&$node_b.node.get_our_node_id(), &bs_revoke_and_ack);
			check_added_monitors!($node_a, 1);
			extra_msg_option
		}
	};
	($node_a: expr, $node_b: expr, (), $fail_backwards: expr, true /* skip last step */, false /* no extra message */) => {
		{
			assert!(commitment_signed_dance!($node_a, $node_b, (), $fail_backwards, true, true).is_none());
		}
	};
	($node_a: expr, $node_b: expr, $commitment_signed: expr, $fail_backwards: expr) => {
		{
			commitment_signed_dance!($node_a, $node_b, $commitment_signed, $fail_backwards, true);
			if $fail_backwards {
				expect_pending_htlcs_forwardable!($node_a);
				check_added_monitors!($node_a, 1);

				let channel_state = $node_a.node.channel_state.lock().unwrap();
				assert_eq!(channel_state.pending_msg_events.len(), 1);
				if let MessageSendEvent::UpdateHTLCs { ref node_id, .. } = channel_state.pending_msg_events[0] {
					assert_ne!(*node_id, $node_b.node.get_our_node_id());
				} else { panic!("Unexpected event"); }
			} else {
				assert!($node_a.node.get_and_clear_pending_msg_events().is_empty());
			}
		}
	}
}

macro_rules! get_payment_preimage_hash {
	($node: expr) => {
		{
			let payment_preimage = PaymentPreimage([*$node.network_payment_count.borrow(); 32]);
			*$node.network_payment_count.borrow_mut() += 1;
			let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner());
			(payment_preimage, payment_hash)
		}
	}
}

macro_rules! expect_pending_htlcs_forwardable {
	($node: expr) => {{
		let events = $node.node.get_and_clear_pending_events();
		assert_eq!(events.len(), 1);
		match events[0] {
			Event::PendingHTLCsForwardable { .. } => { },
			_ => panic!("Unexpected event"),
		};
		$node.node.process_pending_htlc_forwards();
	}}
}

macro_rules! expect_payment_received {
	($node: expr, $expected_payment_hash: expr, $expected_recv_value: expr) => {
		let events = $node.node.get_and_clear_pending_events();
		assert_eq!(events.len(), 1);
		match events[0] {
			Event::PaymentReceived { ref payment_hash, amt } => {
				assert_eq!($expected_payment_hash, *payment_hash);
				assert_eq!($expected_recv_value, amt);
			},
			_ => panic!("Unexpected event"),
		}
	}
}

macro_rules! expect_payment_sent {
	($node: expr, $expected_payment_preimage: expr) => {
		let events = $node.node.get_and_clear_pending_events();
		assert_eq!(events.len(), 1);
		match events[0] {
			Event::PaymentSent { ref payment_preimage } => {
				assert_eq!($expected_payment_preimage, *payment_preimage);
			},
			_ => panic!("Unexpected event"),
		}
	}
}

pub fn send_along_route_with_hash<'a, 'b>(origin_node: &Node<'a, 'b>, route: Route, expected_route: &[&Node<'a, 'b>], recv_value: u64, our_payment_hash: PaymentHash) {
	let mut payment_event = {
		origin_node.node.send_payment(route, our_payment_hash).unwrap();
		check_added_monitors!(origin_node, 1);

		let mut events = origin_node.node.get_and_clear_pending_msg_events();
		assert_eq!(events.len(), 1);
		SendEvent::from_event(events.remove(0))
	};
	let mut prev_node = origin_node;

	for (idx, &node) in expected_route.iter().enumerate() {
		assert_eq!(node.node.get_our_node_id(), payment_event.node_id);

		node.node.handle_update_add_htlc(&prev_node.node.get_our_node_id(), &payment_event.msgs[0]);
		check_added_monitors!(node, 0);
		commitment_signed_dance!(node, prev_node, payment_event.commitment_msg, false);

		expect_pending_htlcs_forwardable!(node);

		if idx == expected_route.len() - 1 {
			let events_2 = node.node.get_and_clear_pending_events();
			assert_eq!(events_2.len(), 1);
			match events_2[0] {
				Event::PaymentReceived { ref payment_hash, amt } => {
					assert_eq!(our_payment_hash, *payment_hash);
					assert_eq!(amt, recv_value);
				},
				_ => panic!("Unexpected event"),
			}
		} else {
			let mut events_2 = node.node.get_and_clear_pending_msg_events();
			assert_eq!(events_2.len(), 1);
			check_added_monitors!(node, 1);
			payment_event = SendEvent::from_event(events_2.remove(0));
			assert_eq!(payment_event.msgs.len(), 1);
		}

		prev_node = node;
	}
}

pub fn send_along_route<'a, 'b>(origin_node: &Node<'a, 'b>, route: Route, expected_route: &[&Node<'a, 'b>], recv_value: u64) -> (PaymentPreimage, PaymentHash) {
	let (our_payment_preimage, our_payment_hash) = get_payment_preimage_hash!(origin_node);
	send_along_route_with_hash(origin_node, route, expected_route, recv_value, our_payment_hash);
	(our_payment_preimage, our_payment_hash)
}

pub fn claim_payment_along_route<'a, 'b>(origin_node: &Node<'a, 'b>, expected_route: &[&Node<'a, 'b>], skip_last: bool, our_payment_preimage: PaymentPreimage, expected_amount: u64) {
	assert!(expected_route.last().unwrap().node.claim_funds(our_payment_preimage, expected_amount));
	check_added_monitors!(expected_route.last().unwrap(), 1);

	let mut next_msgs: Option<(msgs::UpdateFulfillHTLC, msgs::CommitmentSigned)> = None;
	let mut expected_next_node = expected_route.last().unwrap().node.get_our_node_id();
	macro_rules! get_next_msgs {
		($node: expr) => {
			{
				let events = $node.node.get_and_clear_pending_msg_events();
				assert_eq!(events.len(), 1);
				match events[0] {
					MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
						assert!(update_add_htlcs.is_empty());
						assert_eq!(update_fulfill_htlcs.len(), 1);
						assert!(update_fail_htlcs.is_empty());
						assert!(update_fail_malformed_htlcs.is_empty());
						assert!(update_fee.is_none());
						expected_next_node = node_id.clone();
						Some((update_fulfill_htlcs[0].clone(), commitment_signed.clone()))
					},
					_ => panic!("Unexpected event"),
				}
			}
		}
	}

	macro_rules! last_update_fulfill_dance {
		($node: expr, $prev_node: expr) => {
			{
				$node.node.handle_update_fulfill_htlc(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0);
				check_added_monitors!($node, 0);
				assert!($node.node.get_and_clear_pending_msg_events().is_empty());
				commitment_signed_dance!($node, $prev_node, next_msgs.as_ref().unwrap().1, false);
			}
		}
	}
	macro_rules! mid_update_fulfill_dance {
		($node: expr, $prev_node: expr, $new_msgs: expr) => {
			{
				$node.node.handle_update_fulfill_htlc(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0);
				check_added_monitors!($node, 1);
				let new_next_msgs = if $new_msgs {
					get_next_msgs!($node)
				} else {
					assert!($node.node.get_and_clear_pending_msg_events().is_empty());
					None
				};
				commitment_signed_dance!($node, $prev_node, next_msgs.as_ref().unwrap().1, false);
				next_msgs = new_next_msgs;
			}
		}
	}

	let mut prev_node = expected_route.last().unwrap();
	for (idx, node) in expected_route.iter().rev().enumerate() {
		assert_eq!(expected_next_node, node.node.get_our_node_id());
		let update_next_msgs = !skip_last || idx != expected_route.len() - 1;
		if next_msgs.is_some() {
			mid_update_fulfill_dance!(node, prev_node, update_next_msgs);
		} else if update_next_msgs {
			next_msgs = get_next_msgs!(node);
		} else {
			assert!(node.node.get_and_clear_pending_msg_events().is_empty());
		}
		if !skip_last && idx == expected_route.len() - 1 {
			assert_eq!(expected_next_node, origin_node.node.get_our_node_id());
		}

		prev_node = node;
	}

	if !skip_last {
		last_update_fulfill_dance!(origin_node, expected_route.first().unwrap());
		expect_payment_sent!(origin_node, our_payment_preimage);
	}
}

pub fn claim_payment<'a, 'b>(origin_node: &Node<'a, 'b>, expected_route: &[&Node<'a, 'b>], our_payment_preimage: PaymentPreimage, expected_amount: u64) {
	claim_payment_along_route(origin_node, expected_route, false, our_payment_preimage, expected_amount);
}

pub const TEST_FINAL_CLTV: u32 = 32;

pub fn route_payment<'a, 'b>(origin_node: &Node<'a, 'b>, expected_route: &[&Node<'a, 'b>], recv_value: u64) -> (PaymentPreimage, PaymentHash) {
	let route = origin_node.router.get_route(&expected_route.last().unwrap().node.get_our_node_id(), None, &Vec::new(), recv_value, TEST_FINAL_CLTV).unwrap();
	assert_eq!(route.hops.len(), expected_route.len());
	for (node, hop) in expected_route.iter().zip(route.hops.iter()) {
		assert_eq!(hop.pubkey, node.node.get_our_node_id());
	}

	send_along_route(origin_node, route, expected_route, recv_value)
}

pub fn route_over_limit<'a, 'b>(origin_node: &Node<'a, 'b>, expected_route: &[&Node<'a, 'b>], recv_value: u64) {
	let route = origin_node.router.get_route(&expected_route.last().unwrap().node.get_our_node_id(), None, &Vec::new(), recv_value, TEST_FINAL_CLTV).unwrap();
	assert_eq!(route.hops.len(), expected_route.len());
	for (node, hop) in expected_route.iter().zip(route.hops.iter()) {
		assert_eq!(hop.pubkey, node.node.get_our_node_id());
	}

	let (_, our_payment_hash) = get_payment_preimage_hash!(origin_node);

	let err = origin_node.node.send_payment(route, our_payment_hash).err().unwrap();
	match err {
		APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over the max HTLC value in flight our peer will accept"),
		_ => panic!("Unknown error variants"),
	};
}

pub fn send_payment<'a, 'b>(origin: &Node<'a, 'b>, expected_route: &[&Node<'a, 'b>], recv_value: u64, expected_value: u64) {
	let our_payment_preimage = route_payment(&origin, expected_route, recv_value).0;
	claim_payment(&origin, expected_route, our_payment_preimage, expected_value);
}

pub fn fail_payment_along_route<'a, 'b>(origin_node: &Node<'a, 'b>, expected_route: &[&Node<'a, 'b>], skip_last: bool, our_payment_hash: PaymentHash) {
	assert!(expected_route.last().unwrap().node.fail_htlc_backwards(&our_payment_hash));
	expect_pending_htlcs_forwardable!(expected_route.last().unwrap());
	check_added_monitors!(expected_route.last().unwrap(), 1);

	let mut next_msgs: Option<(msgs::UpdateFailHTLC, msgs::CommitmentSigned)> = None;
	macro_rules! update_fail_dance {
		($node: expr, $prev_node: expr, $last_node: expr) => {
			{
				$node.node.handle_update_fail_htlc(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0);
				commitment_signed_dance!($node, $prev_node, next_msgs.as_ref().unwrap().1, !$last_node);
				if skip_last && $last_node {
					expect_pending_htlcs_forwardable!($node);
				}
			}
		}
	}

	let mut expected_next_node = expected_route.last().unwrap().node.get_our_node_id();
	let mut prev_node = expected_route.last().unwrap();
	for (idx, node) in expected_route.iter().rev().enumerate() {
		assert_eq!(expected_next_node, node.node.get_our_node_id());
		if next_msgs.is_some() {
			// We may be the "last node" for the purpose of the commitment dance if we're
			// skipping the last node (implying it is disconnected) and we're the
			// second-to-last node!
			update_fail_dance!(node, prev_node, skip_last && idx == expected_route.len() - 1);
		}

		let events = node.node.get_and_clear_pending_msg_events();
		if !skip_last || idx != expected_route.len() - 1 {
			assert_eq!(events.len(), 1);
			match events[0] {
				MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
					assert!(update_add_htlcs.is_empty());
					assert!(update_fulfill_htlcs.is_empty());
					assert_eq!(update_fail_htlcs.len(), 1);
					assert!(update_fail_malformed_htlcs.is_empty());
					assert!(update_fee.is_none());
					expected_next_node = node_id.clone();
					next_msgs = Some((update_fail_htlcs[0].clone(), commitment_signed.clone()));
				},
				_ => panic!("Unexpected event"),
			}
		} else {
			assert!(events.is_empty());
		}
		if !skip_last && idx == expected_route.len() - 1 {
			assert_eq!(expected_next_node, origin_node.node.get_our_node_id());
		}

		prev_node = node;
	}

	if !skip_last {
		update_fail_dance!(origin_node, expected_route.first().unwrap(), true);

		let events = origin_node.node.get_and_clear_pending_events();
		assert_eq!(events.len(), 1);
		match events[0] {
			Event::PaymentFailed { payment_hash, rejected_by_dest, .. } => {
				assert_eq!(payment_hash, our_payment_hash);
				assert!(rejected_by_dest);
			},
			_ => panic!("Unexpected event"),
		}
	}
}

pub fn fail_payment<'a, 'b>(origin_node: &Node<'a, 'b>, expected_route: &[&Node<'a, 'b>], our_payment_hash: PaymentHash) {
	fail_payment_along_route(origin_node, expected_route, false, our_payment_hash);
}

pub fn create_node_cfgs(node_count: usize) -> Vec<NodeCfg> {
	let mut nodes = Vec::new();
	let mut rng = thread_rng();

	for i in 0..node_count {
		let logger = Arc::new(test_utils::TestLogger::with_id(format!("node {}", i)));
		let fee_estimator = Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 });
		let chain_monitor = Arc::new(chaininterface::ChainWatchInterfaceUtil::new(Network::Testnet, logger.clone() as Arc<Logger>));
		let tx_broadcaster = Arc::new(test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new()), broadcasted_txn: Mutex::new(HashSet::new())});
		let mut seed = [0; 32];
		rng.fill_bytes(&mut seed);
		let keys_manager = Arc::new(test_utils::TestKeysInterface::new(&seed, Network::Testnet, logger.clone() as Arc<Logger>));
		let chan_monitor = test_utils::TestChannelMonitor::new(chain_monitor.clone(), tx_broadcaster.clone(), logger.clone(), fee_estimator.clone());
		nodes.push(NodeCfg { chain_monitor, logger, tx_broadcaster, fee_estimator, chan_monitor, keys_manager, node_seed: seed });
	}

	nodes
}

pub fn create_node_chanmgrs<'a, 'b>(node_count: usize, cfgs: &'a Vec<NodeCfg>, node_config: &[Option<UserConfig>]) -> Vec<ChannelManager<EnforcingChannelKeys, &'a TestChannelMonitor>> {
	let mut chanmgrs = Vec::new();
	for i in 0..node_count {
		let mut default_config = UserConfig::default();
		default_config.channel_options.announced_channel = true;
		default_config.peer_channel_config_limits.force_announced_channel_preference = false;
		let node = ChannelManager::new(Network::Testnet, cfgs[i].fee_estimator.clone(), &cfgs[i].chan_monitor, cfgs[i].tx_broadcaster.clone(), cfgs[i].logger.clone(), cfgs[i].keys_manager.clone(), if node_config[i].is_some() { node_config[i].clone().unwrap() } else { default_config }, 0).unwrap();
		chanmgrs.push(node);
	}

	chanmgrs
}

pub fn create_network<'a, 'b>(node_count: usize, cfgs: &'a Vec<NodeCfg>, chan_mgrs: &'b Vec<ChannelManager<EnforcingChannelKeys, &'a TestChannelMonitor>>) -> Vec<Node<'a, 'b>> {
	let secp_ctx = Secp256k1::new();
	let mut nodes = Vec::new();
	let chan_count = Rc::new(RefCell::new(0));
	let payment_count = Rc::new(RefCell::new(0));

	for i in 0..node_count {
		let block_notifier = chaininterface::BlockNotifier::new(cfgs[i].chain_monitor.clone());
		block_notifier.register_listener(&cfgs[i].chan_monitor.simple_monitor as &chaininterface::ChainListener);
		block_notifier.register_listener(&chan_mgrs[i] as &chaininterface::ChainListener);
		let router = Router::new(PublicKey::from_secret_key(&secp_ctx, &cfgs[i].keys_manager.get_node_secret()), cfgs[i].chain_monitor.clone(), cfgs[i].logger.clone() as Arc<Logger>);
		nodes.push(Node{ chain_monitor: cfgs[i].chain_monitor.clone(), block_notifier,
			tx_broadcaster: cfgs[i].tx_broadcaster.clone(), chan_monitor: &cfgs[i].chan_monitor,
			keys_manager: cfgs[i].keys_manager.clone(), node: &chan_mgrs[i], router,
			node_seed: cfgs[i].node_seed, network_chan_count: chan_count.clone(),
			network_payment_count: payment_count.clone(), logger: cfgs[i].logger.clone(),
		})
	}

	nodes
}

pub const ACCEPTED_HTLC_SCRIPT_WEIGHT: usize = 138; // Here we have a diff due to HTLC CLTV expiry being < 2^15 in test
pub const OFFERED_HTLC_SCRIPT_WEIGHT: usize = 133;

#[derive(PartialEq)]
pub enum HTLCType { NONE, TIMEOUT, SUCCESS }

/// Tests that the given node has broadcast transactions for the given Channel
///
/// First checks that the latest local commitment tx has been broadcast, unless an explicit
/// commitment_tx is provided, which may be used to test that a remote commitment tx was
/// broadcast and the revoked outputs were claimed.
///
/// Next tests that there is (or is not) a transaction that spends the commitment transaction
/// that appears to be the type of HTLC transaction specified in has_htlc_tx.
///
/// All broadcast transactions must be accounted for in one of the above three types or we'll
/// also fail.
pub fn test_txn_broadcast<'a, 'b>(node: &Node<'a, 'b>, chan: &(msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction), commitment_tx: Option<Transaction>, has_htlc_tx: HTLCType) -> Vec<Transaction> {
	let mut node_txn = node.tx_broadcaster.txn_broadcasted.lock().unwrap();
	assert!(node_txn.len() >= if commitment_tx.is_some() { 0 } else { 1 } + if has_htlc_tx == HTLCType::NONE { 0 } else { 1 });

	let mut res = Vec::with_capacity(2);
	node_txn.retain(|tx| {
		if tx.input.len() == 1 && tx.input[0].previous_output.txid == chan.3.txid() {
			check_spends!(tx, chan.3.clone());
			if commitment_tx.is_none() {
				res.push(tx.clone());
			}
			false
		} else { true }
	});
	if let Some(explicit_tx) = commitment_tx {
		res.push(explicit_tx.clone());
	}

	assert_eq!(res.len(), 1);

	if has_htlc_tx != HTLCType::NONE {
		node_txn.retain(|tx| {
			if tx.input.len() == 1 && tx.input[0].previous_output.txid == res[0].txid() {
				check_spends!(tx, res[0].clone());
				if has_htlc_tx == HTLCType::TIMEOUT {
					assert!(tx.lock_time != 0);
				} else {
					assert!(tx.lock_time == 0);
				}
				res.push(tx.clone());
				false
			} else { true }
		});
		assert!(res.len() == 2 || res.len() == 3);
		if res.len() == 3 {
			assert_eq!(res[1], res[2]);
		}
	}

	assert!(node_txn.is_empty());
	res
}

/// Tests that the given node has broadcast a claim transaction against the provided revoked
/// HTLC transaction.
pub fn test_revoked_htlc_claim_txn_broadcast<'a, 'b>(node: &Node<'a, 'b>, revoked_tx: Transaction, commitment_revoked_tx: Transaction) {
	let mut node_txn = node.tx_broadcaster.txn_broadcasted.lock().unwrap();
	// We should issue a 2nd transaction if one htlc is dropped from initial claiming tx
	// but sometimes not as feerate is too-low
	if node_txn.len() != 1 && node_txn.len() != 2 { assert!(false); }
	node_txn.retain(|tx| {
		if tx.input.len() == 1 && tx.input[0].previous_output.txid == revoked_tx.txid() {
			check_spends!(tx, revoked_tx);
			false
		} else { true }
	});
	node_txn.retain(|tx| {
		check_spends!(tx, commitment_revoked_tx);
		false
	});
	assert!(node_txn.is_empty());
}

pub fn check_preimage_claim<'a, 'b>(node: &Node<'a, 'b>, prev_txn: &Vec<Transaction>) -> Vec<Transaction> {
	let mut node_txn = node.tx_broadcaster.txn_broadcasted.lock().unwrap();

	assert!(node_txn.len() >= 1);
	assert_eq!(node_txn[0].input.len(), 1);
	let mut found_prev = false;

	for tx in prev_txn {
		if node_txn[0].input[0].previous_output.txid == tx.txid() {
			check_spends!(node_txn[0], tx.clone());
			assert!(node_txn[0].input[0].witness[2].len() > 106); // must spend an htlc output
			assert_eq!(tx.input.len(), 1); // must spend a commitment tx

			found_prev = true;
			break;
		}
	}
	assert!(found_prev);

	let mut res = Vec::new();
	mem::swap(&mut *node_txn, &mut res);
	res
}

pub fn get_announce_close_broadcast_events<'a, 'b>(nodes: &Vec<Node<'a, 'b>>, a: usize, b: usize) {
	let events_1 = nodes[a].node.get_and_clear_pending_msg_events();
	assert_eq!(events_1.len(), 1);
	let as_update = match events_1[0] {
		MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
			msg.clone()
		},
		_ => panic!("Unexpected event"),
	};

	let events_2 = nodes[b].node.get_and_clear_pending_msg_events();
	assert_eq!(events_2.len(), 1);
	let bs_update = match events_2[0] {
		MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
			msg.clone()
		},
		_ => panic!("Unexpected event"),
	};

	for node in nodes {
		node.router.handle_channel_update(&as_update).unwrap();
		node.router.handle_channel_update(&bs_update).unwrap();
	}
}

macro_rules! get_channel_value_stat {
	($node: expr, $channel_id: expr) => {{
		let chan_lock = $node.node.channel_state.lock().unwrap();
		let chan = chan_lock.by_id.get(&$channel_id).unwrap();
		chan.get_value_stat()
	}}
}

macro_rules! get_chan_reestablish_msgs {
	($src_node: expr, $dst_node: expr) => {
		{
			let mut res = Vec::with_capacity(1);
			for msg in $src_node.node.get_and_clear_pending_msg_events() {
				if let MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } = msg {
					assert_eq!(*node_id, $dst_node.node.get_our_node_id());
					res.push(msg.clone());
				} else {
					panic!("Unexpected event")
				}
			}
			res
		}
	}
}

macro_rules! handle_chan_reestablish_msgs {
	($src_node: expr, $dst_node: expr) => {
		{
			let msg_events = $src_node.node.get_and_clear_pending_msg_events();
			let mut idx = 0;
			let funding_locked = if let Some(&MessageSendEvent::SendFundingLocked { ref node_id, ref msg }) = msg_events.get(0) {
				idx += 1;
				assert_eq!(*node_id, $dst_node.node.get_our_node_id());
				Some(msg.clone())
			} else { None };

			let mut revoke_and_ack = None;
			let mut commitment_update = None;
			let order = if let Some(ev) = msg_events.get(idx) {
				idx += 1;
				match ev {
					&MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
						assert_eq!(*node_id, $dst_node.node.get_our_node_id());
						revoke_and_ack = Some(msg.clone());
						RAACommitmentOrder::RevokeAndACKFirst
					},
					&MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
						assert_eq!(*node_id, $dst_node.node.get_our_node_id());
						commitment_update = Some(updates.clone());
						RAACommitmentOrder::CommitmentFirst
					},
					_ => panic!("Unexpected event"),
				}
			} else {
				RAACommitmentOrder::CommitmentFirst
			};

			if let Some(ev) = msg_events.get(idx) {
				match ev {
					&MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
						assert_eq!(*node_id, $dst_node.node.get_our_node_id());
						assert!(revoke_and_ack.is_none());
						revoke_and_ack = Some(msg.clone());
					},
					&MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
						assert_eq!(*node_id, $dst_node.node.get_our_node_id());
						assert!(commitment_update.is_none());
						commitment_update = Some(updates.clone());
					},
					_ => panic!("Unexpected event"),
				}
			}

			(funding_locked, revoke_and_ack, commitment_update, order)
		}
	}
}

/// pending_htlc_adds includes both the holding cell and in-flight update_add_htlcs, whereas
/// for claims/fails they are separated out.
pub fn reconnect_nodes<'a, 'b>(node_a: &Node<'a, 'b>, node_b: &Node<'a, 'b>, send_funding_locked: (bool, bool), pending_htlc_adds: (i64, i64), pending_htlc_claims: (usize, usize), pending_cell_htlc_claims: (usize, usize), pending_cell_htlc_fails: (usize, usize), pending_raa: (bool, bool)) {
	node_a.node.peer_connected(&node_b.node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
	let reestablish_1 = get_chan_reestablish_msgs!(node_a, node_b);
	node_b.node.peer_connected(&node_a.node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
	let reestablish_2 = get_chan_reestablish_msgs!(node_b, node_a);

	if send_funding_locked.0 {
		// If a expects a funding_locked, it better not think it has received a revoke_and_ack
		// from b
		for reestablish in reestablish_1.iter() {
			assert_eq!(reestablish.next_remote_commitment_number, 0);
		}
	}
	if send_funding_locked.1 {
		// If b expects a funding_locked, it better not think it has received a revoke_and_ack
		// from a
		for reestablish in reestablish_2.iter() {
			assert_eq!(reestablish.next_remote_commitment_number, 0);
		}
	}
	if send_funding_locked.0 || send_funding_locked.1 {
		// If we expect any funding_locked's, both sides better have set
		// next_local_commitment_number to 1
		for reestablish in reestablish_1.iter() {
			assert_eq!(reestablish.next_local_commitment_number, 1);
		}
		for reestablish in reestablish_2.iter() {
			assert_eq!(reestablish.next_local_commitment_number, 1);
		}
	}

	let mut resp_1 = Vec::new();
	for msg in reestablish_1 {
		node_b.node.handle_channel_reestablish(&node_a.node.get_our_node_id(), &msg);
		resp_1.push(handle_chan_reestablish_msgs!(node_b, node_a));
	}
	if pending_cell_htlc_claims.0 != 0 || pending_cell_htlc_fails.0 != 0 {
		check_added_monitors!(node_b, 1);
	} else {
		check_added_monitors!(node_b, 0);
	}

	let mut resp_2 = Vec::new();
	for msg in reestablish_2 {
		node_a.node.handle_channel_reestablish(&node_b.node.get_our_node_id(), &msg);
		resp_2.push(handle_chan_reestablish_msgs!(node_a, node_b));
	}
	if pending_cell_htlc_claims.1 != 0 || pending_cell_htlc_fails.1 != 0 {
		check_added_monitors!(node_a, 1);
	} else {
		check_added_monitors!(node_a, 0);
	}

	// We don't yet support both needing updates, as that would require a different commitment dance:
	assert!((pending_htlc_adds.0 == 0 && pending_htlc_claims.0 == 0 && pending_cell_htlc_claims.0 == 0 && pending_cell_htlc_fails.0 == 0) ||
			(pending_htlc_adds.1 == 0 && pending_htlc_claims.1 == 0 && pending_cell_htlc_claims.1 == 0 && pending_cell_htlc_fails.1 == 0));

	for chan_msgs in resp_1.drain(..) {
		if send_funding_locked.0 {
			node_a.node.handle_funding_locked(&node_b.node.get_our_node_id(), &chan_msgs.0.unwrap());
			let announcement_event = node_a.node.get_and_clear_pending_msg_events();
			if !announcement_event.is_empty() {
				assert_eq!(announcement_event.len(), 1);
				if let MessageSendEvent::SendAnnouncementSignatures { .. } = announcement_event[0] {
					//TODO: Test announcement_sigs re-sending
				} else { panic!("Unexpected event!"); }
			}
		} else {
			assert!(chan_msgs.0.is_none());
		}
		if pending_raa.0 {
			assert!(chan_msgs.3 == RAACommitmentOrder::RevokeAndACKFirst);
			node_a.node.handle_revoke_and_ack(&node_b.node.get_our_node_id(), &chan_msgs.1.unwrap());
			assert!(node_a.node.get_and_clear_pending_msg_events().is_empty());
			check_added_monitors!(node_a, 1);
		} else {
			assert!(chan_msgs.1.is_none());
		}
		if pending_htlc_adds.0 != 0 || pending_htlc_claims.0 != 0 || pending_cell_htlc_claims.0 != 0 || pending_cell_htlc_fails.0 != 0 {
			let commitment_update = chan_msgs.2.unwrap();
			if pending_htlc_adds.0 != -1 { // We use -1 to denote a response commitment_signed
				assert_eq!(commitment_update.update_add_htlcs.len(), pending_htlc_adds.0 as usize);
			} else {
				assert!(commitment_update.update_add_htlcs.is_empty());
			}
			assert_eq!(commitment_update.update_fulfill_htlcs.len(), pending_htlc_claims.0 + pending_cell_htlc_claims.0);
			assert_eq!(commitment_update.update_fail_htlcs.len(), pending_cell_htlc_fails.0);
			assert!(commitment_update.update_fail_malformed_htlcs.is_empty());
			for update_add in commitment_update.update_add_htlcs {
				node_a.node.handle_update_add_htlc(&node_b.node.get_our_node_id(), &update_add);
			}
			for update_fulfill in commitment_update.update_fulfill_htlcs {
				node_a.node.handle_update_fulfill_htlc(&node_b.node.get_our_node_id(), &update_fulfill);
			}
			for update_fail in commitment_update.update_fail_htlcs {
				node_a.node.handle_update_fail_htlc(&node_b.node.get_our_node_id(), &update_fail);
			}

			if pending_htlc_adds.0 != -1 { // We use -1 to denote a response commitment_signed
				commitment_signed_dance!(node_a, node_b, commitment_update.commitment_signed, false);
			} else {
				node_a.node.handle_commitment_signed(&node_b.node.get_our_node_id(), &commitment_update.commitment_signed);
				check_added_monitors!(node_a, 1);
				let as_revoke_and_ack = get_event_msg!(node_a, MessageSendEvent::SendRevokeAndACK, node_b.node.get_our_node_id());
				// No commitment_signed so get_event_msg's assert(len == 1) passes
				node_b.node.handle_revoke_and_ack(&node_a.node.get_our_node_id(), &as_revoke_and_ack);
				assert!(node_b.node.get_and_clear_pending_msg_events().is_empty());
				check_added_monitors!(node_b, 1);
			}
		} else {
			assert!(chan_msgs.2.is_none());
		}
	}

	for chan_msgs in resp_2.drain(..) {
		if send_funding_locked.1 {
			node_b.node.handle_funding_locked(&node_a.node.get_our_node_id(), &chan_msgs.0.unwrap());
			let announcement_event = node_b.node.get_and_clear_pending_msg_events();
			if !announcement_event.is_empty() {
				assert_eq!(announcement_event.len(), 1);
				if let MessageSendEvent::SendAnnouncementSignatures { .. } = announcement_event[0] {
					//TODO: Test announcement_sigs re-sending
				} else { panic!("Unexpected event!"); }
			}
		} else {
			assert!(chan_msgs.0.is_none());
		}
		if pending_raa.1 {
			assert!(chan_msgs.3 == RAACommitmentOrder::RevokeAndACKFirst);
			node_b.node.handle_revoke_and_ack(&node_a.node.get_our_node_id(), &chan_msgs.1.unwrap());
			assert!(node_b.node.get_and_clear_pending_msg_events().is_empty());
			check_added_monitors!(node_b, 1);
		} else {
			assert!(chan_msgs.1.is_none());
		}
		if pending_htlc_adds.1 != 0 || pending_htlc_claims.1 != 0 || pending_cell_htlc_claims.1 != 0 || pending_cell_htlc_fails.1 != 0 {
			let commitment_update = chan_msgs.2.unwrap();
			if pending_htlc_adds.1 != -1 { // We use -1 to denote a response commitment_signed
				assert_eq!(commitment_update.update_add_htlcs.len(), pending_htlc_adds.1 as usize);
			}
			// Note: the original asserted against the `.0` counts here, which can only be
			// correct when this side has nothing pending; use the `.1` side's counts.
			assert_eq!(commitment_update.update_fulfill_htlcs.len(), pending_htlc_claims.1 + pending_cell_htlc_claims.1);
			assert_eq!(commitment_update.update_fail_htlcs.len(), pending_cell_htlc_fails.1);
			assert!(commitment_update.update_fail_malformed_htlcs.is_empty());
			for update_add in commitment_update.update_add_htlcs {
				node_b.node.handle_update_add_htlc(&node_a.node.get_our_node_id(), &update_add);
			}
			for update_fulfill in commitment_update.update_fulfill_htlcs {
				node_b.node.handle_update_fulfill_htlc(&node_a.node.get_our_node_id(), &update_fulfill);
			}
			for update_fail in commitment_update.update_fail_htlcs {
				node_b.node.handle_update_fail_htlc(&node_a.node.get_our_node_id(), &update_fail);
			}

			if pending_htlc_adds.1 != -1 { // We use -1 to denote a response commitment_signed
				commitment_signed_dance!(node_b, node_a, commitment_update.commitment_signed, false);
			} else {
				node_b.node.handle_commitment_signed(&node_a.node.get_our_node_id(), &commitment_update.commitment_signed);
				check_added_monitors!(node_b, 1);
				let bs_revoke_and_ack = get_event_msg!(node_b, MessageSendEvent::SendRevokeAndACK, node_a.node.get_our_node_id());
				// No commitment_signed so get_event_msg's assert(len == 1) passes
				node_a.node.handle_revoke_and_ack(&node_b.node.get_our_node_id(), &bs_revoke_and_ack);
				assert!(node_a.node.get_and_clear_pending_msg_events().is_empty());
				check_added_monitors!(node_a, 1);
			}
		} else {
			assert!(chan_msgs.2.is_none());
		}
	}
}
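// A minimal sketch of how the helpers above compose into a functional test:
// build configs, managers and the node network, open an announced channel,
// then send and claim a payment end-to-end. The two-node topology, the
// amounts, and the `InitFeatures::supported()` constructor are illustrative
// assumptions, not taken from this file.
#[test]
fn sketch_two_node_payment() {
	let node_cfgs = create_node_cfgs(2);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	// Opens an announced channel with the default 100_000 sat value used above.
	create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());

	// Routes 1_000_000 msat from nodes[0] to nodes[1], then claims it back.
	send_payment(&nodes[0], &[&nodes[1]], 1_000_000, 1_000_000);
}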
42.061431
292
0.71657
ac6c4e031211f9cc98e31cd236f2f85e65eb22ba
2,755
use ContextError;
use CreationError;
use CreationError::OsError;
use GlAttributes;
use GlContext;
use PixelFormatRequirements;

use core_foundation::base::TCFType;
use core_foundation::string::CFString;
use core_foundation::bundle::{CFBundleGetBundleWithIdentifier, CFBundleGetFunctionPointerForName};

use cocoa::base::{id, nil};
use cocoa::appkit::*;

use PixelFormat;
use api::cocoa::helpers;

#[derive(Default)]
pub struct PlatformSpecificHeadlessBuilderAttributes;

pub struct HeadlessContext {
    width: u32,
    height: u32,
    context: id,
}

impl HeadlessContext {
    pub fn new((width, height): (u32, u32), pf_reqs: &PixelFormatRequirements,
               opengl: &GlAttributes<&HeadlessContext>,
               _: &PlatformSpecificHeadlessBuilderAttributes)
               -> Result<HeadlessContext, CreationError>
    {
        let context = unsafe {
            let attributes = try!(helpers::build_nsattributes(pf_reqs, opengl));
            let pixelformat = NSOpenGLPixelFormat::alloc(nil).initWithAttributes_(&attributes);
            if pixelformat == nil {
                return Err(OsError(format!("Could not create the pixel format")));
            }
            let context = NSOpenGLContext::alloc(nil).initWithFormat_shareContext_(pixelformat, nil);
            if context == nil {
                return Err(OsError(format!("Could not create the rendering context")));
            }
            context
        };

        let headless = HeadlessContext {
            width: width,
            height: height,
            context: context,
        };

        Ok(headless)
    }
}

impl GlContext for HeadlessContext {
    unsafe fn make_current(&self) -> Result<(), ContextError> {
        self.context.makeCurrentContext();
        Ok(())
    }

    #[inline]
    fn is_current(&self) -> bool {
        unimplemented!()
    }

    #[inline]
    fn get_proc_address(&self, _addr: &str) -> *const () {
        let symbol_name: CFString = _addr.parse().unwrap();
        let framework_name: CFString = "com.apple.opengl".parse().unwrap();
        let framework = unsafe {
            CFBundleGetBundleWithIdentifier(framework_name.as_concrete_TypeRef())
        };
        let symbol = unsafe {
            CFBundleGetFunctionPointerForName(framework, symbol_name.as_concrete_TypeRef())
        };
        symbol as *const ()
    }

    #[inline]
    fn swap_buffers(&self) -> Result<(), ContextError> {
        unsafe { self.context.flushBuffer(); }
        Ok(())
    }

    #[inline]
    fn get_api(&self) -> ::Api {
        ::Api::OpenGl
    }

    #[inline]
    fn get_pixel_format(&self) -> PixelFormat {
        unimplemented!();
    }
}

unsafe impl Send for HeadlessContext {}
unsafe impl Sync for HeadlessContext {}
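// A minimal usage sketch for the context above. It assumes `Default`
// implementations exist for `PixelFormatRequirements` and `GlAttributes` in
// this configuration, and it elides real error handling.
#[allow(dead_code)]
fn headless_smoke_test() -> Result<(), CreationError> {
    let pf_reqs = PixelFormatRequirements::default();
    let opengl = GlAttributes::default();
    let plat = PlatformSpecificHeadlessBuilderAttributes::default();
    let context = try!(HeadlessContext::new((256, 256), &pf_reqs, &opengl, &plat));
    // Making the context current mutates thread-local GL state, hence `unsafe`.
    unsafe { context.make_current().expect("context should become current") };
    // Symbols can now be resolved for an OpenGL function loader:
    let _gl_clear = context.get_proc_address("glClear");
    Ok(())
}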
27.828283
101
0.622505
dd696ef6405dedec5bd1327b18485d2951f9ad91
5,468
use swc_common::{util::take::Take, DUMMY_SP};
use swc_ecma_ast::*;
use swc_ecma_utils::{contains_arguments, contains_this_expr};

use super::Pure;
use crate::compress::util::contains_super;

/// Methods related to the option `arrows`.
impl Pure<'_> {
    pub(super) fn unsafe_optimize_fn_as_arrow(&mut self, e: &mut Expr) {
        if self.options.ecma < EsVersion::Es2015 {
            return;
        }

        if !self.options.unsafe_arrows {
            return;
        }

        if let Expr::Fn(FnExpr { ident: None, function }) = e {
            if contains_this_expr(&function.body) {
                return;
            }

            self.changed = true;
            report_change!("unsafe_arrows: Fn expr => arrow");

            *e = Expr::Arrow(ArrowExpr {
                span: function.span,
                params: function.params.take().into_iter().map(|p| p.pat).collect(),
                body: BlockStmtOrExpr::BlockStmt(function.body.take().unwrap()),
                is_async: function.is_async,
                is_generator: function.is_generator,
                type_params: Default::default(),
                return_type: Default::default(),
            });
        }
    }

    pub(super) fn optimize_arrow_body(&mut self, b: &mut BlockStmtOrExpr) {
        if !self.options.arrows {
            return;
        }

        match b {
            BlockStmtOrExpr::BlockStmt(s) => {
                if s.stmts.len() == 1 {
                    if let Stmt::Return(s) = &mut s.stmts[0] {
                        if let Some(arg) = &mut s.arg {
                            report_change!("arrows: Optimizing the body of an arrow");
                            *b = BlockStmtOrExpr::Expr(arg.take());
                        }
                    }
                }
            }
            BlockStmtOrExpr::Expr(_) => {}
        }
    }

    pub(super) fn optimize_arrow_method_prop(&mut self, p: &mut Prop) {
        if self.options.ecma < EsVersion::Es2015 {
            return;
        }

        if !self.options.unsafe_methods && !self.options.arrows {
            return;
        }

        if let Prop::Method(m) = p {
            if m.function.is_generator
                || contains_arguments(&m.function.body)
                || contains_super(&m.function.body)
            {
                return;
            }

            let m_span = m.function.span;

            if let Some(body) = &mut m.function.body {
                if body.stmts.len() == 1
                    && matches!(
                        body.stmts[0],
                        Stmt::Return(ReturnStmt { arg: Some(..), .. })
                    )
                {
                    if contains_this_expr(body) {
                        return;
                    }

                    self.changed = true;
                    report_change!("Method property => arrow");

                    let arg = body
                        .take()
                        .stmts
                        .remove(0)
                        .expect_return_stmt()
                        .arg
                        .take()
                        .unwrap();

                    *p = Prop::KeyValue(KeyValueProp {
                        key: m.key.take(),
                        value: Box::new(Expr::Arrow(ArrowExpr {
                            span: m_span,
                            params: m.function.params.take().into_iter().map(|v| v.pat).collect(),
                            body: BlockStmtOrExpr::Expr(arg),
                            is_async: m.function.is_async,
                            is_generator: m.function.is_generator,
                            type_params: Default::default(),
                            return_type: Default::default(),
                        })),
                    });
                    return;
                }
            }
        }

        if let Prop::KeyValue(kv) = p {
            // if contains_this_expr(&kv.value) { return; }

            if let Expr::Arrow(
                m @ ArrowExpr {
                    body: BlockStmtOrExpr::BlockStmt(..),
                    ..
                },
            ) = &mut *kv.value
            {
                *p = Prop::Method(MethodProp {
                    key: kv.key.take(),
                    function: Function {
                        params: m
                            .params
                            .take()
                            .into_iter()
                            .map(|pat| Param {
                                span: DUMMY_SP,
                                decorators: Default::default(),
                                pat,
                            })
                            .collect(),
                        decorators: Default::default(),
                        span: m.span,
                        body: m.body.take().block_stmt(),
                        is_generator: m.is_generator,
                        is_async: m.is_async,
                        type_params: Default::default(),
                        return_type: Default::default(),
                    },
                });
            }
        }
    }
}
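// In effect (illustrative JavaScript before/after pairs, not taken from this
// crate's test suite):
//
//   var f = function () { return g(); };   // unsafe_optimize_fn_as_arrow:
//   var f = () => { return g(); };         //   fn expr => arrow (body must not use `this`)
//
//   () => { return g(); }                  // optimize_arrow_body:
//   () => g()                              //   single-`return` block => expression body
//
//   ({ f() { return 1; } })                // optimize_arrow_method_prop:
//   ({ f: () => 1 })                       //   method prop => key/value arrow; the final
//                                          //   arm performs the reverse rewrite for
//                                          //   block-bodied arrow values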
32.939759
86
0.380212
486a1927747f5e9e0811e9a053003fd52f0ee965
9,938
//! For very small Persistent maps; this is a persistent map implemented upon
//! persistent lists, which already can share structure. In this case, each
//! element of the list is a key-value pair (so this is very similar to implementing
//! a persistent associative structure in Elisp with alists)
//!
//!            (MapEntry :a 1)
//!              /         \
//!             /           \
//!   (MapEntry :b 2)   (MapEntry :b 3)
//!           /               \
//!          a                 b
//! -------------------
//! a => {:a 1 :b 2}
//! b => {:a 1 :b 3}
use crate::maps::MapEntry;
use crate::value::Value;
use crate::traits;
use std::collections::HashMap;
use std::convert::From;
use std::fmt;
use std::iter::FromIterator;
use std::rc::Rc;

#[derive(Debug, Clone, PartialEq, Hash)]
pub enum PersistentListMap {
    Map(Rc<PersistentListMap>, MapEntry),
    Empty,
}
impl Eq for PersistentListMap {}

/// map_entry!("doc", "this is a docstring");
#[macro_export]
macro_rules! map_entry {
    ($key:expr, $value:expr) => {{
        MapEntry {
            key: Keyword::intern($key).to_rc_value(),
            val: $value.to_rc_value(),
        }
    }};
}

/// persistent_list_map!(map_entry!("key1", "value1"), map_entry!("key2", "value2"));
#[macro_export]
macro_rules! persistent_list_map {
    ($($kv:expr),*) => {
        {
            let mut temp_vec = Vec::new();
            $(
                temp_vec.push($kv);
            )*
            temp_vec.into_iter().collect::<PersistentListMap>()
        }
    };
    {$($key:expr => $val:expr),*} => {
        {
            let mut temp_vec = Vec::new();
            $(
                temp_vec.push(map_entry!($key,$val));
            )*
            temp_vec.into_iter().collect::<PersistentListMap>()
        }
    };
}

/// Just like conj in Clojure, conj allows you to conjoin a new mapentry onto a map,
/// although currently that is all it allows:
/// conj!(base_meta(name, ns), map_entry!("key1", "value1"), map_entry!("key2", "value2"));
#[macro_export]
macro_rules! conj {
    ( $plistmap:expr, $($kv:expr), *) => {
        {
            let mut temp_plistmap_as_vec = $plistmap.clone().iter().collect::<Vec<MapEntry>>();
            $(
                temp_plistmap_as_vec.push($kv);
            )*
            temp_plistmap_as_vec.into_iter().collect::<PersistentListMap>()
        }
    };
}

/// merge!(persistent_list_map!{"a" => 1, "b" => 2},
///        persistent_list_map!{"b" => 3},
///        persistent_list_map!{"d" => 5, "e" => 7});
#[macro_export]
macro_rules! merge {
    ( $plistmap:expr, $($map:expr), *) => {
        {
            let mut temp_plistmap_as_vec = $plistmap.clone().iter().collect::<Vec<MapEntry>>();
            $(
                let next_map_as_vec = $map.clone().iter().collect::<Vec<MapEntry>>();
                temp_plistmap_as_vec.extend_from_slice(&next_map_as_vec);
            )*
            temp_plistmap_as_vec.into_iter().collect::<PersistentListMap>()
        }
    };
}

// @TODO put note on IBlah traits in doc
/// A PersistentListMap.
pub trait IPersistentMap {
    fn get(&self, key: &Rc<Value>) -> Rc<Value>;
    fn assoc(&self, key: Rc<Value>, value: Rc<Value>) -> Self;
    fn contains_key(&self, key: &Rc<Value>) -> bool;
}

impl IPersistentMap for PersistentListMap {
    // @TODO make fn of ILookup
    fn get(&self, key: &Rc<Value>) -> Rc<Value> {
        match self {
            PersistentListMap::Map(parent, entry) => {
                if entry.key == *key {
                    return Rc::clone(&entry.val);
                }
                parent.get(key)
            }
            PersistentListMap::Empty => Rc::new(Value::Nil),
        }
    }

    fn assoc(&self, key: Rc<Value>, val: Rc<Value>) -> PersistentListMap {
        PersistentListMap::Map(Rc::new(self.clone()), MapEntry { key, val })
    }

    fn contains_key(&self, key: &Rc<Value>) -> bool {
        match self {
            PersistentListMap::Map(parent, entry) => {
                if entry.key == *key {
                    return true;
                }
                parent.contains_key(key)
            },
            PersistentListMap::Empty => false
        }
    }
}

impl IPersistentMap for Rc<PersistentListMap> {
    // @TODO make fn of ILookup
    fn get(&self, key: &Rc<Value>) -> Rc<Value> {
        match &**self {
            PersistentListMap::Map(parent, entry) => {
                if entry.key == *key {
                    return Rc::clone(&entry.val);
                }
                parent.get(key)
            }
            PersistentListMap::Empty => Rc::new(Value::Nil),
        }
    }

    fn assoc(&self, key: Rc<Value>, val: Rc<Value>) -> Rc<PersistentListMap> {
        Rc::new(PersistentListMap::Map(
            Rc::clone(self),
            MapEntry { key, val },
        ))
    }

    fn contains_key(&self, key: &Rc<Value>) -> bool {
        match &**self {
            PersistentListMap::Map(parent, entry) => {
                if entry.key == *key {
                    return true;
                }
                parent.contains_key(key)
            },
            PersistentListMap::Empty => false
        }
    }
}

// The purpose of these functions is no longer to implement conversion,
// but to give us a cleaner way to invoke it
pub trait ToPersistentListMap {
    fn into_list_map(self) -> PersistentListMap;
}
impl<T> ToPersistentListMap for T
where
    T: Into<PersistentListMap>,
{
    fn into_list_map(self) -> PersistentListMap {
        Into::<PersistentListMap>::into(self)
    }
}
impl From<Vec<MapEntry>> for PersistentListMap {
    fn from(item: Vec<MapEntry>) -> Self {
        item.into_iter().collect::<PersistentListMap>()
    }
}

////////////////////////////////////////////////////////////////////////////////////////////////////
// Iterating
////////////////////////////////////////////////////////////////////////////////////////////////////
pub struct PersistentListMapIter {
    node: Rc<PersistentListMap>,
    seen: HashMap<Rc<Value>, bool>,
}
pub trait ToPersistentListMapIter {
    fn iter(&self) -> PersistentListMapIter;
}
impl Iterator for PersistentListMapIter {
    type Item = MapEntry;
    fn next(&mut self) -> Option<Self::Item> {
        match &*(Rc::clone(&self.node)) {
            PersistentListMap::Map(parent, mapentry) => {
                self.node = Rc::clone(parent);
                if self.seen.contains_key(&mapentry.key) {
                    return self.next();
                }
                self.seen.insert(mapentry.key.clone(), true);
                Some(mapentry.clone())
            }
            PersistentListMap::Empty => None,
        }
    }
}
impl ToPersistentListMapIter for Rc<PersistentListMap> {
    fn iter(&self) -> PersistentListMapIter {
        PersistentListMapIter {
            node: Rc::clone(self),
            seen: HashMap::new(),
        }
    }
}
impl ToPersistentListMapIter for PersistentListMap {
    fn iter(&self) -> PersistentListMapIter {
        Rc::new(self.clone()).iter()
    }
}
impl FromIterator<MapEntry> for PersistentListMap {
    fn from_iter<I: IntoIterator<Item = MapEntry>>(iter: I) -> Self {
        let mut map_so_far = PersistentListMap::Empty;
        for i in iter {
            map_so_far = PersistentListMap::Map(Rc::new(map_so_far), i.clone());
        }
        map_so_far
    }
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// End Iteration
////////////////////////////////////////////////////////////////////////////////////////////////////
impl traits::IMeta for PersistentListMap {
    fn meta(&self) -> PersistentListMap {
        // @TODO implement
        PersistentListMap::Empty
    }
}
impl traits::IObj for PersistentListMap {
    fn with_meta(&self, meta: PersistentListMap) -> PersistentListMap {
        // @TODO implement
        self.clone()
    }
}
impl fmt::Display for PersistentListMap {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut as_str = String::from("{");
        let mut first_loop = true;
        for mapentry in self.iter() {
            if !first_loop {
                as_str.push_str(", ");
            }
            first_loop = false;
            as_str.push_str(&format!(
                "{} {}",
                mapentry.key.to_string_explicit(),
                mapentry.val.to_string_explicit()
            ));
        }
        as_str.push_str("}");
        write!(f, "{}", as_str)
    }
}

#[cfg(test)]
mod tests {
    use crate::persistent_list_map::*;
    use crate::symbol::Symbol;
    use crate::keyword::Keyword;
    use crate::value::ToValue;

    #[test]
    fn persistent_list_map() {
        let map1 = vec![
            MapEntry {
                key: Symbol::intern("a").to_rc_value(),
                val: 15_i32.to_rc_value(),
            },
            MapEntry {
                key: Symbol::intern("b").to_rc_value(),
                val: "stuff".to_rc_value(),
            },
        ]
        .into_iter()
        .collect::<PersistentListMap>();
        println!("{}", map1);
        let map2 = map1.assoc(Symbol::intern("c").to_rc_value(), 100_i32.to_rc_value());
        println!("{}", map1);
        println!("{}", map2);
        let map3 = map1.assoc(Symbol::intern("a").to_rc_value(), 100_i32.to_rc_value());
        println!("{}", map1);
        println!("{}", map2);
        println!("{}", map3);
        let map4 = map2.assoc(Symbol::intern("a").to_rc_value(), 100_i32.to_rc_value());
        println!("{}", map1);
        println!("{}", map2);
        println!("{}", map3);
        println!("{}", map4);
    }

    #[test]
    fn contains_key() {
        let map1 = persistent_list_map!{ "a" => 12, "b" => 13 };
        assert!(map1.contains_key(&Keyword::intern("a").to_rc_value()));
        assert!(map1.contains_key(&Keyword::intern("b").to_rc_value()));
        assert!(!map1.contains_key(&Keyword::intern("c").to_rc_value()));
    }
}
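// A short sketch of the structural sharing described in the module docs,
// using the macros and trait methods defined above (keys and values are
// illustrative; the printed orderings are indicative, not exact):
#[allow(dead_code)]
fn sharing_example() {
    use crate::keyword::Keyword;
    use crate::value::ToValue;

    let a = persistent_list_map!{"a" => 1, "b" => 2};
    // `assoc` never mutates; it prepends a new MapEntry whose parent is the
    // old map, so `b` shares `a`'s entries and merely shadows ":b".
    let b = a.assoc(Keyword::intern("b").to_rc_value(), 3_i32.to_rc_value());
    println!("{}", a); // e.g. {:b 2, :a 1}
    println!("{}", b); // e.g. {:b 3, :a 1} (the shared :b 2 entry is shadowed)
}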
31.350158
100
0.516804
76b9d91dbe594594344f5cf11e4e1021f390177c
13
pub mod pos;
6.5
12
0.692308
338927597271331fe862af59ad81f264e67cbc05
2,542
use yew::{agent::Bridged, Bridge, Component, ComponentLink, Html, html};
use yew_router::prelude::*;

use crate::{
    pages::{
        dashboard::Dashboard,
        hello_world::HelloWorld,
        page_not_found::PageNotFound,
    },
    routes::{AppRoute, fix_fragment_routes},
    shared::layout::{Header, Footer},
};

pub struct App {
    #[allow(unused)]
    link: ComponentLink<Self>,
    current_route: Option<AppRoute>,
    #[allow(unused)]
    router_agent: Box<dyn Bridge<RouteAgent>>,
}

pub enum Msg {
    UpdateRoute(Route),
}

impl Component for App {
    type Message = Msg;
    type Properties = ();

    fn create(_: Self::Properties, link: ComponentLink<Self>) -> Self {
        let router_agent = RouteAgent::bridge(link.callback(Msg::UpdateRoute));
        let route_service: RouteService = RouteService::new();
        let mut route = route_service.get_route();
        fix_fragment_routes(&mut route);
        App {
            link,
            current_route: AppRoute::switch(route),
            router_agent,
        }
    }

    fn update(&mut self, msg: Self::Message) -> bool {
        match msg {
            Msg::UpdateRoute(mut route) => {
                fix_fragment_routes(&mut route);
                self.current_route = AppRoute::switch(route);
            }
        }
        true
    }

    fn change(&mut self, _: Self::Properties) -> yew::ShouldRender {
        false
    }

    fn view(&self) -> Html {
        html! {
            <>
                <Header current_route={&self.current_route}/>
                <main class="flex-shrink-0 mt-3">
                    <div class="container-fluid">
                        {
                            if let Some(route) = &self.current_route {
                                log::info!("{:#?}", self.current_route);
                                match route {
                                    AppRoute::Index => html!{<HelloWorld />},
                                    AppRoute::Home => html!{<HelloWorld />},
                                    AppRoute::HelloWorld => html!{<HelloWorld />},
                                    AppRoute::Dashboard => html!{<Dashboard />},
                                    AppRoute::PageNotFound | _ => html!{<PageNotFound />},
                                }
                            } else {
                                html!{<PageNotFound />}
                            }
                        }
                    </div>
                </main>
                <Footer />
            </>
        }
    }
}
// Copyright 2018 The Exonum Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#[macro_use]
extern crate exonum;
#[macro_use]
extern crate exonum_testkit;
extern crate serde_json;

use exonum::api::public::BlocksRange;
use exonum::blockchain::{ExecutionResult, Schema, Service, Transaction, TransactionSet};
use exonum::crypto::{gen_keypair, CryptoHash, Hash, PublicKey};
use exonum::encoding;
use exonum::messages::{Message, RawTransaction};
use exonum::storage::{Fork, Snapshot};
use exonum_testkit::{ApiKind, TestKitBuilder};

// Simple service implementation.
const SERVICE_ID: u16 = 512;

transactions! {
    TimestampingServiceTransactions {
        const SERVICE_ID = SERVICE_ID;

        struct TxTimestamp {
            from: &PublicKey,
            msg: &str,
        }
    }
}

struct TimestampingService;

impl Transaction for TxTimestamp {
    fn verify(&self) -> bool {
        self.verify_signature(self.from())
    }

    fn execute(&self, _fork: &mut Fork) -> ExecutionResult {
        Ok(())
    }
}

impl Service for TimestampingService {
    fn service_name(&self) -> &str {
        "timestamping"
    }

    fn state_hash(&self, _: &Snapshot) -> Vec<Hash> {
        Vec::new()
    }

    fn service_id(&self) -> u16 {
        SERVICE_ID
    }

    fn tx_from_raw(&self, raw: RawTransaction) -> Result<Box<Transaction>, encoding::Error> {
        let tx = TimestampingServiceTransactions::tx_from_raw(raw)?;
        Ok(tx.into())
    }
}

fn main() {
    // Create testkit for a network with four validators.
    let mut testkit = TestKitBuilder::validator()
        .with_validators(4)
        .with_service(TimestampingService)
        .create();

    // Create a few transactions.
    let keypair = gen_keypair();
    let tx1 = TxTimestamp::new(&keypair.0, "Down To Earth", &keypair.1);
    let tx2 = TxTimestamp::new(&keypair.0, "Cry Over Spilt Milk", &keypair.1);
    let tx3 = TxTimestamp::new(&keypair.0, "Dropping Like Flies", &keypair.1);

    // Commit them into the blockchain.
    let block = testkit.create_block_with_transactions(txvec![
        tx1.clone(),
        tx2.clone(),
        tx3.clone(),
    ]);
    assert_eq!(block.len(), 3);
    assert!(block.iter().all(|transaction| transaction.status().is_ok()));

    // Check results with the schema.
    let snapshot = testkit.snapshot();
    let schema = Schema::new(&snapshot);
    assert!(schema.transactions().contains(&tx1.hash()));
    assert!(schema.transactions().contains(&tx2.hash()));
    assert!(schema.transactions().contains(&tx3.hash()));

    // Check results with the api.
    let api = testkit.api();
    let blocks_range: BlocksRange = api.get(ApiKind::Explorer, "v1/blocks?count=10");
    assert_eq!(blocks_range.blocks.len(), 2);
    api.get::<serde_json::Value>(
        ApiKind::Explorer,
        &format!("v1/transactions/{}", tx1.hash().to_string()),
    );
}
// Copyright 2020 Parity Technologies
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! Efficient large, fixed-size big integers and hashes.

#![cfg_attr(not(feature = "std"), no_std)]

#[doc(hidden)]
pub use byteorder;

// Re-export libcore using an alias so that the macros can work without
// requiring `extern crate core` downstream.
#[doc(hidden)]
pub use core as core_;

#[doc(hidden)]
pub use rustc_hex;

#[cfg(feature = "quickcheck")]
#[doc(hidden)]
pub use qc;

#[cfg(feature = "quickcheck")]
#[doc(hidden)]
pub use rand;

#[doc(hidden)]
pub use static_assertions;

pub use crunchy::unroll;

#[macro_use]
#[rustfmt::skip]
mod uint;
pub use crate::uint::*;
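// Editor's sketch: the crate's central entry point is the `construct_uint!`
// macro defined in `mod uint` above. A consumer names the struct and its
// number of 64-bit limbs, and the macro generates the arithmetic; `4` limbs
// gives a 256-bit integer. The `pub struct Name(n_limbs);` form is assumed
// from the macro's usual interface.
#[cfg(test)]
mod construct_uint_sketch {
    crate::construct_uint! {
        /// 256-bit unsigned integer built from four 64-bit limbs.
        pub struct U256(4);
    }

    #[test]
    fn small_arithmetic_round_trips() {
        let a = U256::from(7u64) * U256::from(6u64);
        assert_eq!(a, U256::from(42u64));
    }
}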
pub struct IconTextFormat {
    props: crate::Props,
}

impl yew::Component for IconTextFormat {
    type Properties = crate::Props;
    type Message = ();

    fn create(props: Self::Properties, _: yew::prelude::ComponentLink<Self>) -> Self {
        Self { props }
    }

    fn update(&mut self, _: Self::Message) -> yew::prelude::ShouldRender {
        true
    }

    fn change(&mut self, _: Self::Properties) -> yew::prelude::ShouldRender {
        false
    }

    fn view(&self) -> yew::prelude::Html {
        yew::prelude::html! {
            <svg
                class=self.props.class.unwrap_or("")
                width=self.props.size.unwrap_or(24).to_string()
                height=self.props.size.unwrap_or(24).to_string()
                viewBox="0 0 24 24"
                fill=self.props.fill.unwrap_or("none")
                stroke=self.props.color.unwrap_or("currentColor")
                stroke-width=self.props.stroke_width.unwrap_or(2).to_string()
                stroke-linecap=self.props.stroke_linecap.unwrap_or("round")
                stroke-linejoin=self.props.stroke_linejoin.unwrap_or("round")
            >
                <svg xmlns="http://www.w3.org/2000/svg" height="24" viewBox="0 0 24 24" width="24"><path d="M0 0h24v24H0z" fill="none"/><path d="M5 17v2h14v-2H5zm4.5-4.2h5l.9 2.2h2.1L12.75 4h-1.5L6.5 15h2.1l.9-2.2zM12 5.98L13.87 11h-3.74L12 5.98z"/></svg>
            </svg>
        }
    }
}
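// Editor's sketch: embedding the icon from a parent component. `crate::Props`
// is assumed to implement `yew::Properties`; the `with props` form hands a
// pre-built Properties struct to the child wholesale (yew 0.x syntax).
#[allow(dead_code)]
fn icon_example(props: crate::Props) -> yew::prelude::Html {
    yew::prelude::html! {
        <IconTextFormat with props />
    }
}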
use crate::registry::Registry;
use candid::CandidType;
use ic_base_types::SubnetId;
use ic_registry_keys::make_subnet_record_key;
use ic_registry_routing_table::{are_disjoint, is_subset_of, CanisterIdRange, CanisterIdRanges};
use serde::{Deserialize, Serialize};
use std::convert::TryFrom;

impl Registry {
    /// Adds new entries to `canister_migrations`.
    ///
    /// Validates the payload and applies the mutation derived from the payload
    /// to the registry.
    pub fn prepare_canister_migration(
        &mut self,
        payload: PrepareCanisterMigrationPayload,
    ) -> Result<(), String> {
        // Construct the canister ID ranges from the payload.
        let ranges_to_migrate = payload.canister_id_ranges.clone();
        // Check if the canister ID ranges are well formed.
        let ranges_to_migrate = CanisterIdRanges::try_from(ranges_to_migrate)
            .map_err(|e| format!("canister ID ranges are not well formed: {:?}", e))?;

        let source = payload.source_subnet;
        let destination = payload.destination_subnet;

        let version = self.latest_version();

        self.get(&make_subnet_record_key(source).into_bytes(), version)
            .ok_or_else(|| format!("source {} is not a known subnet", source))?;
        self.get(&make_subnet_record_key(destination).into_bytes(), version)
            .ok_or_else(|| format!("destination {} is not a known subnet", destination))?;

        let routing_table = self.get_routing_table_or_panic(version);
        let source_subnet_ranges = routing_table.ranges(source);

        // Check if all the canister ID ranges to be migrated are from the source subnet.
        if !is_subset_of(ranges_to_migrate.iter(), source_subnet_ranges.iter()) {
            return Err(format!(
                "not all canisters to be migrated are hosted by the provided source subnet {}",
                source
            ));
        }

        // Check that the canister ID ranges to be migrated are NOT in an active canister migration.
        if let Some(canister_migrations) = self.get_canister_migrations(version) {
            if !are_disjoint(canister_migrations.ranges(), ranges_to_migrate.iter()) {
                return Err(format!(
                    "some of the canisters in the given ranges {:?} are already being migrated",
                    ranges_to_migrate
                ));
            }
        }

        self.maybe_apply_mutation_internal(vec![self.migrate_canister_ranges_mutation(
            version,
            ranges_to_migrate,
            source,
            destination,
        )]);

        Ok(())
    }
}

/// The argument for the `prepare_canister_migration` update call.
#[derive(Debug, CandidType, Serialize, Deserialize)]
pub struct PrepareCanisterMigrationPayload {
    /// The list of canister ID ranges to be added into canister migrations.
    pub canister_id_ranges: Vec<CanisterIdRange>,
    /// The source of the canister ID ranges.
    pub source_subnet: SubnetId,
    /// The new destination for the canister ID ranges.
    pub destination_subnet: SubnetId,
}
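// Editor's sketch of a caller, under stated assumptions: `CanisterIdRange` is
// a start/end pair and `CanisterId::from_u64` exists (as in ic-base-types).
// The subnet IDs and the 0x1000..=0x1fff range are placeholders only.
#[allow(dead_code)]
fn stage_example_migration(
    registry: &mut Registry,
    source_subnet: SubnetId,
    destination_subnet: SubnetId,
) -> Result<(), String> {
    registry.prepare_canister_migration(PrepareCanisterMigrationPayload {
        canister_id_ranges: vec![CanisterIdRange {
            start: ic_base_types::CanisterId::from_u64(0x1000),
            end: ic_base_types::CanisterId::from_u64(0x1fff),
        }],
        source_subnet,
        destination_subnet,
    })
}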
// Copyright 2015 Michael Yang. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

#[derive(Debug)]
pub enum Error {
    DimensionMismatch,
    IllegalParameter(usize),
    DiagonalElementZero(usize),
}
use getrandom::register_custom_getrandom;
use proptest::test_runner::{Config, TestRunner};
use sudograph::graphql_database;
use test_utilities::{
    arbitraries::queries::queries::QueriesArbitrary,
    utilities::assert::assert_correct_result
};

fn custom_getrandom(buf: &mut [u8]) -> Result<(), getrandom::Error> {
    // TODO get some randomness
    return Ok(());
}

register_custom_getrandom!(custom_getrandom);

graphql_database!("canisters/create/src/schema.graphql");

// TODO also add in some counter to at least know what iteration you're on
#[ic_cdk_macros::update]
fn test_create(
    cases: u32,
    logging: String
) -> bool {
    let graphql_ast = Box::leak(Box::new(graphql_parser::schema::parse_schema::<String>(static_schema).unwrap()));
    let object_types = Box::leak(Box::new(get_object_types(graphql_ast)));

    for object_type in object_types.iter() {
        let mut runner = TestRunner::new(Config {
            cases,
            max_shrink_iters: 0,
            .. Config::default()
        });

        let mutation_create_arbitrary = object_type.mutation_create_arbitrary(
            graphql_ast,
            object_types,
            object_type,
            1,
            &graphql_mutation
        ).unwrap();

        runner.run(&mutation_create_arbitrary, |mutation_create| {
            if logging == "verbose" {
                ic_cdk::println!("query: {}", mutation_create.query);
                ic_cdk::println!("variables: {}", mutation_create.variables);
            }

            let result_string = futures::executor::block_on(async {
                return graphql_mutation(
                    mutation_create.query.clone(),
                    mutation_create.variables.clone()
                ).await;
            });

            let result_json = serde_json::from_str(&result_string).unwrap();

            assert_eq!(
                true,
                assert_correct_result(
                    &result_json,
                    &mutation_create.selection_name,
                    &mutation_create.input_infos
                ).unwrap()
            );

            if logging == "verbose" {
                ic_cdk::println!("Test complete");
                ic_cdk::println!("\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n");
            }

            return Ok(());
        }).unwrap();
    }

    return true;
}
//! Transaction fees

use super::Gas;
use crate::{prost_ext::ParseOptional, proto, AccountId, Coin, ErrorReport, Result};

/// Fee includes the amount of coins paid in fees and the maximum gas to be
/// used by the transaction.
///
/// The ratio yields an effective “gasprice”, which must be above some minimum
/// to be accepted into the mempool.
#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord)]
pub struct Fee {
    /// Amount of coins to be paid as a fee.
    pub amount: Vec<Coin>,

    /// Maximum gas that can be used in transaction processing before an out
    /// of gas error occurs.
    pub gas_limit: Gas,

    /// Payer: if [`None`], the first signer is responsible for paying the fees.
    ///
    /// If [`Some`], the specified account must pay the fees. The payer must be
    /// a tx signer (and thus have signed this field in AuthInfo).
    ///
    /// Setting this field does not change the ordering of required signers for
    /// the transaction.
    pub payer: Option<AccountId>,

    /// Granter: if [`Some`], the fee payer (either the first signer or the
    /// value of the payer field) requests that a fee grant be used to pay fees
    /// instead of the fee payer’s own balance.
    ///
    /// If an appropriate fee grant does not exist or the chain does not
    /// support fee grants, this will fail.
    pub granter: Option<AccountId>,
}

impl Fee {
    /// Simple constructor for a single [`Coin`] amount and the given amount
    /// of [`Gas`].
    pub fn from_amount_and_gas(amount: Coin, gas_limit: impl Into<Gas>) -> Fee {
        Fee {
            amount: vec![amount],
            gas_limit: gas_limit.into(),
            payer: None,
            granter: None,
        }
    }
}

impl TryFrom<proto::cosmos::tx::v1beta1::Fee> for Fee {
    type Error = ErrorReport;

    fn try_from(proto: proto::cosmos::tx::v1beta1::Fee) -> Result<Fee> {
        Fee::try_from(&proto)
    }
}

impl TryFrom<&proto::cosmos::tx::v1beta1::Fee> for Fee {
    type Error = ErrorReport;

    fn try_from(proto: &proto::cosmos::tx::v1beta1::Fee) -> Result<Fee> {
        let amount = proto
            .amount
            .iter()
            .map(TryFrom::try_from)
            .collect::<Result<_, _>>()?;

        Ok(Fee {
            amount,
            gas_limit: proto.gas_limit.into(),
            payer: proto.payer.parse_optional()?,
            granter: proto.granter.parse_optional()?,
        })
    }
}

impl From<Fee> for proto::cosmos::tx::v1beta1::Fee {
    fn from(fee: Fee) -> proto::cosmos::tx::v1beta1::Fee {
        proto::cosmos::tx::v1beta1::Fee::from(&fee)
    }
}

impl From<&Fee> for proto::cosmos::tx::v1beta1::Fee {
    fn from(fee: &Fee) -> proto::cosmos::tx::v1beta1::Fee {
        proto::cosmos::tx::v1beta1::Fee {
            amount: fee.amount.iter().map(Into::into).collect(),
            gas_limit: fee.gas_limit.value(),
            payer: fee
                .payer
                .as_ref()
                .map(|id| id.to_string())
                .unwrap_or_default(),
            granter: fee
                .granter
                .as_ref()
                .map(|id| id.to_string())
                .unwrap_or_default(),
        }
    }
}
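// Editor's sketch: the "effective gasprice" from the doc comment above is just
// fee amount divided by gas limit. E.g. a 500-unit fee with a 200_000 gas
// limit prices gas at 0.0025 units per gas; nodes reject transactions priced
// below their configured minimum. `Coin` construction is assumed and elided.
#[allow(dead_code)]
fn example_fee(five_hundred_units: Coin) -> Fee {
    // `Gas: From<u64>` is assumed here, matching the `impl Into<Gas>` bound
    // on `from_amount_and_gas` above.
    Fee::from_amount_and_gas(five_hundred_units, 200_000u64)
}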
// useful things for dealing with gd level data use crate::ast::ObjectMode; use crate::builtin::*; use crate::compiler_types::*; use crate::context::Context; use std::collections::{HashMap, HashSet}; #[derive(Clone, PartialEq, Debug)] pub enum ObjParam { Group(Group), Color(Color), Block(Block), Item(Item), Number(f64), Bool(bool), Text(String), GroupList(Vec<Group>), Epsilon, } impl std::cmp::PartialOrd for GdObj { fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> { for param in [1, 51, 57].iter() { if let Some(p1) = self.params.get(param) { if let Some(p2) = other.params.get(param) { match (p1, p2) { (ObjParam::Number(n1), ObjParam::Number(n2)) => { return (*n1).partial_cmp(n2) } (ObjParam::Group(g1), ObjParam::Group(g2)) => { let num1 = match g1.id { Id::Arbitrary(n) => n, Id::Specific(n) => n, }; let num2 = match g2.id { Id::Arbitrary(n) => n, Id::Specific(n) => n, }; return num1.partial_cmp(&num2); } (_, _) => (), } } } } Some(std::cmp::Ordering::Equal) } } use std::fmt; impl fmt::Display for ObjParam { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { ObjParam::Group(Group { id }) | ObjParam::Color(Color { id }) | ObjParam::Block(Block { id }) | ObjParam::Item(Item { id }) => match id { Id::Specific(id) => write!(f, "{}", id), _ => write!(f, "0"), }, ObjParam::Number(n) => { if (n.round() - n).abs() < 0.001 { write!(f, "{}", *n as i32) } else { write!(f, "{:.1$}", n, 3) } } ObjParam::Bool(b) => write!(f, "{}", if *b { "1" } else { "0" }), ObjParam::Text(t) => write!(f, "{}", t), ObjParam::GroupList(list) => { let mut out = String::new(); for g in list { if let Id::Specific(id) = g.id { out += &(id.to_string() + ".") } else { out += "?." }; } out.pop(); write!(f, "{}", out) } ObjParam::Epsilon => write!(f, "{{epsilon}}"), } } } #[derive(Clone, PartialEq, Debug)] pub struct GdObj { /*pub obj_id: u16, pub groups: Vec<Group>, pub target: Group, pub spawn_triggered: bool,*/ pub func_id: usize, pub params: HashMap<u16, ObjParam>, pub mode: ObjectMode, pub unique_id: usize, } impl GdObj { pub fn context_parameters(&mut self, context: &Context) -> GdObj { self.params.insert(57, ObjParam::Group(context.start_group)); (*self).clone() } } pub fn get_used_ids(ls: &str) -> [HashSet<u16>; 4] { let mut out = [ HashSet::<u16>::new(), HashSet::<u16>::new(), HashSet::<u16>::new(), HashSet::<u16>::new(), ]; let objects = ls.split(';'); for obj in objects { let props: Vec<&str> = obj.split(',').collect(); let mut map = HashMap::new(); for i in (0..props.len() - 1).step_by(2) { map.insert(props[i], props[i + 1]); } for (key, value) in &map { match *key { "57" => { //GROUPS let groups = value.split('.'); for g in groups { let group = g.parse().unwrap(); out[0].insert(group); } } "51" => { match (map.get("1"), map.get("52")) { (Some(&"1006"), Some(&"1")) => out[0].insert(value.parse().unwrap()), (Some(&"1006"), _) => out[1].insert(value.parse().unwrap()), _ => out[0].insert(value.parse().unwrap()), }; } "71" => { out[0].insert(value.parse().unwrap()); } //colors "21" => { out[1].insert(value.parse().unwrap()); } "22" => { out[1].insert(value.parse().unwrap()); } "23" => { out[1].insert(value.parse().unwrap()); } "80" => { match map.get("1") { //if collision trigger or block, add block id Some(&"1815") | Some(&"1816") => out[2].insert(value.parse().unwrap()), //counter display => do nothing Some(&"1615") => false, // else add item id _ => out[3].insert(value.parse().unwrap()), }; } "95" => { out[2].insert(value.parse().unwrap()); } //some of these depends on what object 
it is //pulse target depends on group mode/color mode //figure this out, future me _ => (), } } } out } const START_HEIGHT: u16 = 10; const MAX_HEIGHT: u16 = 40; pub const SPWN_SIGNATURE_GROUP: Group = Group { id: Id::Specific(1001), }; //use crate::ast::ObjectMode; pub fn remove_spwn_objects(file_content: &mut String) { let spwn_group = match SPWN_SIGNATURE_GROUP.id { Id::Specific(n) => n.to_string(), _ => unreachable!(), }; (*file_content) = file_content //remove previous spwn objects .split(';') .map(|obj| { let key_val: Vec<&str> = obj.split(',').collect(); let mut ret = obj; for i in (0..key_val.len()).step_by(2) { if key_val[i] == "57" { let mut groups = key_val[i + 1].split('.'); if groups.any(|x| x == spwn_group) { ret = ""; } } } ret }) .collect::<Vec<&str>>() .join(";"); } //returns the string to be appended to the old string pub fn append_objects( mut objects: Vec<GdObj>, old_ls: &str, ) -> Result<(String, [usize; 4]), String> { let mut closed_ids = get_used_ids(old_ls); //collect all specific ids mentioned into closed_[id] lists for obj in &objects { for prop in obj.params.values() { let class_index; let id; match prop { ObjParam::Group(g) => { class_index = 0; id = vec![g.id]; } ObjParam::GroupList(l) => { class_index = 0; id = l.iter().map(|g| g.id).collect(); } ObjParam::Color(g) => { class_index = 1; id = vec![g.id]; } ObjParam::Block(g) => { class_index = 2; id = vec![g.id]; } ObjParam::Item(g) => { class_index = 3; id = vec![g.id]; } _ => continue, } for id in id { match id { Id::Specific(i) => { closed_ids[class_index].insert(i); } _ => continue, } } } } //find new ids for all the arbitrary ones let mut id_maps: [HashMap<ArbitraryId, SpecificId>; 4] = [ HashMap::new(), HashMap::new(), HashMap::new(), HashMap::new(), ]; const ID_MAX: u16 = 999; for obj in &mut objects { for prop in obj.params.values_mut() { let class_index; let ids: Vec<&mut Id>; match prop { ObjParam::Group(g) => { class_index = 0; ids = vec![&mut g.id]; } ObjParam::GroupList(g) => { class_index = 0; ids = g.iter_mut().map(|x| &mut x.id).collect(); } ObjParam::Color(g) => { class_index = 1; ids = vec![&mut g.id]; } ObjParam::Block(g) => { class_index = 2; ids = vec![&mut g.id]; } ObjParam::Item(g) => { class_index = 3; ids = vec![&mut g.id]; } _ => continue, } for id in ids { match &id { Id::Arbitrary(i) => { *id = Id::Specific(match id_maps[class_index].get(i) { Some(a) => *a, None => { let mut out = None; for i in 1..10000 { if !closed_ids[class_index].contains(&i) { out = Some(i); closed_ids[class_index].insert(i); break; } } if let Some(id) = out { id_maps[class_index].insert(*i, id); id } else { return Err(format!( "This level exceeds the {} limit!", ["group", "color", "block ID", "item ID"][class_index] )); } } }) } _ => continue, } } } } for (i, list) in closed_ids.iter_mut().enumerate() { list.remove(&0); if list.len() > ID_MAX as usize { return Err(format!( "This level exceeds the {} limit! 
({}/{})", ["group", "color", "block ID", "item ID"][i], list.len(), ID_MAX )); } } //println!("group_map: {:?}", id_maps[0]); fn serialize_obj(mut trigger: GdObj) -> String { let mut obj_string = String::new(); match trigger.mode { ObjectMode::Object => { match trigger.params.get_mut(&57) { Some(ObjParam::GroupList(l)) => (*l).push(SPWN_SIGNATURE_GROUP), Some(ObjParam::Group(g)) => { let group = *g; trigger .params .insert(57, ObjParam::GroupList(vec![group, SPWN_SIGNATURE_GROUP])); } _ => { trigger .params .insert(57, ObjParam::Group(SPWN_SIGNATURE_GROUP)); } }; let mut param_list = trigger.params.iter().collect::<Vec<(&u16, &ObjParam)>>(); param_list.sort_by(|a, b| (*a.0).cmp(b.0)); for param in param_list { obj_string += &format!("{},{},", param.0, param.1); } obj_string + ";" } ObjectMode::Trigger => { match trigger.params.get_mut(&57) { Some(ObjParam::GroupList(l)) => { (*l).push(SPWN_SIGNATURE_GROUP); //list } Some(ObjParam::Group(g)) => { let group = *g; trigger .params .insert(57, ObjParam::GroupList(vec![group, SPWN_SIGNATURE_GROUP])); } _ => { trigger .params .insert(57, ObjParam::Group(SPWN_SIGNATURE_GROUP)); //Vec::new() } }; /*let spawned = match trigger.params.get(&62) { Some(ObjParam::Bool(b)) => *b, _ => groups.iter().any(|x| x.id != ID::Specific(0)), }; if spawned { obj_string += "87,1,"; }*/ let mut param_list = trigger.params.iter().collect::<Vec<(&u16, &ObjParam)>>(); param_list.sort_by(|a, b| (*a.0).cmp(b.0)); for param in param_list { obj_string += &format!("{},{},", param.0, param.1); } obj_string + "108,1;" //linked group } } } let mut full_obj_string = String::new(); for obj in objects { full_obj_string += &serialize_obj(obj) } Ok(( full_obj_string, [ closed_ids[0].len(), closed_ids[1].len(), closed_ids[2].len(), closed_ids[3].len(), ], )) } pub fn apply_fn_ids(func_ids: &[FunctionId]) -> Vec<GdObj> { //println!("{:?}", trigger); fn apply_fn_id( id_index: usize, func_ids: &[FunctionId], x_offset: u32, y_offset: u16, ) -> (Vec<GdObj>, u32) { let id = func_ids[id_index].clone(); let mut objects = Vec::<GdObj>::new(); let mut current_x = 0; /*if !id.obj_list.is_empty() { //add label obj_string += &format!( "1,914,2,{},3,{},31,{},32,0.5;", x_offset * 30 + 15, ((81 - START_HEIGHT) - y_offset) * 30 + 15, base64::encode(id.name.as_bytes()) ); }*/ //add top layer let possible_height = MAX_HEIGHT - (START_HEIGHT + y_offset); //30 is max (TODO: case for if y_offset is more than 30) let mut objectlist = id.obj_list; objectlist.sort_by(|x, y| x.0.partial_cmp(&y.0).unwrap()); for (i, (obj, _)) in objectlist.iter().enumerate() { match obj.mode { ObjectMode::Object => { objects.push(obj.clone()); } ObjectMode::Trigger => { let y_pos = (i as u16) % possible_height + START_HEIGHT + y_offset; let x_pos = (i as f64 / possible_height as f64).floor() as u32 + x_offset; let spawned = match obj.params.get(&62) { Some(ObjParam::Bool(b)) => *b, _ => match obj.params.get(&57) { None => false, // Some(ObjParam::GroupList(l)) => { // l.iter().any(|x| x.id != ID::Specific(0)) // } Some(ObjParam::Group(g)) => g.id != Id::Specific(0), _ => unreachable!(), }, }; let mut new_obj = obj.clone(); if spawned { new_obj.params.insert(62, ObjParam::Bool(true)); new_obj.params.insert(87, ObjParam::Bool(true)); } new_obj.params.insert( 2, if spawned { ObjParam::Number((x_pos * 30 + 15) as f64) } else { ObjParam::Number(0.0) }, ); new_obj .params .insert(3, ObjParam::Number(((80 - y_pos) * 30 + 15) as f64)); objects.push(new_obj); } } } if !objectlist.is_empty() { current_x += (objectlist.len() as f64 / 
possible_height as f64).floor() as u32 + 1; } //add all children for (i, func_id) in func_ids.iter().enumerate() { if func_id.parent == Some(id_index) { let (obj, new_length) = apply_fn_id(i, func_ids, current_x + x_offset, y_offset); objects.extend(obj); if new_length > 0 { current_x += new_length + 1; } } } (objects, current_x) } let mut full_obj_list = Vec::<GdObj>::new(); let mut current_x = 0; for (i, func_id) in func_ids.iter().enumerate() { if func_id.parent == None { let (objects, new_length) = apply_fn_id(i, func_ids, current_x, 0); full_obj_list.extend(objects); current_x += new_length; } } full_obj_list } /* PYTHON CODE IM USING def Xor(data,key): res = [] for i in data: res.append(i^key) return bytearray(res).decode() def Base64Decrypt(encodedData): while (len(encodedData) % 4 != 0): encodedData += "=" encodedDataAsBytes = base64.b64decode(encodedData) return encodedDataAsBytes def decrypt(ls): fin = ls.replace('-', '+').replace('_', '/').replace("\0", "") fin = Base64Decrypt(fin) fin = gzip.decompress(fin) return(fin) */ //<OLD> /* pub fn serialize_triggers_old(func_ids: Vec<FunctionID>) -> String { //println!("{:?}", trigger); fn group_string(list: Vec<Group>) -> String { let mut string = String::new(); for group in list.iter() { string += &(group.id.to_string() + "."); } string.pop(); string } fn serialize_obj(mut trigger: GDObj, x: u32, y: u16) -> String { let mut obj_string = String::new(); let spawned = trigger.params.get(&62) == Some(&String::from("1")); if spawned { obj_string += "87,1,"; } /*if !trigger.groups.is_empty() { obj_string += &(String::from("57,") + &group_string(trigger.groups) + "." + SPWN_SIGNATURE_GROUP + ","); }*/ match trigger.params.get_mut(&2) { None => { trigger.params.insert( 2, if spawned { (x * 30 + 15).to_string() } else { "0".to_string() }, ); } _ => (), }; match trigger.params.get_mut(&3) { None => { trigger.params.insert(3, ((80 - y) * 30 + 15).to_string()); } _ => (), }; //((80 - y) * 30 + 15) as u32,) match trigger.params.get_mut(&57) { Some(group_str) => (*group_str) += &format!(".{}", SPWN_SIGNATURE_GROUP), None => { trigger.params.insert(57, SPWN_SIGNATURE_GROUP.to_string()); } }; for param in trigger.params { obj_string += &(param.0.to_string() + "," + &param.1 + ","); } obj_string + "108,1;" //linked group } fn serialize_func_id( id_index: usize, func_ids: Vec<FunctionID>, x_offset: u32, y_offset: u16, ) -> (String, u32) { let id = func_ids[id_index].clone(); let mut obj_string = String::new(); let mut current_x = 0; /*if !id.obj_list.is_empty() { //add label obj_string += &format!( "1,914,2,{},3,{},31,{},32,0.5;", x_offset * 30 + 15, ((81 - START_HEIGHT) - y_offset) * 30 + 15, base64::encode(id.name.as_bytes()) ); }*/ //add top layer let possible_height = MAX_HEIGHT - (START_HEIGHT + y_offset); //30 is max (TODO: case for if y_offset is more than 30) for (i, obj) in id.obj_list.iter().enumerate() { let y_pos = (i as u16) % possible_height + START_HEIGHT + y_offset; let x_pos = (i as f64 / possible_height as f64).floor() as u32 + x_offset; obj_string += &serialize_obj(obj.clone(), x_pos, y_pos); } if !id.obj_list.is_empty() { current_x += (id.obj_list.len() as f64 / possible_height as f64).floor() as u32 + 1; } //add all children for (i, func_id) in func_ids.iter().enumerate() { if func_id.parent == Some(id_index) { let (child_string, new_length) = serialize_func_id(i, func_ids.clone(), current_x + x_offset, y_offset + 1); obj_string += &child_string; if new_length > 0 { current_x += new_length + 1; } } } (obj_string, current_x) } let 
mut full_obj_string = String::new(); let mut current_x = 0; for (i, func_id) in func_ids.iter().enumerate() { if func_id.parent == None { let (obj_string, new_length) = serialize_func_id(i, func_ids.clone(), current_x, 0); full_obj_string += &obj_string; current_x += new_length; } } full_obj_string } */ //</OLD> use libflate::{gzip, zlib}; use std::io::Read; fn xor(data: Vec<u8>, key: u8) -> Vec<u8> { let mut new_data = Vec::new(); for b in data { //let new_byte = u64::from(b).pow(key); new_data.push(b ^ key) } new_data } fn base_64_decrypt(encoded: Vec<u8>) -> Vec<u8> { let mut new_data = encoded; while new_data.len() % 4 != 0 { new_data.push(b'=') } base64::decode(String::from_utf8(new_data).unwrap().as_str()).unwrap() } use quick_xml::events::{BytesText, Event}; use quick_xml::Reader; //use std::io::BufReader; fn decrypt_savefile(mut sf: Vec<u8>) -> Result<Vec<u8>, String> { if cfg!(target_os = "macos") { use aes::Aes256; use block_modes::block_padding::Pkcs7; use block_modes::{BlockMode, Ecb}; const IOS_KEY: &[u8] = &[ 0x69, 0x70, 0x75, 0x39, 0x54, 0x55, 0x76, 0x35, 0x34, 0x79, 0x76, 0x5D, 0x69, 0x73, 0x46, 0x4D, 0x68, 0x35, 0x40, 0x3B, 0x74, 0x2E, 0x35, 0x77, 0x33, 0x34, 0x45, 0x32, 0x52, 0x79, 0x40, 0x7B, ]; type AesEcb = Ecb<Aes256, Pkcs7>; // re-create cipher mode instance let cipher = AesEcb::new_var(IOS_KEY, &[]).unwrap(); Ok(match cipher.decrypt(&mut sf) { Ok(v) => v, Err(e) => return Err(format!("{}", e)), } .to_vec()) } else { let xor = xor(sf.to_vec(), 11); let replaced = String::from_utf8_lossy(&xor) .replace("-", "+") .replace("_", "/") .replace("\0", ""); let b64 = match base64::decode(replaced.as_str()) { Ok(b) => b, Err(e) => return Err(format!("{}", e)), }; let mut decoder = gzip::Decoder::new(&b64[..]).unwrap(); let mut data = Vec::new(); decoder.read_to_end(&mut data).unwrap(); Ok(data) } } pub fn get_level_string(ls: Vec<u8>, level_name: Option<String>) -> Result<String, String> { //decrypting the savefile let content = decrypt_savefile(ls)?; let string_content = String::from_utf8_lossy(&content); let mut reader = Reader::from_str(&string_content); reader.trim_text(true); let mut buf = Vec::new(); // The `Reader` does not implement `Iterator` because it outputs borrowed data (`Cow`s) let mut level_string = String::new(); let mut k4_detected = false; let mut k2_detected = false; let mut level_detected = false; loop { match reader.read_event(&mut buf) { // unescape and decode the text event using the reader encoding Ok(Event::Text(e)) => { let text = e.unescape_and_decode(&reader).unwrap(); if text == "k2" { k2_detected = true; if level_detected { return Err( "Level is not initialized! Please open the level, place some objects, then save and quit to initialize the level." .to_string() ); } } else if k2_detected { if let Some(level_name) = level_name.clone() { if text == level_name { level_detected = true } } else { level_detected = true } k2_detected = false } if level_detected && text == "k4" { k4_detected = true } else if k4_detected { level_string = text; break; } } Ok(Event::Eof) => break, // exits the loop when reaching end of file Err(e) => panic!("Error at position {}: {:?}", reader.buffer_position(), e), _ => (), // There are several other `Event`s we do not consider here } // if we don't keep a borrow elsewhere, we can clear the buffer to keep memory usage low buf.clear(); } buf.clear(); if level_detected && !k4_detected { return Err( "Level is not initialized! Please open the level, place some objects, then save and quit to initialize the level." 
.to_string() ); } else if !k4_detected { if let Some(level_name) = level_name { return Err(format!("Level named \"{}\" was not found!", level_name)); } else { return Err( "No level found! Please create a level for SPWN to operate on!".to_string(), ); } } /*let mut k4_detected = false; for token in xmlparser::Tokenizer::from(String::from_utf8(buf).unwrap().as_str()) { if let xmlparser::Token::Text { text } = token.unwrap() { if k4_detected { level_string = text.as_str().to_string(); break; } if text.as_str() == "k4" { k4_detected = true; } } }*/ //decrypting level string let ls_b64 = base_64_decrypt( level_string .replace("-", "+") .replace("_", "/") .replace("\0", "") .as_bytes() .to_vec(), ); //println!("{}", String::from_utf8(ls_b64.clone()).unwrap()); let mut ls_decoder = gzip::Decoder::new(&ls_b64[..]).unwrap(); let mut ls_buf = Vec::new(); ls_decoder.read_to_end(&mut ls_buf).unwrap(); Ok(String::from_utf8(ls_buf).unwrap()) } use quick_xml::Writer; use std::fs; use std::io::Cursor; use std::path::PathBuf; pub fn encrypt_level_string( ls: String, old_ls: String, path: PathBuf, level_name: Option<String>, ) -> Result<(), String> { let mut file = fs::File::open(path.clone()).unwrap(); let mut file_content = Vec::new(); file.read_to_end(&mut file_content).unwrap(); //decrypting the savefile let content = decrypt_savefile(file_content)?; let string_content = String::from_utf8_lossy(&content); let mut reader = Reader::from_str(&string_content); reader.trim_text(true); let mut writer = Writer::new(Cursor::new(Vec::new())); let mut buf = Vec::new(); let mut k4_detected = false; let mut done = false; let mut k2_detected = false; let mut level_detected = false; //println!("{}", old_ls); let full_ls = old_ls + &ls; loop { match reader.read_event(&mut buf) { // unescape and decode the text event using the reader encoding Ok(Event::Text(e)) => { let text = e.unescape_and_decode(&reader).unwrap(); if k4_detected && level_detected { let encrypted_ls: String = { let mut ls_encoder = gzip::Encoder::new(Vec::new()).unwrap(); ls_encoder.write_all(full_ls.as_bytes()).unwrap(); let b64_encrypted = base64::encode(&ls_encoder.finish().into_result().unwrap()); let fin = b64_encrypted.replace("+", "-").replace("/", "_"); "H4sIAAAAAAAAC".to_string() + &fin[13..] 
}; assert!(writer .write_event(Event::Text(BytesText::from_plain_str(&encrypted_ls))) .is_ok()); done = true; k4_detected = false; } else { if k4_detected { k4_detected = false; } assert!(writer.write_event(Event::Text(e)).is_ok()); if k2_detected { if let Some(level_name) = &level_name { if level_name == &text { level_detected = true; println!("Writing to level: {}", text); } } else { level_detected = true; println!("Writing to level: {}", text); } k2_detected = false; } } if !done && text == "k4" { k4_detected = true } if !done && text == "k2" { k2_detected = true } } Ok(Event::Eof) => break, // exits the loop when reaching end of file Err(e) => panic!("Error at position {}: {:?}", reader.buffer_position(), e), Ok(e) => assert!(writer.write_event(e).is_ok()), } // if we don't keep a borrow elsewhere, we can clear the buffer to keep memory usage low buf.clear(); } let bytes = writer.into_inner().into_inner(); //encrypt level save use std::io::Write; if cfg!(target_os = "macos") { use aes::Aes256; use block_modes::block_padding::Pkcs7; use block_modes::{BlockMode, Ecb}; const IOS_KEY: &[u8] = &[ 0x69, 0x70, 0x75, 0x39, 0x54, 0x55, 0x76, 0x35, 0x34, 0x79, 0x76, 0x5D, 0x69, 0x73, 0x46, 0x4D, 0x68, 0x35, 0x40, 0x3B, 0x74, 0x2E, 0x35, 0x77, 0x33, 0x34, 0x45, 0x32, 0x52, 0x79, 0x40, 0x7B, ]; type AesEcb = Ecb<Aes256, Pkcs7>; // re-create cipher mode instance let cipher = AesEcb::new_var(IOS_KEY, &[]).unwrap(); let fin = cipher.encrypt_vec(&bytes); assert!(fs::write(path, fin).is_ok()); } else { let mut encoder = zlib::Encoder::new(Vec::new()).unwrap(); encoder.write_all(&bytes).unwrap(); let compressed = encoder.finish().into_result().unwrap(); use crc32fast::Hasher; let mut hasher = Hasher::new(); hasher.update(&bytes); let checksum = hasher.finalize(); let data_size = bytes.len() as u32; let mut with_signature = b"\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\x0b".to_vec(); with_signature.extend(&compressed[2..compressed.len() - 4]); with_signature.extend(checksum.to_le_bytes().to_vec()); with_signature.extend(data_size.to_le_bytes().to_vec()); let encoded = base64::encode(&with_signature) .replace("+", "-") .replace("/", "_") .as_bytes() .to_vec(); let fin = xor(encoded, 11); assert!(fs::write(path, fin).is_ok()); } Ok(()) }
//! Platform-independent traits. Submodules with backends will be selectable
//! via cargo features in the future.

mod sound_sdl;
pub use self::sound_sdl::SoundSdl;

use zx::sound::sample::SoundSample;

// default sample type
pub type ZXSample = SoundSample<f32>;

pub trait SoundDevice {
    // blocking function to send a new sample
    fn send_sample(&mut self, sample: ZXSample);
}
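// Editor's sketch: the simplest possible backend, showing the trait contract.
// A real device blocks in `send_sample` until the audio queue has room; this
// in-memory stand-in (hypothetical, e.g. for tests) just records what it was
// sent.
pub struct BufferDevice {
    samples: Vec<ZXSample>,
}

impl SoundDevice for BufferDevice {
    fn send_sample(&mut self, sample: ZXSample) {
        self.samples.push(sample);
    }
}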
//! Shim which is passed to Cargo as "rustdoc" when running the bootstrap.
//!
//! See comments in `src/bootstrap/rustc.rs` for more information.

#![deny(warnings)]

extern crate bootstrap;

use std::env;
use std::process::Command;
use std::path::PathBuf;

fn main() {
    let args = env::args_os().skip(1).collect::<Vec<_>>();
    let rustdoc = env::var_os("RUSTDOC_REAL").expect("RUSTDOC_REAL was not set");
    let libdir = env::var_os("RUSTDOC_LIBDIR").expect("RUSTDOC_LIBDIR was not set");
    let stage = env::var("RUSTC_STAGE").expect("RUSTC_STAGE was not set");
    let sysroot = env::var_os("RUSTC_SYSROOT").expect("RUSTC_SYSROOT was not set");

    use std::str::FromStr;

    let verbose = match env::var("RUSTC_VERBOSE") {
        Ok(s) => usize::from_str(&s).expect("RUSTC_VERBOSE should be an integer"),
        Err(_) => 0,
    };

    let mut dylib_path = bootstrap::util::dylib_path();
    dylib_path.insert(0, PathBuf::from(libdir.clone()));

    //FIXME(misdreavus): once stdsimd uses cfg(rustdoc) instead of cfg(dox), remove the `--cfg dox`
    //arguments here
    let mut cmd = Command::new(rustdoc);
    cmd.args(&args)
        .arg("--cfg")
        .arg(format!("stage{}", stage))
        .arg("--cfg")
        .arg("dox")
        .arg("--sysroot")
        .arg(sysroot)
        .env(bootstrap::util::dylib_path_var(),
             env::join_paths(&dylib_path).unwrap());

    // Force all crates compiled by this compiler to (a) be unstable and (b)
    // allow the `rustc_private` feature to link to other unstable crates
    // also in the sysroot.
    if env::var_os("RUSTC_FORCE_UNSTABLE").is_some() {
        cmd.arg("-Z").arg("force-unstable-if-unmarked");
    }
    if let Some(linker) = env::var_os("RUSTC_TARGET_LINKER") {
        cmd.arg("--linker").arg(linker).arg("-Z").arg("unstable-options");
    }

    // Bootstrap's Cargo-command builder sets this variable to the current Rust version; let's pick
    // it up so we can make rustdoc print this into the docs
    if let Some(version) = env::var_os("RUSTDOC_CRATE_VERSION") {
        // This "unstable-options" can be removed when `--crate-version` is stabilized
        cmd.arg("-Z")
            .arg("unstable-options")
            .arg("--crate-version").arg(version);
    }

    if verbose > 1 {
        eprintln!("rustdoc command: {:?}", cmd);
        eprintln!("libdir: {:?}", libdir);
    }

    std::process::exit(match cmd.status() {
        Ok(s) => s.code().unwrap_or(1),
        Err(e) => panic!("\n\nfailed to run {:?}: {}\n\n", cmd, e),
    })
}
// Copyright 2020 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! This module allows registering callbacks on certain events. To add a custom
//! callback, simply implement the corresponding trait and add it to the init function.

extern crate hyper;
extern crate hyper_rustls;
extern crate tokio;

use crate::chain::BlockStatus;
use crate::common::types::{ServerConfig, WebHooksConfig};
use crate::core::core;
use crate::core::core::hash::Hashed;
use crate::p2p::types::PeerAddr;
use futures::future::Future;
use hyper::client::HttpConnector;
use hyper::header::HeaderValue;
use hyper::Client;
use hyper::{Body, Method, Request};
use hyper_rustls::HttpsConnector;
use serde::Serialize;
use serde_json::{json, to_string};
use std::time::Duration;
use tokio::runtime::Runtime;

/// Returns the list of event hooks that will be initialized for network events
pub fn init_net_hooks(config: &ServerConfig) -> Vec<Box<dyn NetEvents + Send + Sync>> {
    let mut list: Vec<Box<dyn NetEvents + Send + Sync>> = Vec::new();
    list.push(Box::new(EventLogger));
    if config.webhook_config.block_received_url.is_some()
        || config.webhook_config.tx_received_url.is_some()
        || config.webhook_config.header_received_url.is_some()
    {
        list.push(Box::new(WebHook::from_config(&config.webhook_config)));
    }
    list
}

/// Returns the list of event hooks that will be initialized for chain events
pub fn init_chain_hooks(config: &ServerConfig) -> Vec<Box<dyn ChainEvents + Send + Sync>> {
    let mut list: Vec<Box<dyn ChainEvents + Send + Sync>> = Vec::new();
    list.push(Box::new(EventLogger));
    if config.webhook_config.block_accepted_url.is_some() {
        list.push(Box::new(WebHook::from_config(&config.webhook_config)));
    }
    list
}

#[allow(unused_variables)]
/// Trait to be implemented by Network Event Hooks
pub trait NetEvents {
    /// Triggers when a new transaction arrives
    fn on_transaction_received(&self, tx: &core::Transaction) {}

    /// Triggers when a new block arrives
    fn on_block_received(&self, block: &core::Block, addr: &PeerAddr) {}

    /// Triggers when a new block header arrives
    fn on_header_received(&self, header: &core::BlockHeader, addr: &PeerAddr) {}
}

#[allow(unused_variables)]
/// Trait to be implemented by Chain Event Hooks
pub trait ChainEvents {
    /// Triggers when a new block is accepted by the chain (might be a Reorg or a Fork)
    fn on_block_accepted(&self, block: &core::Block, status: &BlockStatus) {}
}

/// Basic Logger
struct EventLogger;

impl NetEvents for EventLogger {
    fn on_transaction_received(&self, tx: &core::Transaction) {
        debug!(
            "Received tx {}, [in/out/kern: {}/{}/{}] going to process.",
            tx.hash(),
            tx.inputs().len(),
            tx.outputs().len(),
            tx.kernels().len(),
        );
    }

    fn on_block_received(&self, block: &core::Block, addr: &PeerAddr) {
        debug!(
            "Received block {} at {} from {} [in/out/kern: {}/{}/{}] going to process.",
            block.hash(),
            block.header.height,
            addr,
            block.inputs().len(),
            block.outputs().len(),
            block.kernels().len(),
        );
    }

    fn on_header_received(&self, header: &core::BlockHeader, addr: &PeerAddr) {
        debug!(
            "Received block header {} at {} from {}, going to process.",
            header.hash(),
            header.height,
            addr
        );
    }
}

impl ChainEvents for EventLogger {
    fn on_block_accepted(&self, block: &core::Block, status: &BlockStatus) {
        match status {
            BlockStatus::Reorg(depth) => {
                warn!(
                    "block_accepted (REORG!): {:?} at {} (depth: {}, diff: {})",
                    block.hash(),
                    block.header.height,
                    depth,
                    block.header.total_difficulty(),
                );
            }
            BlockStatus::Fork => {
                debug!(
                    "block_accepted (fork?): {:?} at {} (diff: {})",
                    block.hash(),
                    block.header.height,
                    block.header.total_difficulty(),
                );
            }
            BlockStatus::Next => {
                debug!(
                    "block_accepted (head+): {:?} at {} (diff: {})",
                    block.hash(),
                    block.header.height,
                    block.header.total_difficulty(),
                );
            }
        }
    }
}

fn parse_url(value: &Option<String>) -> Option<hyper::Uri> {
    match value {
        Some(url) => {
            let uri: hyper::Uri = match url.parse() {
                Ok(value) => value,
                Err(_) => panic!("Invalid url : {}", url),
            };
            let scheme = uri.scheme_part().map(|s| s.as_str());
            if (scheme != Some("http")) && (scheme != Some("https")) {
                panic!(
                    "Invalid url scheme {}, expected one of ['http', 'https']",
                    url
                )
            };
            Some(uri)
        }
        None => None,
    }
}

/// A struct that holds the hyper/tokio runtime.
struct WebHook {
    /// url to POST transaction data when a new transaction arrives from a peer
    tx_received_url: Option<hyper::Uri>,
    /// url to POST header data when a new header arrives from a peer
    header_received_url: Option<hyper::Uri>,
    /// url to POST block data when a new block arrives from a peer
    block_received_url: Option<hyper::Uri>,
    /// url to POST block data when a new block is accepted by our node (might be a reorg or a fork)
    block_accepted_url: Option<hyper::Uri>,
    /// The hyper client to be used for all requests
    client: Client<HttpsConnector<HttpConnector>>,
    /// The tokio event loop
    runtime: Runtime,
}

impl WebHook {
    /// Instantiates a Webhook struct
    fn new(
        tx_received_url: Option<hyper::Uri>,
        header_received_url: Option<hyper::Uri>,
        block_received_url: Option<hyper::Uri>,
        block_accepted_url: Option<hyper::Uri>,
        nthreads: u16,
        timeout: u16,
    ) -> WebHook {
        let keep_alive = Duration::from_secs(timeout as u64);

        info!(
            "Spawning {} threads for webhooks (timeout set to {} secs)",
            nthreads, timeout
        );

        let https = HttpsConnector::new(nthreads as usize);
        let client = Client::builder()
            .keep_alive_timeout(keep_alive)
            .build::<_, hyper::Body>(https);

        WebHook {
            tx_received_url,
            block_received_url,
            header_received_url,
            block_accepted_url,
            client,
            runtime: Runtime::new().unwrap(),
        }
    }

    /// Instantiates a Webhook struct from a configuration file
    fn from_config(config: &WebHooksConfig) -> WebHook {
        WebHook::new(
            parse_url(&config.tx_received_url),
            parse_url(&config.header_received_url),
            parse_url(&config.block_received_url),
            parse_url(&config.block_accepted_url),
            config.nthreads,
            config.timeout,
        )
    }

    fn post(&self, url: hyper::Uri, data: String) {
        let mut req = Request::new(Body::from(data));
        *req.method_mut() = Method::POST;
        *req.uri_mut() = url.clone();
        req.headers_mut().insert(
            hyper::header::CONTENT_TYPE,
            HeaderValue::from_static("application/json"),
        );

        let future = self
            .client
            .request(req)
            .map(|_res| {})
            .map_err(move |_res| {
                warn!("Error sending POST request to {}", url);
            });

        let handle = self.runtime.executor();
        handle.spawn(future);
    }

    fn make_request<T: Serialize>(&self, payload: &T, uri: &Option<hyper::Uri>) -> bool {
        if let Some(url) = uri {
            let payload = match to_string(payload) {
                Ok(serialized) => serialized,
                Err(_) => {
                    return false; // print error message
                }
            };
            self.post(url.clone(), payload);
        }
        true
    }
}

impl ChainEvents for WebHook {
    fn on_block_accepted(&self, block: &core::Block, status: &BlockStatus) {
        let status_str = match status {
            BlockStatus::Reorg(_) => "reorg",
            BlockStatus::Fork => "fork",
            BlockStatus::Next => "head",
        };

        // Add an additional `depth` field to the JSON in case of reorg
        let payload = if let BlockStatus::Reorg(depth) = status {
            json!({
                "hash": block.header.hash().to_hex(),
                "status": status_str,
                "data": block,
                "depth": depth
            })
        } else {
            json!({
                "hash": block.header.hash().to_hex(),
                "status": status_str,
                "data": block
            })
        };

        if !self.make_request(&payload, &self.block_accepted_url) {
            error!(
                "Failed to serialize block {} at height {}",
                block.hash(),
                block.header.height
            );
        }
    }
}

impl NetEvents for WebHook {
    /// Triggers when a new transaction arrives
    fn on_transaction_received(&self, tx: &core::Transaction) {
        let payload = json!({
            "hash": tx.hash().to_hex(),
            "data": tx
        });

        if !self.make_request(&payload, &self.tx_received_url) {
            error!("Failed to serialize transaction {}", tx.hash());
        }
    }

    /// Triggers when a new block arrives
    fn on_block_received(&self, block: &core::Block, addr: &PeerAddr) {
        let payload = json!({
            "hash": block.header.hash().to_hex(),
            "peer": addr,
            "data": block
        });

        if !self.make_request(&payload, &self.block_received_url) {
            error!(
                "Failed to serialize block {} at height {}",
                block.hash().to_hex(),
                block.header.height
            );
        }
    }

    /// Triggers when a new block header arrives
    fn on_header_received(&self, header: &core::BlockHeader, addr: &PeerAddr) {
        let payload = json!({
            "hash": header.hash().to_hex(),
            "peer": addr,
            "data": header
        });

        if !self.make_request(&payload, &self.header_received_url) {
            error!(
                "Failed to serialize header {} at height {}",
                header.hash(),
                header.height
            );
        }
    }
}
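// Editor's sketch: the wire format produced above. `on_block_received`, for
// example, POSTs JSON of this shape; the values here are invented
// placeholders, and `data` carries the fully serialized block.
#[allow(dead_code)]
fn example_block_received_payload() -> serde_json::Value {
    json!({
        "hash": "0a1b2c",
        "peer": "192.168.0.1:3414",
        "data": {}
    })
}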
mod starwars;

use async_graphql::http::{playground_source, GraphQLPlaygroundConfig};
use async_graphql::{EmptyMutation, EmptySubscription, Request, Response, Schema};
use axum::response::IntoResponse;
use axum::{prelude::*, AddExtensionLayer};
use starwars::{QueryRoot, StarWars, StarWarsSchema};

async fn graphql_handler(
    schema: extract::Extension<StarWarsSchema>,
    req: extract::Json<Request>,
) -> response::Json<Response> {
    schema.execute(req.0).await.into()
}

async fn graphql_playground() -> impl IntoResponse {
    response::Html(playground_source(GraphQLPlaygroundConfig::new("/")))
}

#[tokio::main]
async fn main() {
    let schema = Schema::build(QueryRoot, EmptyMutation, EmptySubscription)
        .data(StarWars::new())
        .finish();

    let app = route("/", get(graphql_playground).post(graphql_handler))
        .layer(AddExtensionLayer::new(schema));

    println!("Playground: http://localhost:3000");

    axum::Server::bind(&"0.0.0.0:3000".parse().unwrap())
        .serve(app.into_make_service())
        .await
        .unwrap();
}
extern crate skimmer;

use model::{ EncodedString, Model, Node, Rope, Renderer, Tagged, TaggedValue };

use std::any::Any;
use std::borrow::Cow;
use std::iter::Iterator;

pub static TAG: &'static str = "tag:yaml.org,2002:yaml";

#[derive (Clone, Copy)]
pub struct Yaml;

impl Yaml {
    pub fn get_tag () -> Cow<'static, str> { Cow::from (TAG) }

    /*
    pub fn new (cset: &CharSet<Char, DoubleChar>) -> Yaml<Char, DoubleChar> {
        Yaml {
            encoding: cset.encoding,
            marker_tag: cset.exclamation,
            marker_alias: cset.asterisk,
            marker_anchor: cset.ampersand,
            s_quote: cset.apostrophe,
            d_quote: cset.quotation,
            _dchr: PhantomData
        }
    }
    */
}

impl Model for Yaml {
    fn get_tag (&self) -> Cow<'static, str> { Self::get_tag () }

    fn as_any (&self) -> &Any { self }

    fn as_mut_any (&mut self) -> &mut Any { self }

    fn is_decodable (&self) -> bool { true }

    fn is_encodable (&self) -> bool { true }

    fn encode (&self, _renderer: &Renderer, value: TaggedValue, _tags: &mut Iterator<Item=&(Cow<'static, str>, Cow<'static, str>)>) -> Result<Rope, TaggedValue> {
        match <TaggedValue as Into<Result<YamlValue, TaggedValue>>>::into (value) {
            Ok (yp) => Ok (Rope::from (Node::String (EncodedString::from (match yp {
                YamlValue::Alias => "*".as_bytes (), // self.marker_alias.new_vec (),
                YamlValue::Anchor => "&".as_bytes (), // self.marker_anchor.new_vec (),
                YamlValue::Tag => "!".as_bytes () // self.marker_tag.new_vec ()
            })))),
            Err (value) => Err (value)
        }
    }

    fn decode (&self, explicit: bool, value: &[u8]) -> Result<TaggedValue, ()> {
        // let vlen = value.len ();
        let mut ptr = 0;
        let mut quote_state = 0; // 1 - single, 2 - double

        if explicit {
            match value.get (ptr).map (|b| *b) {
                Some (b'\'') => {
                    ptr += 1;
                    quote_state = 1;
                }
                Some (b'"') => {
                    ptr += 1;
                    quote_state = 2;
                }
                _ => ()
            }
            /*
            if self.s_quote.contained_at (value, 0) {
                ptr += self.s_quote.len ();
                quote_state = 1;
            } else if self.d_quote.contained_at (value, 0) {
                ptr += self.d_quote.len ();
                quote_state = 2;
            }
            */
        }

        let val = match value.get (ptr).map (|b| *b) {
            Some (b'*') => {
                ptr += 1;
                YamlValue::Alias
            }
            Some (b'&') => {
                ptr += 1;
                YamlValue::Anchor
            }
            Some (b'!') => {
                ptr += 1;
                YamlValue::Tag
            }
            _ => return Err ( () )
        };
        /*
        let val = if self.marker_tag.contained_at (value, ptr) {
            ptr += self.marker_tag.len ();
            YamlValue::Tag
        } else if self.marker_alias.contained_at (value, ptr) {
            ptr += self.marker_alias.len ();
            YamlValue::Alias
        } else if self.marker_anchor.contained_at (value, ptr) {
            ptr += self.marker_anchor.len ();
            YamlValue::Anchor
        } else { return Err ( () ) };
        */

        if quote_state > 0 {
            match value.get (ptr).map (|b| *b) {
                Some (b'\'') | Some (b'"') => {
                    ptr += 1;
                }
                _ => return Err ( () )
            }
            /*
            if quote_state == 1 && self.s_quote.contained_at (value, ptr) {
                ptr += self.s_quote.len ();
            } else if quote_state == 2 && self.d_quote.contained_at (value, ptr) {
                ptr += self.d_quote.len ();
            } else { return Err ( () ) }
            */
        }

        if value.len () > ptr { return Err ( () ) }

        Ok ( TaggedValue::from (val) )
    }
}

#[derive (Debug)]
pub enum YamlValue {
    Alias,
    Anchor,
    Tag
}

impl Tagged for YamlValue {
    fn get_tag (&self) -> Cow<'static, str> { Cow::from (TAG) }

    fn as_any (&self) -> &Any { self as &Any }

    fn as_mut_any (&mut self) -> &mut Any { self as &mut Any }
}

impl AsRef<str> for YamlValue {
    fn as_ref (&self) -> &'static str {
        match *self {
            YamlValue::Alias => "*",
            YamlValue::Anchor => "&",
            YamlValue::Tag => "!"
        }
    }
}

#[cfg (all (test, not (feature = "dev")))]
mod tests {
    use super::*;

    use model::{ Tagged, Renderer };
    // use txt::get_charset_utf8;

    use std::iter;

    #[test]
    fn tag () {
        // let yaml = YamlFactory.build_model (&get_charset_utf8 ());
        let yaml = Yaml; // ::new (&get_charset_utf8 ());

        assert_eq! (yaml.get_tag (), TAG);
    }

    #[test]
    fn encode () {
        let renderer = Renderer; // ::new (&get_charset_utf8 ());
        let yaml = Yaml; // ::new (&get_charset_utf8 ());

        assert_eq! (yaml.encode (&renderer, TaggedValue::from (YamlValue::Tag), &mut iter::empty ()).ok ().unwrap ().render (&renderer), vec! [b'!']);
        assert_eq! (yaml.encode (&renderer, TaggedValue::from (YamlValue::Alias), &mut iter::empty ()).ok ().unwrap ().render (&renderer), vec! [b'*']);
        assert_eq! (yaml.encode (&renderer, TaggedValue::from (YamlValue::Anchor), &mut iter::empty ()).ok ().unwrap ().render (&renderer), vec! [b'&']);
    }

    #[test]
    fn decode () {
        let yaml = Yaml; // ::new (&get_charset_utf8 ());

        if let Ok (tagged) = yaml.decode (true, "!".as_bytes ()) {
            assert_eq! (tagged.get_tag (), Cow::from (TAG));

            if let Some (&YamlValue::Tag) = tagged.as_any ().downcast_ref::<YamlValue> () {} else { assert! (false) }
        } else { assert! (false) }

        if let Ok (tagged) = yaml.decode (true, "*".as_bytes ()) {
            assert_eq! (tagged.get_tag (), Cow::from (TAG));

            if let Some (&YamlValue::Alias) = tagged.as_any ().downcast_ref::<YamlValue> () {} else { assert! (false) }
        } else { assert! (false) }

        if let Ok (tagged) = yaml.decode (true, "&".as_bytes ()) {
            assert_eq! (tagged.get_tag (), Cow::from (TAG));

            if let Some (&YamlValue::Anchor) = tagged.as_any ().downcast_ref::<YamlValue> () {} else { assert! (false) }
        } else { assert! (false) }

        assert! (yaml.decode (true, "=".as_bytes ()).is_err ());
    }
}
use crate::config::CargoConfig; use crate::dependencies::resolve; use crate::dh_installsystemd; use crate::error::*; use crate::listener::Listener; use crate::ok_or::OkOrThen; use crate::util::read_file_to_bytes; use rayon::prelude::*; use serde::Deserialize; use std::borrow::Cow; use std::collections::{HashMap, HashSet}; use std::convert::From; use std::env::consts::{DLL_PREFIX, DLL_SUFFIX}; use std::fs; use std::path::{Path, PathBuf}; use std::process::Command; fn is_glob_pattern(s: &str) -> bool { s.contains('*') || s.contains('[') || s.contains(']') || s.contains('!') } #[derive(Debug, Clone)] pub enum AssetSource { /// Copy file from the path (and strip binary if needed). Path(PathBuf), /// Write data to destination as-is. Data(Vec<u8>), } impl AssetSource { #[must_use] pub fn path(&self) -> Option<&Path> { match *self { AssetSource::Path(ref p) => Some(p), _ => None, } } #[must_use] pub fn len(&self) -> Option<u64> { match *self { // FIXME: may not be accurate if the executable is not stripped yet? AssetSource::Path(ref p) => fs::metadata(p).ok().map(|m| m.len()), AssetSource::Data(ref d) => Some(d.len() as u64), } } pub fn data(&self) -> CDResult<Cow<'_, [u8]>> { Ok(match *self { AssetSource::Path(ref p) => { let data = read_file_to_bytes(p) .map_err(|e| CargoDebError::IoFile("unable to read asset to add to archive", e, p.to_owned()))?; Cow::Owned(data) }, AssetSource::Data(ref d) => { Cow::Borrowed(d) }, }) } /// Return the file that will hold debug symbols for this asset. /// This is just `<original-file>.debug` #[must_use] pub fn debug_source(&self) -> Option<PathBuf> { match *self { AssetSource::Path(ref p) => Some(debug_filename(p)), _ => None, } } } /// Configuration settings for the systemd_units functionality. /// /// `unit_scripts`: (optional) relative path to a directory containing correctly /// named systemd unit files. See `dh_lib::pkgfile()` and `dh_installsystemd.rs` /// for more details on file naming. If not supplied, defaults to the /// `maintainer_scripts` directory. /// /// `unit_name`: (optjonal) in cases where the `unit_scripts` directory contains /// multiple units, only process those matching this unit name. /// /// For details on the other options please see `dh_installsystemd::Options`. #[derive(Clone, Debug, Deserialize, Default)] #[serde(rename_all = "kebab-case", deny_unknown_fields)] pub(crate) struct SystemdUnitsConfig { pub unit_scripts: Option<PathBuf>, pub unit_name: Option<String>, pub enable: Option<bool>, pub start: Option<bool>, pub restart_after_upgrade: Option<bool>, pub stop_on_upgrade: Option<bool>, } /// Match the official dh_installsystemd defaults and rename the confusing /// dh_installsystemd option names to be consistently positive rather than /// mostly, but not always, negative. 
impl From<&SystemdUnitsConfig> for dh_installsystemd::Options { fn from(config: &SystemdUnitsConfig) -> Self { Self { no_enable: !config.enable.unwrap_or(true), no_start: !config.start.unwrap_or(true), restart_after_upgrade: config.restart_after_upgrade.unwrap_or(true), no_stop_on_upgrade: !config.stop_on_upgrade.unwrap_or(true), } } } #[derive(Debug, Clone)] pub(crate) struct Assets { pub unresolved: Vec<UnresolvedAsset>, pub resolved: Vec<Asset>, } impl Assets { fn new() -> Assets { Assets { unresolved: vec![], resolved: vec![], } } fn with_resolved_assets(assets: Vec<Asset>) -> Assets { Assets { unresolved: vec![], resolved: assets, } } fn with_unresolved_assets(assets: Vec<UnresolvedAsset>) -> Assets { Assets { unresolved: assets, resolved: vec![], } } fn is_empty(&self) -> bool { self.unresolved.is_empty() && self.resolved.is_empty() } } #[derive(Debug, Clone)] pub struct UnresolvedAsset { pub source_path: PathBuf, pub target_path: PathBuf, pub chmod: u32, pub is_built: bool, } #[derive(Debug, Clone)] pub struct Asset { pub source: AssetSource, pub target_path: PathBuf, pub chmod: u32, is_built: bool, } impl Asset { #[must_use] pub fn new(source: AssetSource, mut target_path: PathBuf, chmod: u32, is_built: bool) -> Self { // is_dir() is only for paths that exist if target_path.to_string_lossy().ends_with('/') { let file_name = source.path().and_then(|p| p.file_name()).expect("source must be a file"); target_path = target_path.join(file_name); } if target_path.is_absolute() || target_path.has_root() { target_path = target_path.strip_prefix("/").expect("no root dir").to_owned(); } Self { source, target_path, chmod, is_built, } } fn is_executable(&self) -> bool { 0 != (self.chmod & 0o111) } fn is_dynamic_library(&self) -> bool { self.target_path.file_name() .and_then(|f| f.to_str()) .map_or(false, |f| f.ends_with(DLL_SUFFIX)) } /// Returns the target path for the debug symbol file, which will be /// /usr/lib/debug/<path-to-executable>.debug #[must_use] pub fn debug_target(&self) -> Option<PathBuf> { if self.is_built { // Turn an absolute path into one relative to "/" let relative = match self.target_path.strip_prefix(Path::new("/")) { Ok(path) => path, Err(_) => self.target_path.as_path(), }; // Prepend the debug location let debug_path = Path::new("/usr/lib/debug").join(relative); // Add `.debug` to the end of the filename Some(debug_filename(&debug_path)) } else { None } } } /// Adds `.debug` to the end of a path to a filename /// fn debug_filename(path: &Path) -> PathBuf { let mut debug_filename = path.as_os_str().to_os_string(); debug_filename.push(".debug"); Path::new(&debug_filename).to_path_buf() } #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] enum ArchSpec { /// e.g. [armhf] Require(String), /// e.g. [!armhf] NegRequire(String), } fn get_architecture_specification(depend: &str) -> CDResult<(String, Option<ArchSpec>)> { use ArchSpec::*; let re = regex::Regex::new(r#"(.*)\[(!?)(.*)\]"#).unwrap(); match re.captures(depend) { Some(caps) => { let spec = if &caps[2] == "!" 
{ NegRequire(caps[3].to_string()) } else { assert_eq!(&caps[2], ""); Require(caps[3].to_string()) }; Ok((caps[1].trim().to_string(), Some(spec))) } None => Ok((depend.to_string(), None)), } } /// Architecture specification strings /// <https://www.debian.org/doc/debian-policy/ch-customized-programs.html#s-arch-spec> fn match_architecture(spec: ArchSpec, target_arch: &str) -> CDResult<bool> { let (neg, spec) = match spec { ArchSpec::NegRequire(pkg) => (true, pkg), ArchSpec::Require(pkg) => (false, pkg), }; let output = Command::new("dpkg-architecture") .args(&["-a", target_arch, "-i", &spec]) .output() .map_err(|e| CargoDebError::CommandFailed(e, "dpkg-architecture"))?; if neg { Ok(!output.status.success()) } else { Ok(output.status.success()) } } #[derive(Debug)] /// Cargo deb configuration read from the manifest and cargo metadata pub struct Config { /// Root directory where `Cargo.toml` is located. It's a subdirectory in workspaces. pub manifest_dir: PathBuf, /// User-configured output path for *.deb pub deb_output_path: Option<String>, /// Triple. `None` means current machine architecture. pub target: Option<String>, /// `CARGO_TARGET_DIR` pub target_dir: PathBuf, /// The name of the project to build pub name: String, /// The name to give the Debian package; usually the same as the Cargo project name pub deb_name: String, /// The version to give the Debian package; usually the same as the Cargo version pub deb_version: String, /// The software license of the project (SPDX format). pub license: Option<String>, /// The location of the license file pub license_file: Option<PathBuf>, /// number of lines to skip when reading `license_file` pub license_file_skip_lines: usize, /// The copyright of the project /// (Debian's `copyright` file contents). pub copyright: String, pub changelog: Option<String>, /// The homepage URL of the project. pub homepage: Option<String>, /// Documentation URL from `Cargo.toml`. Fallback if `homepage` is missing. pub documentation: Option<String>, /// The URL of the software repository. pub repository: Option<String>, /// A short description of the project. pub description: String, /// An extended description of the project. pub extended_description: Option<String>, /// The maintainer of the Debian package. /// In Debian `control` file `Maintainer` field format. pub maintainer: String, /// The Debian dependencies required to run the project. pub depends: String, /// The Debian pre-dependencies. pub pre_depends: Option<String>, /// The Debian recommended dependencies. pub recommends: Option<String>, /// The Debian suggested dependencies. pub suggests: Option<String>, /// The list of packages this package can enhance. pub enhances: Option<String>, /// The Debian software category to which the package belongs. pub section: Option<String>, /// The Debian priority of the project. Typically 'optional'. pub priority: String, /// `Conflicts` Debian control field. /// /// See [PackageTransition](https://wiki.debian.org/PackageTransition). pub conflicts: Option<String>, /// `Breaks` Debian control field. /// /// See [PackageTransition](https://wiki.debian.org/PackageTransition). pub breaks: Option<String>, /// `Replaces` Debian control field. /// /// See [PackageTransition](https://wiki.debian.org/PackageTransition). pub replaces: Option<String>, /// `Provides` Debian control field. /// /// See [PackageTransition](https://wiki.debian.org/PackageTransition). pub provides: Option<String>, /// The Debian architecture of the target system. 
pub architecture: String, /// A list of configuration files installed by the package. pub conf_files: Option<String>, /// All of the files that are to be packaged. pub(crate) assets: Assets, /// The location of the triggers file pub triggers_file: Option<PathBuf>, /// The path where possible maintainer scripts live pub maintainer_scripts: Option<PathBuf>, /// List of Cargo features to use during build pub features: Vec<String>, pub default_features: bool, /// Should the binary be stripped from debug symbols? pub strip: bool, /// Should the debug symbols be moved to a separate file included in the package? (implies `strip:true`) pub separate_debug_symbols: bool, /// Should symlinks be preserved in the assets pub preserve_symlinks: bool, /// Details of how to install any systemd units pub(crate) systemd_units: Option<SystemdUnitsConfig>, _use_constructor_to_make_this_struct_: (), } impl Config { /// Makes a new config from `Cargo.toml` in the current working directory. /// /// `None` target means the host machine's architecture. pub fn from_manifest(manifest_path: &Path, package_name: Option<&str>, output_path: Option<String>, target: Option<&str>, variant: Option<&str>, deb_version: Option<String>, listener: &dyn Listener) -> CDResult<Config> { let metadata = cargo_metadata(manifest_path)?; let available_package_names = || { metadata.packages.iter() .filter(|p| metadata.workspace_members.iter().any(|w| w == &p.id)) .map(|p| p.name.as_str()) .collect::<Vec<_>>().join(", ") }; let root_package = if let Some(name) = package_name { metadata.packages.iter().find(|p| { p.name == name }) .ok_or_else(|| CargoDebError::PackageNotFoundInWorkspace(name.into(), available_package_names())) } else { metadata.resolve.root.as_ref().and_then(|root_id| { metadata.packages.iter() .find(|p| &p.id == root_id) }) .ok_or_else(|| CargoDebError::NoRootFoundInWorkspace(available_package_names())) }?; let target_dir = Path::new(&metadata.target_directory); let manifest_path = Path::new(&root_package.manifest_path); let manifest_dir = manifest_path.parent().unwrap(); let content = fs::read(&manifest_path) .map_err(|e| CargoDebError::IoFile("unable to read Cargo.toml", e, manifest_path.to_owned()))?; toml::from_slice::<Cargo>(&content)?.into_config(root_package, manifest_dir, output_path, target_dir, target, variant, deb_version, listener) } pub(crate) fn get_dependencies(&self, listener: &dyn Listener) -> CDResult<String> { let mut deps = HashSet::new(); for word in self.depends.split(',') { let word = word.trim(); if word == "$auto" { let bin = self.all_binaries(); let resolved = bin.par_iter() .filter_map(|p| p.path()) .filter_map(|bname| match resolve(bname) { Ok(bindeps) => Some(bindeps), Err(err) => { listener.warning(format!("{} (no auto deps for {})", err, bname.display())); None }, }) .collect::<Vec<_>>(); for dep in resolved.into_iter().flat_map(|s| s.into_iter()) { deps.insert(dep); } } else { let (dep, arch_spec) = get_architecture_specification(&word)?; if let Some(spec) = arch_spec { if match_architecture(spec, &self.architecture)? { deps.insert(dep); } } else { deps.insert(dep); } } } Ok(deps.into_iter().collect::<Vec<_>>().join(", ")) } pub fn resolve_assets(&mut self) -> CDResult<()> { for UnresolvedAsset { source_path, target_path, chmod, is_built } in self.assets.unresolved.drain(..) 
{ let source_prefix: PathBuf = source_path.iter() .take_while(|part| !is_glob_pattern(part.to_str().unwrap())) .collect(); let source_is_glob = is_glob_pattern(source_path.to_str().unwrap()); let file_matches = glob::glob(source_path.to_str().expect("utf8 path"))? // Remove dirs from globs without throwing away errors .map(|entry| { let source_file = entry?; Ok(if source_file.is_dir() { None } else { Some(source_file) }) }) .filter_map(|res| match res { Ok(None) => None, Ok(Some(x)) => Some(Ok(x)), Err(x) => Some(Err(x)), }) .collect::<CDResult<Vec<_>>>()?; // If glob didn't match anything, it's likely an error // as all files should exist when called to resolve if file_matches.is_empty() { return Err(CargoDebError::AssetFileNotFound(source_path)); } for source_file in file_matches { // XXX: how do we handle duplicated assets? let target_file = if source_is_glob { target_path.join(source_file.strip_prefix(&source_prefix).unwrap()) } else { target_path.clone() }; self.assets.resolved.push(Asset::new( AssetSource::Path(source_file), target_file, chmod, is_built, )); } } Ok(()) } pub(crate) fn add_copyright_asset(&mut self) -> CDResult<()> { let copyright_file = crate::data::generate_copyright_asset(self)?; self.assets.resolved.push(Asset::new( AssetSource::Data(copyright_file), Path::new("usr/share/doc") .join(&self.deb_name) .join("copyright"), 0o644, false, )); Ok(()) } pub fn add_debug_assets(&mut self) { let mut assets_to_add: Vec<Asset> = Vec::new(); for asset in self.built_binaries().into_iter().filter(|a| a.source.path().is_some()) { let debug_source = asset.source.debug_source().expect("debug asset"); if debug_source.exists() { let debug_target = asset.debug_target().expect("debug asset"); assets_to_add.push(Asset::new( AssetSource::Path(debug_source), debug_target, 0o644, false, )); } } self.assets.resolved.append(&mut assets_to_add); } fn add_changelog_asset(&mut self) -> CDResult<()> { // The file is autogenerated later if self.changelog.is_some() { if let Some(changelog_file) = crate::data::generate_changelog_asset(self)? { self.assets.resolved.push(Asset::new( AssetSource::Data(changelog_file), Path::new("usr/share/doc") .join(&self.deb_name) .join("changelog.Debian.gz"), 0o644, false, )); } } Ok(()) } fn add_systemd_assets(&mut self) -> CDResult<()> { if let Some(ref config) = self.systemd_units { let units_dir_option = config.unit_scripts.as_ref() .or(self.maintainer_scripts.as_ref()); if let Some(unit_dir) = units_dir_option { let search_path = self.path_in_workspace(unit_dir); let package = &self.name; let unit_name = if let Some(ref unit_name) = config.unit_name { Some(unit_name.as_str()) } else { None }; let units = dh_installsystemd::find_units(&search_path, package, unit_name); for (source, target) in &units { self.assets.resolved.push(Asset::new( AssetSource::Path(source.clone()), target.path.clone(), target.mode, false, )); } } } Ok(()) } /// Executables AND dynamic libraries fn all_binaries(&self) -> Vec<&AssetSource> { self.binaries(false).iter().map(|asset| &asset.source).collect() } /// Executables AND dynamic libraries, but only in `target/release` pub(crate) fn built_binaries(&self) -> Vec<&Asset> { self.binaries(true) } fn binaries(&self, built_only: bool) -> Vec<&Asset> { self.assets .resolved .iter() .filter(|asset| { // Assumes files in build dir which have executable flag set are binaries (!built_only || asset.is_built) && (asset.is_dynamic_library() || asset.is_executable()) }) .collect() } /// Tries to guess type of source control used for the repo URL. 
/// It's a guess, and it won't be 100% accurate, because Cargo suggests using /// user-friendly URLs or webpages instead of tool-specific URL schemes. pub(crate) fn repository_type(&self) -> Option<&str> { if let Some(ref repo) = self.repository { if repo.starts_with("git+") || repo.ends_with(".git") || repo.contains("git@") || repo.contains("github.com") || repo.contains("gitlab.com") { return Some("Git"); } if repo.starts_with("cvs+") || repo.contains("pserver:") || repo.contains("@cvs.") { return Some("Cvs"); } if repo.starts_with("hg+") || repo.contains("hg@") || repo.contains("/hg.") { return Some("Hg"); } if repo.starts_with("svn+") || repo.contains("/svn.") { return Some("Svn"); } return None; } None } pub(crate) fn path_in_build<P: AsRef<Path>>(&self, rel_path: P) -> PathBuf { self.target_dir.join("release").join(rel_path) } pub(crate) fn path_in_workspace<P: AsRef<Path>>(&self, rel_path: P) -> PathBuf { self.manifest_dir.join(rel_path) } /// Store intermediate files here pub(crate) fn deb_temp_dir(&self) -> PathBuf { self.target_dir.join("debian").join(&self.name) } /// Save final .deb here pub(crate) fn deb_output_path(&self, filename: &str) -> PathBuf { if let Some(ref path_str) = self.deb_output_path { let path = Path::new(path_str); if path_str.ends_with('/') || path.is_dir() { path.join(filename) } else { path.to_owned() } } else { self.default_deb_output_dir().join(filename) } } pub(crate) fn default_deb_output_dir(&self) -> PathBuf { self.target_dir.join("debian") } pub(crate) fn cargo_config(&self) -> CDResult<Option<CargoConfig>> { CargoConfig::new(&self.target_dir) } } #[derive(Clone, Debug, Deserialize)] struct Cargo { pub package: cargo_toml::Package<CargoPackageMetadata>, pub profile: Option<cargo_toml::Profiles>, } impl Cargo { /// Convert Cargo.toml/metadata information into the internal config structure /// /// **IMPORTANT**: This function must not create or expect to see any files on disk! /// It's run before the destination directory is cleaned up, and before the build starts!
/// fn into_config( mut self, root_package: &CargoMetadataPackage, manifest_dir: &Path, deb_output_path: Option<String>, target_dir: &Path, target: Option<&str>, variant: Option<&str>, deb_version: Option<String>, listener: &dyn Listener, ) -> CDResult<Config> { // Cargo cross-compiles to a dir let target_dir = if let Some(target) = target { target_dir.join(target) } else { target_dir.to_owned() }; // If we build against a variant use that config and change the package name let mut deb = if let Some(variant) = variant { // Use dash as underscore is not allowed in package names self.package.name = format!("{}-{}", self.package.name, variant); let mut deb = self.package .metadata .take() .and_then(|m| m.deb) .unwrap_or_else(CargoDeb::default); let variant = deb.variants .as_mut() .and_then(|v| v.remove(variant)) .ok_or_else(|| CargoDebError::VariantNotFound(variant.to_string()))?; variant.inherit_from(deb) } else { self.package .metadata .take() .and_then(|m| m.deb) .unwrap_or_else(CargoDeb::default) }; let (license_file, license_file_skip_lines) = self.license_file(deb.license_file.as_ref())?; let readme = self.package.readme.as_ref(); self.check_config(manifest_dir, readme, &deb, listener); let mut config = Config { manifest_dir: manifest_dir.to_owned(), deb_output_path, target: target.map(|t| t.to_string()), target_dir, name: self.package.name.clone(), deb_name: deb.name.take().unwrap_or_else(|| self.package.name.clone()), deb_version: deb_version.unwrap_or(self.version_string(deb.revision)), license: self.package.license.take(), license_file, license_file_skip_lines, copyright: deb.copyright.take().ok_or_then(|| { if self.package.authors.is_empty() { return Err("The package must have a copyright or authors property".into()); } Ok(self.package.authors.join(", ")) })?, homepage: self.package.homepage.clone(), documentation: self.package.documentation.clone(), repository: self.package.repository.take(), description: self.package.description.take().unwrap_or_else(||format!("[generated from Rust crate {}]", self.package.name)), extended_description: self.extended_description( deb.extended_description.take(), deb.extended_description_file.as_ref().or(readme))?, maintainer: deb.maintainer.take().ok_or_then(|| { Ok(self.package.authors.get(0) .ok_or("The package must have a maintainer or authors property")?.to_owned()) })?, depends: deb.depends.take().unwrap_or_else(|| "$auto".to_owned()), pre_depends: deb.pre_depends.take(), recommends: deb.recommends.take(), suggests: deb.suggests.take(), enhances: deb.enhances.take(), conflicts: deb.conflicts.take(), breaks: deb.breaks.take(), replaces: deb.replaces.take(), provides: deb.provides.take(), section: deb.section.take(), priority: deb.priority.take().unwrap_or_else(|| "optional".to_owned()), architecture: get_arch(target.unwrap_or(crate::DEFAULT_TARGET)).to_owned(), conf_files: deb.conf_files.map(|x| format_conffiles(&x)), assets: Assets::new(), triggers_file: deb.triggers_file.map(PathBuf::from), changelog: deb.changelog.take(), maintainer_scripts: deb.maintainer_scripts.map(PathBuf::from), features: deb.features.take().unwrap_or_default(), default_features: deb.default_features.unwrap_or(true), separate_debug_symbols: deb.separate_debug_symbols.unwrap_or(false), strip: self.profile.as_ref().and_then(|p|p.release.as_ref()) .and_then(|r| r.debug.as_ref()) .map_or(true, |debug| match *debug { toml::Value::Integer(0) => false, toml::Value::Boolean(value) => value, _ => true }), preserve_symlinks: deb.preserve_symlinks.unwrap_or(false), 
systemd_units: deb.systemd_units.take(), _use_constructor_to_make_this_struct_: (), }; let assets = self.take_assets(&config, deb.assets.take(), &root_package.targets, readme)?; if assets.is_empty() { return Err("No binaries or cdylibs found. The package is empty. Please specify some assets to package in Cargo.toml".into()); } config.assets = assets; config.add_copyright_asset()?; config.add_changelog_asset()?; config.add_systemd_assets()?; Ok(config) } fn check_config(&self, manifest_dir: &Path, readme: Option<&String>, deb: &CargoDeb, listener: &dyn Listener) { if self.package.description.is_none() { listener.warning("description field is missing in Cargo.toml".to_owned()); } if self.package.license.is_none() && self.package.license_file.is_none() { listener.warning("license field is missing in Cargo.toml".to_owned()); } if let Some(readme) = readme { if deb.extended_description.is_none() && deb.extended_description_file.is_none() && (readme.ends_with(".md") || readme.ends_with(".markdown")) { listener.warning(format!("extended-description field missing. Using {}, but markdown may not render well.",readme)); } } else { for p in &["README.md", "README.markdown", "README.txt", "README"] { if manifest_dir.join(p).exists() { listener.warning(format!("{} file exists, but is not specified in `readme` Cargo.toml field", p)); break; } } } } fn extended_description(&self, desc: Option<String>, desc_file: Option<&String>) -> CDResult<Option<String>> { Ok(if desc.is_some() { desc } else if let Some(desc_file) = desc_file { Some(fs::read_to_string(desc_file) .map_err(|err| CargoDebError::IoFile( "unable to read extended description from file", err, PathBuf::from(desc_file)))?) } else { None }) } fn license_file(&mut self, license_file: Option<&LicenseFile>) -> CDResult<(Option<PathBuf>, usize)> { Ok(match license_file { Some(LicenseFile::Vec(args)) => { let mut args = args.iter(); let file = args.next(); let lines = if let Some(lines) = args.next() { lines.parse().map_err(|e| CargoDebError::NumParse("invalid number of lines", e))? 
} else {0}; (file.map(|s|s.into()), lines) }, Some(LicenseFile::String(s)) => { (Some(s.into()), 0) } None => { (self.package.license_file.as_ref().map(|s| s.into()), 0) } }) } fn take_assets(&self, options: &Config, assets: Option<Vec<Vec<String>>>, targets: &[CargoMetadataTarget], readme: Option<&String>) -> CDResult<Assets> { Ok(if let Some(assets) = assets { // Treat all explicit assets as unresolved until after the build step let mut unresolved_assets = vec![]; for mut asset_line in assets { let mut asset_parts = asset_line.drain(..); let source_path = PathBuf::from(asset_parts.next() .ok_or("missing path (first array entry) for asset in Cargo.toml")?); let (is_built, source_path) = if let Ok(rel_path) = source_path.strip_prefix("target/release") { (true, options.path_in_build(rel_path)) } else { (false, options.path_in_workspace(&source_path)) }; let target_path = PathBuf::from(asset_parts.next().ok_or("missing target (second array entry) for asset in Cargo.toml")?); let chmod = u32::from_str_radix(&asset_parts.next().ok_or("missing chmod (third array entry) for asset in Cargo.toml")?, 8) .map_err(|e| CargoDebError::NumParse("unable to parse chmod argument", e))?; unresolved_assets.push(UnresolvedAsset { source_path, target_path, chmod, is_built, }) } Assets::with_unresolved_assets(unresolved_assets) } else { let mut implied_assets: Vec<_> = targets .iter() .filter_map(|t| { if t.crate_types.iter().any(|ty| ty == "bin") && t.kind.iter().any(|k| k == "bin") { Some(Asset::new( AssetSource::Path(options.path_in_build(&t.name)), Path::new("usr/bin").join(&t.name), 0o755, true, )) } else if t.crate_types.iter().any(|ty| ty == "cdylib") && t.kind.iter().any(|k| k == "cdylib") { // FIXME: std has constants for the host arch, but not for cross-compilation let lib_name = format!("{}{}{}", DLL_PREFIX, t.name, DLL_SUFFIX); Some(Asset::new( AssetSource::Path(options.path_in_build(&lib_name)), Path::new("usr/lib").join(lib_name), 0o644, true, )) } else { None } }) .collect(); if let Some(readme) = readme { let target_path = Path::new("usr/share/doc").join(&self.package.name).join(readme); implied_assets.push(Asset::new( AssetSource::Path(PathBuf::from(readme)), target_path, 0o644, false, )); } Assets::with_resolved_assets(implied_assets) }) } /// Debian-compatible version of the semver version fn version_string(&self, revision: Option<String>) -> String { let debianized_version; let mut version = &self.package.version; // Make debian's version ordering (newer versions) more compatible with semver's. 
// Keep "semver-1" and "semver-xxx" as-is (assuming these are irrelevant, or debian revision already), // but change "semver-beta.1" to "semver~beta.1" let mut parts = version.splitn(2, '-'); let semver_main = parts.next().unwrap(); if let Some(semver_pre) = parts.next() { let pre_ascii = semver_pre.as_bytes(); if pre_ascii.iter().any(|c| !c.is_ascii_digit()) && pre_ascii.iter().any(|c| c.is_ascii_digit()) { debianized_version = format!("{}~{}", semver_main, semver_pre); version = &debianized_version; } } if let Some(revision) = revision { format!("{}-{}", version, revision) } else { version.to_owned() } } } #[derive(Clone, Debug, Deserialize)] struct CargoPackageMetadata { pub deb: Option<CargoDeb>, } #[derive(Clone, Debug, Deserialize)] #[serde(untagged)] enum LicenseFile { String(String), Vec(Vec<String>), } #[derive(Clone, Debug, Deserialize, Default)] #[serde(rename_all = "kebab-case", deny_unknown_fields)] struct CargoDeb { pub name: Option<String>, pub maintainer: Option<String>, pub copyright: Option<String>, pub license_file: Option<LicenseFile>, pub changelog: Option<String>, pub depends: Option<String>, pub pre_depends: Option<String>, pub recommends: Option<String>, pub suggests: Option<String>, pub enhances: Option<String>, pub conflicts: Option<String>, pub breaks: Option<String>, pub replaces: Option<String>, pub provides: Option<String>, pub extended_description: Option<String>, pub extended_description_file: Option<String>, pub section: Option<String>, pub priority: Option<String>, pub revision: Option<String>, pub conf_files: Option<Vec<String>>, pub assets: Option<Vec<Vec<String>>>, pub triggers_file: Option<String>, pub maintainer_scripts: Option<String>, pub features: Option<Vec<String>>, pub default_features: Option<bool>, pub separate_debug_symbols: Option<bool>, pub preserve_symlinks: Option<bool>, pub systemd_units: Option<SystemdUnitsConfig>, pub variants: Option<HashMap<String, CargoDeb>>, } impl CargoDeb { fn inherit_from(self, parent: CargoDeb) -> CargoDeb { CargoDeb { name: self.name.or(parent.name), maintainer: self.maintainer.or(parent.maintainer), copyright: self.copyright.or(parent.copyright), license_file: self.license_file.or(parent.license_file), changelog: self.changelog.or(parent.changelog), depends: self.depends.or(parent.depends), pre_depends: self.pre_depends.or(parent.pre_depends), recommends: self.recommends.or(parent.recommends), suggests: self.suggests.or(parent.suggests), enhances: self.enhances.or(parent.enhances), conflicts: self.conflicts.or(parent.conflicts), breaks: self.breaks.or(parent.breaks), replaces: self.replaces.or(parent.replaces), provides: self.provides.or(parent.provides), extended_description: self.extended_description.or(parent.extended_description), extended_description_file: self.extended_description_file.or(parent.extended_description_file), section: self.section.or(parent.section), priority: self.priority.or(parent.priority), revision: self.revision.or(parent.revision), conf_files: self.conf_files.or(parent.conf_files), assets: self.assets.or(parent.assets), triggers_file: self.triggers_file.or(parent.triggers_file), maintainer_scripts: self.maintainer_scripts.or(parent.maintainer_scripts), features: self.features.or(parent.features), default_features: self.default_features.or(parent.default_features), separate_debug_symbols: self.separate_debug_symbols.or(parent.separate_debug_symbols), preserve_symlinks: self.preserve_symlinks.or(parent.preserve_symlinks), systemd_units: self.systemd_units.or(parent.systemd_units), 
variants: self.variants.or(parent.variants), } } } #[derive(Deserialize)] struct CargoMetadata { packages: Vec<CargoMetadataPackage>, resolve: CargoMetadataResolve, #[serde(default)] workspace_members: Vec<String>, target_directory: String, } #[derive(Deserialize)] struct CargoMetadataResolve { root: Option<String>, } #[derive(Deserialize)] struct CargoMetadataPackage { pub id: String, pub name: String, pub targets: Vec<CargoMetadataTarget>, pub manifest_path: String, } #[derive(Deserialize)] struct CargoMetadataTarget { pub name: String, pub kind: Vec<String>, pub crate_types: Vec<String>, } /// Returns the path of the `Cargo.toml` that we want to build. fn cargo_metadata(manifest_path: &Path) -> CDResult<CargoMetadata> { let mut cmd = Command::new("cargo"); cmd.arg("metadata"); cmd.arg("--format-version=1"); cmd.arg(format!("--manifest-path={}", manifest_path.display())); let output = cmd.output() .map_err(|e| CargoDebError::CommandFailed(e, "cargo (is it in your PATH?)"))?; if !output.status.success() { return Err(CargoDebError::CommandError("cargo", "metadata".to_owned(), output.stderr)); } let stdout = String::from_utf8(output.stdout).unwrap(); let metadata = serde_json::from_str(&stdout)?; Ok(metadata) } /// Debianizes the architecture name pub(crate) fn get_arch(target: &str) -> &str { let mut parts = target.split('-'); let arch = parts.next().unwrap(); let abi = parts.last().unwrap_or(""); match (arch, abi) { // https://wiki.debian.org/Multiarch/Tuples // rustc --print target-list // https://doc.rust-lang.org/std/env/consts/constant.ARCH.html ("aarch64", _) => "arm64", ("mips64", "gnuabin32") => "mipsn32", ("mips64el", "gnuabin32") => "mipsn32el", ("mipsisa32r6", _) => "mipsr6", ("mipsisa32r6el", _) => "mipsr6el", ("mipsisa64r6", "gnuabi64") => "mips64r6", ("mipsisa64r6", "gnuabin32") => "mipsn32r6", ("mipsisa64r6el", "gnuabi64") => "mips64r6el", ("mipsisa64r6el", "gnuabin32") => "mipsn32r6el", ("powerpc", "gnuspe") => "powerpcspe", ("powerpc64", _) => "ppc64", ("powerpc64le", _) => "ppc64el", ("riscv64gc", _) => "riscv64", ("i586", _) | ("i686", _) | ("x86", _) => "i386", ("x86_64", "gnux32") => "x32", ("x86_64", _) => "amd64", (arm, gnueabi) if arm.starts_with("arm") && gnueabi.ends_with("hf") => "armhf", (arm, _) if arm.starts_with("arm") => "armel", (other_arch, _) => other_arch, } } /// Format conffiles section, ensuring each path has a leading slash /// /// Starting with [dpkg 1.20.1](https://github.com/guillemj/dpkg/blob/68ab722604217d3ab836276acfc0ae1260b28f5f/debian/changelog#L393), /// which is what Ubuntu 21.04 uses, relative conf-files are no longer /// accepted (the deb-conffiles man page states that "they should be listed as /// absolute pathnames"). 
So we prepend a leading slash to the given strings /// as needed fn format_conffiles<S: AsRef<str>>(files: &[S]) -> String { files.iter().fold(String::new(), |mut acc, x| { let pth = x.as_ref(); if !pth.starts_with('/') { acc.push('/'); } acc + pth + "\n" }) } #[cfg(test)] mod tests { use super::*; use crate::util::tests::add_test_fs_paths; #[test] fn match_arm_arch() { assert_eq!("armhf", get_arch("arm-unknown-linux-gnueabihf")); } #[test] fn arch_spec() { use ArchSpec::*; // req assert_eq!( get_architecture_specification("libjpeg64-turbo [armhf]").expect("arch"), ("libjpeg64-turbo".to_owned(), Some(Require("armhf".to_owned())))); // neg assert_eq!( get_architecture_specification("libjpeg64-turbo [!amd64]").expect("arch"), ("libjpeg64-turbo".to_owned(), Some(NegRequire("amd64".to_owned())))); } #[test] fn assets() { let a = Asset::new( AssetSource::Path(PathBuf::from("target/release/bar")), PathBuf::from("baz/"), 0o644, true, ); assert_eq!("baz/bar", a.target_path.to_str().unwrap()); assert!(a.is_built); let a = Asset::new( AssetSource::Path(PathBuf::from("foo/bar")), PathBuf::from("/baz/quz"), 0o644, false, ); assert_eq!("baz/quz", a.target_path.to_str().unwrap()); assert!(!a.is_built); } /// Tests that getting the debug filename from a path returns the same path /// with ".debug" appended #[test] fn test_debug_filename() { let path = Path::new("/my/test/file"); assert_eq!(debug_filename(path), Path::new("/my/test/file.debug")); } /// Tests that getting the debug target for an Asset that `is_built` returns /// the path "/usr/lib/debug/<path-to-target>.debug" #[test] fn test_debug_target_ok() { let a = Asset::new( AssetSource::Path(PathBuf::from("target/release/bar")), PathBuf::from("/usr/bin/baz/"), 0o644, true, ); let debug_target = a.debug_target().expect("Got unexpected None"); assert_eq!(debug_target, Path::new("/usr/lib/debug/usr/bin/baz/bar.debug")); } /// Tests that getting the debug target for an Asset that `is_built` and that /// has a relative path target returns the path "/usr/lib/debug/<path-to-target>.debug" #[test] fn test_debug_target_ok_relative() { let a = Asset::new( AssetSource::Path(PathBuf::from("target/release/bar")), PathBuf::from("baz/"), 0o644, true, ); let debug_target = a.debug_target().expect("Got unexpected None"); assert_eq!(debug_target, Path::new("/usr/lib/debug/baz/bar.debug")); } /// Tests that getting the debug target for an Asset that with `is_built` false /// returns None #[test] fn test_debug_target_not_built() { let a = Asset::new( AssetSource::Path(PathBuf::from("target/release/bar")), PathBuf::from("baz/"), 0o644, false, ); assert_eq!(a.debug_target(), None); } /// Tests that debug_source() for an AssetSource::Path returns the same path /// but with ".debug" appended #[test] fn test_debug_source_path() { let a = AssetSource::Path(PathBuf::from("target/release/bar")); let debug_source = a.debug_source().expect("Got unexpected None"); assert_eq!(debug_source, Path::new("target/release/bar.debug")); } /// Tests that debug_source() for an AssetSource::Data returns None #[test] fn test_debug_source_data() { let data: Vec<u8> = Vec::new(); let a = AssetSource::Data(data); assert_eq!(a.debug_source(), None); } fn to_canon_static_str(s: &str) -> &'static str { let cwd = std::env::current_dir().unwrap(); let abs_path = cwd.join(s); let abs_path_string = abs_path.to_string_lossy().into_owned(); Box::leak(abs_path_string.into_boxed_str()) } #[test] fn add_systemd_assets_with_no_config_does_nothing() { let mut mock_listener = 
crate::listener::MockListener::new(); mock_listener.expect_info().return_const(()); // supply a systemd unit file as if it were available on disk let _g = add_test_fs_paths(&vec![to_canon_static_str("cargo-deb.service")]); let config = Config::from_manifest(Path::new("Cargo.toml"), None, None, None, None, None, &mut mock_listener).unwrap(); let num_unit_assets = config.assets.resolved .iter() .filter(|v| v.target_path.starts_with("lib/systemd/system/")) .count(); assert_eq!(0, num_unit_assets); } #[test] fn add_systemd_assets_with_config_adds_unit_assets() { let mut mock_listener = crate::listener::MockListener::new(); mock_listener.expect_info().return_const(()); // supply a systemd unit file as if it were available on disk let _g = add_test_fs_paths(&vec![to_canon_static_str("cargo-deb.service")]); let mut config = Config::from_manifest(Path::new("Cargo.toml"), None, None, None, None, None, &mut mock_listener).unwrap(); config.systemd_units.get_or_insert(SystemdUnitsConfig::default()); config.maintainer_scripts.get_or_insert(PathBuf::new()); config.add_systemd_assets().unwrap(); let num_unit_assets = config.assets.resolved .iter() .filter(|v| v.target_path.starts_with("lib/systemd/system/")) .count(); assert_eq!(1, num_unit_assets); } #[test] fn format_conffiles_empty() { let actual = format_conffiles::<String>(&[]); assert_eq!("", actual); } #[test] fn format_conffiles_one() { let actual = format_conffiles(&["/etc/my-pkg/conf.toml"]); assert_eq!("/etc/my-pkg/conf.toml\n", actual); } #[test] fn format_conffiles_multiple() { let actual = format_conffiles(&[ "/etc/my-pkg/conf.toml", "etc/my-pkg/conf2.toml" ]); assert_eq!("/etc/my-pkg/conf.toml\n/etc/my-pkg/conf2.toml\n", actual); } } #[test] fn deb_ver() { let mut c = Cargo { package: cargo_toml::Package { version: "1.2.3-1".into(), authors: vec![], autobenches: false, autobins: false, autotests: false, autoexamples: false, categories: vec![], name: "test".into(), edition: Default::default(), homepage: None, keywords: vec![], publish: Default::default(), repository: None, workspace: None, license: None, license_file: None, links: None, metadata: None, readme: None, documentation: None, description: Default::default(), build: None, default_run: None, resolver: None, }, profile: None, }; assert_eq!("1.2.3-1", c.version_string(None)); assert_eq!("1.2.3-1-2", c.version_string(Some("2".into()))); c.package.version = "1.2.0-beta.3".into(); assert_eq!("1.2.0~beta.3", c.version_string(None)); assert_eq!("1.2.0~beta.3-4", c.version_string(Some("4".into()))); c.package.version = "1.2.0-new".into(); assert_eq!("1.2.0-new", c.version_string(None)); assert_eq!("1.2.0-new-11", c.version_string(Some("11".into()))); }
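// --- A minimal illustrative sketch (not part of the original cargo-deb source) ---
// The `resolve_assets` logic above maps glob matches to install paths by stripping
// the non-glob prefix of the source pattern before joining the remainder onto the
// target directory. The sketch below restates that rule in isolation; the pattern
// "assets/**/*.css", the target "usr/share/app", and the matched file are all
// hypothetical example values.
#[cfg(test)]
mod glob_mapping_sketch {
    use std::path::{Path, PathBuf};

    /// Mirror of the prefix rule: keep leading path segments that contain no glob
    /// metacharacters, then re-root the matched file under the target directory.
    fn map_glob_match(pattern: &str, target_dir: &Path, matched: &Path) -> PathBuf {
        let is_glob = |s: &str| s.contains('*') || s.contains('[') || s.contains(']') || s.contains('!');
        let prefix: PathBuf = Path::new(pattern)
            .iter()
            .take_while(|part| part.to_str().map_or(false, |s| !is_glob(s)))
            .collect();
        target_dir.join(matched.strip_prefix(&prefix).expect("match starts with prefix"))
    }

    #[test]
    fn strips_non_glob_prefix() {
        // "assets" is the non-glob prefix, so "assets/css/site.css" installs to
        // "usr/share/app/css/site.css", preserving the globbed subtree layout.
        let out = map_glob_match(
            "assets/**/*.css",
            Path::new("usr/share/app"),
            Path::new("assets/css/site.css"),
        );
        assert_eq!(out, Path::new("usr/share/app/css/site.css"));
    }
}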
38.018561
224
0.574494
03d3483902dcf87b899aefcbe1f2a66ea07d2a30
25,565
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use prelude::v1::*; use cell::UnsafeCell; use fmt; use marker; use mem; use ops::{Deref, DerefMut}; use ptr; use sys_common::poison::{self, LockResult, TryLockError, TryLockResult}; use sys_common::rwlock as sys; /// A reader-writer lock /// /// This type of lock allows a number of readers or at most one writer at any /// point in time. The write portion of this lock typically allows modification /// of the underlying data (exclusive access) and the read portion of this lock /// typically allows for read-only access (shared access). /// /// The priority policy of the lock is dependent on the underlying operating /// system's implementation, and this type does not guarantee that any /// particular policy will be used. /// /// The type parameter `T` represents the data that this lock protects. It is /// required that `T` satisfies `Send` to be shared across threads and `Sync` to /// allow concurrent access through readers. The RAII guards returned from the /// locking methods implement `Deref` (and `DerefMut` for the `write` methods) /// to allow access to the contents of the lock. /// /// # Poisoning /// /// An `RwLock`, like `Mutex`, will become poisoned on a panic. Note, however, /// that an `RwLock` may only be poisoned if a panic occurs while it is locked /// exclusively (write mode). If a panic occurs in any reader, then the lock /// will not be poisoned. /// /// # Examples /// /// ``` /// use std::sync::RwLock; /// /// let lock = RwLock::new(5); /// /// // many reader locks can be held at once /// { /// let r1 = lock.read().unwrap(); /// let r2 = lock.read().unwrap(); /// assert_eq!(*r1, 5); /// assert_eq!(*r2, 5); /// } // read locks are dropped at this point /// /// // only one write lock may be held, however /// { /// let mut w = lock.write().unwrap(); /// *w += 1; /// assert_eq!(*w, 6); /// } // write lock is dropped here /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[allow(deprecated)] pub struct RwLock<T: ?Sized> { inner: Box<StaticRwLock>, data: UnsafeCell<T>, } #[stable(feature = "rust1", since = "1.0.0")] unsafe impl<T: ?Sized + Send + Sync> Send for RwLock<T> {} #[stable(feature = "rust1", since = "1.0.0")] unsafe impl<T: ?Sized + Send + Sync> Sync for RwLock<T> {} /// Structure representing a statically allocated RwLock. /// /// This structure is intended to be used inside of a `static` and will provide /// automatic global access as well as lazy initialization. The internal /// resources of this RwLock, however, must be manually deallocated. /// /// # Examples /// /// ``` /// #![feature(static_rwlock)] /// /// use std::sync::{StaticRwLock, RW_LOCK_INIT}; /// /// static LOCK: StaticRwLock = RW_LOCK_INIT; /// /// { /// let _g = LOCK.read().unwrap(); /// // ... shared read access /// } /// { /// let _g = LOCK.write().unwrap(); /// // ...
exclusive write access /// } /// unsafe { LOCK.destroy() } // free all resources /// ``` #[unstable(feature = "static_rwlock", reason = "may be merged with RwLock in the future", issue = "27717")] #[rustc_deprecated(since = "1.10.0", reason = "the lazy-static crate suffices for static sync \ primitives and eventually this type shouldn't \ be necessary as `RwLock::new` in a static should \ suffice")] pub struct StaticRwLock { lock: sys::RWLock, poison: poison::Flag, } /// Constant initialization for a statically-initialized rwlock. #[unstable(feature = "static_rwlock", reason = "may be merged with RwLock in the future", issue = "27717")] #[rustc_deprecated(since = "1.10.0", reason = "the lazy-static crate suffices for static sync \ primitives and eventually this type shouldn't \ be necessary as `RwLock::new` in a static should \ suffice")] #[allow(deprecated)] pub const RW_LOCK_INIT: StaticRwLock = StaticRwLock::new(); /// RAII structure used to release the shared read access of a lock when /// dropped. #[must_use] #[stable(feature = "rust1", since = "1.0.0")] #[allow(deprecated)] pub struct RwLockReadGuard<'a, T: ?Sized + 'a> { __lock: &'a StaticRwLock, __data: &'a T, } #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T: ?Sized> !marker::Send for RwLockReadGuard<'a, T> {} /// RAII structure used to release the exclusive write access of a lock when /// dropped. #[must_use] #[stable(feature = "rust1", since = "1.0.0")] #[allow(deprecated)] pub struct RwLockWriteGuard<'a, T: ?Sized + 'a> { __lock: &'a StaticRwLock, __data: &'a mut T, __poison: poison::Guard, } #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T: ?Sized> !marker::Send for RwLockWriteGuard<'a, T> {} #[allow(deprecated)] impl<T> RwLock<T> { /// Creates a new instance of an `RwLock<T>` which is unlocked. /// /// # Examples /// /// ``` /// use std::sync::RwLock; /// /// let lock = RwLock::new(5); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn new(t: T) -> RwLock<T> { RwLock { inner: box StaticRwLock::new(), data: UnsafeCell::new(t) } } } #[allow(deprecated)] impl<T: ?Sized> RwLock<T> { /// Locks this rwlock with shared read access, blocking the current thread /// until it can be acquired. /// /// The calling thread will be blocked until there are no more writers which /// hold the lock. There may be other readers currently inside the lock when /// this method returns. This method does not provide any guarantees with /// respect to the ordering of whether contentious readers or writers will /// acquire the lock first. /// /// Returns an RAII guard which will release this thread's shared access /// once it is dropped. /// /// # Errors /// /// This function will return an error if the RwLock is poisoned. An RwLock /// is poisoned whenever a writer panics while holding an exclusive lock. /// The failure will occur immediately after the lock has been acquired. #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn read(&self) -> LockResult<RwLockReadGuard<T>> { unsafe { self.inner.lock.read(); RwLockReadGuard::new(&*self.inner, &self.data) } } /// Attempts to acquire this rwlock with shared read access. /// /// If the access could not be granted at this time, then `Err` is returned. /// Otherwise, an RAII guard is returned which will release the shared access /// when it is dropped. /// /// This function does not block. /// /// This function does not provide any guarantees with respect to the ordering /// of whether contentious readers or writers will acquire the lock first. 
/// /// # Errors /// /// This function will return an error if the RwLock is poisoned. An RwLock /// is poisoned whenever a writer panics while holding an exclusive lock. An /// error will only be returned if the lock would have otherwise been /// acquired. #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn try_read(&self) -> TryLockResult<RwLockReadGuard<T>> { unsafe { if self.inner.lock.try_read() { Ok(RwLockReadGuard::new(&*self.inner, &self.data)?) } else { Err(TryLockError::WouldBlock) } } } /// Locks this rwlock with exclusive write access, blocking the current /// thread until it can be acquired. /// /// This function will not return while other writers or other readers /// currently have access to the lock. /// /// Returns an RAII guard which will drop the write access of this rwlock /// when dropped. /// /// # Errors /// /// This function will return an error if the RwLock is poisoned. An RwLock /// is poisoned whenever a writer panics while holding an exclusive lock. /// An error will be returned when the lock is acquired. #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn write(&self) -> LockResult<RwLockWriteGuard<T>> { unsafe { self.inner.lock.write(); RwLockWriteGuard::new(&*self.inner, &self.data) } } /// Attempts to lock this rwlock with exclusive write access. /// /// If the lock could not be acquired at this time, then `Err` is returned. /// Otherwise, an RAII guard is returned which will release the lock when /// it is dropped. /// /// This function does not block. /// /// This function does not provide any guarantees with respect to the ordering /// of whether contentious readers or writers will acquire the lock first. /// /// # Errors /// /// This function will return an error if the RwLock is poisoned. An RwLock /// is poisoned whenever a writer panics while holding an exclusive lock. An /// error will only be returned if the lock would have otherwise been /// acquired. #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn try_write(&self) -> TryLockResult<RwLockWriteGuard<T>> { unsafe { if self.inner.lock.try_write() { Ok(RwLockWriteGuard::new(&*self.inner, &self.data)?) } else { Err(TryLockError::WouldBlock) } } } /// Determines whether the lock is poisoned. /// /// If another thread is active, the lock can still become poisoned at any /// time. You should not trust a `false` value for program correctness /// without additional synchronization. #[inline] #[stable(feature = "sync_poison", since = "1.2.0")] pub fn is_poisoned(&self) -> bool { self.inner.poison.get() } /// Consumes this `RwLock`, returning the underlying data. /// /// # Errors /// /// This function will return an error if the RwLock is poisoned. An RwLock /// is poisoned whenever a writer panics while holding an exclusive lock. An /// error will only be returned if the lock would have otherwise been /// acquired. #[stable(feature = "rwlock_into_inner", since = "1.6.0")] pub fn into_inner(self) -> LockResult<T> where T: Sized { // We know statically that there are no outstanding references to // `self` so there's no need to lock the inner StaticRwLock. // // To get the inner value, we'd like to call `data.into_inner()`, // but because `RwLock` impl-s `Drop`, we can't move out of it, so // we'll have to destructure it manually instead. unsafe { // Like `let RwLock { inner, data } = self`. let (inner, data) = { let RwLock { ref inner, ref data } = self; (ptr::read(inner), ptr::read(data)) }; mem::forget(self); inner.lock.destroy(); // Keep in sync with the `Drop` impl. 
poison::map_result(inner.poison.borrow(), |_| data.into_inner()) } } /// Returns a mutable reference to the underlying data. /// /// Since this call borrows the `RwLock` mutably, no actual locking needs to /// take place---the mutable borrow statically guarantees no locks exist. /// /// # Errors /// /// This function will return an error if the RwLock is poisoned. An RwLock /// is poisoned whenever a writer panics while holding an exclusive lock. An /// error will only be returned if the lock would have otherwise been /// acquired. #[stable(feature = "rwlock_get_mut", since = "1.6.0")] pub fn get_mut(&mut self) -> LockResult<&mut T> { // We know statically that there are no other references to `self`, so // there's no need to lock the inner StaticRwLock. let data = unsafe { &mut *self.data.get() }; poison::map_result(self.inner.poison.borrow(), |_| data ) } } #[stable(feature = "rust1", since = "1.0.0")] #[allow(deprecated)] impl<T: ?Sized> Drop for RwLock<T> { #[unsafe_destructor_blind_to_params] fn drop(&mut self) { // IMPORTANT: This code needs to be kept in sync with `RwLock::into_inner`. unsafe { self.inner.lock.destroy() } } } #[stable(feature = "rust1", since = "1.0.0")] impl<T: ?Sized + fmt::Debug> fmt::Debug for RwLock<T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self.try_read() { Ok(guard) => write!(f, "RwLock {{ data: {:?} }}", &*guard), Err(TryLockError::Poisoned(err)) => { write!(f, "RwLock {{ data: Poisoned({:?}) }}", &**err.get_ref()) }, Err(TryLockError::WouldBlock) => write!(f, "RwLock {{ <locked> }}") } } } #[stable(feature = "rw_lock_default", since = "1.9.0")] impl<T: Default> Default for RwLock<T> { fn default() -> RwLock<T> { RwLock::new(Default::default()) } } struct Dummy(UnsafeCell<()>); unsafe impl Sync for Dummy {} static DUMMY: Dummy = Dummy(UnsafeCell::new(())); #[unstable(feature = "static_rwlock", reason = "may be merged with RwLock in the future", issue = "27717")] #[rustc_deprecated(since = "1.10.0", reason = "the lazy-static crate suffices for static sync \ primitives and eventually this type shouldn't \ be necessary as `RwLock::new` in a static should \ suffice")] #[allow(deprecated)] impl StaticRwLock { /// Creates a new rwlock. pub const fn new() -> StaticRwLock { StaticRwLock { lock: sys::RWLock::new(), poison: poison::Flag::new(), } } /// Locks this rwlock with shared read access, blocking the current thread /// until it can be acquired. /// /// See `RwLock::read`. #[inline] pub fn read(&'static self) -> LockResult<RwLockReadGuard<'static, ()>> { unsafe { self.lock.read(); RwLockReadGuard::new(self, &DUMMY.0) } } /// Attempts to acquire this lock with shared read access. /// /// See `RwLock::try_read`. #[inline] pub fn try_read(&'static self) -> TryLockResult<RwLockReadGuard<'static, ()>> { unsafe { if self.lock.try_read(){ Ok(RwLockReadGuard::new(self, &DUMMY.0)?) } else { Err(TryLockError::WouldBlock) } } } /// Locks this rwlock with exclusive write access, blocking the current /// thread until it can be acquired. /// /// See `RwLock::write`. #[inline] pub fn write(&'static self) -> LockResult<RwLockWriteGuard<'static, ()>> { unsafe { self.lock.write(); RwLockWriteGuard::new(self, &DUMMY.0) } } /// Attempts to lock this rwlock with exclusive write access. /// /// See `RwLock::try_write`. #[inline] pub fn try_write(&'static self) -> TryLockResult<RwLockWriteGuard<'static, ()>> { unsafe { if self.lock.try_write() { Ok(RwLockWriteGuard::new(self, &DUMMY.0)?) 
} else { Err(TryLockError::WouldBlock) } } } /// Deallocates all resources associated with this static lock. /// /// This method is unsafe to call as there is no guarantee that there are no /// active users of the lock, and this also doesn't prevent any future users /// of this lock. This method is required to be called to not leak memory on /// all platforms. pub unsafe fn destroy(&'static self) { self.lock.destroy() } } #[allow(deprecated)] impl<'rwlock, T: ?Sized> RwLockReadGuard<'rwlock, T> { unsafe fn new(lock: &'rwlock StaticRwLock, data: &'rwlock UnsafeCell<T>) -> LockResult<RwLockReadGuard<'rwlock, T>> { poison::map_result(lock.poison.borrow(), |_| { RwLockReadGuard { __lock: lock, __data: &*data.get(), } }) } } #[allow(deprecated)] impl<'rwlock, T: ?Sized> RwLockWriteGuard<'rwlock, T> { unsafe fn new(lock: &'rwlock StaticRwLock, data: &'rwlock UnsafeCell<T>) -> LockResult<RwLockWriteGuard<'rwlock, T>> { poison::map_result(lock.poison.borrow(), |guard| { RwLockWriteGuard { __lock: lock, __data: &mut *data.get(), __poison: guard, } }) } } #[stable(feature = "rust1", since = "1.0.0")] impl<'rwlock, T: ?Sized> Deref for RwLockReadGuard<'rwlock, T> { type Target = T; fn deref(&self) -> &T { self.__data } } #[stable(feature = "rust1", since = "1.0.0")] impl<'rwlock, T: ?Sized> Deref for RwLockWriteGuard<'rwlock, T> { type Target = T; fn deref(&self) -> &T { self.__data } } #[stable(feature = "rust1", since = "1.0.0")] impl<'rwlock, T: ?Sized> DerefMut for RwLockWriteGuard<'rwlock, T> { fn deref_mut(&mut self) -> &mut T { self.__data } } #[allow(deprecated)] #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T: ?Sized> Drop for RwLockReadGuard<'a, T> { fn drop(&mut self) { unsafe { self.__lock.lock.read_unlock(); } } } #[allow(deprecated)] #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T: ?Sized> Drop for RwLockWriteGuard<'a, T> { fn drop(&mut self) { self.__lock.poison.done(&self.__poison); unsafe { self.__lock.lock.write_unlock(); } } } #[cfg(test)] #[allow(deprecated)] mod tests { #![allow(deprecated)] // rand use prelude::v1::*; use rand::{self, Rng}; use sync::mpsc::channel; use thread; use sync::{Arc, RwLock, StaticRwLock, TryLockError}; use sync::atomic::{AtomicUsize, Ordering}; #[derive(Eq, PartialEq, Debug)] struct NonCopy(i32); #[test] fn smoke() { let l = RwLock::new(()); drop(l.read().unwrap()); drop(l.write().unwrap()); drop((l.read().unwrap(), l.read().unwrap())); drop(l.write().unwrap()); } #[test] fn static_smoke() { static R: StaticRwLock = StaticRwLock::new(); drop(R.read().unwrap()); drop(R.write().unwrap()); drop((R.read().unwrap(), R.read().unwrap())); drop(R.write().unwrap()); unsafe { R.destroy(); } } #[test] fn frob() { static R: StaticRwLock = StaticRwLock::new(); const N: usize = 10; const M: usize = 1000; let (tx, rx) = channel::<()>(); for _ in 0..N { let tx = tx.clone(); thread::spawn(move|| { let mut rng = rand::thread_rng(); for _ in 0..M { if rng.gen_weighted_bool(N) { drop(R.write().unwrap()); } else { drop(R.read().unwrap()); } } drop(tx); }); } drop(tx); let _ = rx.recv(); unsafe { R.destroy(); } } #[test] fn test_rw_arc_poison_wr() { let arc = Arc::new(RwLock::new(1)); let arc2 = arc.clone(); let _: Result<(), _> = thread::spawn(move|| { let _lock = arc2.write().unwrap(); panic!(); }).join(); assert!(arc.read().is_err()); } #[test] fn test_rw_arc_poison_ww() { let arc = Arc::new(RwLock::new(1)); assert!(!arc.is_poisoned()); let arc2 = arc.clone(); let _: Result<(), _> = thread::spawn(move|| { let _lock = arc2.write().unwrap(); panic!(); 
}).join(); assert!(arc.write().is_err()); assert!(arc.is_poisoned()); } #[test] fn test_rw_arc_no_poison_rr() { let arc = Arc::new(RwLock::new(1)); let arc2 = arc.clone(); let _: Result<(), _> = thread::spawn(move|| { let _lock = arc2.read().unwrap(); panic!(); }).join(); let lock = arc.read().unwrap(); assert_eq!(*lock, 1); } #[test] fn test_rw_arc_no_poison_rw() { let arc = Arc::new(RwLock::new(1)); let arc2 = arc.clone(); let _: Result<(), _> = thread::spawn(move|| { let _lock = arc2.read().unwrap(); panic!() }).join(); let lock = arc.write().unwrap(); assert_eq!(*lock, 1); } #[test] fn test_rw_arc() { let arc = Arc::new(RwLock::new(0)); let arc2 = arc.clone(); let (tx, rx) = channel(); thread::spawn(move|| { let mut lock = arc2.write().unwrap(); for _ in 0..10 { let tmp = *lock; *lock = -1; thread::yield_now(); *lock = tmp + 1; } tx.send(()).unwrap(); }); // Readers try to catch the writer in the act let mut children = Vec::new(); for _ in 0..5 { let arc3 = arc.clone(); children.push(thread::spawn(move|| { let lock = arc3.read().unwrap(); assert!(*lock >= 0); })); } // Wait for children to pass their asserts for r in children { assert!(r.join().is_ok()); } // Wait for writer to finish rx.recv().unwrap(); let lock = arc.read().unwrap(); assert_eq!(*lock, 10); } #[test] fn test_rw_arc_access_in_unwind() { let arc = Arc::new(RwLock::new(1)); let arc2 = arc.clone(); let _ = thread::spawn(move|| -> () { struct Unwinder { i: Arc<RwLock<isize>>, } impl Drop for Unwinder { fn drop(&mut self) { let mut lock = self.i.write().unwrap(); *lock += 1; } } let _u = Unwinder { i: arc2 }; panic!(); }).join(); let lock = arc.read().unwrap(); assert_eq!(*lock, 2); } #[test] fn test_rwlock_unsized() { let rw: &RwLock<[i32]> = &RwLock::new([1, 2, 3]); { let b = &mut *rw.write().unwrap(); b[0] = 4; b[2] = 5; } let comp: &[i32] = &[4, 2, 5]; assert_eq!(&*rw.read().unwrap(), comp); } #[test] fn test_rwlock_try_write() { use mem::drop; let lock = RwLock::new(0isize); let read_guard = lock.read().unwrap(); let write_result = lock.try_write(); match write_result { Err(TryLockError::WouldBlock) => (), Ok(_) => assert!(false, "try_write should not succeed while read_guard is in scope"), Err(_) => assert!(false, "unexpected error"), } drop(read_guard); } #[test] fn test_into_inner() { let m = RwLock::new(NonCopy(10)); assert_eq!(m.into_inner().unwrap(), NonCopy(10)); } #[test] fn test_into_inner_drop() { struct Foo(Arc<AtomicUsize>); impl Drop for Foo { fn drop(&mut self) { self.0.fetch_add(1, Ordering::SeqCst); } } let num_drops = Arc::new(AtomicUsize::new(0)); let m = RwLock::new(Foo(num_drops.clone())); assert_eq!(num_drops.load(Ordering::SeqCst), 0); { let _inner = m.into_inner().unwrap(); assert_eq!(num_drops.load(Ordering::SeqCst), 0); } assert_eq!(num_drops.load(Ordering::SeqCst), 1); } #[test] fn test_into_inner_poison() { let m = Arc::new(RwLock::new(NonCopy(10))); let m2 = m.clone(); let _ = thread::spawn(move || { let _lock = m2.write().unwrap(); panic!("test panic in inner thread to poison RwLock"); }).join(); assert!(m.is_poisoned()); match Arc::try_unwrap(m).unwrap().into_inner() { Err(e) => assert_eq!(e.into_inner(), NonCopy(10)), Ok(x) => panic!("into_inner of poisoned RwLock is Ok: {:?}", x), } } #[test] fn test_get_mut() { let mut m = RwLock::new(NonCopy(10)); *m.get_mut().unwrap() = NonCopy(20); assert_eq!(m.into_inner().unwrap(), NonCopy(20)); } #[test] fn test_get_mut_poison() { let m = Arc::new(RwLock::new(NonCopy(10))); let m2 = m.clone(); let _ = thread::spawn(move || { let _lock = 
m2.write().unwrap(); panic!("test panic in inner thread to poison RwLock"); }).join(); assert!(m.is_poisoned()); match Arc::try_unwrap(m).unwrap().get_mut() { Err(e) => assert_eq!(*e.into_inner(), NonCopy(10)), Ok(x) => panic!("get_mut of poisoned RwLock is Ok: {:?}", x), } } }
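// --- A minimal usage sketch (not part of the original std module) ---
// It demonstrates the poisoning behavior documented above: a panic while the write
// guard is held poisons the lock, and `PoisonError::into_inner` can still recover
// the wrapped guard when the caller knows the protected data is valid.
#[cfg(test)]
mod poison_recovery_sketch {
    use std::sync::{Arc, RwLock};
    use std::thread;

    #[test]
    fn read_through_poison() {
        let lock = Arc::new(RwLock::new(1));
        let lock2 = lock.clone();
        // Panic while holding the write guard; this poisons the RwLock.
        let _ = thread::spawn(move || {
            let _guard = lock2.write().unwrap();
            panic!("poison the lock");
        })
        .join();
        assert!(lock.is_poisoned());
        // `read()` now yields Err(PoisonError), but the guard it wraps is usable.
        let value = match lock.read() {
            Ok(guard) => *guard,
            Err(poisoned) => *poisoned.into_inner(),
        };
        assert_eq!(value, 1);
    }
}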
32.608418
97
0.559554
ab3590f16b6561e03a810782498d852b96b237b4
80,109
#[doc = "Reader of register RXFGMASK"] pub type R = crate::R<u32, super::RXFGMASK>; #[doc = "Writer for register RXFGMASK"] pub type W = crate::W<u32, super::RXFGMASK>; #[doc = "Register RXFGMASK `reset()`'s with value 0xffff_ffff"] impl crate::ResetValue for super::RXFGMASK { type Type = u32; #[inline(always)] fn reset_value() -> Self::Type { 0xffff_ffff } } #[doc = "Rx FIFO Global Mask Bits\n\nValue on reset: 1"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum FGM0_A { #[doc = "0: The corresponding bit in the filter is \"don't care.\""] _0, #[doc = "1: The corresponding bit in the filter is checked."] _1, } impl From<FGM0_A> for bool { #[inline(always)] fn from(variant: FGM0_A) -> Self { match variant { FGM0_A::_0 => false, FGM0_A::_1 => true, } } } #[doc = "Reader of field `FGM0`"] pub type FGM0_R = crate::R<bool, FGM0_A>; impl FGM0_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> FGM0_A { match self.bits { false => FGM0_A::_0, true => FGM0_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { *self == FGM0_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { *self == FGM0_A::_1 } } #[doc = "Write proxy for field `FGM0`"] pub struct FGM0_W<'a> { w: &'a mut W, } impl<'a> FGM0_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: FGM0_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "The corresponding bit in the filter is \"don't care.\""] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(FGM0_A::_0) } #[doc = "The corresponding bit in the filter is checked."] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(FGM0_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !0x01) | ((value as u32) & 0x01); self.w } } #[doc = "Rx FIFO Global Mask Bits\n\nValue on reset: 1"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum FGM1_A { #[doc = "0: The corresponding bit in the filter is \"don't care.\""] _0, #[doc = "1: The corresponding bit in the filter is checked."] _1, } impl From<FGM1_A> for bool { #[inline(always)] fn from(variant: FGM1_A) -> Self { match variant { FGM1_A::_0 => false, FGM1_A::_1 => true, } } } #[doc = "Reader of field `FGM1`"] pub type FGM1_R = crate::R<bool, FGM1_A>; impl FGM1_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> FGM1_A { match self.bits { false => FGM1_A::_0, true => FGM1_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { *self == FGM1_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { *self == FGM1_A::_1 } } #[doc = "Write proxy for field `FGM1`"] pub struct FGM1_W<'a> { w: &'a mut W, } impl<'a> FGM1_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: FGM1_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "The corresponding bit in the filter is \"don't care.\""] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(FGM1_A::_0) } #[doc = "The corresponding bit in the filter is checked."] #[inline(always)] pub fn _1(self) -> &'a mut W { 
self.variant(FGM1_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 1)) | (((value as u32) & 0x01) << 1); self.w } } #[doc = "Rx FIFO Global Mask Bits\n\nValue on reset: 1"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum FGM2_A { #[doc = "0: The corresponding bit in the filter is \"don't care.\""] _0, #[doc = "1: The corresponding bit in the filter is checked."] _1, } impl From<FGM2_A> for bool { #[inline(always)] fn from(variant: FGM2_A) -> Self { match variant { FGM2_A::_0 => false, FGM2_A::_1 => true, } } } #[doc = "Reader of field `FGM2`"] pub type FGM2_R = crate::R<bool, FGM2_A>; impl FGM2_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> FGM2_A { match self.bits { false => FGM2_A::_0, true => FGM2_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { *self == FGM2_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { *self == FGM2_A::_1 } } #[doc = "Write proxy for field `FGM2`"] pub struct FGM2_W<'a> { w: &'a mut W, } impl<'a> FGM2_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: FGM2_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "The corresponding bit in the filter is \"don't care.\""] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(FGM2_A::_0) } #[doc = "The corresponding bit in the filter is checked."] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(FGM2_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 2)) | (((value as u32) & 0x01) << 2); self.w } } #[doc = "Rx FIFO Global Mask Bits\n\nValue on reset: 1"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum FGM3_A { #[doc = "0: The corresponding bit in the filter is \"don't care.\""] _0, #[doc = "1: The corresponding bit in the filter is checked."] _1, } impl From<FGM3_A> for bool { #[inline(always)] fn from(variant: FGM3_A) -> Self { match variant { FGM3_A::_0 => false, FGM3_A::_1 => true, } } } #[doc = "Reader of field `FGM3`"] pub type FGM3_R = crate::R<bool, FGM3_A>; impl FGM3_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> FGM3_A { match self.bits { false => FGM3_A::_0, true => FGM3_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { *self == FGM3_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { *self == FGM3_A::_1 } } #[doc = "Write proxy for field `FGM3`"] pub struct FGM3_W<'a> { w: &'a mut W, } impl<'a> FGM3_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: FGM3_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "The corresponding bit in the filter is \"don't care.\""] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(FGM3_A::_0) } #[doc = "The corresponding 
bit in the filter is checked."] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(FGM3_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 3)) | (((value as u32) & 0x01) << 3); self.w } } #[doc = "Rx FIFO Global Mask Bits\n\nValue on reset: 1"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum FGM4_A { #[doc = "0: The corresponding bit in the filter is \"don't care.\""] _0, #[doc = "1: The corresponding bit in the filter is checked."] _1, } impl From<FGM4_A> for bool { #[inline(always)] fn from(variant: FGM4_A) -> Self { match variant { FGM4_A::_0 => false, FGM4_A::_1 => true, } } } #[doc = "Reader of field `FGM4`"] pub type FGM4_R = crate::R<bool, FGM4_A>; impl FGM4_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> FGM4_A { match self.bits { false => FGM4_A::_0, true => FGM4_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { *self == FGM4_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { *self == FGM4_A::_1 } } #[doc = "Write proxy for field `FGM4`"] pub struct FGM4_W<'a> { w: &'a mut W, } impl<'a> FGM4_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: FGM4_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "The corresponding bit in the filter is \"don't care.\""] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(FGM4_A::_0) } #[doc = "The corresponding bit in the filter is checked."] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(FGM4_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 4)) | (((value as u32) & 0x01) << 4); self.w } } #[doc = "Rx FIFO Global Mask Bits\n\nValue on reset: 1"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum FGM5_A { #[doc = "0: The corresponding bit in the filter is \"don't care.\""] _0, #[doc = "1: The corresponding bit in the filter is checked."] _1, } impl From<FGM5_A> for bool { #[inline(always)] fn from(variant: FGM5_A) -> Self { match variant { FGM5_A::_0 => false, FGM5_A::_1 => true, } } } #[doc = "Reader of field `FGM5`"] pub type FGM5_R = crate::R<bool, FGM5_A>; impl FGM5_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> FGM5_A { match self.bits { false => FGM5_A::_0, true => FGM5_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { *self == FGM5_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { *self == FGM5_A::_1 } } #[doc = "Write proxy for field `FGM5`"] pub struct FGM5_W<'a> { w: &'a mut W, } impl<'a> FGM5_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: FGM5_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "The corresponding bit in the filter is \"don't care.\""] #[inline(always)] pub 
fn _0(self) -> &'a mut W { self.variant(FGM5_A::_0) } #[doc = "The corresponding bit in the filter is checked."] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(FGM5_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 5)) | (((value as u32) & 0x01) << 5); self.w } } #[doc = "Rx FIFO Global Mask Bits\n\nValue on reset: 1"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum FGM6_A { #[doc = "0: The corresponding bit in the filter is \"don't care.\""] _0, #[doc = "1: The corresponding bit in the filter is checked."] _1, } impl From<FGM6_A> for bool { #[inline(always)] fn from(variant: FGM6_A) -> Self { match variant { FGM6_A::_0 => false, FGM6_A::_1 => true, } } } #[doc = "Reader of field `FGM6`"] pub type FGM6_R = crate::R<bool, FGM6_A>; impl FGM6_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> FGM6_A { match self.bits { false => FGM6_A::_0, true => FGM6_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { *self == FGM6_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { *self == FGM6_A::_1 } } #[doc = "Write proxy for field `FGM6`"] pub struct FGM6_W<'a> { w: &'a mut W, } impl<'a> FGM6_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: FGM6_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "The corresponding bit in the filter is \"don't care.\""] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(FGM6_A::_0) } #[doc = "The corresponding bit in the filter is checked."] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(FGM6_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 6)) | (((value as u32) & 0x01) << 6); self.w } } #[doc = "Rx FIFO Global Mask Bits\n\nValue on reset: 1"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum FGM7_A { #[doc = "0: The corresponding bit in the filter is \"don't care.\""] _0, #[doc = "1: The corresponding bit in the filter is checked."] _1, } impl From<FGM7_A> for bool { #[inline(always)] fn from(variant: FGM7_A) -> Self { match variant { FGM7_A::_0 => false, FGM7_A::_1 => true, } } } #[doc = "Reader of field `FGM7`"] pub type FGM7_R = crate::R<bool, FGM7_A>; impl FGM7_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> FGM7_A { match self.bits { false => FGM7_A::_0, true => FGM7_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { *self == FGM7_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { *self == FGM7_A::_1 } } #[doc = "Write proxy for field `FGM7`"] pub struct FGM7_W<'a> { w: &'a mut W, } impl<'a> FGM7_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: FGM7_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = 
"The corresponding bit in the filter is \"don't care.\""] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(FGM7_A::_0) } #[doc = "The corresponding bit in the filter is checked."] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(FGM7_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 7)) | (((value as u32) & 0x01) << 7); self.w } } #[doc = "Rx FIFO Global Mask Bits\n\nValue on reset: 1"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum FGM8_A { #[doc = "0: The corresponding bit in the filter is \"don't care.\""] _0, #[doc = "1: The corresponding bit in the filter is checked."] _1, } impl From<FGM8_A> for bool { #[inline(always)] fn from(variant: FGM8_A) -> Self { match variant { FGM8_A::_0 => false, FGM8_A::_1 => true, } } } #[doc = "Reader of field `FGM8`"] pub type FGM8_R = crate::R<bool, FGM8_A>; impl FGM8_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> FGM8_A { match self.bits { false => FGM8_A::_0, true => FGM8_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { *self == FGM8_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { *self == FGM8_A::_1 } } #[doc = "Write proxy for field `FGM8`"] pub struct FGM8_W<'a> { w: &'a mut W, } impl<'a> FGM8_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: FGM8_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "The corresponding bit in the filter is \"don't care.\""] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(FGM8_A::_0) } #[doc = "The corresponding bit in the filter is checked."] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(FGM8_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 8)) | (((value as u32) & 0x01) << 8); self.w } } #[doc = "Rx FIFO Global Mask Bits\n\nValue on reset: 1"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum FGM9_A { #[doc = "0: The corresponding bit in the filter is \"don't care.\""] _0, #[doc = "1: The corresponding bit in the filter is checked."] _1, } impl From<FGM9_A> for bool { #[inline(always)] fn from(variant: FGM9_A) -> Self { match variant { FGM9_A::_0 => false, FGM9_A::_1 => true, } } } #[doc = "Reader of field `FGM9`"] pub type FGM9_R = crate::R<bool, FGM9_A>; impl FGM9_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> FGM9_A { match self.bits { false => FGM9_A::_0, true => FGM9_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { *self == FGM9_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { *self == FGM9_A::_1 } } #[doc = "Write proxy for field `FGM9`"] pub struct FGM9_W<'a> { w: &'a mut W, } impl<'a> FGM9_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn 
variant(self, variant: FGM9_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "The corresponding bit in the filter is \"don't care.\""] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(FGM9_A::_0) } #[doc = "The corresponding bit in the filter is checked."] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(FGM9_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 9)) | (((value as u32) & 0x01) << 9); self.w } } #[doc = "Rx FIFO Global Mask Bits\n\nValue on reset: 1"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum FGM10_A { #[doc = "0: The corresponding bit in the filter is \"don't care.\""] _0, #[doc = "1: The corresponding bit in the filter is checked."] _1, } impl From<FGM10_A> for bool { #[inline(always)] fn from(variant: FGM10_A) -> Self { match variant { FGM10_A::_0 => false, FGM10_A::_1 => true, } } } #[doc = "Reader of field `FGM10`"] pub type FGM10_R = crate::R<bool, FGM10_A>; impl FGM10_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> FGM10_A { match self.bits { false => FGM10_A::_0, true => FGM10_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { *self == FGM10_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { *self == FGM10_A::_1 } } #[doc = "Write proxy for field `FGM10`"] pub struct FGM10_W<'a> { w: &'a mut W, } impl<'a> FGM10_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: FGM10_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "The corresponding bit in the filter is \"don't care.\""] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(FGM10_A::_0) } #[doc = "The corresponding bit in the filter is checked."] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(FGM10_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 10)) | (((value as u32) & 0x01) << 10); self.w } } #[doc = "Rx FIFO Global Mask Bits\n\nValue on reset: 1"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum FGM11_A { #[doc = "0: The corresponding bit in the filter is \"don't care.\""] _0, #[doc = "1: The corresponding bit in the filter is checked."] _1, } impl From<FGM11_A> for bool { #[inline(always)] fn from(variant: FGM11_A) -> Self { match variant { FGM11_A::_0 => false, FGM11_A::_1 => true, } } } #[doc = "Reader of field `FGM11`"] pub type FGM11_R = crate::R<bool, FGM11_A>; impl FGM11_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> FGM11_A { match self.bits { false => FGM11_A::_0, true => FGM11_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { *self == FGM11_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { *self == FGM11_A::_1 } } #[doc = "Write proxy for field `FGM11`"] pub struct 
FGM11_W<'a> { w: &'a mut W, } impl<'a> FGM11_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: FGM11_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "The corresponding bit in the filter is \"don't care.\""] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(FGM11_A::_0) } #[doc = "The corresponding bit in the filter is checked."] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(FGM11_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 11)) | (((value as u32) & 0x01) << 11); self.w } } #[doc = "Rx FIFO Global Mask Bits\n\nValue on reset: 1"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum FGM12_A { #[doc = "0: The corresponding bit in the filter is \"don't care.\""] _0, #[doc = "1: The corresponding bit in the filter is checked."] _1, } impl From<FGM12_A> for bool { #[inline(always)] fn from(variant: FGM12_A) -> Self { match variant { FGM12_A::_0 => false, FGM12_A::_1 => true, } } } #[doc = "Reader of field `FGM12`"] pub type FGM12_R = crate::R<bool, FGM12_A>; impl FGM12_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> FGM12_A { match self.bits { false => FGM12_A::_0, true => FGM12_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { *self == FGM12_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { *self == FGM12_A::_1 } } #[doc = "Write proxy for field `FGM12`"] pub struct FGM12_W<'a> { w: &'a mut W, } impl<'a> FGM12_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: FGM12_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "The corresponding bit in the filter is \"don't care.\""] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(FGM12_A::_0) } #[doc = "The corresponding bit in the filter is checked."] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(FGM12_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 12)) | (((value as u32) & 0x01) << 12); self.w } } #[doc = "Rx FIFO Global Mask Bits\n\nValue on reset: 1"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum FGM13_A { #[doc = "0: The corresponding bit in the filter is \"don't care.\""] _0, #[doc = "1: The corresponding bit in the filter is checked."] _1, } impl From<FGM13_A> for bool { #[inline(always)] fn from(variant: FGM13_A) -> Self { match variant { FGM13_A::_0 => false, FGM13_A::_1 => true, } } } #[doc = "Reader of field `FGM13`"] pub type FGM13_R = crate::R<bool, FGM13_A>; impl FGM13_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> FGM13_A { match self.bits { false => FGM13_A::_0, true => FGM13_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { *self == FGM13_A::_0 } #[doc = "Checks if the value of the field is `_1`"] 
#[inline(always)] pub fn is_1(&self) -> bool { *self == FGM13_A::_1 } } #[doc = "Write proxy for field `FGM13`"] pub struct FGM13_W<'a> { w: &'a mut W, } impl<'a> FGM13_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: FGM13_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "The corresponding bit in the filter is \"don't care.\""] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(FGM13_A::_0) } #[doc = "The corresponding bit in the filter is checked."] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(FGM13_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 13)) | (((value as u32) & 0x01) << 13); self.w } } #[doc = "Rx FIFO Global Mask Bits\n\nValue on reset: 1"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum FGM14_A { #[doc = "0: The corresponding bit in the filter is \"don't care.\""] _0, #[doc = "1: The corresponding bit in the filter is checked."] _1, } impl From<FGM14_A> for bool { #[inline(always)] fn from(variant: FGM14_A) -> Self { match variant { FGM14_A::_0 => false, FGM14_A::_1 => true, } } } #[doc = "Reader of field `FGM14`"] pub type FGM14_R = crate::R<bool, FGM14_A>; impl FGM14_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> FGM14_A { match self.bits { false => FGM14_A::_0, true => FGM14_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { *self == FGM14_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { *self == FGM14_A::_1 } } #[doc = "Write proxy for field `FGM14`"] pub struct FGM14_W<'a> { w: &'a mut W, } impl<'a> FGM14_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: FGM14_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "The corresponding bit in the filter is \"don't care.\""] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(FGM14_A::_0) } #[doc = "The corresponding bit in the filter is checked."] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(FGM14_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 14)) | (((value as u32) & 0x01) << 14); self.w } } #[doc = "Rx FIFO Global Mask Bits\n\nValue on reset: 1"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum FGM15_A { #[doc = "0: The corresponding bit in the filter is \"don't care.\""] _0, #[doc = "1: The corresponding bit in the filter is checked."] _1, } impl From<FGM15_A> for bool { #[inline(always)] fn from(variant: FGM15_A) -> Self { match variant { FGM15_A::_0 => false, FGM15_A::_1 => true, } } } #[doc = "Reader of field `FGM15`"] pub type FGM15_R = crate::R<bool, FGM15_A>; impl FGM15_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> FGM15_A { match self.bits { false => FGM15_A::_0, true => FGM15_A::_1, } } #[doc = "Checks if the value of the field is 
`_0`"] #[inline(always)] pub fn is_0(&self) -> bool { *self == FGM15_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { *self == FGM15_A::_1 } } #[doc = "Write proxy for field `FGM15`"] pub struct FGM15_W<'a> { w: &'a mut W, } impl<'a> FGM15_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: FGM15_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "The corresponding bit in the filter is \"don't care.\""] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(FGM15_A::_0) } #[doc = "The corresponding bit in the filter is checked."] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(FGM15_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 15)) | (((value as u32) & 0x01) << 15); self.w } } #[doc = "Rx FIFO Global Mask Bits\n\nValue on reset: 1"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum FGM16_A { #[doc = "0: The corresponding bit in the filter is \"don't care.\""] _0, #[doc = "1: The corresponding bit in the filter is checked."] _1, } impl From<FGM16_A> for bool { #[inline(always)] fn from(variant: FGM16_A) -> Self { match variant { FGM16_A::_0 => false, FGM16_A::_1 => true, } } } #[doc = "Reader of field `FGM16`"] pub type FGM16_R = crate::R<bool, FGM16_A>; impl FGM16_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> FGM16_A { match self.bits { false => FGM16_A::_0, true => FGM16_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { *self == FGM16_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { *self == FGM16_A::_1 } } #[doc = "Write proxy for field `FGM16`"] pub struct FGM16_W<'a> { w: &'a mut W, } impl<'a> FGM16_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: FGM16_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "The corresponding bit in the filter is \"don't care.\""] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(FGM16_A::_0) } #[doc = "The corresponding bit in the filter is checked."] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(FGM16_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 16)) | (((value as u32) & 0x01) << 16); self.w } } #[doc = "Rx FIFO Global Mask Bits\n\nValue on reset: 1"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum FGM17_A { #[doc = "0: The corresponding bit in the filter is \"don't care.\""] _0, #[doc = "1: The corresponding bit in the filter is checked."] _1, } impl From<FGM17_A> for bool { #[inline(always)] fn from(variant: FGM17_A) -> Self { match variant { FGM17_A::_0 => false, FGM17_A::_1 => true, } } } #[doc = "Reader of field `FGM17`"] pub type FGM17_R = crate::R<bool, FGM17_A>; impl FGM17_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) 
-> FGM17_A { match self.bits { false => FGM17_A::_0, true => FGM17_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { *self == FGM17_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { *self == FGM17_A::_1 } } #[doc = "Write proxy for field `FGM17`"] pub struct FGM17_W<'a> { w: &'a mut W, } impl<'a> FGM17_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: FGM17_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "The corresponding bit in the filter is \"don't care.\""] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(FGM17_A::_0) } #[doc = "The corresponding bit in the filter is checked."] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(FGM17_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 17)) | (((value as u32) & 0x01) << 17); self.w } } #[doc = "Rx FIFO Global Mask Bits\n\nValue on reset: 1"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum FGM18_A { #[doc = "0: The corresponding bit in the filter is \"don't care.\""] _0, #[doc = "1: The corresponding bit in the filter is checked."] _1, } impl From<FGM18_A> for bool { #[inline(always)] fn from(variant: FGM18_A) -> Self { match variant { FGM18_A::_0 => false, FGM18_A::_1 => true, } } } #[doc = "Reader of field `FGM18`"] pub type FGM18_R = crate::R<bool, FGM18_A>; impl FGM18_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> FGM18_A { match self.bits { false => FGM18_A::_0, true => FGM18_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { *self == FGM18_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { *self == FGM18_A::_1 } } #[doc = "Write proxy for field `FGM18`"] pub struct FGM18_W<'a> { w: &'a mut W, } impl<'a> FGM18_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: FGM18_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "The corresponding bit in the filter is \"don't care.\""] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(FGM18_A::_0) } #[doc = "The corresponding bit in the filter is checked."] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(FGM18_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 18)) | (((value as u32) & 0x01) << 18); self.w } } #[doc = "Rx FIFO Global Mask Bits\n\nValue on reset: 1"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum FGM19_A { #[doc = "0: The corresponding bit in the filter is \"don't care.\""] _0, #[doc = "1: The corresponding bit in the filter is checked."] _1, } impl From<FGM19_A> for bool { #[inline(always)] fn from(variant: FGM19_A) -> Self { match variant { FGM19_A::_0 => false, FGM19_A::_1 => true, } } } #[doc = "Reader of field `FGM19`"] pub type FGM19_R 
= crate::R<bool, FGM19_A>; impl FGM19_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> FGM19_A { match self.bits { false => FGM19_A::_0, true => FGM19_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { *self == FGM19_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { *self == FGM19_A::_1 } } #[doc = "Write proxy for field `FGM19`"] pub struct FGM19_W<'a> { w: &'a mut W, } impl<'a> FGM19_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: FGM19_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "The corresponding bit in the filter is \"don't care.\""] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(FGM19_A::_0) } #[doc = "The corresponding bit in the filter is checked."] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(FGM19_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 19)) | (((value as u32) & 0x01) << 19); self.w } } #[doc = "Rx FIFO Global Mask Bits\n\nValue on reset: 1"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum FGM20_A { #[doc = "0: The corresponding bit in the filter is \"don't care.\""] _0, #[doc = "1: The corresponding bit in the filter is checked."] _1, } impl From<FGM20_A> for bool { #[inline(always)] fn from(variant: FGM20_A) -> Self { match variant { FGM20_A::_0 => false, FGM20_A::_1 => true, } } } #[doc = "Reader of field `FGM20`"] pub type FGM20_R = crate::R<bool, FGM20_A>; impl FGM20_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> FGM20_A { match self.bits { false => FGM20_A::_0, true => FGM20_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { *self == FGM20_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { *self == FGM20_A::_1 } } #[doc = "Write proxy for field `FGM20`"] pub struct FGM20_W<'a> { w: &'a mut W, } impl<'a> FGM20_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: FGM20_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "The corresponding bit in the filter is \"don't care.\""] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(FGM20_A::_0) } #[doc = "The corresponding bit in the filter is checked."] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(FGM20_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 20)) | (((value as u32) & 0x01) << 20); self.w } } #[doc = "Rx FIFO Global Mask Bits\n\nValue on reset: 1"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum FGM21_A { #[doc = "0: The corresponding bit in the filter is \"don't care.\""] _0, #[doc = "1: The corresponding bit in the filter is checked."] _1, } impl From<FGM21_A> for bool { #[inline(always)] fn from(variant: FGM21_A) -> 
Self { match variant { FGM21_A::_0 => false, FGM21_A::_1 => true, } } } #[doc = "Reader of field `FGM21`"] pub type FGM21_R = crate::R<bool, FGM21_A>; impl FGM21_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> FGM21_A { match self.bits { false => FGM21_A::_0, true => FGM21_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { *self == FGM21_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { *self == FGM21_A::_1 } } #[doc = "Write proxy for field `FGM21`"] pub struct FGM21_W<'a> { w: &'a mut W, } impl<'a> FGM21_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: FGM21_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "The corresponding bit in the filter is \"don't care.\""] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(FGM21_A::_0) } #[doc = "The corresponding bit in the filter is checked."] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(FGM21_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 21)) | (((value as u32) & 0x01) << 21); self.w } } #[doc = "Rx FIFO Global Mask Bits\n\nValue on reset: 1"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum FGM22_A { #[doc = "0: The corresponding bit in the filter is \"don't care.\""] _0, #[doc = "1: The corresponding bit in the filter is checked."] _1, } impl From<FGM22_A> for bool { #[inline(always)] fn from(variant: FGM22_A) -> Self { match variant { FGM22_A::_0 => false, FGM22_A::_1 => true, } } } #[doc = "Reader of field `FGM22`"] pub type FGM22_R = crate::R<bool, FGM22_A>; impl FGM22_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> FGM22_A { match self.bits { false => FGM22_A::_0, true => FGM22_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { *self == FGM22_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { *self == FGM22_A::_1 } } #[doc = "Write proxy for field `FGM22`"] pub struct FGM22_W<'a> { w: &'a mut W, } impl<'a> FGM22_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: FGM22_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "The corresponding bit in the filter is \"don't care.\""] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(FGM22_A::_0) } #[doc = "The corresponding bit in the filter is checked."] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(FGM22_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 22)) | (((value as u32) & 0x01) << 22); self.w } } #[doc = "Rx FIFO Global Mask Bits\n\nValue on reset: 1"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum FGM23_A { #[doc = "0: The corresponding bit in the filter is \"don't care.\""] _0, #[doc = "1: The 
corresponding bit in the filter is checked."] _1, } impl From<FGM23_A> for bool { #[inline(always)] fn from(variant: FGM23_A) -> Self { match variant { FGM23_A::_0 => false, FGM23_A::_1 => true, } } } #[doc = "Reader of field `FGM23`"] pub type FGM23_R = crate::R<bool, FGM23_A>; impl FGM23_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> FGM23_A { match self.bits { false => FGM23_A::_0, true => FGM23_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { *self == FGM23_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { *self == FGM23_A::_1 } } #[doc = "Write proxy for field `FGM23`"] pub struct FGM23_W<'a> { w: &'a mut W, } impl<'a> FGM23_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: FGM23_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "The corresponding bit in the filter is \"don't care.\""] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(FGM23_A::_0) } #[doc = "The corresponding bit in the filter is checked."] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(FGM23_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 23)) | (((value as u32) & 0x01) << 23); self.w } } #[doc = "Rx FIFO Global Mask Bits\n\nValue on reset: 1"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum FGM24_A { #[doc = "0: The corresponding bit in the filter is \"don't care.\""] _0, #[doc = "1: The corresponding bit in the filter is checked."] _1, } impl From<FGM24_A> for bool { #[inline(always)] fn from(variant: FGM24_A) -> Self { match variant { FGM24_A::_0 => false, FGM24_A::_1 => true, } } } #[doc = "Reader of field `FGM24`"] pub type FGM24_R = crate::R<bool, FGM24_A>; impl FGM24_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> FGM24_A { match self.bits { false => FGM24_A::_0, true => FGM24_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { *self == FGM24_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { *self == FGM24_A::_1 } } #[doc = "Write proxy for field `FGM24`"] pub struct FGM24_W<'a> { w: &'a mut W, } impl<'a> FGM24_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: FGM24_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "The corresponding bit in the filter is \"don't care.\""] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(FGM24_A::_0) } #[doc = "The corresponding bit in the filter is checked."] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(FGM24_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 24)) | (((value as u32) & 0x01) << 24); self.w } } #[doc = "Rx FIFO Global Mask Bits\n\nValue on reset: 1"] #[derive(Clone, Copy, 
Debug, PartialEq)] pub enum FGM25_A { #[doc = "0: The corresponding bit in the filter is \"don't care.\""] _0, #[doc = "1: The corresponding bit in the filter is checked."] _1, } impl From<FGM25_A> for bool { #[inline(always)] fn from(variant: FGM25_A) -> Self { match variant { FGM25_A::_0 => false, FGM25_A::_1 => true, } } } #[doc = "Reader of field `FGM25`"] pub type FGM25_R = crate::R<bool, FGM25_A>; impl FGM25_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> FGM25_A { match self.bits { false => FGM25_A::_0, true => FGM25_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { *self == FGM25_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { *self == FGM25_A::_1 } } #[doc = "Write proxy for field `FGM25`"] pub struct FGM25_W<'a> { w: &'a mut W, } impl<'a> FGM25_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: FGM25_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "The corresponding bit in the filter is \"don't care.\""] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(FGM25_A::_0) } #[doc = "The corresponding bit in the filter is checked."] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(FGM25_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 25)) | (((value as u32) & 0x01) << 25); self.w } } #[doc = "Rx FIFO Global Mask Bits\n\nValue on reset: 1"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum FGM26_A { #[doc = "0: The corresponding bit in the filter is \"don't care.\""] _0, #[doc = "1: The corresponding bit in the filter is checked."] _1, } impl From<FGM26_A> for bool { #[inline(always)] fn from(variant: FGM26_A) -> Self { match variant { FGM26_A::_0 => false, FGM26_A::_1 => true, } } } #[doc = "Reader of field `FGM26`"] pub type FGM26_R = crate::R<bool, FGM26_A>; impl FGM26_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> FGM26_A { match self.bits { false => FGM26_A::_0, true => FGM26_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { *self == FGM26_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { *self == FGM26_A::_1 } } #[doc = "Write proxy for field `FGM26`"] pub struct FGM26_W<'a> { w: &'a mut W, } impl<'a> FGM26_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: FGM26_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "The corresponding bit in the filter is \"don't care.\""] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(FGM26_A::_0) } #[doc = "The corresponding bit in the filter is checked."] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(FGM26_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 26)) | 
(((value as u32) & 0x01) << 26); self.w } } #[doc = "Rx FIFO Global Mask Bits\n\nValue on reset: 1"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum FGM27_A { #[doc = "0: The corresponding bit in the filter is \"don't care.\""] _0, #[doc = "1: The corresponding bit in the filter is checked."] _1, } impl From<FGM27_A> for bool { #[inline(always)] fn from(variant: FGM27_A) -> Self { match variant { FGM27_A::_0 => false, FGM27_A::_1 => true, } } } #[doc = "Reader of field `FGM27`"] pub type FGM27_R = crate::R<bool, FGM27_A>; impl FGM27_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> FGM27_A { match self.bits { false => FGM27_A::_0, true => FGM27_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { *self == FGM27_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { *self == FGM27_A::_1 } } #[doc = "Write proxy for field `FGM27`"] pub struct FGM27_W<'a> { w: &'a mut W, } impl<'a> FGM27_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: FGM27_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "The corresponding bit in the filter is \"don't care.\""] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(FGM27_A::_0) } #[doc = "The corresponding bit in the filter is checked."] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(FGM27_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 27)) | (((value as u32) & 0x01) << 27); self.w } } #[doc = "Rx FIFO Global Mask Bits\n\nValue on reset: 1"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum FGM28_A { #[doc = "0: The corresponding bit in the filter is \"don't care.\""] _0, #[doc = "1: The corresponding bit in the filter is checked."] _1, } impl From<FGM28_A> for bool { #[inline(always)] fn from(variant: FGM28_A) -> Self { match variant { FGM28_A::_0 => false, FGM28_A::_1 => true, } } } #[doc = "Reader of field `FGM28`"] pub type FGM28_R = crate::R<bool, FGM28_A>; impl FGM28_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> FGM28_A { match self.bits { false => FGM28_A::_0, true => FGM28_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { *self == FGM28_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { *self == FGM28_A::_1 } } #[doc = "Write proxy for field `FGM28`"] pub struct FGM28_W<'a> { w: &'a mut W, } impl<'a> FGM28_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: FGM28_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "The corresponding bit in the filter is \"don't care.\""] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(FGM28_A::_0) } #[doc = "The corresponding bit in the filter is checked."] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(FGM28_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits 
to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 28)) | (((value as u32) & 0x01) << 28); self.w } } #[doc = "Rx FIFO Global Mask Bits\n\nValue on reset: 1"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum FGM29_A { #[doc = "0: The corresponding bit in the filter is \"don't care.\""] _0, #[doc = "1: The corresponding bit in the filter is checked."] _1, } impl From<FGM29_A> for bool { #[inline(always)] fn from(variant: FGM29_A) -> Self { match variant { FGM29_A::_0 => false, FGM29_A::_1 => true, } } } #[doc = "Reader of field `FGM29`"] pub type FGM29_R = crate::R<bool, FGM29_A>; impl FGM29_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> FGM29_A { match self.bits { false => FGM29_A::_0, true => FGM29_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { *self == FGM29_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { *self == FGM29_A::_1 } } #[doc = "Write proxy for field `FGM29`"] pub struct FGM29_W<'a> { w: &'a mut W, } impl<'a> FGM29_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: FGM29_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "The corresponding bit in the filter is \"don't care.\""] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(FGM29_A::_0) } #[doc = "The corresponding bit in the filter is checked."] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(FGM29_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 29)) | (((value as u32) & 0x01) << 29); self.w } } #[doc = "Rx FIFO Global Mask Bits\n\nValue on reset: 1"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum FGM30_A { #[doc = "0: The corresponding bit in the filter is \"don't care.\""] _0, #[doc = "1: The corresponding bit in the filter is checked."] _1, } impl From<FGM30_A> for bool { #[inline(always)] fn from(variant: FGM30_A) -> Self { match variant { FGM30_A::_0 => false, FGM30_A::_1 => true, } } } #[doc = "Reader of field `FGM30`"] pub type FGM30_R = crate::R<bool, FGM30_A>; impl FGM30_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> FGM30_A { match self.bits { false => FGM30_A::_0, true => FGM30_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { *self == FGM30_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { *self == FGM30_A::_1 } } #[doc = "Write proxy for field `FGM30`"] pub struct FGM30_W<'a> { w: &'a mut W, } impl<'a> FGM30_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: FGM30_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "The corresponding bit in the filter is \"don't care.\""] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(FGM30_A::_0) } #[doc = "The corresponding bit in the filter is checked."] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(FGM30_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = 
r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 30)) | (((value as u32) & 0x01) << 30); self.w } } #[doc = "Rx FIFO Global Mask Bits\n\nValue on reset: 1"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum FGM31_A { #[doc = "0: The corresponding bit in the filter is \"don't care.\""] _0, #[doc = "1: The corresponding bit in the filter is checked."] _1, } impl From<FGM31_A> for bool { #[inline(always)] fn from(variant: FGM31_A) -> Self { match variant { FGM31_A::_0 => false, FGM31_A::_1 => true, } } } #[doc = "Reader of field `FGM31`"] pub type FGM31_R = crate::R<bool, FGM31_A>; impl FGM31_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> FGM31_A { match self.bits { false => FGM31_A::_0, true => FGM31_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { *self == FGM31_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { *self == FGM31_A::_1 } } #[doc = "Write proxy for field `FGM31`"] pub struct FGM31_W<'a> { w: &'a mut W, } impl<'a> FGM31_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: FGM31_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "The corresponding bit in the filter is \"don't care.\""] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(FGM31_A::_0) } #[doc = "The corresponding bit in the filter is checked."] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(FGM31_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 31)) | (((value as u32) & 0x01) << 31); self.w } } impl R { #[doc = "Bit 0 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm0(&self) -> FGM0_R { FGM0_R::new((self.bits & 0x01) != 0) } #[doc = "Bit 1 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm1(&self) -> FGM1_R { FGM1_R::new(((self.bits >> 1) & 0x01) != 0) } #[doc = "Bit 2 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm2(&self) -> FGM2_R { FGM2_R::new(((self.bits >> 2) & 0x01) != 0) } #[doc = "Bit 3 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm3(&self) -> FGM3_R { FGM3_R::new(((self.bits >> 3) & 0x01) != 0) } #[doc = "Bit 4 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm4(&self) -> FGM4_R { FGM4_R::new(((self.bits >> 4) & 0x01) != 0) } #[doc = "Bit 5 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm5(&self) -> FGM5_R { FGM5_R::new(((self.bits >> 5) & 0x01) != 0) } #[doc = "Bit 6 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm6(&self) -> FGM6_R { FGM6_R::new(((self.bits >> 6) & 0x01) != 0) } #[doc = "Bit 7 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm7(&self) -> FGM7_R { FGM7_R::new(((self.bits >> 7) & 0x01) != 0) } #[doc = "Bit 8 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm8(&self) -> FGM8_R { FGM8_R::new(((self.bits >> 8) & 0x01) != 0) } #[doc = "Bit 9 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm9(&self) -> FGM9_R { FGM9_R::new(((self.bits >> 9) & 0x01) != 0) } #[doc = "Bit 10 - Rx FIFO 
Global Mask Bits"] #[inline(always)] pub fn fgm10(&self) -> FGM10_R { FGM10_R::new(((self.bits >> 10) & 0x01) != 0) } #[doc = "Bit 11 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm11(&self) -> FGM11_R { FGM11_R::new(((self.bits >> 11) & 0x01) != 0) } #[doc = "Bit 12 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm12(&self) -> FGM12_R { FGM12_R::new(((self.bits >> 12) & 0x01) != 0) } #[doc = "Bit 13 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm13(&self) -> FGM13_R { FGM13_R::new(((self.bits >> 13) & 0x01) != 0) } #[doc = "Bit 14 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm14(&self) -> FGM14_R { FGM14_R::new(((self.bits >> 14) & 0x01) != 0) } #[doc = "Bit 15 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm15(&self) -> FGM15_R { FGM15_R::new(((self.bits >> 15) & 0x01) != 0) } #[doc = "Bit 16 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm16(&self) -> FGM16_R { FGM16_R::new(((self.bits >> 16) & 0x01) != 0) } #[doc = "Bit 17 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm17(&self) -> FGM17_R { FGM17_R::new(((self.bits >> 17) & 0x01) != 0) } #[doc = "Bit 18 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm18(&self) -> FGM18_R { FGM18_R::new(((self.bits >> 18) & 0x01) != 0) } #[doc = "Bit 19 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm19(&self) -> FGM19_R { FGM19_R::new(((self.bits >> 19) & 0x01) != 0) } #[doc = "Bit 20 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm20(&self) -> FGM20_R { FGM20_R::new(((self.bits >> 20) & 0x01) != 0) } #[doc = "Bit 21 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm21(&self) -> FGM21_R { FGM21_R::new(((self.bits >> 21) & 0x01) != 0) } #[doc = "Bit 22 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm22(&self) -> FGM22_R { FGM22_R::new(((self.bits >> 22) & 0x01) != 0) } #[doc = "Bit 23 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm23(&self) -> FGM23_R { FGM23_R::new(((self.bits >> 23) & 0x01) != 0) } #[doc = "Bit 24 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm24(&self) -> FGM24_R { FGM24_R::new(((self.bits >> 24) & 0x01) != 0) } #[doc = "Bit 25 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm25(&self) -> FGM25_R { FGM25_R::new(((self.bits >> 25) & 0x01) != 0) } #[doc = "Bit 26 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm26(&self) -> FGM26_R { FGM26_R::new(((self.bits >> 26) & 0x01) != 0) } #[doc = "Bit 27 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm27(&self) -> FGM27_R { FGM27_R::new(((self.bits >> 27) & 0x01) != 0) } #[doc = "Bit 28 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm28(&self) -> FGM28_R { FGM28_R::new(((self.bits >> 28) & 0x01) != 0) } #[doc = "Bit 29 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm29(&self) -> FGM29_R { FGM29_R::new(((self.bits >> 29) & 0x01) != 0) } #[doc = "Bit 30 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm30(&self) -> FGM30_R { FGM30_R::new(((self.bits >> 30) & 0x01) != 0) } #[doc = "Bit 31 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm31(&self) -> FGM31_R { FGM31_R::new(((self.bits >> 31) & 0x01) != 0) } } impl W { #[doc = "Bit 0 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm0(&mut self) -> FGM0_W { FGM0_W { w: self } } #[doc = "Bit 1 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm1(&mut self) -> FGM1_W { FGM1_W { w: self } } #[doc = "Bit 2 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm2(&mut self) -> FGM2_W { FGM2_W { w: self } } #[doc = "Bit 3 - Rx FIFO Global Mask Bits"] 
#[inline(always)] pub fn fgm3(&mut self) -> FGM3_W { FGM3_W { w: self } } #[doc = "Bit 4 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm4(&mut self) -> FGM4_W { FGM4_W { w: self } } #[doc = "Bit 5 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm5(&mut self) -> FGM5_W { FGM5_W { w: self } } #[doc = "Bit 6 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm6(&mut self) -> FGM6_W { FGM6_W { w: self } } #[doc = "Bit 7 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm7(&mut self) -> FGM7_W { FGM7_W { w: self } } #[doc = "Bit 8 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm8(&mut self) -> FGM8_W { FGM8_W { w: self } } #[doc = "Bit 9 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm9(&mut self) -> FGM9_W { FGM9_W { w: self } } #[doc = "Bit 10 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm10(&mut self) -> FGM10_W { FGM10_W { w: self } } #[doc = "Bit 11 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm11(&mut self) -> FGM11_W { FGM11_W { w: self } } #[doc = "Bit 12 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm12(&mut self) -> FGM12_W { FGM12_W { w: self } } #[doc = "Bit 13 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm13(&mut self) -> FGM13_W { FGM13_W { w: self } } #[doc = "Bit 14 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm14(&mut self) -> FGM14_W { FGM14_W { w: self } } #[doc = "Bit 15 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm15(&mut self) -> FGM15_W { FGM15_W { w: self } } #[doc = "Bit 16 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm16(&mut self) -> FGM16_W { FGM16_W { w: self } } #[doc = "Bit 17 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm17(&mut self) -> FGM17_W { FGM17_W { w: self } } #[doc = "Bit 18 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm18(&mut self) -> FGM18_W { FGM18_W { w: self } } #[doc = "Bit 19 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm19(&mut self) -> FGM19_W { FGM19_W { w: self } } #[doc = "Bit 20 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm20(&mut self) -> FGM20_W { FGM20_W { w: self } } #[doc = "Bit 21 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm21(&mut self) -> FGM21_W { FGM21_W { w: self } } #[doc = "Bit 22 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm22(&mut self) -> FGM22_W { FGM22_W { w: self } } #[doc = "Bit 23 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm23(&mut self) -> FGM23_W { FGM23_W { w: self } } #[doc = "Bit 24 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm24(&mut self) -> FGM24_W { FGM24_W { w: self } } #[doc = "Bit 25 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm25(&mut self) -> FGM25_W { FGM25_W { w: self } } #[doc = "Bit 26 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm26(&mut self) -> FGM26_W { FGM26_W { w: self } } #[doc = "Bit 27 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm27(&mut self) -> FGM27_W { FGM27_W { w: self } } #[doc = "Bit 28 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm28(&mut self) -> FGM28_W { FGM28_W { w: self } } #[doc = "Bit 29 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm29(&mut self) -> FGM29_W { FGM29_W { w: self } } #[doc = "Bit 30 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm30(&mut self) -> FGM30_W { FGM30_W { w: self } } #[doc = "Bit 31 - Rx FIFO Global Mask Bits"] #[inline(always)] pub fn fgm31(&mut self) -> FGM31_W { FGM31_W { w: self } } }
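// Hedged usage sketch for the generated reader/writer API above. The
// peripheral and register names (`can`, `rxfgmask`) are assumptions made for
// illustration; only the `fgm0()`..`fgm31()` field accessors and their
// `_0`/`_1` variants come from this file, so the snippet is left commented
// out rather than presented as compilable code.
//
// can.rxfgmask.modify(|_, w| {
//     w.fgm0()._1() // bit 0 of the filter is checked
//         .fgm1()._0() // bit 1 is "don't care"
//         .fgm31()._1()
// });
// let bit31_checked = can.rxfgmask.read().fgm31().is_1();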
28.277091
86
0.528617
f77981f95e5c8123da7dac79d923cb1a23460ad3
3,525
//! Type debug formatting & pretty printing
use std::fmt;

use toc_span::Span;

use crate::{
    db,
    ty::{IntSize, NatSize, RealSize, TyRef, TypeKind},
};

use super::{Mutability, TypeId};

impl<'db, DB> fmt::Debug for TyRef<'db, DB>
where
    DB: db::TypeDatabase + ?Sized + 'db,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        emit_debug_ty(self.db, f, self.id)
    }
}

impl<'db, DB> fmt::Display for TyRef<'db, DB>
where
    DB: db::ConstEval + ?Sized + 'db,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        emit_display_ty(self.db, f, self.id)
    }
}

impl TypeKind {
    pub fn prefix(&self) -> &str {
        match self {
            // Sized charseqs use the parent type as the basename
            TypeKind::CharN(_) => "char",
            TypeKind::StringN(_) => "string",
            // Refs are not shown to the user
            TypeKind::Ref(_, _) => unreachable!("refs should be peeled before display"),
            _ => self.debug_prefix(),
        }
    }

    pub fn debug_prefix(&self) -> &str {
        match self {
            TypeKind::Error => "<error>",
            TypeKind::Boolean => "boolean",
            TypeKind::Int(IntSize::Int) => "int",
            TypeKind::Int(IntSize::Int1) => "int1",
            TypeKind::Int(IntSize::Int2) => "int2",
            TypeKind::Int(IntSize::Int4) => "int4",
            TypeKind::Nat(NatSize::Nat) => "nat",
            TypeKind::Nat(NatSize::Nat1) => "nat1",
            TypeKind::Nat(NatSize::Nat2) => "nat2",
            TypeKind::Nat(NatSize::Nat4) => "nat4",
            TypeKind::Nat(NatSize::AddressInt) => "addressint",
            TypeKind::Real(RealSize::Real) => "real",
            TypeKind::Real(RealSize::Real4) => "real4",
            TypeKind::Real(RealSize::Real8) => "real8",
            TypeKind::Integer => "{integer}",
            TypeKind::Char => "char",
            TypeKind::String => "string",
            TypeKind::CharN(_) => "char_n",
            TypeKind::StringN(_) => "string_n",
            TypeKind::Ref(Mutability::Const, _) => "ref",
            TypeKind::Ref(Mutability::Var, _) => "ref_mut",
        }
    }
}

fn emit_debug_ty<'db, DB>(db: &'db DB, out: &mut dyn fmt::Write, type_id: TypeId) -> fmt::Result
where
    DB: db::TypeDatabase + ?Sized + 'db,
{
    let ty = type_id.in_db(db);
    let ty_kind = &*ty.kind();

    out.write_str(ty_kind.debug_prefix())?;

    // Extra bits
    match ty_kind {
        TypeKind::StringN(seq) | TypeKind::CharN(seq) => {
            out.write_fmt(format_args!(" {:?}", seq))?
        }
        TypeKind::Ref(_, to) => {
            out.write_char(' ')?;
            emit_debug_ty(db, out, *to)?
        }
        _ => {}
    }

    Ok(())
}

fn emit_display_ty<'db, DB>(db: &'db DB, out: &mut dyn fmt::Write, type_id: TypeId) -> fmt::Result
where
    DB: db::ConstEval + ?Sized + 'db,
{
    let ty = type_id.in_db(db);
    let ty_kind = &*ty.kind();

    out.write_str(ty_kind.prefix())?;

    // Extra bits
    match ty_kind {
        TypeKind::StringN(seq) | TypeKind::CharN(seq) => {
            out.write_char('(')?;
            match seq.fixed_len(db, Span::default()) {
                Ok(None) => out.write_char('*')?,
                Ok(Some(v)) => out.write_fmt(format_args!("{}", v))?,
                Err(_) => unreachable!("should not show errors!"),
            }
            out.write_char(')')?;
        }
        TypeKind::Ref(_, to) => emit_display_ty(db, out, *to)?,
        _ => {}
    }

    Ok(())
}
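// Hedged examples of the notation the emitters above produce, inferred from
// the match arms rather than from running the code:
//
//   Display: `string(5)` for a `StringN` with fixed length 5, `char(*)` for a
//            `CharN` with a dynamic length, plain `int`, `nat4`, etc. for the
//            scalar kinds. `Ref` is expected to be peeled first; `prefix()`
//            panics on it via `unreachable!`.
//   Debug:   the raw prefix plus payload, e.g. `string_n` followed by the seq
//            value, or `ref_mut int` for `TypeKind::Ref(Mutability::Var, int)`.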
29.132231
98
0.516596
71ac89bf2dca90551f7308adab0e36f45f741f1f
1,944
use crate::*;
use codespan_reporting::diagnostic::{Diagnostic, Label};
use codespan_reporting::term;
use core::fmt::{self, Error as FmtError, Result as FmtResult, Write};

// TODO Placeholder system. Eventually we'll flesh this out maybe. For now, 'tis
// a simple thing with a bit of needless complexity
// - Albert Liu, Jan 23, 2022 Sun 22:21 EST
#[derive(Debug, PartialEq, Hash)]
pub enum Error {
    Simple { message: String, loc: CodeLoc },
    StaticSimple { message: &'static str, loc: CodeLoc },
}

#[derive(Debug)]
pub struct ErrorMessage {
    message: String,
    loc: CodeLoc,
}

impl Error {
    pub fn render(
        &self,
        files: &FileDb,
        out: &mut impl term::termcolor::WriteColor,
    ) -> fmt::Result {
        let mut out_labels = Vec::new();
        let mut out_message: String;

        match self {
            Error::Simple { message, loc } => {
                out_labels.push(loc.primary().with_message(""));
                out_message = message.to_string();
            }
            Error::StaticSimple { message, loc } => {
                out_labels.push(loc.primary().with_message(""));
                out_message = message.to_string();
            }
        };

        let diagnostic = Diagnostic::error()
            .with_message(&out_message)
            .with_labels(out_labels);
        let config = codespan_reporting::term::Config::default();

        return term::emit(out, &config, &files, &diagnostic).map_err(|_| core::fmt::Error);
    }
}

impl Error {
    pub fn new(s: impl Into<String>, loc: CodeLoc) -> Self {
        return Self::Simple {
            message: s.into(),
            loc,
        };
    }

    pub fn expected(s: &'static str, loc: CodeLoc) -> Self {
        let mut message = String::new();

        message += "expected ";
        message += s;
        message += " here";

        return Self::Simple { message, loc };
    }
}
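// Hedged usage sketch, left commented out because `CodeLoc`, `FileDb`, and
// the color stream come from the surrounding crate and are not shown here:
// building an "expected" error and rendering it through codespan-reporting.
// `termcolor::Buffer` implements `WriteColor`, so it works as a sink.
//
// let err = Error::expected("an identifier", loc); // "expected an identifier here"
// let mut out = term::termcolor::Buffer::ansi();
// err.render(&files, &mut out)?;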
27.771429
91
0.561728
26c34294585ebd19497484b20509583badf66023
6,330
use hal::blocking::i2c; use modular_bitfield::prelude::*; const I2C_ADDR: u8 = 0x09; pub(crate) trait ReadOnlyRegister: From<u8> { const ADDR: u8; fn read<E, I2C: i2c::WriteRead<Error = E>>(i2c: &mut I2C) -> Result<Self, E> { let buf = &mut [0u8; 1]; i2c.write_read(I2C_ADDR, &[Self::ADDR], buf) .map(|_| buf[0].into()) } } impl<RWR: ReadWriteRegister> ReadOnlyRegister for RWR { const ADDR: u8 = RWR::ADDR; } pub(crate) trait ReadWriteRegister: From<u8> + Into<u8> { const ADDR: u8; fn write<E, I2C: i2c::Write<Error = E>>(self, i2c: &mut I2C) -> Result<(), E> { i2c.write(I2C_ADDR, &[Self::ADDR, self.into()]) } } macro_rules! register { ($Reg:ident, $addr:literal, RO) => { impl ReadOnlyRegister for $Reg { const ADDR: u8 = $addr; } impl From<u8> for $Reg { fn from(raw: u8) -> Self { Self::from_bytes([raw]) } } }; ($Reg:ident, $addr:literal, RW) => { impl ReadWriteRegister for $Reg { const ADDR: u8 = $addr; } impl From<u8> for $Reg { fn from(raw: u8) -> Self { Self::from_bytes([raw]) } } impl From<$Reg> for u8 { fn from(reg: $Reg) -> Self { reg.into_bytes()[0] } } }; } macro_rules! register_map { ($($Reg:ident: $addr:literal, $rw:tt,)+) => { $( register!($Reg, $addr, $rw); )+ }; } register_map!( InputSourceControl: 0x00, RW, PowerOnConfiguration: 0x01, RW, ChargeCurrentControl: 0x02, RW, DischargeAndTerminationCurrent: 0x03, RW, ChargeVoltageControl: 0x04, RW, ChargeTerminationAndTimerControl: 0x05, RW, MiscellaneousOperationControl: 0x06, RW, SystemStatus: 0x07, RO, FaultFlags: 0x08, RO, ); #[derive(BitfieldSpecifier, Debug, Eq, PartialEq)] pub enum ChargeStatus { NotCharging, PreCharge, Charge, ChargeDone, } #[bitfield] #[derive(Debug)] pub struct SystemStatus { #[skip(setters)] pub thermal_regulation: bool, #[skip(setters)] pub power_good: bool, #[skip(setters)] pub power_path_enabled: bool, #[skip(setters)] pub charge_status: ChargeStatus, #[skip(setters)] pub revision: B2, #[skip] __: bool, } #[bitfield] #[derive(Debug)] pub struct FaultFlags { #[skip] __: B2, #[skip(setters)] pub safety_timer_expired: bool, #[skip(setters)] pub battery_fault: bool, #[skip(setters)] pub thermal_shutdown: bool, #[skip(setters)] pub input_fault: bool, #[skip(setters)] pub watchdog_timer_expired: bool, #[skip] __: bool, } #[derive(BitfieldSpecifier, Debug, Eq, PartialEq)] pub enum ThermalThreshold { T60C, T80C, T100C, T120C, } #[bitfield] #[derive(Debug)] pub struct MiscellaneousOperationControl { pub thermal_regulation_threshold: ThermalThreshold, #[skip] __: bool, pub ntc_enabled: bool, #[skip] __: bool, pub battery_fet_disabled: bool, pub extended_safety_timer: bool, #[skip] __: bool, } impl Default for MiscellaneousOperationControl { fn default() -> Self { Self { bytes: [0b0000_1011], } } } #[derive(BitfieldSpecifier, Debug, Eq, PartialEq)] pub enum SafetyTimerPeriod { P20h, P5h, P8h, P12h, } #[derive(BitfieldSpecifier, Debug, Eq, PartialEq)] pub enum WatchdogTimerLimit { Disabled, L40s, L80s, L160s, } #[bitfield] #[derive(Debug)] pub struct ChargeTerminationAndTimerControl { pub termination_control_enabled: bool, pub timer_period: SafetyTimerPeriod, pub timer_enabled: bool, pub timer_limit: WatchdogTimerLimit, pub termination_enabled: bool, #[skip] __: bool, } impl Default for ChargeTerminationAndTimerControl { fn default() -> Self { Self { bytes: [0b0100_1010], } } } #[derive(BitfieldSpecifier, Debug, Eq, PartialEq)] pub enum RechargeThreshold { U150mV, U300mV, } #[derive(BitfieldSpecifier, Debug, Eq, PartialEq)] pub enum PrechargeThreshold { U2800mV, U3000mV, } #[bitfield] #[derive(Debug)] pub struct 
ChargeVoltageControl { pub recharge_threshold: RechargeThreshold, pub precharge_threshold: PrechargeThreshold, pub regulation_voltage: B6, } impl Default for ChargeVoltageControl { fn default() -> Self { Self { bytes: [0b1010_0011], } } } #[derive(BitfieldSpecifier, Debug, Eq, PartialEq)] pub enum TerminalCurrent { I24mA, I52mA, I80mA, I108mA, } #[bitfield] #[derive(Debug)] pub struct DischargeAndTerminationCurrent { pub terminal_current: TerminalCurrent, #[skip] __: bool, pub discharge_current_limit: B4, #[skip] __: bool, } impl Default for DischargeAndTerminationCurrent { fn default() -> Self { Self { bytes: [0b0100_1001], } } } #[bitfield] #[derive(Debug)] pub struct ChargeCurrentControl { pub charge_current: B5, #[skip] __: B3, } impl Default for ChargeCurrentControl { fn default() -> Self { Self { bytes: [0b0000_0111], } } } #[derive(BitfieldSpecifier, Debug, Eq, PartialEq)] pub enum UVLOThreshold { U2400mV, U2500mV, U2600mV, U2700mV, U2800mV, U2900mV, U3000mV, U3100mV, } #[bitfield] #[derive(Debug)] pub struct PowerOnConfiguration { pub uvlo_threshold: UVLOThreshold, pub charge_disabled: bool, #[skip] __: B2, pub watchdog_timer_reset: bool, pub settings_reset: bool, } impl Default for PowerOnConfiguration { fn default() -> Self { Self { bytes: [0b0000_0100], } } } #[derive(BitfieldSpecifier, Debug, Eq, PartialEq)] pub enum InputCurrentLimit { I77mA, I118mA, I345mA, I470mA, I540mA, I635mA, I734mA, I993mA, } #[bitfield] #[derive(Debug)] pub struct InputSourceControl { pub input_current_limit: InputCurrentLimit, pub input_minimum_voltage: B4, pub ldo_fet_disabled: bool, } impl Default for InputSourceControl { fn default() -> Self { Self { bytes: [0b0100_1011], } } }
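// Hedged sketch of how the register traits above compose with an embedded-hal
// I2C bus. The traits are `pub(crate)`, so this only makes sense inside the
// crate; it is left commented out for that reason. The `charge_status()`
// getter is generated by `#[bitfield]` from the field of the same name.
//
// fn is_charge_done<E, I2C>(i2c: &mut I2C) -> Result<bool, E>
// where
//     I2C: i2c::WriteRead<Error = E>,
// {
//     let status = SystemStatus::read(i2c)?;
//     Ok(status.charge_status() == ChargeStatus::ChargeDone)
// }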
19.84326
83
0.603949
bf6913fce7379add49eaef50d8f1dbb3e16dba6c
6,642
use core::ops::RangeInclusive; use std::collections::{BTreeMap, HashMap}; use itertools::iproduct; use crate::{ cache::{ error::CacheResult, index::{self, CacheIndex}, indextype::IndexType, }, definitions::mapsquares::{GroupMapSquare, MapFileType, MapSquare, MapSquares}, }; impl MapSquares { pub fn new(config: &crate::cli::Config) -> CacheResult<MapSquares> { let index = CacheIndex::new(IndexType::MAPSV2, &config.input)?; let land_hashes: HashMap<i32, (u8, u8)> = iproduct!(0..100, 0..200) .map(|(i, j)| (crate::cache::hash::hash_djb2(format!("{}{}_{}", MapFileType::LOCATIONS, i, j)), (i, j))) .collect(); let map_hashes: HashMap<i32, (u8, u8)> = iproduct!(0..100, 0..200) .map(|(i, j)| (crate::cache::hash::hash_djb2(format!("{}{}_{}", MapFileType::TILES, i, j)), (i, j))) .collect(); let env_hashes: HashMap<i32, (u8, u8)> = iproduct!(0..100, 0..200) .map(|(i, j)| (crate::cache::hash::hash_djb2(format!("{}{}_{}", MapFileType::ENVIRONMENT, i, j)), (i, j))) .collect(); let mapping = index .metadatas() .iter() .map(|(_, m)| { let name_hash = m.name().unwrap(); if let Some((i, j)) = land_hashes.get(&name_hash) { (("l", *i, *j), m.archive_id()) } else if let Some((i, j)) = map_hashes.get(&name_hash) { (("m", *i, *j), m.archive_id()) } else if let Some((i, j)) = env_hashes.get(&name_hash) { (("e", *i, *j), m.archive_id()) } else { (("ul_or_um_dont_care", 0, 0), m.archive_id()) } }) .collect(); Ok(MapSquares { index, mapping }) } pub fn get(&self, i: u8, j: u8) -> CacheResult<MapSquare> { let land = self.mapping.get(&("l", i, j)).unwrap(); let map = self.mapping.get(&("m", i, j)).unwrap(); let env = self.mapping.get(&("e", i, j)).copied(); let xtea = self.index.xteas().as_ref().unwrap().get(&(((i as u32) << 8) | j as u32)); let sq = MapSquare::new(&self.index, xtea.copied(), *land, *map, env, i, j)?; Ok(sq) } } /// Iterates over all [`MapSquare`]s in arbitrary order. pub struct MapSquareIterator { pub(crate) mapsquares: MapSquares, pub(crate) state: std::vec::IntoIter<(u8, u8)>, } impl Iterator for MapSquareIterator { type Item = CacheResult<MapSquare>; fn next(&mut self) -> Option<Self::Item> { self.state.next().map(|(i, j)| self.mapsquares.get(i, j)) } fn size_hint(&self) -> (usize, Option<usize>) { self.state.size_hint() } } /// Iterates over [`GroupMapSquare`] in arbitrary order. pub struct GroupMapSquareIterator { inner: CacheIndex<index::Initial>, range_i: RangeInclusive<i32>, range_j: RangeInclusive<i32>, mapping: BTreeMap<(&'static str, u8, u8), u32>, state: std::vec::IntoIter<(u8, u8)>, } impl GroupMapSquareIterator { /// Constructor for [`GroupMapSquareIterator`]. 
pub fn new(range_i: RangeInclusive<i32>, range_j: RangeInclusive<i32>, config: &crate::cli::Config) -> CacheResult<GroupMapSquareIterator> { let inner = CacheIndex::new(IndexType::MAPSV2, &config.input)?; let land_hashes: HashMap<i32, (u8, u8)> = iproduct!(0..100, 0..200) .map(|(i, j)| (crate::cache::hash::hash_djb2(format!("l{}_{}", i, j)), (i, j))) .collect(); let map_hashes: HashMap<i32, (u8, u8)> = iproduct!(0..100, 0..200) .map(|(i, j)| (crate::cache::hash::hash_djb2(format!("m{}_{}", i, j)), (i, j))) .collect(); let env_hashes: HashMap<i32, (u8, u8)> = iproduct!(0..100, 0..200) .map(|(i, j)| (crate::cache::hash::hash_djb2(format!("ul{}_{}", i, j)), (i, j))) .collect(); let env_hashes2: HashMap<i32, (u8, u8)> = iproduct!(0..100, 0..200) .map(|(i, j)| (crate::cache::hash::hash_djb2(format!("e{}_{}", i, j)), (i, j))) .collect(); let mapping: BTreeMap<(&'static str, u8, u8), u32> = inner .metadatas() .iter() .map(|(_, m)| { let name_hash = m.name().unwrap(); if let Some((i, j)) = land_hashes.get(&name_hash) { (("l", *i, *j), m.archive_id()) } else if let Some((i, j)) = map_hashes.get(&name_hash) { (("m", *i, *j), m.archive_id()) } else if let Some((i, j)) = env_hashes.get(&name_hash) { (("e", *i, *j), m.archive_id()) } else { (("ul_or_um_dont_care", 0, 0), m.archive_id()) } }) .collect(); let state = mapping .keys() .filter_map(|(ty, i, j)| if *ty == "m" { Some((*i, *j)) } else { None }) .collect::<Vec<_>>() .into_iter(); Ok(GroupMapSquareIterator { inner, range_i, range_j, mapping, state, }) } } impl Iterator for GroupMapSquareIterator { type Item = GroupMapSquare; fn next(&mut self) -> Option<Self::Item> { self.state.next().map(|(core_i, core_j)| { let coordinates = iproduct!(self.range_i.clone(), self.range_j.clone()).map(|(di, dj)| ((di + (core_i as i32)) as u8, ((dj + (core_j as i32)) as u8))); let mapsquares: HashMap<(u8, u8), MapSquare> = coordinates .filter_map(|(i, j)| { if let Some(land) = self.mapping.get(&("l", i, j)) { let map = self.mapping.get(&("m", i, j)).unwrap(); let env = self.mapping.get(&("e", i, j)).copied(); let xtea = self.inner.xteas().as_ref().unwrap().get(&(((i as u32) << 8) | j as u32)); MapSquare::new(&self.inner, xtea.copied(), *land, *map, env, i, j).ok() } else { None } }) .map(|sq| ((sq.i, sq.j), sq)) .collect(); if !(mapsquares.contains_key(&(core_i, core_j))) { println!("failed reading mapsquare {}, {}", core_i, core_j); }; GroupMapSquare { core_i, core_j, mapsquares } }) } fn size_hint(&self) -> (usize, Option<usize>) { self.state.size_hint() } } impl ExactSizeIterator for GroupMapSquareIterator {}
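// The name-to-archive mapping above depends on `hash_djb2` over names such as
// "m12_34". Below is a minimal self-contained sketch of the classic djb2
// variant ("hash * 33 + byte", seed 5381); the crate's exact implementation
// (seed, signedness, wrapping behaviour) may differ, so treat this as
// illustrative only.
fn djb2(input: &str) -> i32 {
    let mut hash: i32 = 5381;
    for byte in input.bytes() {
        // wrapping arithmetic, like the original C formulation
        hash = hash.wrapping_mul(33).wrapping_add(byte as i32);
    }
    hash
}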
37.954286
149
0.497742
2f0852d01ac58d931d3befae1d58d66f1fc69634
1,794
#![cfg_attr(feature = "cargo-clippy", allow(needless_pass_by_value))]
//! There are two levels of statefulness in actix-web. An Application has state
//! that is shared across all handlers within the same Application,
//! and an individual handler can have its own state.
//!
//! > **Note**: the http server accepts an application factory rather than an
//! > application instance. The http server constructs an application instance
//! > for each thread, thus application state must be constructed multiple
//! > times. If you want to share state between different threads, a shared
//! > object should be used, e.g. `Arc`.
//!
//! Check [user guide](https://actix.rs/book/actix-web/sec-2-application.html) for more info.

extern crate actix;
extern crate actix_web;
extern crate env_logger;

use std::sync::Arc;
use std::sync::Mutex;

use actix_web::{middleware, server, App, HttpRequest, HttpResponse};

/// Application state
struct AppState {
    counter: Arc<Mutex<usize>>,
}

/// simple handler
fn index(req: &HttpRequest<AppState>) -> HttpResponse {
    println!("{:?}", req);
    *(req.state().counter.lock().unwrap()) += 1;

    HttpResponse::Ok().body(format!(
        "Num of requests: {}",
        req.state().counter.lock().unwrap()
    ))
}

fn main() {
    ::std::env::set_var("RUST_LOG", "actix_web=info");
    env_logger::init();
    let sys = actix::System::new("ws-example");

    server::new(|| {
        App::with_state(AppState {
            counter: Arc::new(Mutex::new(0)),
        }) // <- create app with state
            // enable logger
            .middleware(middleware::Logger::default())
            // register simple handler, handle all methods
            .resource("/", |r| r.f(index))
    })
    .bind("127.0.0.1:8080")
    .unwrap()
    .start();

    println!("Started http server: 127.0.0.1:8080");
    let _ = sys.run();
}
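// Quick manual check of the shared counter (hypothetical shell session; the
// exact output format comes from the `index` handler above):
//
// $ curl http://127.0.0.1:8080/
// Num of requests: 1
// $ curl http://127.0.0.1:8080/
// Num of requests: 2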
33.222222
96
0.653289
f4ae47af4fb0624d4bb7c430a84af375b2090313
1,827
use std::time::Duration;

use Sample;
use Source;

/// Internal function that builds a `Pausable` object.
pub fn pausable<I>(source: I, paused: bool) -> Pausable<I> {
    Pausable {
        input: source,
        paused: paused,
    }
}

#[derive(Clone, Debug)]
pub struct Pausable<I> {
    input: I,
    paused: bool,
}

impl<I> Pausable<I> {
    /// Sets whether the filter applies.
    ///
    /// If set to true, the inner sound stops playing and no samples are processed from it.
    #[inline]
    pub fn set_paused(&mut self, paused: bool) {
        self.paused = paused;
    }

    /// Returns a reference to the inner source.
    #[inline]
    pub fn inner(&self) -> &I {
        &self.input
    }

    /// Returns a mutable reference to the inner source.
    #[inline]
    pub fn inner_mut(&mut self) -> &mut I {
        &mut self.input
    }

    /// Returns the inner source.
    #[inline]
    pub fn into_inner(self) -> I {
        self.input
    }
}

impl<I> Iterator for Pausable<I>
where
    I: Source,
    I::Item: Sample,
{
    type Item = I::Item;

    #[inline]
    fn next(&mut self) -> Option<I::Item> {
        if self.paused {
            return Some(I::Item::zero_value());
        }

        self.input.next()
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.input.size_hint()
    }
}

impl<I> Source for Pausable<I>
where
    I: Source,
    I::Item: Sample,
{
    #[inline]
    fn current_frame_len(&self) -> Option<usize> {
        self.input.current_frame_len()
    }

    #[inline]
    fn channels(&self) -> u16 {
        self.input.channels()
    }

    #[inline]
    fn samples_rate(&self) -> u32 {
        self.input.samples_rate()
    }

    #[inline]
    fn total_duration(&self) -> Option<Duration> {
        self.input.total_duration()
    }
}
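// A self-contained analog of the pattern above: while "paused", substitute a
// neutral element instead of consuming the inner iterator, so downstream
// consumers keep pulling items at an unchanged rate. Names are illustrative;
// the real type additionally forwards the `Source` metadata.
struct PausableIter<I> {
    inner: I,
    paused: bool,
}

impl<I: Iterator<Item = i32>> Iterator for PausableIter<I> {
    type Item = i32;

    fn next(&mut self) -> Option<i32> {
        if self.paused {
            // Like `Sample::zero_value()`: emit silence, keep the stream alive.
            return Some(0);
        }
        self.inner.next()
    }
}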
19.645161
91
0.558292
03518585d041a6cb74330c39744fe623c2c757c1
521
use std::time;

use crate::format::PixelFormat;

#[derive(Clone, Debug)]
/// Image stream description
pub struct Descriptor {
    /// Width in pixels
    pub width: u32,
    /// Height in pixels
    pub height: u32,
    /// PixelFormat
    pub pixfmt: PixelFormat,
    /// Frame timing as duration
    pub interval: time::Duration,
}

#[derive(Clone, Debug)]
/// Stream settings, needed on stream open operation.
pub struct DeviceStreamSettings<'a> {
    pub desc: &'a Descriptor,
    pub buffers_count: Option<usize>,
}
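// Hedged construction sketch, left commented out because the `PixelFormat`
// variant is an assumption (the format module is not shown here). The
// interval encodes frame timing, so ~33.3 ms gives roughly 30 fps:
//
// let desc = Descriptor {
//     width: 1280,
//     height: 720,
//     pixfmt: PixelFormat::Rgb24,
//     interval: time::Duration::from_micros(33_333),
// };
// let settings = DeviceStreamSettings { desc: &desc, buffers_count: Some(4) };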
21.708333
53
0.669866
90ab29c86c5d73f2810dd59a0bf82f284aba9412
2,810
use nix::sys::signal::{kill, Signal};
use std::time::Duration;

pub mod utils;
use utils::*;

// Test termination section
#[test]
fn test_termination_wait() {
    // A signal handler will capture our gentle signal,
    // so Horust will use the force to stop it:
    let (mut cmd, temp_dir) = get_cli();
    // this script traps SIGINT / SIGTERM / EXIT
    let script = r#"#!/usr/bin/env bash
trap_with_arg() {
    func="$1" ; shift
    for sig ; do
        trap "$func $sig" "$sig"
    done
}
func_trap() {
    :
}
trap_with_arg func_trap INT TERM EXIT
while true ; do
    sleep 1
done
"#;
    let service = r#"[termination]
wait = "1s""#;
    store_service(temp_dir.path(), script, Some(service), None);

    let recv = run_async(&mut cmd, true);
    kill(recv.pid, Signal::SIGINT).expect("kill");
    recv.recv_or_kill(Duration::from_secs(5));
}

fn test_termination_custom_signal(friendly_name: &str) {
    let (mut cmd, temp_dir) = get_cli();
    // this script traps the given signal
    let script = format!(
        r#"#!/usr/bin/env bash
trap_with_arg() {{
    func="$1" ; shift
    for sig ; do
        trap "$func $sig" "$sig"
    done
}}
func_trap() {{
    if [ "$1" == "{0}" ] ; then
        exit 0
    fi
}}
trap_with_arg func_trap {0}
while true ; do
    sleep 0.3
done
"#,
        friendly_name
    );
    let service = format!(
        r#"[termination]
signal = "{}"
wait = "10s""#,
        friendly_name
    );
    // wait is higher than the test duration.
    store_service(
        temp_dir.path(),
        script.as_str(),
        Some(service.as_str()),
        None,
    );

    let recv = run_async(&mut cmd, true);
    kill(recv.pid, Signal::SIGTERM).expect("kill");
    recv.recv_or_kill(Duration::from_secs(20));
}

#[test]
fn test_termination_all_custom_signals() {
    vec![
        "HUP", "INT", "QUIT", "ILL", "TRAP", "ABRT", "BUS", "FPE", "USR1", "SEGV", "USR2",
        "PIPE", "ALRM", "TERM", "STKFLT", "CHLD", "CONT", "STOP", "TSTP", "TTIN", "TTOU",
        "URG", "XCPU", "XFSZ", "VTALRM", "PROF", "WINCH", "IO", "PWR", "SYS",
    ]
    .into_iter()
    .for_each(|friendly_name| {
        println!("Testing: {}", friendly_name);
        test_termination_custom_signal(friendly_name);
        println!("Test done: {}", friendly_name);
    })
}

#[test]
fn test_termination_die_if_failed() {
    let (mut cmd, temp_dir) = get_cli();
    let script = r#"#!/usr/bin/env bash
while true ; do
    sleep 1
done
"#;
    let service = r#"[termination]
wait = "0s"
die-if-failed = ["a.toml"]"#;
    store_service(temp_dir.path(), script, Some(service), None);

    let script = r#"#!/usr/bin/env bash
sleep 1
exit 1
"#;
    store_service(temp_dir.path(), script, None, Some("a"));

    let recv = run_async(&mut cmd, true);
    recv.recv_or_kill(Duration::from_secs(10));
}
24.224138
98
0.593238
033e4614511908b424927ec5c66e1a197bd4acef
17,432
#[doc = "Reader of register EP10_OUT_BUFFER_CONTROL"] pub type R = crate::R<u32, super::EP10_OUT_BUFFER_CONTROL>; #[doc = "Writer for register EP10_OUT_BUFFER_CONTROL"] pub type W = crate::W<u32, super::EP10_OUT_BUFFER_CONTROL>; #[doc = "Register EP10_OUT_BUFFER_CONTROL `reset()`'s with value 0"] impl crate::ResetValue for super::EP10_OUT_BUFFER_CONTROL { type Type = u32; #[inline(always)] fn reset_value() -> Self::Type { 0 } } #[doc = "Reader of field `FULL_1`"] pub type FULL_1_R = crate::R<bool, bool>; #[doc = "Write proxy for field `FULL_1`"] pub struct FULL_1_W<'a> { w: &'a mut W, } impl<'a> FULL_1_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 31)) | (((value as u32) & 0x01) << 31); self.w } } #[doc = "Reader of field `LAST_1`"] pub type LAST_1_R = crate::R<bool, bool>; #[doc = "Write proxy for field `LAST_1`"] pub struct LAST_1_W<'a> { w: &'a mut W, } impl<'a> LAST_1_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 30)) | (((value as u32) & 0x01) << 30); self.w } } #[doc = "Reader of field `PID_1`"] pub type PID_1_R = crate::R<bool, bool>; #[doc = "Write proxy for field `PID_1`"] pub struct PID_1_W<'a> { w: &'a mut W, } impl<'a> PID_1_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 29)) | (((value as u32) & 0x01) << 29); self.w } } #[doc = "The number of bytes buffer 1 is offset from buffer 0 in Isochronous mode. 
Only valid in double buffered mode for an Isochronous endpoint.\\n For a non Isochronous endpoint the offset is always 64 bytes.\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] #[repr(u8)] pub enum DOUBLE_BUFFER_ISO_OFFSET_A { #[doc = "0: `0`"] _128 = 0, #[doc = "1: `1`"] _256 = 1, #[doc = "2: `10`"] _512 = 2, #[doc = "3: `11`"] _1024 = 3, } impl From<DOUBLE_BUFFER_ISO_OFFSET_A> for u8 { #[inline(always)] fn from(variant: DOUBLE_BUFFER_ISO_OFFSET_A) -> Self { variant as _ } } #[doc = "Reader of field `DOUBLE_BUFFER_ISO_OFFSET`"] pub type DOUBLE_BUFFER_ISO_OFFSET_R = crate::R<u8, DOUBLE_BUFFER_ISO_OFFSET_A>; impl DOUBLE_BUFFER_ISO_OFFSET_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> DOUBLE_BUFFER_ISO_OFFSET_A { match self.bits { 0 => DOUBLE_BUFFER_ISO_OFFSET_A::_128, 1 => DOUBLE_BUFFER_ISO_OFFSET_A::_256, 2 => DOUBLE_BUFFER_ISO_OFFSET_A::_512, 3 => DOUBLE_BUFFER_ISO_OFFSET_A::_1024, _ => unreachable!(), } } #[doc = "Checks if the value of the field is `_128`"] #[inline(always)] pub fn is_128(&self) -> bool { *self == DOUBLE_BUFFER_ISO_OFFSET_A::_128 } #[doc = "Checks if the value of the field is `_256`"] #[inline(always)] pub fn is_256(&self) -> bool { *self == DOUBLE_BUFFER_ISO_OFFSET_A::_256 } #[doc = "Checks if the value of the field is `_512`"] #[inline(always)] pub fn is_512(&self) -> bool { *self == DOUBLE_BUFFER_ISO_OFFSET_A::_512 } #[doc = "Checks if the value of the field is `_1024`"] #[inline(always)] pub fn is_1024(&self) -> bool { *self == DOUBLE_BUFFER_ISO_OFFSET_A::_1024 } } #[doc = "Write proxy for field `DOUBLE_BUFFER_ISO_OFFSET`"] pub struct DOUBLE_BUFFER_ISO_OFFSET_W<'a> { w: &'a mut W, } impl<'a> DOUBLE_BUFFER_ISO_OFFSET_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: DOUBLE_BUFFER_ISO_OFFSET_A) -> &'a mut W { { self.bits(variant.into()) } } #[doc = "`0`"] #[inline(always)] pub fn _128(self) -> &'a mut W { self.variant(DOUBLE_BUFFER_ISO_OFFSET_A::_128) } #[doc = "`1`"] #[inline(always)] pub fn _256(self) -> &'a mut W { self.variant(DOUBLE_BUFFER_ISO_OFFSET_A::_256) } #[doc = "`10`"] #[inline(always)] pub fn _512(self) -> &'a mut W { self.variant(DOUBLE_BUFFER_ISO_OFFSET_A::_512) } #[doc = "`11`"] #[inline(always)] pub fn _1024(self) -> &'a mut W { self.variant(DOUBLE_BUFFER_ISO_OFFSET_A::_1024) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0x03 << 27)) | (((value as u32) & 0x03) << 27); self.w } } #[doc = "Reader of field `AVAILABLE_1`"] pub type AVAILABLE_1_R = crate::R<bool, bool>; #[doc = "Write proxy for field `AVAILABLE_1`"] pub struct AVAILABLE_1_W<'a> { w: &'a mut W, } impl<'a> AVAILABLE_1_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 26)) | (((value as u32) & 0x01) << 26); self.w } } #[doc = "Reader of field `LENGTH_1`"] pub type LENGTH_1_R = crate::R<u16, u16>; #[doc = "Write proxy for field `LENGTH_1`"] pub struct LENGTH_1_W<'a> { w: &'a mut W, } impl<'a> LENGTH_1_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u16) -> &'a mut W { self.w.bits = (self.w.bits & !(0x03ff << 16)) | (((value as u32) & 0x03ff) << 
16); self.w } } #[doc = "Reader of field `FULL_0`"] pub type FULL_0_R = crate::R<bool, bool>; #[doc = "Write proxy for field `FULL_0`"] pub struct FULL_0_W<'a> { w: &'a mut W, } impl<'a> FULL_0_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 15)) | (((value as u32) & 0x01) << 15); self.w } } #[doc = "Reader of field `LAST_0`"] pub type LAST_0_R = crate::R<bool, bool>; #[doc = "Write proxy for field `LAST_0`"] pub struct LAST_0_W<'a> { w: &'a mut W, } impl<'a> LAST_0_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 14)) | (((value as u32) & 0x01) << 14); self.w } } #[doc = "Reader of field `PID_0`"] pub type PID_0_R = crate::R<bool, bool>; #[doc = "Write proxy for field `PID_0`"] pub struct PID_0_W<'a> { w: &'a mut W, } impl<'a> PID_0_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 13)) | (((value as u32) & 0x01) << 13); self.w } } #[doc = "Reader of field `RESET`"] pub type RESET_R = crate::R<bool, bool>; #[doc = "Write proxy for field `RESET`"] pub struct RESET_W<'a> { w: &'a mut W, } impl<'a> RESET_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 12)) | (((value as u32) & 0x01) << 12); self.w } } #[doc = "Reader of field `STALL`"] pub type STALL_R = crate::R<bool, bool>; #[doc = "Write proxy for field `STALL`"] pub struct STALL_W<'a> { w: &'a mut W, } impl<'a> STALL_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 11)) | (((value as u32) & 0x01) << 11); self.w } } #[doc = "Reader of field `AVAILABLE_0`"] pub type AVAILABLE_0_R = crate::R<bool, bool>; #[doc = "Write proxy for field `AVAILABLE_0`"] pub struct AVAILABLE_0_W<'a> { w: &'a mut W, } impl<'a> AVAILABLE_0_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 10)) | (((value as u32) & 0x01) << 10); 
self.w } } #[doc = "Reader of field `LENGTH_0`"] pub type LENGTH_0_R = crate::R<u16, u16>; #[doc = "Write proxy for field `LENGTH_0`"] pub struct LENGTH_0_W<'a> { w: &'a mut W, } impl<'a> LENGTH_0_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u16) -> &'a mut W { self.w.bits = (self.w.bits & !0x03ff) | ((value as u32) & 0x03ff); self.w } } impl R { #[doc = "Bit 31 - Buffer 1 is full. For an IN transfer (TX to the host) the bit is set to indicate the data is valid. For an OUT transfer (RX from the host) this bit should be left as a 0. The host will set it when it has filled the buffer with data."] #[inline(always)] pub fn full_1(&self) -> FULL_1_R { FULL_1_R::new(((self.bits >> 31) & 0x01) != 0) } #[doc = "Bit 30 - Buffer 1 is the last buffer of the transfer."] #[inline(always)] pub fn last_1(&self) -> LAST_1_R { LAST_1_R::new(((self.bits >> 30) & 0x01) != 0) } #[doc = "Bit 29 - The data pid of buffer 1."] #[inline(always)] pub fn pid_1(&self) -> PID_1_R { PID_1_R::new(((self.bits >> 29) & 0x01) != 0) } #[doc = "Bits 27:28 - The number of bytes buffer 1 is offset from buffer 0 in Isochronous mode. Only valid in double buffered mode for an Isochronous endpoint.\\n For a non Isochronous endpoint the offset is always 64 bytes."] #[inline(always)] pub fn double_buffer_iso_offset(&self) -> DOUBLE_BUFFER_ISO_OFFSET_R { DOUBLE_BUFFER_ISO_OFFSET_R::new(((self.bits >> 27) & 0x03) as u8) } #[doc = "Bit 26 - Buffer 1 is available. This bit is set to indicate the buffer can be used by the controller. The controller clears the available bit when writing the status back."] #[inline(always)] pub fn available_1(&self) -> AVAILABLE_1_R { AVAILABLE_1_R::new(((self.bits >> 26) & 0x01) != 0) } #[doc = "Bits 16:25 - The length of the data in buffer 1."] #[inline(always)] pub fn length_1(&self) -> LENGTH_1_R { LENGTH_1_R::new(((self.bits >> 16) & 0x03ff) as u16) } #[doc = "Bit 15 - Buffer 0 is full. For an IN transfer (TX to the host) the bit is set to indicate the data is valid. For an OUT transfer (RX from the host) this bit should be left as a 0. The host will set it when it has filled the buffer with data."] #[inline(always)] pub fn full_0(&self) -> FULL_0_R { FULL_0_R::new(((self.bits >> 15) & 0x01) != 0) } #[doc = "Bit 14 - Buffer 0 is the last buffer of the transfer."] #[inline(always)] pub fn last_0(&self) -> LAST_0_R { LAST_0_R::new(((self.bits >> 14) & 0x01) != 0) } #[doc = "Bit 13 - The data pid of buffer 0."] #[inline(always)] pub fn pid_0(&self) -> PID_0_R { PID_0_R::new(((self.bits >> 13) & 0x01) != 0) } #[doc = "Bit 12 - Reset the buffer selector to buffer 0."] #[inline(always)] pub fn reset(&self) -> RESET_R { RESET_R::new(((self.bits >> 12) & 0x01) != 0) } #[doc = "Bit 11 - Reply with a stall (valid for both buffers)."] #[inline(always)] pub fn stall(&self) -> STALL_R { STALL_R::new(((self.bits >> 11) & 0x01) != 0) } #[doc = "Bit 10 - Buffer 0 is available. This bit is set to indicate the buffer can be used by the controller. The controller clears the available bit when writing the status back."] #[inline(always)] pub fn available_0(&self) -> AVAILABLE_0_R { AVAILABLE_0_R::new(((self.bits >> 10) & 0x01) != 0) } #[doc = "Bits 0:9 - The length of the data in buffer 1."] #[inline(always)] pub fn length_0(&self) -> LENGTH_0_R { LENGTH_0_R::new((self.bits & 0x03ff) as u16) } } impl W { #[doc = "Bit 31 - Buffer 1 is full. For an IN transfer (TX to the host) the bit is set to indicate the data is valid. 
For an OUT transfer (RX from the host) this bit should be left as a 0. The host will set it when it has filled the buffer with data."] #[inline(always)] pub fn full_1(&mut self) -> FULL_1_W { FULL_1_W { w: self } } #[doc = "Bit 30 - Buffer 1 is the last buffer of the transfer."] #[inline(always)] pub fn last_1(&mut self) -> LAST_1_W { LAST_1_W { w: self } } #[doc = "Bit 29 - The data pid of buffer 1."] #[inline(always)] pub fn pid_1(&mut self) -> PID_1_W { PID_1_W { w: self } } #[doc = "Bits 27:28 - The number of bytes buffer 1 is offset from buffer 0 in Isochronous mode. Only valid in double buffered mode for an Isochronous endpoint.\\n For a non Isochronous endpoint the offset is always 64 bytes."] #[inline(always)] pub fn double_buffer_iso_offset(&mut self) -> DOUBLE_BUFFER_ISO_OFFSET_W { DOUBLE_BUFFER_ISO_OFFSET_W { w: self } } #[doc = "Bit 26 - Buffer 1 is available. This bit is set to indicate the buffer can be used by the controller. The controller clears the available bit when writing the status back."] #[inline(always)] pub fn available_1(&mut self) -> AVAILABLE_1_W { AVAILABLE_1_W { w: self } } #[doc = "Bits 16:25 - The length of the data in buffer 1."] #[inline(always)] pub fn length_1(&mut self) -> LENGTH_1_W { LENGTH_1_W { w: self } } #[doc = "Bit 15 - Buffer 0 is full. For an IN transfer (TX to the host) the bit is set to indicate the data is valid. For an OUT transfer (RX from the host) this bit should be left as a 0. The host will set it when it has filled the buffer with data."] #[inline(always)] pub fn full_0(&mut self) -> FULL_0_W { FULL_0_W { w: self } } #[doc = "Bit 14 - Buffer 0 is the last buffer of the transfer."] #[inline(always)] pub fn last_0(&mut self) -> LAST_0_W { LAST_0_W { w: self } } #[doc = "Bit 13 - The data pid of buffer 0."] #[inline(always)] pub fn pid_0(&mut self) -> PID_0_W { PID_0_W { w: self } } #[doc = "Bit 12 - Reset the buffer selector to buffer 0."] #[inline(always)] pub fn reset(&mut self) -> RESET_W { RESET_W { w: self } } #[doc = "Bit 11 - Reply with a stall (valid for both buffers)."] #[inline(always)] pub fn stall(&mut self) -> STALL_W { STALL_W { w: self } } #[doc = "Bit 10 - Buffer 0 is available. This bit is set to indicate the buffer can be used by the controller. The controller clears the available bit when writing the status back."] #[inline(always)] pub fn available_0(&mut self) -> AVAILABLE_0_W { AVAILABLE_0_W { w: self } } #[doc = "Bits 0:9 - The length of the data in buffer 1."] #[inline(always)] pub fn length_0(&mut self) -> LENGTH_0_W { LENGTH_0_W { w: self } } }
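// Hedged usage sketch for the buffer-control register above, in the usual
// svd2rust style. The peripheral path `usb.ep10_out_buffer_control` is an
// assumption, so the snippet stays commented out. Arming buffer 0 for a
// 64-byte OUT transfer; `length_0().bits` is an `unsafe fn`, hence the block:
//
// usb.ep10_out_buffer_control.write(|w| unsafe {
//     w.full_0().clear_bit() // the host fills the buffer on OUT transfers
//         .pid_0().set_bit() // expected data PID
//         .length_0().bits(64)
//         .available_0().set_bit() // hand the buffer to the controller
// });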
34.314961
256
0.578648
1869302a9717ded1ba6ec529bb43542e983f72c7
1,906
use bson::{Bson, Document};
use serde::Deserialize;

use super::{Outcome, TestFile};
use crate::{
    options::{Collation, DistinctOptions},
    test::{run_spec_test, CLIENT, LOCK},
};

#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct Arguments {
    pub filter: Option<Document>,
    pub field_name: String,
    pub collation: Option<Collation>,
}

#[function_name::named]
fn run_distinct_test(test_file: TestFile) {
    let data = test_file.data;

    for mut test_case in test_file.tests {
        if test_case.operation.name != "distinct" {
            continue;
        }

        let _guard = LOCK.run_concurrently();

        test_case.description = test_case.description.replace('$', "%");

        let coll = CLIENT.init_db_and_coll(function_name!(), &test_case.description);
        coll.insert_many(data.clone(), None)
            .expect(&test_case.description);

        let arguments: Arguments = bson::from_bson(Bson::Document(test_case.operation.arguments))
            .expect(&test_case.description);
        let outcome: Outcome<Vec<Bson>> =
            bson::from_bson(Bson::Document(test_case.outcome)).expect(&test_case.description);

        if let Some(ref c) = outcome.collection {
            if let Some(ref name) = c.name {
                CLIENT.drop_collection(function_name!(), name);
            }
        }

        let opts = DistinctOptions {
            collation: arguments.collation,
            ..Default::default()
        };

        let result = coll
            .distinct(&arguments.field_name, arguments.filter, opts)
            .expect(&test_case.description);
        assert_eq!(result, outcome.result, "{}", test_case.description);
    }
}

#[cfg_attr(feature = "tokio-runtime", tokio::test)]
#[cfg_attr(feature = "async-std-runtime", async_std::test)]
async fn run() {
    run_spec_test(&["crud", "v1", "read"], run_distinct_test);
}
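// Hedged sketch of the operation under test, using the synchronous 1.x-era
// API this file is written against; the field name and filter are invented
// for illustration, and `doc!` comes from the `bson` crate:
//
// let sizes = coll.distinct("size", Some(doc! { "status": "A" }), None)?;
// assert!(sizes.iter().all(|v| matches!(v, Bson::String(_))));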
30.253968
97
0.628541
898c9d65c7826d5270e4a4b95d0379fb78c6555c
9,951
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // The outlines relation `T: 'a` or `'a: 'b`. This code frequently // refers to rules defined in RFC 1214 (`OutlivesFooBar`), so see that // RFC for reference. use infer::InferCtxt; use ty::{self, Ty, TypeFoldable}; #[derive(Debug)] pub enum Component<'tcx> { Region(ty::Region), Param(ty::ParamTy), UnresolvedInferenceVariable(ty::InferTy), // Projections like `T::Foo` are tricky because a constraint like // `T::Foo: 'a` can be satisfied in so many ways. There may be a // where-clause that says `T::Foo: 'a`, or the defining trait may // include a bound like `type Foo: 'static`, or -- in the most // conservative way -- we can prove that `T: 'a` (more generally, // that all components in the projection outlive `'a`). This code // is not in a position to judge which is the best technique, so // we just product the projection as a component and leave it to // the consumer to decide (but see `EscapingProjection` below). Projection(ty::ProjectionTy<'tcx>), // In the case where a projection has escaping regions -- meaning // regions bound within the type itself -- we always use // the most conservative rule, which requires that all components // outlive the bound. So for example if we had a type like this: // // for<'a> Trait1< <T as Trait2<'a,'b>>::Foo > // ~~~~~~~~~~~~~~~~~~~~~~~~~ // // then the inner projection (underlined) has an escaping region // `'a`. We consider that outer trait `'c` to meet a bound if `'b` // outlives `'b: 'c`, and we don't consider whether the trait // declares that `Foo: 'static` etc. Therefore, we just return the // free components of such a projection (in this case, `'b`). // // However, in the future, we may want to get smarter, and // actually return a "higher-ranked projection" here. Therefore, // we mark that these components are part of an escaping // projection, so that implied bounds code can avoid relying on // them. This gives us room to improve the regionck reasoning in // the future without breaking backwards compat. EscapingProjection(Vec<Component<'tcx>>), } /// Returns all the things that must outlive `'a` for the condition /// `ty0: 'a` to hold. pub fn components<'a,'tcx>(infcx: &InferCtxt<'a,'tcx>, ty0: Ty<'tcx>) -> Vec<Component<'tcx>> { let mut components = vec![]; compute_components(infcx, ty0, &mut components); debug!("components({:?}) = {:?}", ty0, components); components } fn compute_components<'a,'tcx>(infcx: &InferCtxt<'a,'tcx>, ty: Ty<'tcx>, out: &mut Vec<Component<'tcx>>) { // Descend through the types, looking for the various "base" // components and collecting them into `out`. This is not written // with `collect()` because of the need to sometimes skip subtrees // in the `subtys` iterator (e.g., when encountering a // projection). match ty.sty { ty::TyClosure(_, ref substs) => { // FIXME(#27086). We do not accumulate from substs, since they // don't represent reachable data. This means that, in // practice, some of the lifetime parameters might not // be in scope when the body runs, so long as there is // no reachable data with that lifetime. 
For better or // worse, this is consistent with fn types, however, // which can also encapsulate data in this fashion // (though it's somewhat harder, and typically // requires virtual dispatch). // // Note that changing this (in a naive way, at least) // causes regressions for what appears to be perfectly // reasonable code like this: // // ``` // fn foo<'a>(p: &Data<'a>) { // bar(|q: &mut Parser| q.read_addr()) // } // fn bar(p: Box<FnMut(&mut Parser)+'static>) { // } // ``` // // Note that `p` (and `'a`) are not used in the // closure at all, but to meet the requirement that // the closure type `C: 'static` (so it can be coerced // to the object type), we get the requirement that // `'a: 'static` since `'a` appears in the closure // type `C`. // // A smarter fix might "prune" unused `func_substs` -- // this would avoid breaking simple examples like // this, but would still break others (which might // indeed be invalid, depending on your POV). Pruning // would be a subtle process, since we have to see // what func/type parameters are used and unused, // taking into consideration UFCS and so forth. for &upvar_ty in &substs.upvar_tys { compute_components(infcx, upvar_ty, out); } } // OutlivesTypeParameterEnv -- the actual checking that `X:'a` // is implied by the environment is done in regionck. ty::TyParam(p) => { out.push(Component::Param(p)); } // For projections, we prefer to generate an obligation like // `<P0 as Trait<P1...Pn>>::Foo: 'a`, because this gives the // regionck more ways to prove that it holds. However, // regionck is not (at least currently) prepared to deal with // higher-ranked regions that may appear in the // trait-ref. Therefore, if we see any higher-ranke regions, // we simply fallback to the most restrictive rule, which // requires that `Pi: 'a` for all `i`. ty::TyProjection(ref data) => { if !data.has_escaping_regions() { // best case: no escaping regions, so push the // projection and skip the subtree (thus generating no // constraints for Pi). This defers the choice between // the rules OutlivesProjectionEnv, // OutlivesProjectionTraitDef, and // OutlivesProjectionComponents to regionck. out.push(Component::Projection(*data)); } else { // fallback case: hard code // OutlivesProjectionComponents. Continue walking // through and constrain Pi. let subcomponents = capture_components(infcx, ty); out.push(Component::EscapingProjection(subcomponents)); } } // If we encounter an inference variable, try to resolve it // and proceed with resolved version. If we cannot resolve it, // then record the unresolved variable as a component. ty::TyInfer(_) => { let ty = infcx.resolve_type_vars_if_possible(&ty); if let ty::TyInfer(infer_ty) = ty.sty { out.push(Component::UnresolvedInferenceVariable(infer_ty)); } else { compute_components(infcx, ty, out); } } // Most types do not introduce any region binders, nor // involve any other subtle cases, and so the WF relation // simply constraints any regions referenced directly by // the type and then visits the types that are lexically // contained within. (The comments refer to relevant rules // from RFC1214.) ty::TyBool | // OutlivesScalar ty::TyChar | // OutlivesScalar ty::TyInt(..) | // OutlivesScalar ty::TyUint(..) | // OutlivesScalar ty::TyFloat(..) | // OutlivesScalar ty::TyEnum(..) | // OutlivesNominalType ty::TyStruct(..) | // OutlivesNominalType ty::TyBox(..) | // OutlivesNominalType (ish) ty::TyStr | // OutlivesScalar (ish) ty::TyArray(..) | // ... ty::TySlice(..) | // ... ty::TyRawPtr(..) | // ... ty::TyRef(..) 
| // OutlivesReference ty::TyTuple(..) | // ... ty::TyFnDef(..) | // OutlivesFunction (*) ty::TyFnPtr(_) | // OutlivesFunction (*) ty::TyTrait(..) | // OutlivesObject, OutlivesFragment (*) ty::TyError => { // (*) Bare functions and traits are both binders. In the // RFC, this means we would add the bound regions to the // "bound regions list". In our representation, no such // list is maintained explicitly, because bound regions // themselves can be readily identified. push_region_constraints(out, ty.regions()); for subty in ty.walk_shallow() { compute_components(infcx, subty, out); } } } } fn capture_components<'a,'tcx>(infcx: &InferCtxt<'a,'tcx>, ty: Ty<'tcx>) -> Vec<Component<'tcx>> { let mut temp = vec![]; push_region_constraints(&mut temp, ty.regions()); for subty in ty.walk_shallow() { compute_components(infcx, subty, &mut temp); } temp } fn push_region_constraints<'tcx>(out: &mut Vec<Component<'tcx>>, regions: Vec<ty::Region>) { for r in regions { if !r.is_bound() { out.push(Component::Region(r)); } } }
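// Hedged illustration of the decomposition performed above, inferred from the
// match arms rather than from running the compiler:
//
//   &'a T    -> [Region('a), Param(T)]   // OutlivesReference plus the param
//   T::Item  -> [Projection(T::Item)]    // no escaping regions: defer choice
//   the `<T as Trait2<'a,'b>>::Foo` projection from the comment above, whose
//   `'a` is bound by an enclosing binder
//            -> EscapingProjection([...its free components, here 'b...])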
45.027149
92
0.575118
03d27af1d5d5b347bae47bd88ec47e70ac1336ec
6,263
use goblin::{elf, mach}; use object::{Object, ObjectTarget}; use elf::{find_elf_section, has_elf_section}; use mach::{find_mach_section, has_mach_segment}; /// Provides access to DWARF debugging information in object files pub trait DwarfData { /// Checks whether this object contains DWARF infos fn has_dwarf_data(&self) -> bool; /// Loads a specific dwarf section if its in the file fn get_dwarf_section<'input>( &'input self, section: DwarfSection, ) -> Option<DwarfSectionData<'input>>; } impl<'input> DwarfData for Object<'input> { fn has_dwarf_data(&self) -> bool { match self.target { // We assume an ELF contains debug information if it still contains // the debug_info section. The file utility uses a similar mechanism, // except that it checks for the ".symtab" section instead. ObjectTarget::Elf(ref elf) => has_elf_section( elf, elf::section_header::SHT_PROGBITS, DwarfSection::DebugInfo.elf_name(), ), // MachO generally stores debug information in the "__DWARF" segment, // so we simply check if it is present. The only exception to this // rule is call frame information (CFI), which is stored in the __TEXT // segment of the executable. This, however, requires more specific // logic anyway, so we ignore this here. ObjectTarget::MachOSingle(ref macho) => has_mach_segment(macho, "__DWARF"), ObjectTarget::MachOFat(_, ref macho) => has_mach_segment(macho, "__DWARF"), // We do not support DWARF in any other object targets _ => false, } } fn get_dwarf_section<'data>( &'data self, section: DwarfSection, ) -> Option<DwarfSectionData<'data>> { match self.target { ObjectTarget::Elf(ref elf) => read_elf_dwarf_section(elf, self.as_bytes(), section), ObjectTarget::MachOSingle(ref macho) => read_mach_dwarf_section(macho, section), ObjectTarget::MachOFat(_, ref macho) => read_mach_dwarf_section(macho, section), _ => None, } } } /// Represents the name of the section #[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy)] pub enum DwarfSection { EhFrame, DebugFrame, DebugAbbrev, DebugAranges, DebugLine, DebugLoc, DebugPubNames, DebugRanges, DebugStr, DebugInfo, DebugTypes, } impl DwarfSection { /// Return the name for elf pub fn elf_name(&self) -> &'static str { match *self { DwarfSection::EhFrame => ".eh_frame", DwarfSection::DebugFrame => ".debug_frame", DwarfSection::DebugAbbrev => ".debug_abbrev", DwarfSection::DebugAranges => ".debug_aranges", DwarfSection::DebugLine => ".debug_line", DwarfSection::DebugLoc => ".debug_loc", DwarfSection::DebugPubNames => ".debug_pubnames", DwarfSection::DebugRanges => ".debug_ranges", DwarfSection::DebugStr => ".debug_str", DwarfSection::DebugInfo => ".debug_info", DwarfSection::DebugTypes => ".debug_types", } } /// Return the name for macho pub fn macho_name(&self) -> &'static str { match *self { DwarfSection::EhFrame => "__eh_frame", DwarfSection::DebugFrame => "__debug_frame", DwarfSection::DebugAbbrev => "__debug_abbrev", DwarfSection::DebugAranges => "__debug_aranges", DwarfSection::DebugLine => "__debug_line", DwarfSection::DebugLoc => "__debug_loc", DwarfSection::DebugPubNames => "__debug_pubnames", DwarfSection::DebugRanges => "__debug_ranges", DwarfSection::DebugStr => "__debug_str", DwarfSection::DebugInfo => "__debug_info", DwarfSection::DebugTypes => "__debug_types", } } /// Return the name of the section for debug purposes pub fn name(&self) -> &'static str { match *self { DwarfSection::EhFrame => "eh_frame", DwarfSection::DebugFrame => "debug_frame", DwarfSection::DebugAbbrev => "debug_abbrev", DwarfSection::DebugAranges => "debug_aranges", 
DwarfSection::DebugLine => "debug_line", DwarfSection::DebugLoc => "debug_loc", DwarfSection::DebugPubNames => "debug_pubnames", DwarfSection::DebugRanges => "debug_ranges", DwarfSection::DebugStr => "debug_str", DwarfSection::DebugInfo => "debug_info", DwarfSection::DebugTypes => "debug_types", } } } /// Gives access to a section in a dwarf file #[derive(Eq, PartialEq, Debug, Clone)] pub struct DwarfSectionData<'data> { section: DwarfSection, data: &'data [u8], offset: u64, } impl<'data> DwarfSectionData<'data> { pub fn new(section: DwarfSection, data: &'data [u8], offset: u64) -> DwarfSectionData<'data> { DwarfSectionData { section: section, data: data, offset: offset, } } /// Return the section data as bytes pub fn as_bytes(&self) -> &'data [u8] { self.data } /// Get the absolute file offset pub fn offset(&self) -> u64 { self.offset } /// Get the section name pub fn section(&self) -> DwarfSection { self.section } } /// Reads a single `DwarfSection` from an ELF object file fn read_elf_dwarf_section<'data>( elf: &elf::Elf<'data>, data: &'data [u8], sect: DwarfSection, ) -> Option<DwarfSectionData<'data>> { let sh_type = elf::section_header::SHT_PROGBITS; find_elf_section(elf, data, sh_type, sect.elf_name()) .map(|section| DwarfSectionData::new(sect, section.data, section.header.sh_offset)) } /// Reads a single `DwarfSection` from Mach object file fn read_mach_dwarf_section<'data>( macho: &mach::MachO<'data>, sect: DwarfSection, ) -> Option<DwarfSectionData<'data>> { find_mach_section(macho, sect.macho_name()) .map(|section| DwarfSectionData::new(sect, section.data, section.header.offset as u64)) }
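// Hedged usage sketch; how the `Object` is obtained (some parse entry point)
// is not shown in this file, so `obj` is assumed to exist already:
//
// if obj.has_dwarf_data() {
//     if let Some(sec) = obj.get_dwarf_section(DwarfSection::DebugInfo) {
//         println!(
//             "{}: {} bytes at file offset {:#x}",
//             sec.section().name(),
//             sec.as_bytes().len(),
//             sec.offset()
//         );
//     }
// }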
35.185393
98
0.613923
56ee0b8587d3888f8a0901e69b4a8535316ff95f
2,778
use algebra_core::{ biginteger::BigInteger768, curves::models::mnt4::{MNT4Parameters, MNT4}, field_new, fields::FpParameters, Fp2, }; use crate::mnt4_753::{Fq, Fq2, Fq2Parameters, Fq4Parameters, FqParameters, Fr, FrParameters}; pub mod g1; pub mod g2; #[cfg(test)] mod tests; pub use self::{ g1::{G1Affine, G1Prepared, G1Projective}, g2::{G2Affine, G2Prepared, G2Projective}, }; pub type MNT4_753 = MNT4<Parameters>; pub struct Parameters; impl MNT4Parameters for Parameters { const TWIST: Fp2<Self::Fp2Params> = field_new!(Fq2, FQ_ZERO, FQ_ONE); // A coefficient of MNT4-753 G2 = // ``` // mnt4753_twist_coeff_a = mnt4753_Fq2(mnt4753_G1::coeff_a * non_residue, mnt6753_Fq::zero()); // = (A_COEFF * NONRESIDUE, ZERO) // = (26, ZERO) // ``` #[rustfmt::skip] const TWIST_COEFF_A: Fp2<Self::Fp2Params> = field_new!(Fq2, G1_COEFF_A_NON_RESIDUE, FQ_ZERO, ); // https://github.com/o1-labs/snarky/blob/9c21ab2bb23874604640740d646a932e813432c3/snarkette/mnt4753.ml const ATE_LOOP_COUNT: &'static [u64] = &[ 8824542903220142080, 7711082599397206192, 8303354903384568230, 5874150271971943936, 9717849827920685054, 95829799234282493, ]; const ATE_IS_LOOP_COUNT_NEG: bool = true; const FINAL_EXPONENT_LAST_CHUNK_1: BigInteger768 = BigInteger768([0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0]); const FINAL_EXPONENT_LAST_CHUNK_W0_IS_NEG: bool = true; const FINAL_EXPONENT_LAST_CHUNK_ABS_OF_W0: BigInteger768 = BigInteger768([ 8824542903220142079, 7711082599397206192, 8303354903384568230, 5874150271971943936, 9717849827920685054, 95829799234282493, 0, 0, 0, 0, 0, 0, ]); type Fp = Fq; type Fp2Params = Fq2Parameters; type Fp4Params = Fq4Parameters; type G1Parameters = self::g1::Parameters; type G2Parameters = self::g2::Parameters; } // 26 pub const G1_COEFF_A_NON_RESIDUE: Fq = field_new!( Fq, BigInteger768([ 16948538951764659373, 10775354577659735631, 12766795894854242596, 8684022258823474090, 973489465296612807, 3883945490221946200, 16178634811223492029, 16155746945640075033, 17642042187059426365, 10295720303844380352, 13265853240981244259, 39422991244875, ]) ); pub const FQ_ZERO: Fq = field_new!(Fq, BigInteger768([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])); pub const FQ_ONE: Fq = field_new!(Fq, FqParameters::R); pub const FR_ZERO: Fr = field_new!(Fr, BigInteger768([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])); pub const FR_ONE: Fr = field_new!(Fr, FrParameters::R);
29.242105
107
0.650108
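// Hedged aside on the record above: `BigInteger768` and `ATE_LOOP_COUNT` store a large
// integer as little-endian u64 limbs, i.e. value = sum(limb[i] * 2^(64*i)). The helper
// below is a self-contained illustration of that encoding, not part of the crate:

fn limbs_to_hex(limbs: &[u64]) -> String {
    // Print the most-significant limb first so the hex string reads left to right;
    // all lower limbs are zero-padded to 16 hex digits (64 bits).
    let mut s = String::from("0x");
    for (i, limb) in limbs.iter().rev().enumerate() {
        if i == 0 {
            s.push_str(&format!("{:x}", limb));
        } else {
            s.push_str(&format!("{:016x}", limb));
        }
    }
    s
}

fn main() {
    // 2^64 + 1 is stored as [1, 1] in little-endian limbs.
    assert_eq!(limbs_to_hex(&[1, 1]), "0x10000000000000001");
}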
e5095215fb38ffbe32752809674b8e686278959a
721
//!
//! The `evaluation stack copy` instruction.
//!

use std::fmt;

use serde::Deserialize;
use serde::Serialize;

use crate::instructions::Instruction;

///
/// The `evaluation stack copy` instruction.
///
/// Copies the top element of the evaluation stack.
///
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct Copy;

impl Copy {
    ///
    /// If the instruction is for the debug mode only.
    ///
    pub fn is_debug(&self) -> bool {
        false
    }
}

impl From<Copy> for Instruction {
    fn from(copy: Copy) -> Self {
        Instruction::Copy(copy)
    }
}

impl fmt::Display for Copy {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "copy")
    }
}
18.025
62
0.613037
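// Usage sketch for the record above (self-contained, with a hypothetical one-variant
// `Instruction` enum standing in for the real one): implementing `From<Copy>` also
// provides `Into<Instruction>` through the blanket impl, so `Copy.into()` keeps working.

#[derive(Debug, PartialEq)]
struct Copy;

#[derive(Debug, PartialEq)]
enum Instruction {
    Copy(Copy),
}

impl From<Copy> for Instruction {
    fn from(copy: Copy) -> Self {
        Instruction::Copy(copy)
    }
}

fn main() {
    // `.into()` comes for free from the `From` impl above.
    let instr: Instruction = Copy.into();
    assert_eq!(instr, Instruction::Copy(Copy));
}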
ed64bae8f172e7afeb8df8070b4714ba56c6aa32
812
use std::collections::HashMap;

struct NonRepeating;

impl NonRepeating {
    pub fn find_first_non_repeating_character(str: &str) -> isize {
        let mut hash: HashMap<u8, usize> = HashMap::new();

        // First pass: count the occurrences of every byte.
        for b in str.as_bytes() {
            *hash.entry(*b).or_insert(0) += 1;
        }

        // Second pass: the first byte with a count of one is the answer.
        for (idx, b) in str.as_bytes().iter().enumerate() {
            if *hash.get(b).unwrap() == 1 {
                return idx as isize;
            }
        }

        -1
    }
}

#[cfg(test)]
mod tests {
    use crate::first_non_repeating::NonRepeating;

    #[test]
    fn test1() {
        assert_eq!(
            NonRepeating::find_first_non_repeating_character("abcdcaf"),
            1
        );
    }
}
21.368421
72
0.490148
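// Hedged alternative to the record above: because the scan works on bytes, a fixed
// [u32; 256] count table can replace the HashMap; same two-pass algorithm, no hashing.
// Self-contained sketch, not part of the snippet above.

fn first_non_repeating(s: &str) -> isize {
    let mut counts = [0u32; 256];
    for &b in s.as_bytes() {
        counts[b as usize] += 1;
    }
    for (idx, &b) in s.as_bytes().iter().enumerate() {
        if counts[b as usize] == 1 {
            return idx as isize;
        }
    }
    -1
}

fn main() {
    assert_eq!(first_non_repeating("abcdcaf"), 1); // 'b' is the first unique byte
    assert_eq!(first_non_repeating("aabb"), -1);
}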
76679113efa17b0b14cbe57a00644cf242c23c48
5,043
#![cfg(target_arch = "x86_64")]

use crate::structures::paging::{frame::PhysFrame, mapper::*, page_table::PageTable};

/// A Mapper implementation that requires that the complete physical memory is mapped at some
/// offset in the virtual address space.
#[derive(Debug)]
pub struct OffsetPageTable<'a> {
    inner: MappedPageTable<'a, PhysOffset>,
}

impl<'a> OffsetPageTable<'a> {
    /// Creates a new `OffsetPageTable` that uses the given offset for converting virtual
    /// to physical addresses.
    ///
    /// The complete physical memory must be mapped in the virtual address space starting at
    /// address `phys_offset`. This means that for example physical address `0x5000` can be
    /// accessed through virtual address `phys_offset + 0x5000`. This mapping is required because
    /// the mapper needs to access page tables, which are not mapped into the virtual address
    /// space by default.
    ///
    /// ## Safety
    ///
    /// This function is unsafe because the caller must guarantee that the passed `phys_offset`
    /// is correct. Also, the passed `level_4_table` must point to the level 4 page table
    /// of a valid page table hierarchy. Otherwise this function might break memory safety, e.g.
    /// by writing to an illegal memory location.
    #[inline]
    pub unsafe fn new(level_4_table: &'a mut PageTable, phys_offset: VirtAddr) -> Self {
        let phys_offset = PhysOffset {
            offset: phys_offset,
        };
        Self {
            inner: MappedPageTable::new(level_4_table, phys_offset),
        }
    }
}

#[derive(Debug)]
struct PhysOffset {
    offset: VirtAddr,
}

impl PhysToVirt for PhysOffset {
    #[inline]
    fn phys_to_virt(&self, frame: PhysFrame) -> *mut PageTable {
        let phys = frame.start_address().as_u64();
        let virt = self.offset + phys;
        virt.as_mut_ptr()
    }
}

// delegate all trait implementations to inner

impl<'a> Mapper<Size1GiB> for OffsetPageTable<'a> {
    #[inline]
    fn map_to<A>(
        &mut self,
        page: Page<Size1GiB>,
        frame: UnusedPhysFrame<Size1GiB>,
        flags: PageTableFlags,
        allocator: &mut A,
    ) -> Result<MapperFlush<Size1GiB>, MapToError<Size1GiB>>
    where
        A: FrameAllocator<Size4KiB>,
    {
        self.inner.map_to(page, frame, flags, allocator)
    }

    #[inline]
    fn unmap(
        &mut self,
        page: Page<Size1GiB>,
    ) -> Result<(PhysFrame<Size1GiB>, MapperFlush<Size1GiB>), UnmapError> {
        self.inner.unmap(page)
    }

    #[inline]
    fn update_flags(
        &mut self,
        page: Page<Size1GiB>,
        flags: PageTableFlags,
    ) -> Result<MapperFlush<Size1GiB>, FlagUpdateError> {
        self.inner.update_flags(page, flags)
    }

    #[inline]
    fn translate_page(&self, page: Page<Size1GiB>) -> Result<PhysFrame<Size1GiB>, TranslateError> {
        self.inner.translate_page(page)
    }
}

impl<'a> Mapper<Size2MiB> for OffsetPageTable<'a> {
    #[inline]
    fn map_to<A>(
        &mut self,
        page: Page<Size2MiB>,
        frame: UnusedPhysFrame<Size2MiB>,
        flags: PageTableFlags,
        allocator: &mut A,
    ) -> Result<MapperFlush<Size2MiB>, MapToError<Size2MiB>>
    where
        A: FrameAllocator<Size4KiB>,
    {
        self.inner.map_to(page, frame, flags, allocator)
    }

    #[inline]
    fn unmap(
        &mut self,
        page: Page<Size2MiB>,
    ) -> Result<(PhysFrame<Size2MiB>, MapperFlush<Size2MiB>), UnmapError> {
        self.inner.unmap(page)
    }

    #[inline]
    fn update_flags(
        &mut self,
        page: Page<Size2MiB>,
        flags: PageTableFlags,
    ) -> Result<MapperFlush<Size2MiB>, FlagUpdateError> {
        self.inner.update_flags(page, flags)
    }

    #[inline]
    fn translate_page(&self, page: Page<Size2MiB>) -> Result<PhysFrame<Size2MiB>, TranslateError> {
        self.inner.translate_page(page)
    }
}

impl<'a> Mapper<Size4KiB> for OffsetPageTable<'a> {
    #[inline]
    fn map_to<A>(
        &mut self,
        page: Page<Size4KiB>,
        frame: UnusedPhysFrame<Size4KiB>,
        flags: PageTableFlags,
        allocator: &mut A,
    ) -> Result<MapperFlush<Size4KiB>, MapToError<Size4KiB>>
    where
        A: FrameAllocator<Size4KiB>,
    {
        self.inner.map_to(page, frame, flags, allocator)
    }

    #[inline]
    fn unmap(
        &mut self,
        page: Page<Size4KiB>,
    ) -> Result<(PhysFrame<Size4KiB>, MapperFlush<Size4KiB>), UnmapError> {
        self.inner.unmap(page)
    }

    #[inline]
    fn update_flags(
        &mut self,
        page: Page<Size4KiB>,
        flags: PageTableFlags,
    ) -> Result<MapperFlush<Size4KiB>, FlagUpdateError> {
        self.inner.update_flags(page, flags)
    }

    #[inline]
    fn translate_page(&self, page: Page<Size4KiB>) -> Result<PhysFrame<Size4KiB>, TranslateError> {
        self.inner.translate_page(page)
    }
}

impl<'a> MapperAllSizes for OffsetPageTable<'a> {
    #[inline]
    fn translate(&self, addr: VirtAddr) -> TranslateResult {
        self.inner.translate(addr)
    }
}
28.982759
99
0.627008
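// The core invariant of `OffsetPageTable` above, reduced to plain integers: with all of
// physical memory mapped at `phys_offset`, the frame at physical address P is readable
// at virtual address phys_offset + P. Self-contained sketch; the offset value below is
// an assumption for illustration, not something the crate fixes.

fn phys_to_virt(phys_offset: u64, phys: u64) -> u64 {
    phys_offset + phys
}

fn main() {
    // A typical higher-half mapping offset (assumption).
    let phys_offset = 0xFFFF_8000_0000_0000u64;
    // Matches the doc example above: physical 0x5000 is visible at phys_offset + 0x5000.
    assert_eq!(phys_to_virt(phys_offset, 0x5000), 0xFFFF_8000_0000_5000);
}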
e81e64df4eabc9974fd6605fae57551fc44b5925
15,561
//! --- Part Two --- //! //! Strangely, the exit isn't open when you reach it. Then, you remember: the ancient Plutonians //! were famous for building recursive spaces. //! //! The marked connections in the maze aren't portals: they physically connect to a larger or //! smaller copy of the maze. Specifically, the labeled tiles around the inside edge actually //! connect to a smaller copy of the same maze, and the smaller copy's inner labeled tiles connect //! to yet a smaller copy, and so on. //! //! When you enter the maze, you are at the outermost level; when at the outermost level, only the //! outer labels AA and ZZ function (as the start and end, respectively); all other outer labeled //! tiles are effectively walls. At any other level, AA and ZZ count as walls, but the other outer //! labeled tiles bring you one level outward. //! //! Your goal is to find a path through the maze that brings you back to ZZ at the outermost level //! of the maze. //! //! In the first example above, the shortest path is now the loop around the right side. If the //! starting level is 0, then taking the previously-shortest path would pass through BC (to level //! 1), DE (to level 2), and FG (back to level 1). Because this is not the outermost level, ZZ is a //! wall, and the only option is to go back around to BC, which would only send you even deeper //! into the recursive maze. //! //! In the second example above, there is no path that brings you to ZZ at the outermost level. //! //! Here is a more interesting example: //! //! Z L X W C //! Z P Q B K //! ###########.#.#.#.#######.############### //! #...#.......#.#.......#.#.......#.#.#...# //! ###.#.#.#.#.#.#.#.###.#.#.#######.#.#.### //! #.#...#.#.#...#.#.#...#...#...#.#.......# //! #.###.#######.###.###.#.###.###.#.####### //! #...#.......#.#...#...#.............#...# //! #.#########.#######.#.#######.#######.### //! #...#.# F R I Z #.#.#.# //! #.###.# D E C H #.#.#.# //! #.#...# #...#.# //! #.###.# #.###.# //! #.#....OA WB..#.#..ZH //! #.###.# #.#.#.# //! CJ......# #.....# //! ####### ####### //! #.#....CK #......IC //! #.###.# #.###.# //! #.....# #...#.# //! ###.### #.#.#.# //! XF....#.# RF..#.#.# //! #####.# ####### //! #......CJ NM..#...# //! ###.#.# #.###.# //! RE....#.# #......RF //! ###.### X X L #.#.#.# //! #.....# F Q P #.#.#.# //! ###.###########.###.#######.#########.### //! #.....#...#.....#.......#...#.....#.#...# //! #####.#.###.#######.#######.###.###.#.#.# //! #.......#.......#.#.#.#.#...#...#...#.#.# //! #####.###.#####.#.#.#.#.###.###.#.###.### //! #.......#.....#.#...#...............#...# //! #############.#.#.###.################### //! A O F N //! A A D M //! //! One shortest path through the maze is the following: //! //! Walk from AA to XF (16 steps) //! Recurse into level 1 through XF (1 step) //! Walk from XF to CK (10 steps) //! Recurse into level 2 through CK (1 step) //! Walk from CK to ZH (14 steps) //! Recurse into level 3 through ZH (1 step) //! Walk from ZH to WB (10 steps) //! Recurse into level 4 through WB (1 step) //! Walk from WB to IC (10 steps) //! Recurse into level 5 through IC (1 step) //! Walk from IC to RF (10 steps) //! Recurse into level 6 through RF (1 step) //! Walk from RF to NM (8 steps) //! Recurse into level 7 through NM (1 step) //! Walk from NM to LP (12 steps) //! Recurse into level 8 through LP (1 step) //! Walk from LP to FD (24 steps) //! Recurse into level 9 through FD (1 step) //! Walk from FD to XQ (8 steps) //! Recurse into level 10 through XQ (1 step) //! Walk from XQ to WB (4 steps) //! 
Return to level 9 through WB (1 step) //! Walk from WB to ZH (10 steps) //! Return to level 8 through ZH (1 step) //! Walk from ZH to CK (14 steps) //! Return to level 7 through CK (1 step) //! Walk from CK to XF (10 steps) //! Return to level 6 through XF (1 step) //! Walk from XF to OA (14 steps) //! Return to level 5 through OA (1 step) //! Walk from OA to CJ (8 steps) //! Return to level 4 through CJ (1 step) //! Walk from CJ to RE (8 steps) //! Return to level 3 through RE (1 step) //! Walk from RE to IC (4 steps) //! Recurse into level 4 through IC (1 step) //! Walk from IC to RF (10 steps) //! Recurse into level 5 through RF (1 step) //! Walk from RF to NM (8 steps) //! Recurse into level 6 through NM (1 step) //! Walk from NM to LP (12 steps) //! Recurse into level 7 through LP (1 step) //! Walk from LP to FD (24 steps) //! Recurse into level 8 through FD (1 step) //! Walk from FD to XQ (8 steps) //! Recurse into level 9 through XQ (1 step) //! Walk from XQ to WB (4 steps) //! Return to level 8 through WB (1 step) //! Walk from WB to ZH (10 steps) //! Return to level 7 through ZH (1 step) //! Walk from ZH to CK (14 steps) //! Return to level 6 through CK (1 step) //! Walk from CK to XF (10 steps) //! Return to level 5 through XF (1 step) //! Walk from XF to OA (14 steps) //! Return to level 4 through OA (1 step) //! Walk from OA to CJ (8 steps) //! Return to level 3 through CJ (1 step) //! Walk from CJ to RE (8 steps) //! Return to level 2 through RE (1 step) //! Walk from RE to XQ (14 steps) //! Return to level 1 through XQ (1 step) //! Walk from XQ to FD (8 steps) //! Return to level 0 through FD (1 step) //! Walk from FD to ZZ (18 steps) //! //! This path takes a total of 396 steps to move from AA at the outermost layer to ZZ at the //! outermost layer. //! //! In your maze, when accounting for recursion, how many steps does it take to get from the open //! tile marked AA to the open tile marked ZZ, both at the outermost layer? use std::collections::{HashMap, HashSet, VecDeque}; use std::error::Error; use std::fmt; use std::fs::File; use std::io::{BufRead, BufReader}; use std::ops::RangeInclusive; const LETTERS: RangeInclusive<u8> = b'A'..=b'Z'; struct Node { edges: HashMap<(String, bool), (usize, bool)>, } impl fmt::Display for Node { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { for ((label, _), (distance, inner)) in self.edges.iter() { write!( f, "{}({},{}) ", label, distance, if *inner { "I" } else { "O" } )?; } Ok(()) } } fn find_labels( map: &Vec<Vec<u8>>, ) -> Result<HashMap<String, Vec<(usize, usize, bool)>>, Box<dyn Error>> { let mut label_map: HashMap<String, Vec<(usize, usize, bool)>> = HashMap::new(); let (width, height) = (map[0].len(), map.len()); println!("Finding labels..."); for y in 0..height { for x in 0..width { if LETTERS.contains(&map[y][x]) { // we check for the second letter below or to the right... if neither of those // work, it's because we've already visited this label. if y + 1 < height && LETTERS.contains(&map[y + 1][x]) { // vertical label let label = String::from_utf8(vec![map[y][x], map[y + 1][x]])?; println!("\x1b[F\x1b[KFound label: {}", label); let positions = label_map.entry(label).or_default(); if y > 0 && map[y - 1][x] == b'.' 
{ // entrance on top of label let inner = y < height - 3; (*positions).push((x, y - 1, inner)); } else { // entrace on bottom of label let inner = y > 2; (*positions).push((x, y + 2, inner)); } } else if x + 1 < width && LETTERS.contains(&map[y][x + 1]) { // horizontal label let label = String::from_utf8(vec![map[y][x], map[y][x + 1]])?; let positions = label_map.entry(label).or_default(); if x > 0 && map[y][x - 1] == b'.' { // entrance to the left of label let inner = x < width - 3; (*positions).push((x - 1, y, inner)); } else { // entrance to the right of label let inner = x > 2; (*positions).push((x + 2, y, inner)); } } } } } println!("\x1b[F\x1b[KFound {} labels", label_map.len()); Ok(label_map) } fn find_connections( map: &Vec<Vec<u8>>, labels: &HashMap<String, Vec<(usize, usize, bool)>>, ) -> HashMap<String, Node> { fn bfs( from_label: &String, from_inner: bool, edges: &mut HashMap<(String, bool), (usize, bool)>, map: &Vec<Vec<u8>>, visited: &mut Vec<Vec<bool>>, label_positions: &HashMap<(usize, usize), (String, bool)>, width: usize, height: usize, pos: (usize, usize), distance: usize, ) { let x = pos.0; let y = pos.1; visited[y][x] = true; if let Some((label, inner)) = label_positions.get(&pos) { // ignore cycles - AA and ZZ can only connect to inner labels, and don't bother // recording any connections *back* to AA; we don't care. let mut valid_connection = label != from_label; if (from_label == "AA" && !inner) || label == "AA" { valid_connection = false; } if from_label == "ZZ" && !inner { valid_connection = false; } if valid_connection { // distance + 1 because it takes one more step to walk into the portal, except ZZ let distance = if label == "ZZ" { distance } else { distance + 1 }; let old = edges.insert((label.clone(), from_inner), (distance, *inner)); if !old.is_none() { panic!("Label {} can reach both portals for {}", from_label, label); } } } let moves = [ (Some(x), y.checked_sub(1)), (Some(x + 1), Some(y)), (Some(x), Some(y + 1)), (x.checked_sub(1), Some(y)), ]; for newpos in moves.iter() { if let (Some(x), Some(y)) = newpos { if *x < width && *y < height && map[*y][*x] == b'.' && !visited[*y][*x] { bfs( from_label, from_inner, edges, map, visited, label_positions, width, height, (*x, *y), distance + 1, ); } } } } let mut connections = HashMap::new(); let (width, height) = (map[0].len(), map.len()); let label_positions: HashMap<(usize, usize), (String, bool)> = labels .iter() .flat_map(|(l, v)| { v.iter() .map(move |(x, y, inner)| ((*x, *y), (l.clone(), *inner))) }) .collect(); for (label, positions) in labels.iter() { if label == "ZZ" { // no need to compute the connections here continue; } let mut edges = HashMap::new(); let mut visited = vec![vec![false; width]; height]; for (x, y, inner) in positions.iter() { bfs( label, *inner, &mut edges, &map, &mut visited, &label_positions, width, height, (*x, *y), 0, ); } let node = Node { edges }; connections.insert(label.clone(), node); } let mut label_names: Vec<String> = connections.keys().cloned().collect(); label_names.sort(); for label in label_names { let node = connections.get(&label).unwrap(); println!("{} can reach: {}", label, node); } connections } fn find_shortest_path(connections: &HashMap<String, Node>) -> usize { // We can't use Dijkstra's anymore because the quickest way to the portal before ZZ may put us // in a level > 0, which means we'll never finish the maze. By extension, the quickest way to // the portal 2 steps before ZZ may not set us up for success, etc. 
Additionally, we may need // to revisit nodes to walk our way back up to level 0. Dijkstra just won't work. Because we // need to allow cycles, we also can't use dfs because we might end up in an infinite loop. // We'll need bfs instead. let mut shortest = std::usize::MAX; let mut queue = VecDeque::new(); queue.push_back(("AA".to_owned(), false, 0, 0, Vec::<String>::new())); while let Some((current, from_inner, total_distance, level, path)) = queue.pop_front() { if level == 50 || total_distance > shortest { // we've probably gone too far; bail continue; } let node = connections.get(&current).unwrap(); for ((to_label, to_from_inner), (distance, inner)) in node.edges.iter() { if from_inner != *to_from_inner { continue; } let mut path = path.clone(); path.push(format!("{}({}, {})", current, level, total_distance)); if to_label == "ZZ" { if level == 0 { // we've reached the end! let total_distance = total_distance + *distance; if total_distance < shortest { shortest = total_distance; } } // either way, we need to stop here continue; } let level = if *inner { level + 1 } else if level > 0 { level - 1 } else { continue; }; queue.push_back(( to_label.clone(), !*inner, total_distance + *distance, level, path, )); } } shortest } fn main() -> Result<(), Box<dyn Error>> { let file = File::open("input.txt")?; let reader = BufReader::new(file); let map = reader .lines() .map(|line| line.map(|l| l.bytes().collect::<Vec<u8>>())) .collect::<Result<Vec<Vec<_>>, _>>()?; let labels = find_labels(&map)?; let connections = find_connections(&map, &labels); let result = find_shortest_path(&connections); println!("Shortest distance: {}", result); Ok(()) }
38.233415
99
0.464752
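// The level-transition rule at the heart of the recursive-maze search above, extracted
// as a tiny self-contained function: inner portals descend into a smaller copy, outer
// portals climb back out, and outer portals are walls at the outermost level (which is
// why only AA and ZZ work there).

fn next_level(level: usize, through_inner_portal: bool) -> Option<usize> {
    if through_inner_portal {
        Some(level + 1) // recurse into a smaller copy of the maze
    } else if level > 0 {
        Some(level - 1) // climb back out one level
    } else {
        None // outer portals are walls at the outermost level
    }
}

fn main() {
    assert_eq!(next_level(0, true), Some(1));
    assert_eq!(next_level(3, false), Some(2));
    assert_eq!(next_level(0, false), None);
}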
0af7492aa01ac60744984a191a4b56065dd430bd
1,132
use clap::ArgMatches;
use std::str::FromStr;

use rusoto_core::credential::ProfileProvider;
use rusoto_core::request::HttpClient;
use rusoto_core::Region;
use rusoto_s3::S3Client;
use rusoto_ssm::SsmClient;

pub fn get_ssm_client(matches: &ArgMatches) -> SsmClient {
    let http_client = HttpClient::new().unwrap();
    let credentials = get_credentials(matches);
    let region = get_region(matches);
    SsmClient::new_with(http_client, credentials, region)
}

pub fn get_s3_client(matches: &ArgMatches) -> S3Client {
    let http_client = HttpClient::new().unwrap();
    let credentials = get_credentials(matches);
    let region = get_region(matches);
    S3Client::new_with(http_client, credentials, region)
}

pub fn get_region(matches: &ArgMatches) -> Region {
    // Fall back to the default region when the flag is absent or unparsable.
    Region::from_str(matches.value_of("region").unwrap_or("")).unwrap_or_default()
}

pub fn get_credentials(matches: &ArgMatches) -> ProfileProvider {
    let mut credentials = ProfileProvider::new().unwrap();
    if let Some(profile) = matches.value_of("profile") {
        credentials.set_profile(profile);
    }
    credentials
}
25.727273
91
0.720848
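// Usage sketch for the fallback in `get_region` above, kept self-contained with a mock
// region type (the real code parses `rusoto_core::Region`): an absent or unparsable
// flag value quietly becomes the default region.

use std::str::FromStr;

#[derive(Debug, PartialEq, Default)]
enum MockRegion {
    #[default]
    UsEast1,
    EuWest1,
}

impl FromStr for MockRegion {
    type Err = ();

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "us-east-1" => Ok(MockRegion::UsEast1),
            "eu-west-1" => Ok(MockRegion::EuWest1),
            _ => Err(()),
        }
    }
}

fn main() {
    assert_eq!(MockRegion::from_str("eu-west-1"), Ok(MockRegion::EuWest1));
    // An empty or unknown value falls back to the default, as in `get_region`.
    assert_eq!(
        MockRegion::from_str("").unwrap_or_default(),
        MockRegion::UsEast1
    );
}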
03f1f32ec26d615040a1201368a7e3629fd3b115
7,283
//! # MPESA Environment
//!
//! Code related to setting up the desired Safaricom API environment. Environment can be either
//! sandbox or production.
//! You will need environment-specific credentials (`CLIENT_KEY` and `CLIENT_SECRET`) when creating
//! an instance of the `Mpesa` client struct. Note that you cannot use sandbox credentials in
//! production and vice versa.
//!
//! Based on the selected environment, you are able to access environment-specific data such as the
//! `base_url` and the `public key`, an X509 certificate used for encrypting initiator passwords.
//! You can read more about that from the Safaricom API
//! [docs](https://developer.safaricom.co.ke/docs?javascript#security-credentials).

use crate::MpesaError;
use std::{convert::TryFrom, str::FromStr};

#[derive(Debug)]
/// Enum to map to the desired environment, so as to access the matching
/// certificate and base url.
/// Required to construct a new `Mpesa` struct.
pub enum Environment {
    Production,
    Sandbox,
}

impl FromStr for Environment {
    type Err = MpesaError;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "production" | "Production" | "PRODUCTION" => Ok(Self::Production),
            "sandbox" | "Sandbox" | "SANDBOX" => Ok(Self::Sandbox),
            _ => Err(MpesaError::Message(
                "Could not parse the provided environment name",
            )),
        }
    }
}

impl TryFrom<&'static str> for Environment {
    type Error = MpesaError;

    fn try_from(v: &'static str) -> Result<Self, Self::Error> {
        match v {
            "production" | "Production" | "PRODUCTION" => Ok(Self::Production),
            "sandbox" | "Sandbox" | "SANDBOX" => Ok(Self::Sandbox),
            _ => Err(MpesaError::Message(
                "Could not parse the provided environment name",
            )),
        }
    }
}

impl Environment {
    /// Matches to intended base_url depending on Environment variant
    ///
    /// ## Example
    /// ```
    /// use mpesa::Environment;
    ///
    /// let env: Environment = Environment::Production;
    /// let base_url: &str = env.base_url();
    /// assert_eq!("https://api.safaricom.co.ke", base_url);
    /// ```
    pub fn base_url(&self) -> &'static str {
        match self {
            Environment::Production => "https://api.safaricom.co.ke",
            Environment::Sandbox => "https://sandbox.safaricom.co.ke",
        }
    }

    /// Match to X509 public key certificate based on
    /// environment variant
    pub fn get_certificate(&self) -> &'static str {
        match self {
            Environment::Production => {
                r#"-----BEGIN CERTIFICATE-----
MIIGkzCCBXugAwIBAgIKXfBp5gAAAD+hNjANBgkqhkiG9w0BAQsFADBbMRMwEQYK
CZImiZPyLGQBGRYDbmV0MRkwFwYKCZImiZPyLGQBGRYJc2FmYXJpY29tMSkwJwYD
VQQDEyBTYWZhcmljb20gSW50ZXJuYWwgSXNzdWluZyBDQSAwMjAeFw0xNzA0MjUx
NjA3MjRaFw0xODAzMjExMzIwMTNaMIGNMQswCQYDVQQGEwJLRTEQMA4GA1UECBMH
TmFpcm9iaTEQMA4GA1UEBxMHTmFpcm9iaTEaMBgGA1UEChMRU2FmYXJpY29tIExp
bWl0ZWQxEzARBgNVBAsTClRlY2hub2xvZ3kxKTAnBgNVBAMTIGFwaWdlZS5hcGlj
YWxsZXIuc2FmYXJpY29tLmNvLmtlMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
CgKCAQEAoknIb5Tm1hxOVdFsOejAs6veAai32Zv442BLuOGkFKUeCUM2s0K8XEsU
t6BP25rQGNlTCTEqfdtRrym6bt5k0fTDscf0yMCoYzaxTh1mejg8rPO6bD8MJB0c
FWRUeLEyWjMeEPsYVSJFv7T58IdAn7/RhkrpBl1dT7SmIZfNVkIlD35+Cxgab+u7
+c7dHh6mWguEEoE3NbV7Xjl60zbD/Buvmu6i9EYz+27jNVPI6pRXHvp+ajIzTSsi
eD8Ztz1eoC9mphErasAGpMbR1sba9bM6hjw4tyTWnJDz7RdQQmnsW1NfFdYdK0qD
RKUX7SG6rQkBqVhndFve4SDFRq6wvQIDAQABo4IDJDCCAyAwHQYDVR0OBBYEFG2w
ycrgEBPFzPUZVjh8KoJ3EpuyMB8GA1UdIwQYMBaAFOsy1E9+YJo6mCBjug1evuh5
TtUkMIIBOwYDVR0fBIIBMjCCAS4wggEqoIIBJqCCASKGgdZsZGFwOi8vL0NOPVNh
ZmFyaWNvbSUyMEludGVybmFsJTIwSXNzdWluZyUyMENBJTIwMDIsQ049U1ZEVDNJ
U1NDQTAxLENOPUNEUCxDTj1QdWJsaWMlMjBLZXklMjBTZXJ2aWNlcyxDTj1TZXJ2
aWNlcyxDTj1Db25maWd1cmF0aW9uLERDPXNhZmFyaWNvbSxEQz1uZXQ/Y2VydGlm
aWNhdGVSZXZvY2F0aW9uTGlzdD9iYXNlP29iamVjdENsYXNzPWNSTERpc3RyaWJ1
dGlvblBvaW50hkdodHRwOi8vY3JsLnNhZmFyaWNvbS5jby5rZS9TYWZhcmljb20l
MjBJbnRlcm5hbCUyMElzc3VpbmclMjBDQSUyMDAyLmNybDCCAQkGCCsGAQUFBwEB
BIH8MIH5MIHJBggrBgEFBQcwAoaBvGxkYXA6Ly8vQ049U2FmYXJpY29tJTIwSW50
ZXJuYWwlMjBJc3N1aW5nJTIwQ0ElMjAwMixDTj1BSUEsQ049UHVibGljJTIwS2V5
JTIwU2VydmljZXMsQ049U2VydmljZXMsQ049Q29uZmlndXJhdGlvbixEQz1zYWZh
cmljb20sREM9bmV0P2NBQ2VydGlmaWNhdGU/YmFzZT9vYmplY3RDbGFzcz1jZXJ0
aWZpY2F0aW9uQXV0aG9yaXR5MCsGCCsGAQUFBzABhh9odHRwOi8vY3JsLnNhZmFy
aWNvbS5jby5rZS9vY3NwMAsGA1UdDwQEAwIFoDA9BgkrBgEEAYI3FQcEMDAuBiYr
BgEEAYI3FQiHz4xWhMLEA4XphTaE3tENhqCICGeGwcdsg7m5awIBZAIBDDAdBgNV
HSUEFjAUBggrBgEFBQcDAgYIKwYBBQUHAwEwJwYJKwYBBAGCNxUKBBowGDAKBggr
BgEFBQcDAjAKBggrBgEFBQcDATANBgkqhkiG9w0BAQsFAAOCAQEAC/hWx7KTwSYr
x2SOyyHNLTRmCnCJmqxA/Q+IzpW1mGtw4Sb/8jdsoWrDiYLxoKGkgkvmQmB2J3zU
ngzJIM2EeU921vbjLqX9sLWStZbNC2Udk5HEecdpe1AN/ltIoE09ntglUNINyCmf
zChs2maF0Rd/y5hGnMM9bX9ub0sqrkzL3ihfmv4vkXNxYR8k246ZZ8tjQEVsKehE
dqAmj8WYkYdWIHQlkKFP9ba0RJv7aBKb8/KP+qZ5hJip0I5Ey6JJ3wlEWRWUYUKh
gYoPHrJ92ToadnFCCpOlLKWc0xVxANofy6fqreOVboPO0qTAYpoXakmgeRNLUiar
0ah6M/q/KA==
-----END CERTIFICATE-----
"#
            }
            Environment::Sandbox => {
                r#"-----BEGIN CERTIFICATE-----
MIIGKzCCBROgAwIBAgIQDL7NH8cxSdUpl0ihH0A1wTANBgkqhkiG9w0BAQsFADBN
MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMScwJQYDVQQDEx5E
aWdpQ2VydCBTSEEyIFNlY3VyZSBTZXJ2ZXIgQ0EwHhcNMTgwODI3MDAwMDAwWhcN
MTkwNDA0MTIwMDAwWjBuMQswCQYDVQQGEwJLRTEQMA4GA1UEBxMHTmFpcm9iaTEW
MBQGA1UEChMNU2FmYXJpY29tIFBMQzETMBEGA1UECxMKRGlnaXRhbCBJVDEgMB4G
A1UEAxMXc2FuZGJveC5zYWZhcmljb20uY28ua2UwggEiMA0GCSqGSIb3DQEBAQUA
A4IBDwAwggEKAoIBAQC78yeC/wLoZY6TJeqc4g/9eAKIpeCwEsjX09pD8ZxAGXqT
Oi7ssdIGJBPmJZNeEVyf8ocFhisCuLngJ9Z5e/AvH52PhrEFmVu2D03zSf4C+rhZ
ndEKP6G79pUAb/bemOliU9zM8xYYkpCRzPWUzk6zSDarg0ZDLw5FrtZj/VJ9YEDL
WGgAfwExEgSN3wjyUlJ2UwI3wqQXLka0VNFWoZxUH5j436gbSWRIL6NJUmrq8V8S
aTEPz3eJHj3NOToDu245c7VKdF/KExyZjRjD2p5I+Aip80TXzKlZj6DjMb3DlfXF
Hsnu0+1uJE701mvKX7BiscxKr8tCRphL63as4dqvAgMBAAGjggLkMIIC4DAfBgNV
HSMEGDAWgBQPgGEcgjFh1S8o541GOLQs4cbZ4jAdBgNVHQ4EFgQUzZmY7ZORLw9w
qRbAQN5m9lJ28qMwIgYDVR0RBBswGYIXc2FuZGJveC5zYWZhcmljb20uY28ua2Uw
DgYDVR0PAQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjBr
BgNVHR8EZDBiMC+gLaArhilodHRwOi8vY3JsMy5kaWdpY2VydC5jb20vc3NjYS1z
aGEyLWc2LmNybDAvoC2gK4YpaHR0cDovL2NybDQuZGlnaWNlcnQuY29tL3NzY2Et
c2hhMi1nNi5jcmwwTAYDVR0gBEUwQzA3BglghkgBhv1sAQEwKjAoBggrBgEFBQcC
ARYcaHR0cHM6Ly93d3cuZGlnaWNlcnQuY29tL0NQUzAIBgZngQwBAgIwfAYIKwYB
BQUHAQEEcDBuMCQGCCsGAQUFBzABhhhodHRwOi8vb2NzcC5kaWdpY2VydC5jb20w
RgYIKwYBBQUHMAKGOmh0dHA6Ly9jYWNlcnRzLmRpZ2ljZXJ0LmNvbS9EaWdpQ2Vy
dFNIQTJTZWN1cmVTZXJ2ZXJDQS5jcnQwCQYDVR0TBAIwADCCAQUGCisGAQQB1nkC
BAIEgfYEgfMA8QB2AKS5CZC0GFgUh7sTosxncAo8NZgE+RvfuON3zQ7IDdwQAAAB
ZXs1FvEAAAQDAEcwRQIgBzVMkm7SNprjJ1GBqiXIc9rNzY+y7gt6s/O02oMkyFoC
IQDBuThGlpmUKpeZoHhK6HGwB4jDMIecmKaOcMS18R2jxwB3AId1v+dZfPiMQ5lf
vfNu/1aNR1Y2/0q1YMG06v9eoIMPAAABZXs1F8IAAAQDAEgwRgIhAIRq2XFiC+RS
uDCYq8ICJg0QafSV+e9BLpJnElEdaSjiAiEAyiiW4vxwv4cWcAXE6FAipctyUBs6
bE5QyaCnmNpoDiQwDQYJKoZIhvcNAQELBQADggEBAB0YoWve9Sxhb0PBS3Hc46Rf
a7H1jhHuwE+UyscSQsdJdk8uPAgDuKRZMvJPGEaCkNHm36NfcaXXFjPOl7LI1d1a
9zqSP0xeZBI6cF0x96WuQGrI9/WR2tfxjmaUSp8a/aJ6n+tZA28eJZNPrIaMm+6j
gh7AkKnqcf+g8F/MvCCVdNAiVMdz6UpCscf6BRPHNZ5ifvChGh7aUKjrVLLuF4Ls
HE05qm6HNyV5eTa6wvcbc4ewguN1UDZvPWetSyfBk10Wbpor4znQ4TJ3Y9uCvsJH
41ldblDvZZ2z4kB2UYQ7iBkPlJSxSOaFgW/GGDXq49sz/995xzhVITHxh2SdLkI=
-----END CERTIFICATE-----
"#
            }
        }
    }
}
46.388535
119
0.826445
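// Usage sketch for the record above (self-contained; `Env` is a trimmed copy of the
// crate's `Environment`, since the real type lives in the `mpesa` crate): the `FromStr`
// impl accepts exactly the three spellings matched above and rejects anything else.

use std::str::FromStr;

#[derive(Debug, PartialEq)]
enum Env {
    Production,
    Sandbox,
}

impl FromStr for Env {
    type Err = &'static str;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "production" | "Production" | "PRODUCTION" => Ok(Env::Production),
            "sandbox" | "Sandbox" | "SANDBOX" => Ok(Env::Sandbox),
            _ => Err("Could not parse the provided environment name"),
        }
    }
}

fn main() {
    assert_eq!(Env::from_str("SANDBOX"), Ok(Env::Sandbox));
    assert!(Env::from_str("staging").is_err());
    assert!("production".parse::<Env>().is_ok()); // `parse` comes free with `FromStr`
}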
2fcb5b9c4e66e722098efde38810b1ab60389582
19,765
//! Implementation of `std::os` functionality for unix systems #![allow(unused_imports)] // lots of cfg code here use crate::os::unix::prelude::*; use crate::error::Error as StdError; use crate::ffi::{CStr, CString, OsStr, OsString}; use crate::fmt; use crate::io; use crate::iter; use crate::marker::PhantomData; use crate::mem; use crate::memchr; use crate::path::{self, PathBuf}; use crate::ptr; use crate::slice; use crate::str; use crate::sys::cvt; use crate::sys::fd; use crate::sys_common::mutex::{Mutex, MutexGuard}; use crate::vec; use libc::{c_char, c_int, c_void}; const TMPBUF_SZ: usize = 128; cfg_if::cfg_if! { if #[cfg(target_os = "redox")] { const PATH_SEPARATOR: u8 = b';'; } else { const PATH_SEPARATOR: u8 = b':'; } } extern "C" { #[cfg(not(target_os = "dragonfly"))] #[cfg_attr( any( target_os = "linux", target_os = "emscripten", target_os = "fuchsia", target_os = "l4re" ), link_name = "__errno_location" )] #[cfg_attr( any( target_os = "netbsd", target_os = "openbsd", target_os = "android", target_os = "redox", target_env = "newlib" ), link_name = "__errno" )] #[cfg_attr(any(target_os = "solaris", target_os = "illumos"), link_name = "___errno")] #[cfg_attr( any(target_os = "macos", target_os = "ios", target_os = "freebsd"), link_name = "__error" )] #[cfg_attr(target_os = "haiku", link_name = "_errnop")] fn errno_location() -> *mut c_int; } /// Returns the platform-specific value of errno #[cfg(not(target_os = "dragonfly"))] pub fn errno() -> i32 { unsafe { (*errno_location()) as i32 } } /// Sets the platform-specific value of errno #[cfg(all(not(target_os = "linux"), not(target_os = "dragonfly")))] // needed for readdir and syscall! #[allow(dead_code)] // but not all target cfgs actually end up using it pub fn set_errno(e: i32) { unsafe { *errno_location() = e as c_int } } #[cfg(target_os = "dragonfly")] pub fn errno() -> i32 { extern "C" { #[thread_local] static errno: c_int; } unsafe { errno as i32 } } #[cfg(target_os = "dragonfly")] pub fn set_errno(e: i32) { extern "C" { #[thread_local] static mut errno: c_int; } unsafe { errno = e; } } /// Gets a detailed string description for the given error number. pub fn error_string(errno: i32) -> String { extern "C" { #[cfg_attr(any(target_os = "linux", target_env = "newlib"), link_name = "__xpg_strerror_r")] fn strerror_r(errnum: c_int, buf: *mut c_char, buflen: libc::size_t) -> c_int; } let mut buf = [0 as c_char; TMPBUF_SZ]; let p = buf.as_mut_ptr(); unsafe { if strerror_r(errno as c_int, p, buf.len()) < 0 { panic!("strerror_r failure"); } let p = p as *const _; str::from_utf8(CStr::from_ptr(p).to_bytes()).unwrap().to_owned() } } pub fn getcwd() -> io::Result<PathBuf> { let mut buf = Vec::with_capacity(512); loop { unsafe { let ptr = buf.as_mut_ptr() as *mut libc::c_char; if !libc::getcwd(ptr, buf.capacity()).is_null() { let len = CStr::from_ptr(buf.as_ptr() as *const libc::c_char).to_bytes().len(); buf.set_len(len); buf.shrink_to_fit(); return Ok(PathBuf::from(OsString::from_vec(buf))); } else { let error = io::Error::last_os_error(); if error.raw_os_error() != Some(libc::ERANGE) { return Err(error); } } // Trigger the internal buffer resizing logic of `Vec` by requiring // more space than the current capacity. 
let cap = buf.capacity(); buf.set_len(cap); buf.reserve(1); } } } pub fn chdir(p: &path::Path) -> io::Result<()> { let p: &OsStr = p.as_ref(); let p = CString::new(p.as_bytes())?; unsafe { match libc::chdir(p.as_ptr()) == (0 as c_int) { true => Ok(()), false => Err(io::Error::last_os_error()), } } } pub struct SplitPaths<'a> { iter: iter::Map<slice::Split<'a, u8, fn(&u8) -> bool>, fn(&'a [u8]) -> PathBuf>, } pub fn split_paths(unparsed: &OsStr) -> SplitPaths<'_> { fn bytes_to_path(b: &[u8]) -> PathBuf { PathBuf::from(<OsStr as OsStrExt>::from_bytes(b)) } fn is_separator(b: &u8) -> bool { *b == PATH_SEPARATOR } let unparsed = unparsed.as_bytes(); SplitPaths { iter: unparsed .split(is_separator as fn(&u8) -> bool) .map(bytes_to_path as fn(&[u8]) -> PathBuf), } } impl<'a> Iterator for SplitPaths<'a> { type Item = PathBuf; fn next(&mut self) -> Option<PathBuf> { self.iter.next() } fn size_hint(&self) -> (usize, Option<usize>) { self.iter.size_hint() } } #[derive(Debug)] pub struct JoinPathsError; pub fn join_paths<I, T>(paths: I) -> Result<OsString, JoinPathsError> where I: Iterator<Item = T>, T: AsRef<OsStr>, { let mut joined = Vec::new(); for (i, path) in paths.enumerate() { let path = path.as_ref().as_bytes(); if i > 0 { joined.push(PATH_SEPARATOR) } if path.contains(&PATH_SEPARATOR) { return Err(JoinPathsError); } joined.extend_from_slice(path); } Ok(OsStringExt::from_vec(joined)) } impl fmt::Display for JoinPathsError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "path segment contains separator `{}`", PATH_SEPARATOR) } } impl StdError for JoinPathsError { #[allow(deprecated)] fn description(&self) -> &str { "failed to join paths" } } #[cfg(any(target_os = "freebsd", target_os = "dragonfly"))] pub fn current_exe() -> io::Result<PathBuf> { unsafe { let mut mib = [ libc::CTL_KERN as c_int, libc::KERN_PROC as c_int, libc::KERN_PROC_PATHNAME as c_int, -1 as c_int, ]; let mut sz = 0; cvt(libc::sysctl( mib.as_mut_ptr(), mib.len() as libc::c_uint, ptr::null_mut(), &mut sz, ptr::null_mut(), 0, ))?; if sz == 0 { return Err(io::Error::last_os_error()); } let mut v: Vec<u8> = Vec::with_capacity(sz); cvt(libc::sysctl( mib.as_mut_ptr(), mib.len() as libc::c_uint, v.as_mut_ptr() as *mut libc::c_void, &mut sz, ptr::null_mut(), 0, ))?; if sz == 0 { return Err(io::Error::last_os_error()); } v.set_len(sz - 1); // chop off trailing NUL Ok(PathBuf::from(OsString::from_vec(v))) } } #[cfg(target_os = "netbsd")] pub fn current_exe() -> io::Result<PathBuf> { fn sysctl() -> io::Result<PathBuf> { unsafe { let mib = [libc::CTL_KERN, libc::KERN_PROC_ARGS, -1, libc::KERN_PROC_PATHNAME]; let mut path_len: usize = 0; cvt(libc::sysctl( mib.as_ptr(), mib.len() as libc::c_uint, ptr::null_mut(), &mut path_len, ptr::null(), 0, ))?; if path_len <= 1 { return Err(io::Error::new( io::ErrorKind::Other, "KERN_PROC_PATHNAME sysctl returned zero-length string", )); } let mut path: Vec<u8> = Vec::with_capacity(path_len); cvt(libc::sysctl( mib.as_ptr(), mib.len() as libc::c_uint, path.as_ptr() as *mut libc::c_void, &mut path_len, ptr::null(), 0, ))?; path.set_len(path_len - 1); // chop off NUL Ok(PathBuf::from(OsString::from_vec(path))) } } fn procfs() -> io::Result<PathBuf> { let curproc_exe = path::Path::new("/proc/curproc/exe"); if curproc_exe.is_file() { return crate::fs::read_link(curproc_exe); } Err(io::Error::new( io::ErrorKind::Other, "/proc/curproc/exe doesn't point to regular file.", )) } sysctl().or_else(|_| procfs()) } #[cfg(target_os = "openbsd")] pub fn current_exe() -> io::Result<PathBuf> { 
unsafe { let mut mib = [libc::CTL_KERN, libc::KERN_PROC_ARGS, libc::getpid(), libc::KERN_PROC_ARGV]; let mib = mib.as_mut_ptr(); let mut argv_len = 0; cvt(libc::sysctl(mib, 4, ptr::null_mut(), &mut argv_len, ptr::null_mut(), 0))?; let mut argv = Vec::<*const libc::c_char>::with_capacity(argv_len as usize); cvt(libc::sysctl(mib, 4, argv.as_mut_ptr() as *mut _, &mut argv_len, ptr::null_mut(), 0))?; argv.set_len(argv_len as usize); if argv[0].is_null() { return Err(io::Error::new(io::ErrorKind::Other, "no current exe available")); } let argv0 = CStr::from_ptr(argv[0]).to_bytes(); if argv0[0] == b'.' || argv0.iter().any(|b| *b == b'/') { crate::fs::canonicalize(OsStr::from_bytes(argv0)) } else { Ok(PathBuf::from(OsStr::from_bytes(argv0))) } } } #[cfg(any(target_os = "linux", target_os = "android", target_os = "emscripten"))] pub fn current_exe() -> io::Result<PathBuf> { match crate::fs::read_link("/proc/self/exe") { Err(ref e) if e.kind() == io::ErrorKind::NotFound => Err(io::Error::new( io::ErrorKind::Other, "no /proc/self/exe available. Is /proc mounted?", )), other => other, } } #[cfg(any(target_os = "macos", target_os = "ios"))] pub fn current_exe() -> io::Result<PathBuf> { extern "C" { fn _NSGetExecutablePath(buf: *mut libc::c_char, bufsize: *mut u32) -> libc::c_int; } unsafe { let mut sz: u32 = 0; _NSGetExecutablePath(ptr::null_mut(), &mut sz); if sz == 0 { return Err(io::Error::last_os_error()); } let mut v: Vec<u8> = Vec::with_capacity(sz as usize); let err = _NSGetExecutablePath(v.as_mut_ptr() as *mut i8, &mut sz); if err != 0 { return Err(io::Error::last_os_error()); } v.set_len(sz as usize - 1); // chop off trailing NUL Ok(PathBuf::from(OsString::from_vec(v))) } } #[cfg(any(target_os = "solaris", target_os = "illumos"))] pub fn current_exe() -> io::Result<PathBuf> { extern "C" { fn getexecname() -> *const c_char; } unsafe { let path = getexecname(); if path.is_null() { Err(io::Error::last_os_error()) } else { let filename = CStr::from_ptr(path).to_bytes(); let path = PathBuf::from(<OsStr as OsStrExt>::from_bytes(filename)); // Prepend a current working directory to the path if // it doesn't contain an absolute pathname. 
if filename[0] == b'/' { Ok(path) } else { getcwd().map(|cwd| cwd.join(path)) } } } } #[cfg(target_os = "haiku")] pub fn current_exe() -> io::Result<PathBuf> { // Use Haiku's image info functions #[repr(C)] struct image_info { id: i32, type_: i32, sequence: i32, init_order: i32, init_routine: *mut libc::c_void, // function pointer term_routine: *mut libc::c_void, // function pointer device: libc::dev_t, node: libc::ino_t, name: [libc::c_char; 1024], // MAXPATHLEN text: *mut libc::c_void, data: *mut libc::c_void, text_size: i32, data_size: i32, api_version: i32, abi: i32, } unsafe { extern "C" { fn _get_next_image_info( team_id: i32, cookie: *mut i32, info: *mut image_info, size: i32, ) -> i32; } let mut info: image_info = mem::zeroed(); let mut cookie: i32 = 0; // the executable can be found at team id 0 let result = _get_next_image_info(0, &mut cookie, &mut info, mem::size_of::<image_info>() as i32); if result != 0 { use crate::io::ErrorKind; Err(io::Error::new(ErrorKind::Other, "Error getting executable path")) } else { let name = CStr::from_ptr(info.name.as_ptr()).to_bytes(); Ok(PathBuf::from(OsStr::from_bytes(name))) } } } #[cfg(target_os = "redox")] pub fn current_exe() -> io::Result<PathBuf> { crate::fs::read_to_string("sys:exe").map(PathBuf::from) } #[cfg(any(target_os = "fuchsia", target_os = "l4re"))] pub fn current_exe() -> io::Result<PathBuf> { use crate::io::ErrorKind; Err(io::Error::new(ErrorKind::Other, "Not yet implemented!")) } pub struct Env { iter: vec::IntoIter<(OsString, OsString)>, _dont_send_or_sync_me: PhantomData<*mut ()>, } impl Iterator for Env { type Item = (OsString, OsString); fn next(&mut self) -> Option<(OsString, OsString)> { self.iter.next() } fn size_hint(&self) -> (usize, Option<usize>) { self.iter.size_hint() } } #[cfg(target_os = "macos")] pub unsafe fn environ() -> *mut *const *const c_char { extern "C" { fn _NSGetEnviron() -> *mut *const *const c_char; } _NSGetEnviron() } #[cfg(not(target_os = "macos"))] pub unsafe fn environ() -> *mut *const *const c_char { extern "C" { static mut environ: *const *const c_char; } &mut environ } pub unsafe fn env_lock() -> MutexGuard<'static> { // We never call `ENV_LOCK.init()`, so it is UB to attempt to // acquire this mutex reentrantly! static ENV_LOCK: Mutex = Mutex::new(); ENV_LOCK.lock() } /// Returns a vector of (variable, value) byte-vector pairs for all the /// environment variables of the current process. pub fn env() -> Env { unsafe { let _guard = env_lock(); let mut environ = *environ(); let mut result = Vec::new(); if !environ.is_null() { while !(*environ).is_null() { if let Some(key_value) = parse(CStr::from_ptr(*environ).to_bytes()) { result.push(key_value); } environ = environ.add(1); } } return Env { iter: result.into_iter(), _dont_send_or_sync_me: PhantomData }; } fn parse(input: &[u8]) -> Option<(OsString, OsString)> { // Strategy (copied from glibc): Variable name and value are separated // by an ASCII equals sign '='. Since a variable name must not be // empty, allow variable names starting with an equals sign. Skip all // malformed lines. 
if input.is_empty() { return None; } let pos = memchr::memchr(b'=', &input[1..]).map(|p| p + 1); pos.map(|p| { ( OsStringExt::from_vec(input[..p].to_vec()), OsStringExt::from_vec(input[p + 1..].to_vec()), ) }) } } pub fn getenv(k: &OsStr) -> io::Result<Option<OsString>> { // environment variables with a nul byte can't be set, so their value is // always None as well let k = CString::new(k.as_bytes())?; unsafe { let _guard = env_lock(); let s = libc::getenv(k.as_ptr()) as *const libc::c_char; let ret = if s.is_null() { None } else { Some(OsStringExt::from_vec(CStr::from_ptr(s).to_bytes().to_vec())) }; Ok(ret) } } pub fn setenv(k: &OsStr, v: &OsStr) -> io::Result<()> { let k = CString::new(k.as_bytes())?; let v = CString::new(v.as_bytes())?; unsafe { let _guard = env_lock(); cvt(libc::setenv(k.as_ptr(), v.as_ptr(), 1)).map(drop) } } pub fn unsetenv(n: &OsStr) -> io::Result<()> { let nbuf = CString::new(n.as_bytes())?; unsafe { let _guard = env_lock(); cvt(libc::unsetenv(nbuf.as_ptr())).map(drop) } } pub fn page_size() -> usize { unsafe { libc::sysconf(libc::_SC_PAGESIZE) as usize } } pub fn temp_dir() -> PathBuf { crate::env::var_os("TMPDIR").map(PathBuf::from).unwrap_or_else(|| { if cfg!(target_os = "android") { PathBuf::from("/data/local/tmp") } else { PathBuf::from("/tmp") } }) } pub fn home_dir() -> Option<PathBuf> { return crate::env::var_os("HOME").or_else(|| unsafe { fallback() }).map(PathBuf::from); #[cfg(any( target_os = "android", target_os = "ios", target_os = "emscripten", target_os = "redox" ))] unsafe fn fallback() -> Option<OsString> { None } #[cfg(not(any( target_os = "android", target_os = "ios", target_os = "emscripten", target_os = "redox" )))] unsafe fn fallback() -> Option<OsString> { let amt = match libc::sysconf(libc::_SC_GETPW_R_SIZE_MAX) { n if n < 0 => 512 as usize, n => n as usize, }; let mut buf = Vec::with_capacity(amt); let mut passwd: libc::passwd = mem::zeroed(); let mut result = ptr::null_mut(); match libc::getpwuid_r( libc::getuid(), &mut passwd, buf.as_mut_ptr(), buf.capacity(), &mut result, ) { 0 if !result.is_null() => { let ptr = passwd.pw_dir as *const _; let bytes = CStr::from_ptr(ptr).to_bytes().to_vec(); Some(OsStringExt::from_vec(bytes)) } _ => None, } } } pub fn exit(code: i32) -> ! { unsafe { libc::exit(code as c_int) } } pub fn getpid() -> u32 { unsafe { libc::getpid() as u32 } } pub fn getppid() -> u32 { unsafe { libc::getppid() as u32 } } #[cfg(target_env = "gnu")] pub fn glibc_version() -> Option<(usize, usize)> { if let Some(Ok(version_str)) = glibc_version_cstr().map(CStr::to_str) { parse_glibc_version(version_str) } else { None } } #[cfg(target_env = "gnu")] fn glibc_version_cstr() -> Option<&'static CStr> { weak! { fn gnu_get_libc_version() -> *const libc::c_char } if let Some(f) = gnu_get_libc_version.get() { unsafe { Some(CStr::from_ptr(f())) } } else { None } } // Returns Some((major, minor)) if the string is a valid "x.y" version, // ignoring any extra dot-separated parts. Otherwise return None. #[cfg(target_env = "gnu")] fn parse_glibc_version(version: &str) -> Option<(usize, usize)> { let mut parsed_ints = version.split('.').map(str::parse::<usize>).fuse(); match (parsed_ints.next(), parsed_ints.next()) { (Some(Ok(major)), Some(Ok(minor))) => Some((major, minor)), _ => None, } } #[cfg(all(test, target_env = "gnu"))] mod test { use super::*; #[test] fn test_glibc_version() { // This mostly just tests that the weak linkage doesn't panic wildly... 
glibc_version(); } #[test] fn test_parse_glibc_version() { let cases = [ ("0.0", Some((0, 0))), ("01.+2", Some((1, 2))), ("3.4.5.six", Some((3, 4))), ("1", None), ("1.-2", None), ("1.foo", None), ("foo.1", None), ]; for &(version_str, parsed) in cases.iter() { assert_eq!(parsed, parse_glibc_version(version_str)); } } }
29.281481
102
0.536453
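// Hedged aside: the `SplitPaths`/`join_paths` machinery above backs the public
// `std::env::split_paths` and `std::env::join_paths`; on unix the separator is ':'
// (';' on Redox, per the cfg above). A self-contained round-trip through the public API:

use std::env;
use std::path::PathBuf;

fn main() {
    let joined = env::join_paths(["/usr/bin", "/usr/local/bin"]).unwrap();
    let split: Vec<PathBuf> = env::split_paths(&joined).collect();
    assert_eq!(
        split,
        [PathBuf::from("/usr/bin"), PathBuf::from("/usr/local/bin")]
    );
}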
690e590fab72052391e50cad6ad5f53c8db84339
4,724
//! Driver for the [SparkFun Serial 7 Segment Display](https://github.com/sparkfun/Serial7SegmentDisplay/wiki/Serial-7-Segment-Display-Datasheet)
//!
//! This is compatible with `embedded-hal`.
//!
//! Right now, only the SPI or I2C interfaces are supported. In the future,
//! support will be added for UART interfaces.

#![no_std]

use bitflags::bitflags;

pub mod i2c;
pub mod spi;

bitflags! {
    /// A bit-packed structure representing the punctuation LEDs of the display
    pub struct PunctuationFlags: u8 {
        const DOT_BETWEEN_1_AND_2        = 0b0000_0001;
        const DOT_BETWEEN_2_AND_3        = 0b0000_0010;
        const DOT_BETWEEN_3_AND_4        = 0b0000_0100;
        const DOT_RIGHT_OF_4             = 0b0000_1000;
        const DOTS_COLON                 = 0b0001_0000;
        const APOSTROPHE_BETWEEN_3_AND_4 = 0b0010_0000;
        const NONE                       = 0b0000_0000;
    }
}

mod command {
    #![allow(dead_code)]

    pub(crate) const CLEAR_DISPLAY: u8 = 0x76;
    pub(crate) const DECIMAL_CTL: u8 = 0x77;
    pub(crate) const CURSOR_CTL: u8 = 0x79;
    pub(crate) const BRIGHTNESS_CTL: u8 = 0x7A;
    pub(crate) const DIGIT_1_CTL: u8 = 0x7B;
    pub(crate) const DIGIT_2_CTL: u8 = 0x7C;
    pub(crate) const DIGIT_3_CTL: u8 = 0x7D;
    pub(crate) const DIGIT_4_CTL: u8 = 0x7E;
    pub(crate) const BAUD_RATE_CFG: u8 = 0x7F;
    pub(crate) const I2C_ADDR_CFG: u8 = 0x80;
    pub(crate) const FACTORY_RESET: u8 = 0x81;
}

#[derive(Debug, Eq, PartialEq)]
pub enum Error<I> {
    Interface(I),
    CursorOutOfRange,
    DigitOutOfRange,
}

pub trait SevenSegInterface {
    /// A single error type used by the interface
    type InterfaceError;

    /// Sending commands to the interface
    fn send(&mut self, data: &[u8]) -> Result<(), Error<Self::InterfaceError>>;

    /// Set the digit cursor to a particular location
    /// `col` may be 0..=3, from left to right.
    fn set_cursor(&mut self, col: u8) -> Result<(), Error<Self::InterfaceError>> {
        if col >= 4 {
            return Err(Error::CursorOutOfRange);
        }
        self.send(&[command::CURSOR_CTL, col])
    }

    /// Set the brightness for the display. The datasheet says that 100 is the
    /// brightest, however my device gets brighter with values above 100 (up to 255).
    /// Your mileage may vary.
    fn set_brightness(&mut self, bright: u8) -> Result<(), Error<Self::InterfaceError>> {
        self.send(&[command::BRIGHTNESS_CTL, bright])
    }

    /// Completely clear the display
    fn clear(&mut self) -> Result<(), Error<Self::InterfaceError>> {
        self.send(&[command::CLEAR_DISPLAY])
    }

    /// Write a digit to the current cursor position. This also
    /// increments the cursor position
    fn write_digit(&mut self, digit: u8) -> Result<(), Error<Self::InterfaceError>> {
        if digit > 0x0F {
            return Err(Error::DigitOutOfRange);
        }
        self.send(&[digit])
    }

    /// Write the requested punctuation to the display. This does not take
    /// the current state into account, so any unset flags in `punct_flags`
    /// will turn the corresponding LEDs off.
    fn write_punctuation(
        &mut self,
        punct_flags: PunctuationFlags,
    ) -> Result<(), Error<Self::InterfaceError>> {
        self.send(&[command::DECIMAL_CTL, punct_flags.bits()])
    }

    /// Write the requested digits to the display, starting at the current
    /// cursor position. Each digit must be in the range 0x0..=0xF, and up
    /// to 4 digits may be updated at once. The cursor is incremented after
    /// each digit
    fn write_digits(&mut self, digits: &[u8]) -> Result<(), Error<Self::InterfaceError>> {
        // Too many digits?
        if digits.len() > 4 {
            return Err(Error::CursorOutOfRange);
        }

        // Any digit too big?
        for d in digits {
            if *d > 0x0F {
                return Err(Error::DigitOutOfRange);
            }
        }

        self.send(digits)
    }

    /// Write the number to the display. The number will be left-filled
    /// with zeroes if necessary. After this function, the cursor
    /// will be at position 0.
    fn set_num(&mut self, num: u16) -> Result<(), Error<Self::InterfaceError>> {
        if num > 9999 {
            return Err(Error::DigitOutOfRange);
        }

        self.set_cursor(0)?;

        // TODO: We seem to need roughly 15uS between
        // back-to-back commands. How should I handle this?
        // Failure to do so can cause a potential NACK.
        let data: [u8; 4] = [
            (num / 1000) as u8,
            ((num % 1000) / 100) as u8,
            ((num % 100) / 10) as u8,
            (num % 10) as u8,
        ];

        self.send(&data)
    }
}
32.136054
145
0.609441
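// The digit-splitting arithmetic used by `set_num` above, extracted as a self-contained
// function so the left-fill-with-zeroes behaviour can be checked on its own:

fn split_digits(num: u16) -> [u8; 4] {
    assert!(num <= 9999, "display has only four digits");
    [
        (num / 1000) as u8,
        ((num % 1000) / 100) as u8,
        ((num % 100) / 10) as u8,
        (num % 10) as u8,
    ]
}

fn main() {
    assert_eq!(split_digits(42), [0, 0, 4, 2]); // left-filled with zeroes
    assert_eq!(split_digits(9999), [9, 9, 9, 9]);
}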
1a81b345e5f12f3e0e39932ea664757b9137c7fa
972
use magnesium::*;

type Tais = TagAttributeIterator<'static>;

#[test]
fn empty_string_or_whitespace_gives_none() {
    assert!(Tais::new("").next().is_none());
    assert!(Tais::new(" ").next().is_none());
    assert!(Tais::new("\r\n").next().is_none());
    assert!(Tais::new("\t").next().is_none());
}

#[test]
fn test_parsing() {
    let mut iter = Tais::new(r#"namespace="Graphics" group="Polygon""#);
    assert_eq!(
        iter.next(),
        Some(TagAttribute {
            key: "namespace",
            value: "Graphics"
        })
    );
    assert_eq!(
        iter.next(),
        Some(TagAttribute {
            key: "group",
            value: "Polygon"
        })
    );
    assert_eq!(iter.next(), None);

    // extra space around doesn't affect it
    let mut iter = Tais::new(r#" namespace="Graphics" group="Polygon" "#);
    assert_eq!(
        iter.next(),
        Some(TagAttribute {
            key: "namespace",
            value: "Graphics"
        })
    );
    assert_eq!(
        iter.next(),
        Some(TagAttribute {
            key: "group",
            value: "Polygon"
        })
    );
    assert_eq!(iter.next(), None);
}
23.707317
72
0.611111
8f66550220ab6fea5ecd4b10438b4c561f13ce0a
1,346
#![doc = include_str!("../notes/README.md")]

pub use dioxus_core as core;

pub mod hooks {
    #[cfg(feature = "hooks")]
    pub use dioxus_hooks::*;

    #[cfg(all(target_arch = "wasm32", feature = "web"))]
    pub use dioxus_web::use_eval;

    #[cfg(all(not(target_arch = "wasm32"), feature = "desktop"))]
    pub use dioxus_desktop::use_eval;
}

#[cfg(feature = "router")]
pub use dioxus_router as router;

#[cfg(feature = "ssr")]
pub use dioxus_ssr as ssr;

#[cfg(feature = "web")]
pub use dioxus_web as web;

#[cfg(feature = "liveview")]
pub use dioxus_liveview as liveview;

#[cfg(feature = "desktop")]
pub use dioxus_desktop as desktop;

#[cfg(feature = "tui")]
pub use dioxus_tui as tui;

#[cfg(feature = "fermi")]
pub use fermi;

pub mod events {
    #[cfg(feature = "html")]
    pub use dioxus_html::{on::*, KeyCode};
}

pub mod prelude {
    pub use crate::hooks::*;
    pub use dioxus_core::prelude::*;
    pub use dioxus_core_macro::{format_args_f, inline_props, rsx, Props};
    pub use dioxus_elements::{GlobalAttributes, SvgAttributes};
    pub use dioxus_html as dioxus_elements;

    #[cfg(feature = "router")]
    pub use dioxus_router::{use_route, use_router, Link, Redirect, Route, Router, UseRoute};

    #[cfg(feature = "fermi")]
    pub use fermi::{use_atom_ref, use_init_atom_root, use_read, use_set, Atom, AtomRef};
}
24.472727
92
0.666419
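// The cfg-switching pattern used by the `hooks` module above, reduced to a minimal
// self-contained shape: one item name resolving to different backends depending on the
// compilation target. Module names here are hypothetical, not dioxus APIs.

#[allow(dead_code)]
mod web_backend {
    pub fn eval(js: &str) -> String {
        format!("web: {js}")
    }
}

#[allow(dead_code)]
mod desktop_backend {
    pub fn eval(js: &str) -> String {
        format!("desktop: {js}")
    }
}

// Exactly one of these is compiled in, so `eval` has a single meaning per target.
#[cfg(target_arch = "wasm32")]
use web_backend::eval;
#[cfg(not(target_arch = "wasm32"))]
use desktop_backend::eval;

fn main() {
    // On a non-wasm host this resolves to the desktop backend.
    println!("{}", eval("1 + 1"));
}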
bf6be0ee98545c9749672222438346791ecb9fb2
1,309
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

fn main() {
    match 0u8 {
        [u8]::AssocItem => {}
        //~^ ERROR missing angle brackets in associated item path
        //~| ERROR no associated item named `AssocItem` found for type `[u8]` in the current scope
        (u8, u8)::AssocItem => {}
        //~^ ERROR missing angle brackets in associated item path
        //~| ERROR no associated item named `AssocItem` found for type `(u8, u8)` in the current scope
        _::AssocItem => {}
        //~^ ERROR missing angle brackets in associated item path
        //~| ERROR no associated item named `AssocItem` found for type `_` in the current scope
    }
    match &0u8 {
        &(u8,)::AssocItem => {}
        //~^ ERROR missing angle brackets in associated item path
        //~| ERROR no associated item named `AssocItem` found for type `(u8,)` in the current scope
    }
}
45.137931
100
0.655462
26442faa3215c48a39e51312757992fc36f9b224
48,566
use rustc::middle::lang_items; use rustc::ty::{self, Ty, TypeFoldable}; use rustc::ty::layout::{self, LayoutOf, HasTyCtxt}; use rustc::mir::{self, Place, PlaceBase, Static, StaticKind}; use rustc::mir::interpret::EvalErrorKind; use rustc_target::abi::call::{ArgType, FnType, PassMode, IgnoreMode}; use rustc_target::spec::abi::Abi; use rustc_mir::monomorphize; use crate::base; use crate::MemFlags; use crate::common::{self, IntPredicate}; use crate::meth; use crate::traits::*; use std::borrow::Cow; use syntax::symbol::Symbol; use syntax_pos::Pos; use super::{FunctionCx, LocalRef}; use super::place::PlaceRef; use super::operand::OperandRef; use super::operand::OperandValue::{Pair, Ref, Immediate}; /// Used by `FunctionCx::codegen_terminator` for emitting common patterns /// e.g., creating a basic block, calling a function, etc. struct TerminatorCodegenHelper<'a, 'tcx> { bb: &'a mir::BasicBlock, terminator: &'a mir::Terminator<'tcx>, funclet_bb: Option<mir::BasicBlock>, } impl<'a, 'tcx> TerminatorCodegenHelper<'a, 'tcx> { /// Returns the associated funclet from `FunctionCx::funclets` for the /// `funclet_bb` member if it is not `None`. fn funclet<'c, 'b, Bx: BuilderMethods<'b, 'tcx>>( &self, fx: &'c mut FunctionCx<'b, 'tcx, Bx>, ) -> Option<&'c Bx::Funclet> { match self.funclet_bb { Some(funcl) => fx.funclets[funcl].as_ref(), None => None, } } fn lltarget<'b, 'c, Bx: BuilderMethods<'b, 'tcx>>( &self, fx: &'c mut FunctionCx<'b, 'tcx, Bx>, target: mir::BasicBlock, ) -> (Bx::BasicBlock, bool) { let span = self.terminator.source_info.span; let lltarget = fx.blocks[target]; let target_funclet = fx.cleanup_kinds[target].funclet_bb(target); match (self.funclet_bb, target_funclet) { (None, None) => (lltarget, false), (Some(f), Some(t_f)) if f == t_f || !base::wants_msvc_seh(fx.cx.tcx().sess) => (lltarget, false), // jump *into* cleanup - need a landing pad if GNU (None, Some(_)) => (fx.landing_pad_to(target), false), (Some(_), None) => span_bug!(span, "{:?} - jump out of cleanup?", self.terminator), (Some(_), Some(_)) => (fx.landing_pad_to(target), true), } } /// Create a basic block. fn llblock<'c, 'b, Bx: BuilderMethods<'b, 'tcx>>( &self, fx: &'c mut FunctionCx<'b, 'tcx, Bx>, target: mir::BasicBlock, ) -> Bx::BasicBlock { let (lltarget, is_cleanupret) = self.lltarget(fx, target); if is_cleanupret { // MSVC cross-funclet jump - need a trampoline debug!("llblock: creating cleanup trampoline for {:?}", target); let name = &format!("{:?}_cleanup_trampoline_{:?}", self.bb, target); let mut trampoline = fx.new_block(name); trampoline.cleanup_ret(self.funclet(fx).unwrap(), Some(lltarget)); trampoline.llbb() } else { lltarget } } fn funclet_br<'c, 'b, Bx: BuilderMethods<'b, 'tcx>>( &self, fx: &'c mut FunctionCx<'b, 'tcx, Bx>, bx: &mut Bx, target: mir::BasicBlock, ) { let (lltarget, is_cleanupret) = self.lltarget(fx, target); if is_cleanupret { // micro-optimization: generate a `ret` rather than a jump // to a trampoline. bx.cleanup_ret(self.funclet(fx).unwrap(), Some(lltarget)); } else { bx.br(lltarget); } } /// Call `fn_ptr` of `fn_ty` with the arguments `llargs`, the optional /// return destination `destination` and the cleanup function `cleanup`. 
fn do_call<'c, 'b, Bx: BuilderMethods<'b, 'tcx>>( &self, fx: &'c mut FunctionCx<'b, 'tcx, Bx>, bx: &mut Bx, fn_ty: FnType<'tcx, Ty<'tcx>>, fn_ptr: Bx::Value, llargs: &[Bx::Value], destination: Option<(ReturnDest<'tcx, Bx::Value>, mir::BasicBlock)>, cleanup: Option<mir::BasicBlock>, ) { if let Some(cleanup) = cleanup { let ret_bx = if let Some((_, target)) = destination { fx.blocks[target] } else { fx.unreachable_block() }; let invokeret = bx.invoke(fn_ptr, &llargs, ret_bx, self.llblock(fx, cleanup), self.funclet(fx)); bx.apply_attrs_callsite(&fn_ty, invokeret); if let Some((ret_dest, target)) = destination { let mut ret_bx = fx.build_block(target); fx.set_debug_loc(&mut ret_bx, self.terminator.source_info); fx.store_return(&mut ret_bx, ret_dest, &fn_ty.ret, invokeret); } } else { let llret = bx.call(fn_ptr, &llargs, self.funclet(fx)); bx.apply_attrs_callsite(&fn_ty, llret); if fx.mir[*self.bb].is_cleanup { // Cleanup is always the cold path. Don't inline // drop glue. Also, when there is a deeply-nested // struct, there are "symmetry" issues that cause // exponential inlining - see issue #41696. bx.do_not_inline(llret); } if let Some((ret_dest, target)) = destination { fx.store_return(bx, ret_dest, &fn_ty.ret, llret); self.funclet_br(fx, bx, target); } else { bx.unreachable(); } } } } /// Codegen implementations for some terminator variants. impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { /// Generates code for a `Resume` terminator. fn codegen_resume_terminator<'b>( &mut self, helper: TerminatorCodegenHelper<'b, 'tcx>, mut bx: Bx, ) { if let Some(funclet) = helper.funclet(self) { bx.cleanup_ret(funclet, None); } else { let slot = self.get_personality_slot(&mut bx); let lp0 = slot.project_field(&mut bx, 0); let lp0 = bx.load_operand(lp0).immediate(); let lp1 = slot.project_field(&mut bx, 1); let lp1 = bx.load_operand(lp1).immediate(); slot.storage_dead(&mut bx); if !bx.sess().target.target.options.custom_unwind_resume { let mut lp = bx.const_undef(self.landing_pad_type()); lp = bx.insert_value(lp, lp0, 0); lp = bx.insert_value(lp, lp1, 1); bx.resume(lp); } else { bx.call(bx.eh_unwind_resume(), &[lp0], helper.funclet(self)); bx.unreachable(); } } } fn codegen_switchint_terminator<'b>( &mut self, helper: TerminatorCodegenHelper<'b, 'tcx>, mut bx: Bx, discr: &mir::Operand<'tcx>, switch_ty: Ty<'tcx>, values: &Cow<'tcx, [u128]>, targets: &Vec<mir::BasicBlock>, ) { let discr = self.codegen_operand(&mut bx, &discr); if targets.len() == 2 { // If there are two targets, emit br instead of switch let lltrue = helper.llblock(self, targets[0]); let llfalse = helper.llblock(self, targets[1]); if switch_ty == bx.tcx().types.bool { // Don't generate trivial icmps when switching on bool if let [0] = values[..] 
{ bx.cond_br(discr.immediate(), llfalse, lltrue); } else { assert_eq!(&values[..], &[1]); bx.cond_br(discr.immediate(), lltrue, llfalse); } } else { let switch_llty = bx.immediate_backend_type( bx.layout_of(switch_ty) ); let llval = bx.const_uint_big(switch_llty, values[0]); let cmp = bx.icmp(IntPredicate::IntEQ, discr.immediate(), llval); bx.cond_br(cmp, lltrue, llfalse); } } else { let (otherwise, targets) = targets.split_last().unwrap(); bx.switch( discr.immediate(), helper.llblock(self, *otherwise), values.iter().zip(targets).map(|(&value, target)| { (value, helper.llblock(self, *target)) }) ); } } fn codegen_return_terminator<'b>( &mut self, mut bx: Bx, ) { if self.fn_ty.c_variadic { match self.va_list_ref { Some(va_list) => { bx.va_end(va_list.llval); } None => { bug!("C-variadic function must have a `va_list_ref`"); } } } let llval = match self.fn_ty.ret.mode { PassMode::Ignore(IgnoreMode::Zst) | PassMode::Indirect(..) => { bx.ret_void(); return; } PassMode::Ignore(IgnoreMode::CVarArgs) => { bug!("C-variadic arguments should never be the return type"); } PassMode::Direct(_) | PassMode::Pair(..) => { let op = self.codegen_consume(&mut bx, &mir::Place::RETURN_PLACE); if let Ref(llval, _, align) = op.val { bx.load(llval, align) } else { op.immediate_or_packed_pair(&mut bx) } } PassMode::Cast(cast_ty) => { let op = match self.locals[mir::RETURN_PLACE] { LocalRef::Operand(Some(op)) => op, LocalRef::Operand(None) => bug!("use of return before def"), LocalRef::Place(cg_place) => { OperandRef { val: Ref(cg_place.llval, None, cg_place.align), layout: cg_place.layout } } LocalRef::UnsizedPlace(_) => bug!("return type must be sized"), }; let llslot = match op.val { Immediate(_) | Pair(..) => { let scratch = PlaceRef::alloca(&mut bx, self.fn_ty.ret.layout, "ret"); op.val.store(&mut bx, scratch); scratch.llval } Ref(llval, _, align) => { assert_eq!(align, op.layout.align.abi, "return place is unaligned!"); llval } }; let addr = bx.pointercast(llslot, bx.type_ptr_to( bx.cast_backend_type(&cast_ty) )); bx.load(addr, self.fn_ty.ret.layout.align.abi) } }; bx.ret(llval); } fn codegen_drop_terminator<'b>( &mut self, helper: TerminatorCodegenHelper<'b, 'tcx>, mut bx: Bx, location: &mir::Place<'tcx>, target: mir::BasicBlock, unwind: Option<mir::BasicBlock>, ) { let ty = location.ty(self.mir, bx.tcx()).to_ty(bx.tcx()); let ty = self.monomorphize(&ty); let drop_fn = monomorphize::resolve_drop_in_place(bx.tcx(), ty); if let ty::InstanceDef::DropGlue(_, None) = drop_fn.def { // we don't actually need to drop anything. helper.funclet_br(self, &mut bx, target); return } let place = self.codegen_place(&mut bx, location); let (args1, args2); let mut args = if let Some(llextra) = place.llextra { args2 = [place.llval, llextra]; &args2[..] } else { args1 = [place.llval]; &args1[..] }; let (drop_fn, fn_ty) = match ty.sty { ty::Dynamic(..) 
            => {
                let sig = drop_fn.fn_sig(self.cx.tcx());
                let sig = self.cx.tcx().normalize_erasing_late_bound_regions(
                    ty::ParamEnv::reveal_all(),
                    &sig,
                );
                let fn_ty = bx.new_vtable(sig, &[]);
                let vtable = args[1];
                args = &args[..1];
                (meth::DESTRUCTOR.get_fn(&mut bx, vtable, &fn_ty), fn_ty)
            }
            _ => {
                (bx.get_fn(drop_fn),
                 bx.fn_type_of_instance(&drop_fn))
            }
        };
        helper.do_call(self, &mut bx, fn_ty, drop_fn, args,
                       Some((ReturnDest::Nothing, target)),
                       unwind);
    }

    fn codegen_assert_terminator<'b>(
        &mut self,
        helper: TerminatorCodegenHelper<'b, 'tcx>,
        mut bx: Bx,
        terminator: &mir::Terminator<'tcx>,
        cond: &mir::Operand<'tcx>,
        expected: bool,
        msg: &mir::AssertMessage<'tcx>,
        target: mir::BasicBlock,
        cleanup: Option<mir::BasicBlock>,
    ) {
        let span = terminator.source_info.span;
        let cond = self.codegen_operand(&mut bx, cond).immediate();
        let mut const_cond = bx.const_to_opt_u128(cond, false).map(|c| c == 1);

        // This case can currently arise only from functions marked
        // with #[rustc_inherit_overflow_checks] and inlined from
        // another crate (mostly core::num generic/#[inline] fns),
        // while the current crate doesn't use overflow checks.
        // NOTE: Unlike binops, negation doesn't have its own
        // checked operation, just a comparison with the minimum
        // value, so we have to check for the assert message.
        if !bx.check_overflow() {
            if let mir::interpret::EvalErrorKind::OverflowNeg = *msg {
                const_cond = Some(expected);
            }
        }

        // Don't codegen the panic block if success is known.
        if const_cond == Some(expected) {
            helper.funclet_br(self, &mut bx, target);
            return;
        }

        // Pass the condition through llvm.expect for branch hinting.
        let cond = bx.expect(cond, expected);

        // Create the failure block and the conditional branch to it.
        let lltarget = helper.llblock(self, target);
        let panic_block = self.new_block("panic");
        if expected {
            bx.cond_br(cond, lltarget, panic_block.llbb());
        } else {
            bx.cond_br(cond, panic_block.llbb(), lltarget);
        }

        // After this point, bx is the block for the call to panic.
        bx = panic_block;
        self.set_debug_loc(&mut bx, terminator.source_info);

        // Get the location information.
        let loc = bx.sess().source_map().lookup_char_pos(span.lo());
        let filename = Symbol::intern(&loc.file.name.to_string()).as_str();
        let line = bx.const_u32(loc.line as u32);
        let col = bx.const_u32(loc.col.to_usize() as u32 + 1);

        // Put together the arguments to the panic entry point.
        let (lang_item, args) = match *msg {
            EvalErrorKind::BoundsCheck { ref len, ref index } => {
                let len = self.codegen_operand(&mut bx, len).immediate();
                let index = self.codegen_operand(&mut bx, index).immediate();

                let file_line_col = bx.static_panic_msg(
                    None,
                    filename,
                    line,
                    col,
                    "panic_bounds_check_loc",
                );
                (lang_items::PanicBoundsCheckFnLangItem,
                 vec![file_line_col, index, len])
            }
            _ => {
                let str = msg.description();
                let msg_str = Symbol::intern(str).as_str();
                let msg_file_line_col = bx.static_panic_msg(
                    Some(msg_str),
                    filename,
                    line,
                    col,
                    "panic_loc",
                );
                (lang_items::PanicFnLangItem,
                 vec![msg_file_line_col])
            }
        };

        // Obtain the panic entry point.
        let def_id = common::langcall(bx.tcx(), Some(span), "", lang_item);
        let instance = ty::Instance::mono(bx.tcx(), def_id);
        let fn_ty = bx.fn_type_of_instance(&instance);
        let llfn = bx.get_fn(instance);

        // Codegen the actual panic invoke/call.
        helper.do_call(self, &mut bx, fn_ty, llfn, &args, None, cleanup);
    }

    fn codegen_call_terminator<'b>(
        &mut self,
        helper: TerminatorCodegenHelper<'b, 'tcx>,
        mut bx: Bx,
        terminator: &mir::Terminator<'tcx>,
        func: &mir::Operand<'tcx>,
        args: &Vec<mir::Operand<'tcx>>,
        destination: &Option<(mir::Place<'tcx>, mir::BasicBlock)>,
        cleanup: Option<mir::BasicBlock>,
    ) {
        let span = terminator.source_info.span;

        // Create the callee. This is a fn ptr or zero-sized and hence a kind of scalar.
        let callee = self.codegen_operand(&mut bx, func);

        let (instance, mut llfn) = match callee.layout.ty.sty {
            ty::FnDef(def_id, substs) => {
                (Some(ty::Instance::resolve(bx.tcx(),
                                            ty::ParamEnv::reveal_all(),
                                            def_id,
                                            substs).unwrap()),
                 None)
            }
            ty::FnPtr(_) => {
                (None, Some(callee.immediate()))
            }
            _ => bug!("{} is not callable", callee.layout.ty),
        };
        let def = instance.map(|i| i.def);
        let sig = callee.layout.ty.fn_sig(bx.tcx());
        let sig = bx.tcx().normalize_erasing_late_bound_regions(
            ty::ParamEnv::reveal_all(),
            &sig,
        );
        let abi = sig.abi;

        // Handle intrinsics that old codegen wants Expr's for, ourselves.
        let intrinsic = match def {
            Some(ty::InstanceDef::Intrinsic(def_id)) =>
                Some(bx.tcx().item_name(def_id).as_str()),
            _ => None
        };
        let intrinsic = intrinsic.as_ref().map(|s| &s[..]);

        if intrinsic == Some("transmute") {
            if let Some(destination_ref) = destination.as_ref() {
                let &(ref dest, target) = destination_ref;
                self.codegen_transmute(&mut bx, &args[0], dest);
                helper.funclet_br(self, &mut bx, target);
            } else {
                // If we are trying to transmute to an uninhabited type,
                // it is likely there is no allotted destination. In fact,
                // transmuting to an uninhabited type is UB, which means
                // we can do what we like. Here, we declare that transmuting
                // into an uninhabited type is impossible, so anything following
                // it must be unreachable.
                assert_eq!(bx.layout_of(sig.output()).abi, layout::Abi::Uninhabited);
                bx.unreachable();
            }
            return;
        }

        // The "spoofed" `VaList` added to a C-variadic function's signature
        // should not be included in the `extra_args` calculation.
        let extra_args_start_idx = sig.inputs().len() - if sig.c_variadic { 1 } else { 0 };
        let extra_args = &args[extra_args_start_idx..];
        let extra_args = extra_args.iter().map(|op_arg| {
            let op_ty = op_arg.ty(self.mir, bx.tcx());
            self.monomorphize(&op_ty)
        }).collect::<Vec<_>>();

        let fn_ty = match def {
            Some(ty::InstanceDef::Virtual(..)) => {
                bx.new_vtable(sig, &extra_args)
            }
            Some(ty::InstanceDef::DropGlue(_, None)) => {
                // Empty drop glue; a no-op.
                let &(_, target) = destination.as_ref().unwrap();
                helper.funclet_br(self, &mut bx, target);
                return;
            }
            _ => bx.new_fn_type(sig, &extra_args)
        };

        // Emit a panic or a no-op for `panic_if_uninhabited`.
        if intrinsic == Some("panic_if_uninhabited") {
            let ty = instance.unwrap().substs.type_at(0);
            let layout = bx.layout_of(ty);
            if layout.abi.is_uninhabited() {
                let loc = bx.sess().source_map().lookup_char_pos(span.lo());
                let filename = Symbol::intern(&loc.file.name.to_string()).as_str();
                let line = bx.const_u32(loc.line as u32);
                let col = bx.const_u32(loc.col.to_usize() as u32 + 1);

                let str = format!(
                    "Attempted to instantiate uninhabited type {}",
                    ty
                );
                let msg_str = Symbol::intern(&str).as_str();
                let msg_file_line_col = bx.static_panic_msg(
                    Some(msg_str),
                    filename,
                    line,
                    col,
                    "panic_loc",
                );

                // Obtain the panic entry point.
let def_id = common::langcall(bx.tcx(), Some(span), "", lang_items::PanicFnLangItem); let instance = ty::Instance::mono(bx.tcx(), def_id); let fn_ty = bx.fn_type_of_instance(&instance); let llfn = bx.get_fn(instance); // Codegen the actual panic invoke/call. helper.do_call( self, &mut bx, fn_ty, llfn, &[msg_file_line_col], destination.as_ref().map(|(_, bb)| (ReturnDest::Nothing, *bb)), cleanup, ); } else { // a NOP helper.funclet_br(self, &mut bx, destination.as_ref().unwrap().1) } return; } // The arguments we'll be passing. Plus one to account for outptr, if used. let arg_count = fn_ty.args.len() + fn_ty.ret.is_indirect() as usize; let mut llargs = Vec::with_capacity(arg_count); // Prepare the return value destination let ret_dest = if let Some((ref dest, _)) = *destination { let is_intrinsic = intrinsic.is_some(); self.make_return_dest(&mut bx, dest, &fn_ty.ret, &mut llargs, is_intrinsic) } else { ReturnDest::Nothing }; if intrinsic.is_some() && intrinsic != Some("drop_in_place") { let dest = match ret_dest { _ if fn_ty.ret.is_indirect() => llargs[0], ReturnDest::Nothing => bx.const_undef(bx.type_ptr_to(bx.memory_ty(&fn_ty.ret))), ReturnDest::IndirectOperand(dst, _) | ReturnDest::Store(dst) => dst.llval, ReturnDest::DirectOperand(_) => bug!("Cannot use direct operand with an intrinsic call"), }; let args: Vec<_> = args.iter().enumerate().map(|(i, arg)| { // The indices passed to simd_shuffle* in the // third argument must be constant. This is // checked by const-qualification, which also // promotes any complex rvalues to constants. if i == 2 && intrinsic.unwrap().starts_with("simd_shuffle") { match *arg { // The shuffle array argument is usually not an explicit constant, // but specified directly in the code. This means it gets promoted // and we can then extract the value by evaluating the promoted. mir::Operand::Copy( Place::Base( PlaceBase::Static( box Static { kind: StaticKind::Promoted(promoted), ty } ) ) ) | mir::Operand::Move( Place::Base( PlaceBase::Static( box Static { kind: StaticKind::Promoted(promoted), ty } ) ) ) => { let param_env = ty::ParamEnv::reveal_all(); let cid = mir::interpret::GlobalId { instance: self.instance, promoted: Some(promoted), }; let c = bx.tcx().const_eval(param_env.and(cid)); let (llval, ty) = self.simd_shuffle_indices( &bx, terminator.source_info.span, ty, c, ); return OperandRef { val: Immediate(llval), layout: bx.layout_of(ty), }; } mir::Operand::Copy(_) | mir::Operand::Move(_) => { span_bug!(span, "shuffle indices must be constant"); } mir::Operand::Constant(ref constant) => { let c = self.eval_mir_constant(constant); let (llval, ty) = self.simd_shuffle_indices( &bx, constant.span, constant.ty, c, ); return OperandRef { val: Immediate(llval), layout: bx.layout_of(ty) }; } } } self.codegen_operand(&mut bx, arg) }).collect(); let callee_ty = instance.as_ref().unwrap().ty(bx.tcx()); bx.codegen_intrinsic_call(callee_ty, &fn_ty, &args, dest, terminator.source_info.span); if let ReturnDest::IndirectOperand(dst, _) = ret_dest { self.store_return(&mut bx, ret_dest, &fn_ty.ret, dst.llval); } if let Some((_, target)) = *destination { helper.funclet_br(self, &mut bx, target); } else { bx.unreachable(); } return; } // Split the rust-call tupled arguments off. 
        let (first_args, untuple) = if abi == Abi::RustCall && !args.is_empty() {
            let (tup, args) = args.split_last().unwrap();
            (args, Some(tup))
        } else {
            (&args[..], None)
        };

        // Useful for determining if the current argument is the "spoofed" `VaList`.
        let last_arg_idx = if sig.inputs().is_empty() {
            None
        } else {
            Some(sig.inputs().len() - 1)
        };
        'make_args: for (i, arg) in first_args.iter().enumerate() {
            // If this is a C-variadic function, the function signature contains
            // a "spoofed" `VaList`. This argument is ignored, but we need to
            // populate it with a dummy operand so that the user's real arguments
            // are not overwritten.
            let i = if sig.c_variadic && last_arg_idx.map(|x| i >= x).unwrap_or(false) {
                if i + 1 < fn_ty.args.len() {
                    i + 1
                } else {
                    break 'make_args
                }
            } else {
                i
            };
            let mut op = self.codegen_operand(&mut bx, arg);

            if let (0, Some(ty::InstanceDef::Virtual(_, idx))) = (i, def) {
                if let Pair(..) = op.val {
                    // In the case of Rc<Self>, we need to explicitly pass a
                    // *mut RcBox<Self> with a Scalar (not ScalarPair) ABI. This is a hack
                    // that is understood elsewhere in the compiler as a method on
                    // `dyn Trait`.
                    // To get a `*mut RcBox<Self>`, we just keep unwrapping newtypes until
                    // we get a value of a built-in pointer type.
                    'descend_newtypes: while !op.layout.ty.is_unsafe_ptr()
                                    && !op.layout.ty.is_region_ptr()
                    {
                        'iter_fields: for i in 0..op.layout.fields.count() {
                            let field = op.extract_field(&mut bx, i);
                            if !field.layout.is_zst() {
                                // We found the one non-zero-sized field that is allowed;
                                // now find *its* non-zero-sized field, or stop if it's a
                                // pointer.
                                op = field;
                                continue 'descend_newtypes
                            }
                        }

                        span_bug!(span, "receiver has no non-zero-sized fields {:?}", op);
                    }

                    // Now that we have `*dyn Trait` or `&dyn Trait`, split it up into its
                    // data pointer and vtable. Look up the method in the vtable, and pass
                    // the data pointer as the first argument.
                    match op.val {
                        Pair(data_ptr, meta) => {
                            llfn = Some(meth::VirtualIndex::from_index(idx)
                                .get_fn(&mut bx, meta, &fn_ty));
                            llargs.push(data_ptr);
                            continue 'make_args
                        }
                        other => bug!("expected a Pair, got {:?}", other),
                    }
                } else if let Ref(data_ptr, Some(meta), _) = op.val {
                    // By-value dynamic dispatch.
                    llfn = Some(meth::VirtualIndex::from_index(idx)
                        .get_fn(&mut bx, meta, &fn_ty));
                    llargs.push(data_ptr);
                    continue;
                } else {
                    span_bug!(span, "can't codegen a virtual call on {:?}", op);
                }
            }

            // The callee needs to own the argument memory if we pass it
            // by-ref, so make a local copy of non-immediate constants.
match (arg, op.val) { (&mir::Operand::Copy(_), Ref(_, None, _)) | (&mir::Operand::Constant(_), Ref(_, None, _)) => { let tmp = PlaceRef::alloca(&mut bx, op.layout, "const"); op.val.store(&mut bx, tmp); op.val = Ref(tmp.llval, None, tmp.align); } _ => {} } self.codegen_argument(&mut bx, op, &mut llargs, &fn_ty.args[i]); } if let Some(tup) = untuple { self.codegen_arguments_untupled(&mut bx, tup, &mut llargs, &fn_ty.args[first_args.len()..]) } let fn_ptr = match (llfn, instance) { (Some(llfn), _) => llfn, (None, Some(instance)) => bx.get_fn(instance), _ => span_bug!(span, "no llfn for call"), }; helper.do_call(self, &mut bx, fn_ty, fn_ptr, &llargs, destination.as_ref().map(|&(_, target)| (ret_dest, target)), cleanup); } } impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { pub fn codegen_block( &mut self, bb: mir::BasicBlock, ) { let mut bx = self.build_block(bb); let data = &self.mir[bb]; debug!("codegen_block({:?}={:?})", bb, data); for statement in &data.statements { bx = self.codegen_statement(bx, statement); } self.codegen_terminator(bx, bb, data.terminator()); } fn codegen_terminator( &mut self, mut bx: Bx, bb: mir::BasicBlock, terminator: &mir::Terminator<'tcx> ) { debug!("codegen_terminator: {:?}", terminator); // Create the cleanup bundle, if needed. let funclet_bb = self.cleanup_kinds[bb].funclet_bb(bb); let helper = TerminatorCodegenHelper { bb: &bb, terminator, funclet_bb }; self.set_debug_loc(&mut bx, terminator.source_info); match terminator.kind { mir::TerminatorKind::Resume => { self.codegen_resume_terminator(helper, bx) } mir::TerminatorKind::Abort => { bx.abort(); bx.unreachable(); } mir::TerminatorKind::Goto { target } => { helper.funclet_br(self, &mut bx, target); } mir::TerminatorKind::SwitchInt { ref discr, switch_ty, ref values, ref targets } => { self.codegen_switchint_terminator(helper, bx, discr, switch_ty, values, targets); } mir::TerminatorKind::Return => { self.codegen_return_terminator(bx); } mir::TerminatorKind::Unreachable => { bx.unreachable(); } mir::TerminatorKind::Drop { ref location, target, unwind } => { self.codegen_drop_terminator(helper, bx, location, target, unwind); } mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, cleanup } => { self.codegen_assert_terminator(helper, bx, terminator, cond, expected, msg, target, cleanup); } mir::TerminatorKind::DropAndReplace { .. } => { bug!("undesugared DropAndReplace in codegen: {:?}", terminator); } mir::TerminatorKind::Call { ref func, ref args, ref destination, cleanup, from_hir_call: _ } => { self.codegen_call_terminator(helper, bx, terminator, func, args, destination, cleanup); } mir::TerminatorKind::GeneratorDrop | mir::TerminatorKind::Yield { .. } => bug!("generator ops in codegen"), mir::TerminatorKind::FalseEdges { .. } | mir::TerminatorKind::FalseUnwind { .. } => bug!("borrowck false edges in codegen"), } } fn codegen_argument( &mut self, bx: &mut Bx, op: OperandRef<'tcx, Bx::Value>, llargs: &mut Vec<Bx::Value>, arg: &ArgType<'tcx, Ty<'tcx>> ) { // Fill padding with undef value, where applicable. if let Some(ty) = arg.pad { llargs.push(bx.const_undef(bx.reg_backend_type(&ty))) } if arg.is_ignore() { return; } if let PassMode::Pair(..) 
= arg.mode { match op.val { Pair(a, b) => { llargs.push(a); llargs.push(b); return; } _ => bug!("codegen_argument: {:?} invalid for pair argument", op) } } else if arg.is_unsized_indirect() { match op.val { Ref(a, Some(b), _) => { llargs.push(a); llargs.push(b); return; } _ => bug!("codegen_argument: {:?} invalid for unsized indirect argument", op) } } // Force by-ref if we have to load through a cast pointer. let (mut llval, align, by_ref) = match op.val { Immediate(_) | Pair(..) => { match arg.mode { PassMode::Indirect(..) | PassMode::Cast(_) => { let scratch = PlaceRef::alloca(bx, arg.layout, "arg"); op.val.store(bx, scratch); (scratch.llval, scratch.align, true) } _ => { (op.immediate_or_packed_pair(bx), arg.layout.align.abi, false) } } } Ref(llval, _, align) => { if arg.is_indirect() && align < arg.layout.align.abi { // `foo(packed.large_field)`. We can't pass the (unaligned) field directly. I // think that ATM (Rust 1.16) we only pass temporaries, but we shouldn't // have scary latent bugs around. let scratch = PlaceRef::alloca(bx, arg.layout, "arg"); base::memcpy_ty(bx, scratch.llval, scratch.align, llval, align, op.layout, MemFlags::empty()); (scratch.llval, scratch.align, true) } else { (llval, align, true) } } }; if by_ref && !arg.is_indirect() { // Have to load the argument, maybe while casting it. if let PassMode::Cast(ty) = arg.mode { let addr = bx.pointercast(llval, bx.type_ptr_to( bx.cast_backend_type(&ty)) ); llval = bx.load(addr, align.min(arg.layout.align.abi)); } else { // We can't use `PlaceRef::load` here because the argument // may have a type we don't treat as immediate, but the ABI // used for this call is passing it by-value. In that case, // the load would just produce `OperandValue::Ref` instead // of the `OperandValue::Immediate` we need for the call. llval = bx.load(llval, align); if let layout::Abi::Scalar(ref scalar) = arg.layout.abi { if scalar.is_bool() { bx.range_metadata(llval, 0..2); } } // We store bools as i8 so we need to truncate to i1. llval = base::to_immediate(bx, llval, arg.layout); } } llargs.push(llval); } fn codegen_arguments_untupled( &mut self, bx: &mut Bx, operand: &mir::Operand<'tcx>, llargs: &mut Vec<Bx::Value>, args: &[ArgType<'tcx, Ty<'tcx>>] ) { let tuple = self.codegen_operand(bx, operand); // Handle both by-ref and immediate tuples. if let Ref(llval, None, align) = tuple.val { let tuple_ptr = PlaceRef::new_sized(llval, tuple.layout, align); for i in 0..tuple.layout.fields.count() { let field_ptr = tuple_ptr.project_field(bx, i); let field = bx.load_operand(field_ptr); self.codegen_argument(bx, field, llargs, &args[i]); } } else if let Ref(_, Some(_), _) = tuple.val { bug!("closure arguments must be sized") } else { // If the tuple is immediate, the elements are as well. for i in 0..tuple.layout.fields.count() { let op = tuple.extract_field(bx, i); self.codegen_argument(bx, op, llargs, &args[i]); } } } fn get_personality_slot( &mut self, bx: &mut Bx ) -> PlaceRef<'tcx, Bx::Value> { let cx = bx.cx(); if let Some(slot) = self.personality_slot { slot } else { let layout = cx.layout_of(cx.tcx().intern_tup(&[ cx.tcx().mk_mut_ptr(cx.tcx().types.u8), cx.tcx().types.i32 ])); let slot = PlaceRef::alloca(bx, layout, "personalityslot"); self.personality_slot = Some(slot); slot } } /// Returns the landing-pad wrapper around the given basic block. /// /// No-op in MSVC SEH scheme. 
    fn landing_pad_to(
        &mut self,
        target_bb: mir::BasicBlock
    ) -> Bx::BasicBlock {
        if let Some(block) = self.landing_pads[target_bb] {
            return block;
        }

        let block = self.blocks[target_bb];
        let landing_pad = self.landing_pad_uncached(block);
        self.landing_pads[target_bb] = Some(landing_pad);
        landing_pad
    }

    fn landing_pad_uncached(
        &mut self,
        target_bb: Bx::BasicBlock
    ) -> Bx::BasicBlock {
        if base::wants_msvc_seh(self.cx.sess()) {
            span_bug!(self.mir.span, "landing pad was not inserted?")
        }

        let mut bx = self.new_block("cleanup");

        let llpersonality = self.cx.eh_personality();
        let llretty = self.landing_pad_type();
        let lp = bx.landing_pad(llretty, llpersonality, 1);
        bx.set_cleanup(lp);

        let slot = self.get_personality_slot(&mut bx);
        slot.storage_live(&mut bx);
        Pair(bx.extract_value(lp, 0), bx.extract_value(lp, 1)).store(&mut bx, slot);

        bx.br(target_bb);
        bx.llbb()
    }

    fn landing_pad_type(&self) -> Bx::Type {
        let cx = self.cx;
        cx.type_struct(&[cx.type_i8p(), cx.type_i32()], false)
    }

    fn unreachable_block(
        &mut self
    ) -> Bx::BasicBlock {
        self.unreachable_block.unwrap_or_else(|| {
            let mut bx = self.new_block("unreachable");
            bx.unreachable();
            self.unreachable_block = Some(bx.llbb());
            bx.llbb()
        })
    }

    pub fn new_block(&self, name: &str) -> Bx {
        Bx::new_block(self.cx, self.llfn, name)
    }

    pub fn build_block(
        &self,
        bb: mir::BasicBlock
    ) -> Bx {
        let mut bx = Bx::with_cx(self.cx);
        bx.position_at_end(self.blocks[bb]);
        bx
    }

    fn make_return_dest(
        &mut self,
        bx: &mut Bx,
        dest: &mir::Place<'tcx>,
        fn_ret: &ArgType<'tcx, Ty<'tcx>>,
        llargs: &mut Vec<Bx::Value>, is_intrinsic: bool
    ) -> ReturnDest<'tcx, Bx::Value> {
        // If the return is ignored, we can just return a do-nothing `ReturnDest`.
        if fn_ret.is_ignore() {
            return ReturnDest::Nothing;
        }
        let dest = if let mir::Place::Base(mir::PlaceBase::Local(index)) = *dest {
            match self.locals[index] {
                LocalRef::Place(dest) => dest,
                LocalRef::UnsizedPlace(_) => bug!("return type must be sized"),
                LocalRef::Operand(None) => {
                    // Handle temporary places, specifically `Operand` ones, as
                    // they don't have allocas.
                    return if fn_ret.is_indirect() {
                        // Odd, but possible, case: we have an operand temporary,
                        // but the calling convention has an indirect return.
                        let tmp = PlaceRef::alloca(bx, fn_ret.layout, "tmp_ret");
                        tmp.storage_live(bx);
                        llargs.push(tmp.llval);
                        ReturnDest::IndirectOperand(tmp, index)
                    } else if is_intrinsic {
                        // Currently, intrinsics always need a location to store
                        // the result, so we create a temporary alloca for the
                        // result.
                        let tmp = PlaceRef::alloca(bx, fn_ret.layout, "tmp_ret");
                        tmp.storage_live(bx);
                        ReturnDest::IndirectOperand(tmp, index)
                    } else {
                        ReturnDest::DirectOperand(index)
                    };
                }
                LocalRef::Operand(Some(_)) => {
                    bug!("place local already assigned to");
                }
            }
        } else {
            self.codegen_place(bx, dest)
        };
        if fn_ret.is_indirect() {
            if dest.align < dest.layout.align.abi {
                // Currently, MIR code generation does not create calls
                // that store directly to fields of packed structs (in
                // fact, the calls it creates write only to temps).
                //
                // If someone changes that, please update this code path
                // to create a temporary.
                span_bug!(self.mir.span, "can't directly store to unaligned value");
            }
            llargs.push(dest.llval);
            ReturnDest::Nothing
        } else {
            ReturnDest::Store(dest)
        }
    }

    fn codegen_transmute(
        &mut self,
        bx: &mut Bx,
        src: &mir::Operand<'tcx>,
        dst: &mir::Place<'tcx>
    ) {
        if let mir::Place::Base(mir::PlaceBase::Local(index)) = *dst {
            match self.locals[index] {
                LocalRef::Place(place) => self.codegen_transmute_into(bx, src, place),
                LocalRef::UnsizedPlace(_) => bug!("transmute must not involve unsized locals"),
                LocalRef::Operand(None) => {
                    let dst_layout = bx.layout_of(self.monomorphized_place_ty(dst));
                    assert!(!dst_layout.ty.has_erasable_regions());
                    let place = PlaceRef::alloca(bx, dst_layout, "transmute_temp");
                    place.storage_live(bx);
                    self.codegen_transmute_into(bx, src, place);
                    let op = bx.load_operand(place);
                    place.storage_dead(bx);
                    self.locals[index] = LocalRef::Operand(Some(op));
                }
                LocalRef::Operand(Some(op)) => {
                    assert!(op.layout.is_zst(),
                            "assigning to initialized SSA temp");
                }
            }
        } else {
            let dst = self.codegen_place(bx, dst);
            self.codegen_transmute_into(bx, src, dst);
        }
    }

    fn codegen_transmute_into(
        &mut self,
        bx: &mut Bx,
        src: &mir::Operand<'tcx>,
        dst: PlaceRef<'tcx, Bx::Value>
    ) {
        let src = self.codegen_operand(bx, src);
        let llty = bx.backend_type(src.layout);
        let cast_ptr = bx.pointercast(dst.llval, bx.type_ptr_to(llty));
        let align = src.layout.align.abi.min(dst.align);
        src.val.store(bx, PlaceRef::new_sized(cast_ptr, src.layout, align));
    }

    // Stores the return value of a function call into its final location.
    fn store_return(
        &mut self,
        bx: &mut Bx,
        dest: ReturnDest<'tcx, Bx::Value>,
        ret_ty: &ArgType<'tcx, Ty<'tcx>>,
        llval: Bx::Value
    ) {
        use self::ReturnDest::*;

        match dest {
            Nothing => (),
            Store(dst) => bx.store_arg_ty(&ret_ty, llval, dst),
            IndirectOperand(tmp, index) => {
                let op = bx.load_operand(tmp);
                tmp.storage_dead(bx);
                self.locals[index] = LocalRef::Operand(Some(op));
            }
            DirectOperand(index) => {
                // If there is a cast, we have to store and reload.
                let op = if let PassMode::Cast(_) = ret_ty.mode {
                    let tmp = PlaceRef::alloca(bx, ret_ty.layout, "tmp_ret");
                    tmp.storage_live(bx);
                    bx.store_arg_ty(&ret_ty, llval, tmp);
                    let op = bx.load_operand(tmp);
                    tmp.storage_dead(bx);
                    op
                } else {
                    OperandRef::from_immediate_or_packed_pair(bx, llval, ret_ty.layout)
                };
                self.locals[index] = LocalRef::Operand(Some(op));
            }
        }
    }
}

enum ReturnDest<'tcx, V> {
    // Do nothing; the return value is indirect or ignored.
    Nothing,
    // Store the return value to the pointer.
    Store(PlaceRef<'tcx, V>),
    // Store an indirect return value to an operand local place.
    IndirectOperand(PlaceRef<'tcx, V>, mir::Local),
    // Store a direct return value to an operand local place.
    DirectOperand(mir::Local)
}
39.229402
97
0.486287
62163e156edee0f12e9241f7b9cf0773fae99e27
1,010
// Take a look at the license at the top of the repository in the LICENSE file. use crate::AxisFlags; use glib::translate::*; use std::fmt; glib::wrapper! { #[doc(alias = "GdkTimeCoord")] pub struct TimeCoord(BoxedInline<ffi::GdkTimeCoord>); } impl TimeCoord { pub fn new(time: u32, axes: [f64; 12], flags: AxisFlags) -> Self { assert_initialized_main_thread!(); Self(ffi::GdkTimeCoord { time, axes, flags: flags.into_glib(), }) } pub fn time(&self) -> u32 { self.0.time } pub fn axes(&self) -> &[f64; 12] { &self.0.axes } pub fn flags(&self) -> AxisFlags { unsafe { from_glib(self.0.flags) } } } impl fmt::Debug for TimeCoord { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("TimeCoord") .field("time", &self.time()) .field("axes", &self.axes()) .field("flags", &self.flags()) .finish() } }
22.954545
79
0.541584
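The `TimeCoord` record above wraps `ffi::GdkTimeCoord` inline, so constructing one is just filling the fixed-size C struct. A minimal usage sketch — illustrative only, assuming the gdk (gtk-rs GDK 4) crate is available and GDK has been initialized on the main thread, since `new` asserts that; the axis slot index used here is hypothetical:

```
use gdk::{AxisFlags, TimeCoord};

fn demo_time_coord() {
    // GdkTimeCoord always carries 12 axis slots; `flags` says which are meaningful.
    let mut axes = [0.0f64; 12];
    axes[1] = 42.5; // hypothetical slot for the X axis
    let coord = TimeCoord::new(1000, axes, AxisFlags::X);
    assert_eq!(coord.time(), 1000);
    assert_eq!(coord.axes()[1], 42.5);
}
```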
f813c8fd9b7c3bfebf960af0dbdf0d31a92c7bfa
1,835
use solana_sdk::{ instruction::InstructionError, process_instruction::InvokeContext, pubkey::Pubkey, }; pub fn process_instruction( _program_id: &Pubkey, _data: &[u8], _invoke_context: &mut dyn InvokeContext, ) -> Result<(), InstructionError> { // Should be already checked by now. Ok(()) } #[cfg(test)] pub mod test { use rand::{thread_rng, Rng}; use solana_sdk::{ hash::Hash, secp256k1_instruction::{ new_secp256k1_instruction, SecpSignatureOffsets, SIGNATURE_OFFSETS_SERIALIZED_SIZE, }, signature::{Keypair, Signer}, transaction::Transaction, }; #[test] fn test_secp256k1() { solana_logger::setup(); let offsets = SecpSignatureOffsets::default(); assert_eq!( bincode::serialized_size(&offsets).unwrap() as usize, SIGNATURE_OFFSETS_SERIALIZED_SIZE ); let secp_privkey = libsecp256k1::SecretKey::random(&mut thread_rng()); let message_arr = b"hello"; let mut secp_instruction = new_secp256k1_instruction(&secp_privkey, message_arr); let mint_keypair = Keypair::new(); let tx = Transaction::new_signed_with_payer( &[secp_instruction.clone()], Some(&mint_keypair.pubkey()), &[&mint_keypair], Hash::default(), ); assert!(tx.verify_precompiles().is_ok()); let index = thread_rng().gen_range(0, secp_instruction.data.len()); secp_instruction.data[index] = secp_instruction.data[index].wrapping_add(12); let tx = Transaction::new_signed_with_payer( &[secp_instruction], Some(&mint_keypair.pubkey()), &[&mint_keypair], Hash::default(), ); assert!(tx.verify_precompiles().is_err()); } }
30.583333
95
0.613624
ac0bcd32e2b6a43a25e295ddfe12f9d126d32047
1,772
// Copyright (c) The Libra Core Contributors // SPDX-License-Identifier: Apache-2.0 use crate::config::SafetyRulesConfig; use serde::{Deserialize, Serialize}; use std::path::PathBuf; #[derive(Clone, Debug, Deserialize, PartialEq, Serialize)] #[serde(default, deny_unknown_fields)] pub struct ConsensusConfig { pub contiguous_rounds: u32, pub max_block_size: u64, pub max_pruned_blocks_in_mem: usize, pub round_initial_timeout_ms: u64, pub proposer_type: ConsensusProposerType, pub safety_rules: SafetyRulesConfig, } impl Default for ConsensusConfig { fn default() -> ConsensusConfig { ConsensusConfig { contiguous_rounds: 2, max_block_size: 1000, max_pruned_blocks_in_mem: 10000, round_initial_timeout_ms: 1000, proposer_type: ConsensusProposerType::LeaderReputation(LeaderReputationConfig { active_weights: 99, inactive_weights: 1, }), safety_rules: SafetyRulesConfig::default(), } } } impl ConsensusConfig { pub fn set_data_dir(&mut self, data_dir: PathBuf) { self.safety_rules.set_data_dir(data_dir); } } #[derive(Clone, Copy, Debug, Deserialize, Eq, PartialEq, Serialize)] #[serde(rename_all = "snake_case", tag = "type")] pub enum ConsensusProposerType { // Choose the smallest PeerId as the proposer FixedProposer, // Round robin rotation of proposers RotatingProposer, // Committed history based proposer election LeaderReputation(LeaderReputationConfig), } #[derive(Clone, Copy, Debug, Deserialize, Eq, PartialEq, Serialize)] #[serde(deny_unknown_fields)] pub struct LeaderReputationConfig { pub active_weights: u64, pub inactive_weights: u64, }
30.551724
91
0.698081
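Because `ConsensusProposerType` is internally tagged (`tag = "type"`, `rename_all = "snake_case"`), a serialized default config carries a `type` field naming the variant. A round-trip sketch — illustrative only, assuming a serde_json dev-dependency purely for demonstration (the actual on-disk config format is not shown in this file):

```
fn demo_config_roundtrip() {
    // Serialize the default config and read it back; equality holds because
    // ConsensusConfig derives PartialEq.
    let config = ConsensusConfig::default();
    let json = serde_json::to_string_pretty(&config).unwrap();
    assert!(json.contains("\"type\": \"leader_reputation\""));
    let back: ConsensusConfig = serde_json::from_str(&json).unwrap();
    assert_eq!(config, back);
}
```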
14ed525d478cf2d9316bb278e9a295f2c0753902
7,994
extern crate thread_id;

// Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under both the MIT license found in the
// LICENSE-MIT file in the root directory of this source tree and the Apache
// License, Version 2.0 found in the LICENSE-APACHE file in the root directory
// of this source tree.

use akd::ecvrf::VRFKeyStorage;
use akd::storage::types::{AkdLabel, AkdValue};
use akd::Directory;
use log::{info, Level, Metadata, Record};
use once_cell::sync::OnceCell;
use rand::distributions::Alphanumeric;
use rand::seq::IteratorRandom;
use rand::{thread_rng, Rng};
use std::fs::File;
use std::io;
use std::io::Write;
use std::path::Path;
use std::sync::Mutex;
use tokio::time::{Duration, Instant};
use winter_crypto::hashers::Blake3_256;
use winter_math::fields::f128::BaseElement;

type Blake3 = Blake3_256<BaseElement>;

static EPOCH: OnceCell<Instant> = OnceCell::new();
static LOG: OnceCell<u64> = OnceCell::new();

// ================== Logging ================== //

pub(crate) fn log_init(level: Level) {
    EPOCH.get_or_init(Instant::now);
    LOG.get_or_init(|| {
        if let Ok(logger) = FileLogger::new(String::from("integration_test.log")) {
            let loggers: Vec<Box<dyn log::Log>> = vec![Box::new(logger)];
            let mlogger = multi_log::MultiLogger::new(loggers);
            log::set_max_level(level.to_level_filter());
            if let Err(error) = log::set_boxed_logger(Box::new(mlogger)) {
                panic!("Error initializing multi-logger {}", error);
            }
        } else {
            panic!("Error creating file logger!");
        }
        0
    });
}

pub(crate) fn format_log_record(io: &mut (dyn Write + Send), record: &Record) {
    let target = {
        if let Some(target_str) = record.target().split(':').last() {
            if let Some(line) = record.line() {
                format!(" ({}:{})", target_str, line)
            } else {
                format!(" ({})", target_str)
            }
        } else {
            "".to_string()
        }
    };

    let toc = if let Some(epoch) = EPOCH.get() {
        Instant::now() - *epoch
    } else {
        Duration::from_millis(0)
    };

    let seconds = toc.as_secs();
    let hours = seconds / 3600;
    let minutes = (seconds / 60) % 60;
    let seconds = seconds % 60;
    let milliseconds = toc.subsec_millis();

    let msg = format!(
        "[{:02}:{:02}:{:02}.{:03}] ({:x}) {:6} {}{}",
        hours,
        minutes,
        seconds,
        milliseconds,
        thread_id::get(),
        record.level(),
        record.args(),
        target
    );
    let _ = writeln!(io, "{}", msg);
}

pub(crate) struct FileLogger {
    sink: Mutex<File>,
}

impl FileLogger {
    pub(crate) fn new<T: AsRef<Path>>(path: T) -> io::Result<Self> {
        let file = File::create(path)?;
        Ok(Self {
            sink: Mutex::new(file),
        })
    }
}

impl log::Log for FileLogger {
    fn enabled(&self, _metadata: &Metadata) -> bool {
        // Use the global log level.
        true
    }

    fn log(&self, record: &Record) {
        if !self.enabled(record.metadata()) {
            return;
        }

        let mut sink = &*self.sink.lock().unwrap();
        format_log_record(&mut sink, record);
    }

    fn flush(&self) {
        let _ = std::io::stdout().flush();
    }
}

// ================== Test Helpers ================== //

pub(crate) async fn test_lookups<S: akd::storage::Storage + Sync + Send, V: VRFKeyStorage>(
    mysql_db: &S,
    vrf: &V,
    num_users: u64,
    num_epochs: u64,
    num_lookups: usize,
) {
    // Generate the test data.
    let mut rng = thread_rng();

    let mut users: Vec<String> = vec![];
    for _ in 0..num_users {
        users.push(
            thread_rng()
                .sample_iter(&Alphanumeric)
                .take(30)
                .map(char::from)
                .collect(),
        );
    }

    // Create & test the directory.
    let maybe_dir = Directory::<_, _>::new::<Blake3>(mysql_db, vrf, false).await;
    match maybe_dir {
        Err(akd_error) => panic!("Error initializing directory: {:?}", akd_error),
        Ok(dir) => {
            info!("AKD Directory started. 
Beginning tests"); // Publish `num_epochs` epochs of user material for i in 1..=num_epochs { let mut data = Vec::new(); for value in users.iter() { data.push(( AkdLabel::from_utf8_str(value), AkdValue(format!("{}", i).as_bytes().to_vec()), )); } if let Err(error) = dir.publish::<Blake3>(data).await { panic!("Error publishing batch {:?}", error); } else { info!("Published epoch {}", i); } } // Perform `num_lookup` random lookup proofs on the published users let azks = dir.retrieve_current_azks().await.unwrap(); let root_hash = dir.get_root_hash::<Blake3>(&azks).await.unwrap(); // Pick a set of users to lookup let mut labels = Vec::new(); for user in users.iter().choose_multiple(&mut rng, num_lookups) { let label = AkdLabel::from_utf8_str(user); labels.push(label); } println!("Metrics after publish(es)."); reset_mysql_db::<S>(mysql_db).await; let start = Instant::now(); // Lookup selected users one by one for label in labels.clone() { match dir.lookup::<Blake3>(label.clone()).await { Err(error) => panic!("Error looking up user information {:?}", error), Ok(proof) => { let vrf_pk = dir.get_public_key().await.unwrap(); if let Err(error) = akd::client::lookup_verify::<Blake3>(&vrf_pk, root_hash, label, proof) { panic!("Lookup proof failed to verify {:?}", error); } } } } println!( "Individual {} lookups took {}ms.", num_lookups, start.elapsed().as_millis() ); println!("Metrics after individual lookups:"); reset_mysql_db::<S>(mysql_db).await; let start = Instant::now(); // Bulk lookup selected users match dir.batch_lookup::<Blake3>(&labels).await { Err(error) => panic!("Error batch looking up user information {:?}", error), Ok(proofs) => { assert_eq!(labels.len(), proofs.len()); let vrf_pk = dir.get_public_key().await.unwrap(); for i in 0..proofs.len() { let label = labels[i].clone(); let proof = proofs[i].clone(); if let Err(error) = akd::client::lookup_verify::<Blake3>(&vrf_pk, root_hash, label, proof) { panic!("Batch lookup failed to verify for index {} {:?}", i, error); } } } } println!( "Bulk {} lookups took {}ms.", num_lookups, start.elapsed().as_millis() ); println!("Metrics after lookup proofs: "); reset_mysql_db::<S>(mysql_db).await; } } } // Reset MySQL database by logging metrics which resets the metrics, and flushing cache. // These allow us to accurately assess the additional efficiency of // bulk lookup proofs. async fn reset_mysql_db<S: akd::storage::Storage + Sync + Send>(mysql_db: &S) { mysql_db.log_metrics(Level::Trace).await; mysql_db.flush_cache().await; }
32.364372
98
0.511759
6230b97d4281a6dc98697ec3eecc468b88014d9e
4,183
#[cfg(feature = "tester")]
mod common;

#[cfg(feature = "tester")]
use common::{client_addr, Client, Server, ServerEvent};
use laminar::{DeliveryGuarantee, OrderingGuarantee, Packet};
use log::{debug, error, info};
use std::net::SocketAddr;
use std::{thread, time::Duration};

#[test]
#[cfg(feature = "tester")]
fn send_receive_unreliable_packets() {
    let client_addr = client_addr();
    let listen_addr: SocketAddr = "127.0.0.1:12346".parse().unwrap();
    let server = Server::new(listen_addr);

    let client = Client::new(Duration::from_millis(1), 5000);

    let assert_function = move |packet: Packet| {
        assert_eq!(packet.order_guarantee(), OrderingGuarantee::None);
        assert_eq!(packet.delivery_guarantee(), DeliveryGuarantee::Unreliable);
        assert_eq!(packet.payload(), payload().as_slice());
    };

    let packet_factory = move || -> Packet { Packet::unreliable(listen_addr, payload()) };

    let server_handle = server.start_receiving(assert_function);

    client
        .run_instance(packet_factory, client_addr)
        .wait_until_finished();

    // Give the server time to process all packets.
    thread::sleep(Duration::from_millis(200));

    server_handle.shutdown();

    for event in server_handle.iter_events().collect::<Vec<ServerEvent>>() {
        match event {
            ServerEvent::Throughput(throughput) => {
                debug!("Throughput: {}", throughput);
            }
            ServerEvent::AverageThroughput(avg_throughput) => {
                debug!("Avg. Throughput: {}", avg_throughput);
            }
            ServerEvent::TotalSent(total) => {
                debug!("Total Packets Received {}", total);
            }
            ServerEvent::SocketEvent(event) => {
                info!("Socket Event: {:?}", event);
            }
        }
    }

    server_handle.wait_until_finished();
}

#[test]
#[cfg(feature = "tester")]
fn send_receive_unreliable_packets_multiple_clients() {
    let listen_addr: SocketAddr = "127.0.0.1:12345".parse().unwrap();
    let server = Server::new(listen_addr);
    let client = Client::new(Duration::from_millis(16), 500);

    let assert_function = move |packet: Packet| {
        assert_eq!(packet.order_guarantee(), OrderingGuarantee::None);
        assert_eq!(packet.delivery_guarantee(), DeliveryGuarantee::Unreliable);
        assert_eq!(packet.payload(), payload().as_slice());
    };

    let packet_factory = move || -> Packet { Packet::unreliable(listen_addr, payload()) };

    let server_handle = server.start_receiving(assert_function);

    let received = server_handle.event_receiver();

    let handle = thread::spawn(move || loop {
        match received.recv() {
            Ok(event) => {
                match event {
                    ServerEvent::Throughput(throughput) => {
                        info!("Throughput: {}", throughput);
                    }
                    ServerEvent::AverageThroughput(avg_throughput) => {
                        info!("Avg. Throughput: {}", avg_throughput);
                    }
                    ServerEvent::TotalSent(total) => {
                        info!("Total Received: {}", total);
                    }
                    ServerEvent::SocketEvent(event) => {
                        info!("Socket Event: {:?}", event);
                    }
                };
            }
            Err(_) => {
                error!("Stopped receiving events; closing event handler.");
                return;
            }
        }
    });

    let mut clients = Vec::new();

    for _ in 0..10 {
        clients.push(client.run_instance(packet_factory, client_addr()));
        info!("Client started.");
    }

    for client in clients {
        client.wait_until_finished();
        info!("Client finished.");
    }

    info!("Waiting 2 seconds");
    // Give the server time to process all packets.
    thread::sleep(Duration::from_millis(2000));

    info!("Shutting down server!");
    server_handle.shutdown();
    server_handle.wait_until_finished();
    info!("Server is stopped");
    handle.join().unwrap();
}

pub fn payload() -> Vec<u8> {
    vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
}
32.426357
90
0.577576
f73ffe9a63c0efe62508a65859e7567318c0eeb8
1,226
use itertools::Itertools; use std::collections::BTreeMap; static INPUT: &str = include_str!("../input.txt"); fn main() { let id_counts = INPUT.lines().map(|id| { let mut counts = BTreeMap::new(); for c in id.chars() { *counts.entry(c).or_insert(0) += 1; } counts }); let mut has_2 = 0; let mut has_3 = 0; for counts in id_counts { if counts.values().any(|&c| c == 2) { has_2 += 1 } if counts.values().any(|&c| c == 3) { has_3 += 1 } } println!("Checksum: {} * {} = {}", has_2, has_3, has_2 * has_3); let ids = INPUT.lines(); // Eww, O(N^2) let mut z = ids.clone().cartesian_product(ids).filter_map(|(a, b)| { let mut same = Vec::new(); let mut diff = 0; for (a, b) in a.chars().zip(b.chars()) { if a == b { same.push(a); } else { diff += 1; } if diff > 1 { return None } } match diff { 0 => None, 1 => Some(same.into_iter().collect::<String>()), _ => None, } }); if let Some(shared) = z.next() { println!("Shared: {}", shared); } }
23.576923
72
0.451876
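The last record's pair scan is flagged in its own comment as O(N^2): `cartesian_product` compares every ordered pair, including each ID against itself. A sketch of a variant using the same crate's `tuple_combinations`, which visits each unordered pair once — illustrative only, not part of the original solution; the helper name is hypothetical:

```
use itertools::Itertools;

// Find the common characters of the one pair of equal-length IDs that
// differ in exactly one position (byte lengths suffice for ASCII IDs).
fn shared_id(ids: &[&str]) -> Option<String> {
    ids.iter().tuple_combinations().find_map(|(a, b)| {
        let same: String = a.chars()
            .zip(b.chars())
            .filter(|(x, y)| x == y)
            .map(|(x, _)| x)
            .collect();
        if same.len() + 1 == a.len() {
            Some(same)
        } else {
            None
        }
    })
}
```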