hexsha
stringlengths
40
40
size
int64
4
1.05M
content
stringlengths
4
1.05M
avg_line_length
float64
1.33
100
max_line_length
int64
1
1k
alphanum_fraction
float64
0.25
1
ed0a4c62e732477c6a5a90a71cf85d9634a63cec
8,321
// Copyright 2017 The Australian National University // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use ast::ir::*; use ast::ptr::*; use ast::types::*; use compiler::backend::get_callee_saved_offset; use utils::ByteOffset; use std; use std::fmt; use std::collections::HashMap; use vm::VM; /// Frame serves two purposes: /// * it manages stack allocation that are known statically (such as callee saved, /// spilled registers) /// * it also stores exception table for a given function, used for exception handling at runtime /// Mu frame layout is compatible with C ABI /// on x64 /// | previous frame ... /// |--------------- /// | return address /// | old RBP <- RBP /// | callee saved /// | spilled /// |--------------- /// | alloca area (not implemented) #[derive(Clone)] pub struct Frame { /// function version for this frame func_ver_id: MuID, /// current offset to frame base pointer cur_offset: isize, /// arguments passed to this function by registers (used for validating register allocation) pub argument_by_reg: HashMap<MuID, P<Value>>, /// arguments passed to this function by stack (used for validating register allocation) pub argument_by_stack: HashMap<MuID, P<Value>>, /// allocated frame location for Mu Values pub allocated: HashMap<MuID, FrameSlot>, /// mapping from callee saved id (i.e. 
the position in the list of callee saved registers) /// and offset from the frame pointer pub callee_saved: HashMap<isize, ByteOffset> } rodal_struct!(Frame { func_ver_id, cur_offset, argument_by_reg, argument_by_stack, allocated, callee_saved }); impl fmt::Display for Frame { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { writeln!(f, "\nFrame for FuncVer {} {{", self.func_ver_id).unwrap(); writeln!(f, " allocated slots:").unwrap(); for slot in self.allocated.values() { writeln!(f, " {}", slot).unwrap(); } writeln!(f, " exception callsites:").unwrap(); writeln!(f, " cur offset: {}", self.cur_offset).unwrap(); writeln!(f, "}}") } } impl Frame { /// creates a new Frame pub fn new(func_ver_id: MuID) -> Frame { Frame { func_ver_id: func_ver_id, cur_offset: 0, argument_by_reg: HashMap::new(), argument_by_stack: HashMap::new(), callee_saved: HashMap::new(), allocated: HashMap::new() } } /// returns current size, /// which is always a multiple of 16 bytes for x64/aarch64 (alignment requirement) #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))] pub fn cur_size(&self) -> usize { // frame size is a multiple of 16 bytes let size = self.cur_offset.abs() as usize; // align size to a multiple of 16 bytes let size = (size + 16 - 1) & !(16 - 1); debug_assert!(size % 16 == 0); size } /// adds a record of a Mu value argument passed in a certain register pub fn add_argument_by_reg(&mut self, temp: MuID, reg: P<Value>) { self.argument_by_reg.insert(temp, reg); } /// adds a record of a Mu value argumetn passed on stack pub fn add_argument_by_stack(&mut self, temp: MuID, stack_slot: P<Value>) { self.argument_by_stack.insert(temp, stack_slot); } /// allocates next stack slot for a callee saved register, and returns /// a memory operand representing the stack slot pub fn alloc_slot_for_callee_saved_reg(&mut self, reg: P<Value>, vm: &VM) -> P<Value> { let (mem, off) = { let slot = self.alloc_slot(&reg, vm); (slot.make_memory_op(reg.ty.clone(), vm), slot.offset) }; let o 
= get_callee_saved_offset(reg.id()); self.callee_saved.insert(o, off); mem } /// removes the record for a callee saved register /// We allocate stack slots for all the callee saved regsiter, and later /// remove slots for those registers that are not actually used pub fn remove_record_for_callee_saved_reg(&mut self, reg: MuID) { self.allocated.remove(&reg); let id = get_callee_saved_offset(reg); self.callee_saved.remove(&id); } /// allocates next stack slot for a spilled register, and returns /// a memory operand representing the stack slot pub fn alloc_slot_for_spilling(&mut self, reg: P<Value>, vm: &VM) -> P<Value> { let slot = self.alloc_slot(&reg, vm); slot.make_memory_op(reg.ty.clone(), vm) } #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))] fn alloc_slot(&mut self, val: &P<Value>, vm: &VM) -> &FrameSlot { // base pointer is 16 bytes aligned, we are offsetting from base pointer // every value should be properly aligned let backendty = vm.get_backend_type_info(val.ty.id()); // asserting that the alignment is no larger than 16 bytes, otherwise // we need to adjust offset in a different way if backendty.alignment > 16 { if cfg!(target_arch = "aarch64") || cfg!(target_arch = "x86_64") { panic!("A type cannot have alignment greater than 16 on aarch64") } else { unimplemented!() } } self.cur_offset -= backendty.size as isize; { // if alignment doesnt satisfy, make adjustment let abs_offset = self.cur_offset.abs() as usize; if abs_offset % backendty.alignment != 0 { use utils::math; let abs_offset = math::align_up(abs_offset, backendty.alignment); self.cur_offset = -(abs_offset as isize); } } let id = val.id(); let ret = FrameSlot { offset: self.cur_offset, value: val.clone() }; self.allocated.insert(id, ret); self.allocated.get(&id).unwrap() } } /// FrameSlot presents a Value stored in a certain frame location #[derive(Clone)] pub struct FrameSlot { /// location offset from current base pointer pub offset: isize, /// Mu value that resides in this location 
pub value: P<Value> } rodal_struct!(FrameSlot { offset, value }); impl fmt::Display for FrameSlot { #[cfg(target_arch = "x86_64")] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}(RBP): {}", self.offset, self.value) } #[cfg(target_arch = "aarch64")] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "[FP, #{}]: {}", self.offset, self.value) } } impl FrameSlot { /// generates a memory operand for this frame slot #[cfg(target_arch = "x86_64")] pub fn make_memory_op(&self, ty: P<MuType>, vm: &VM) -> P<Value> { use compiler::backend::x86_64; P(Value { hdr: MuEntityHeader::unnamed(vm.next_id()), ty: ty.clone(), v: Value_::Memory(MemoryLocation::Address { base: x86_64::RBP.clone(), offset: Some(Value::make_int32_const(vm.next_id(), self.offset as u64)), index: None, scale: None }) }) } /// generates a memory operand for this frame slot #[cfg(target_arch = "aarch64")] pub fn make_memory_op(&self, ty: P<MuType>, vm: &VM) -> P<Value> { use compiler::backend::aarch64; P(Value { hdr: MuEntityHeader::unnamed(vm.next_id()), ty: ty.clone(), v: Value_::Memory(MemoryLocation::VirtualAddress { base: aarch64::FP.clone(), offset: Some(Value::make_int32_const(vm.next_id(), self.offset as u64)), scale: 1, signed: true }) }) } }
34.102459
97
0.606538
d7714742c1a57861bd183a3a404202dbe5eea7e9
7,543
use std::cmp::min; use std::ops::{Index, IndexMut}; use std::str::Chars; /// A line in a buffer. pub trait Line<'a> { /// The underlying iterator. type Iter: Iterator<Item = char> + 'a; /// Iterator over characters. fn chars_iter(&'a self) -> Self::Iter; } impl<'a, T: AsRef<str>> Line<'a> for T { type Iter = Chars<'a>; fn chars_iter(&self) -> Chars { self.as_ref().chars() } } /// A buffer structure pub trait TextBuffer<'a> { /// The line type of the buffer. type Line: 'a + Line<'a>; /// The line iterator. type LineIter: Iterator<Item = &'a Self::Line>; /// Create a new empty split buffer fn new() -> Self; /// Convert a string to a split buffer fn from_str(s: &str) -> Self; /// Get the nth line in the buffer by option reference fn get_line(&self, n: usize) -> Option<&Self::Line>; /// Get the nth line in the buffer by optional mutable reference fn get_line_mut(&mut self, n: usize) -> Option<&mut Self::Line>; /// Remove the nth line and return it. Panics on out of bound. fn remove_line(&mut self, n: usize) -> Self::Line; /// Insert line at n. Panics on out of bound. fn insert_line(&mut self, n: usize, line: Self::Line); /// Convert a vector of lines to a split buffer fn from_lines(vec: &[Self::Line]) -> SplitBuffer; /// Give a hint on where the operations are most frequent (i.e. where the cursor is). X value. fn focus_hint_x(&mut self, x: usize); /// Give a hint on where the operations are most frequent (i.e. where the cursor is). Y value. fn focus_hint_y(&mut self, y: usize); /// Get the number of lines in the buffer. fn len(&self) -> usize; /// Get an iterator over the lines in the buffer. fn lines(&'a self) -> Self::LineIter; /// Get an iterator over the line starting from a certain line fn lines_from(&'a self, from: usize) -> Self::LineIter; /// Get the leading whitespaces of the nth line. Used for autoindenting. fn get_indent(&self, n: usize) -> &str; } /// The buffer data structure, that Sodium is using. 
/// /// This structure consists of two "subbuffers", which are just vectors over lines (defined by /// Strings). The split is called a center. /// /// The nearer a given operation is to the center, the better it performs. /// /// The second buffer is in reverse order to get the particular efficiency we want. pub struct SplitBuffer { before: Vec<String>, after: Vec<String>, #[cfg(debug)] _hinted_since_edit: bool, } impl SplitBuffer { fn up(&mut self) { self.after .push(self.before.pop().expect("Popped last element")); } fn down(&mut self) { self.before .push(self.after.pop().expect("Popped last element")); } fn y(&self) -> usize { self.before.len() } } // TODO remove impl SplitBuffer { /// Convert the buffer to a string. pub fn to_string(&self) -> String { self.lines().map(|x| x.to_owned() + "\n").collect() } } impl<'a> TextBuffer<'a> for SplitBuffer { type Line = String; type LineIter = SplitBufIter<'a>; fn new() -> Self { SplitBuffer { before: vec![String::new()], after: Vec::new(), } } fn from_str(s: &str) -> Self { SplitBuffer { before: s.lines().map(ToOwned::to_owned).collect(), after: Vec::new(), } } fn get_line(&self, n: usize) -> Option<&String> { if n < self.before.len() { Some(&self.before[n]) } else if n < self.len() { let n = self.len() - 1 - n; Some(&self.after[n]) } else { None } } fn get_line_mut(&mut self, n: usize) -> Option<&mut String> { if n < self.before.len() { Some(&mut self.before[n]) } else if n < self.len() { let n = self.len() - 1 - n; Some(&mut self.after[n]) } else { None } } fn remove_line(&mut self, n: usize) -> String { if n < self.before.len() { self.before.remove(n) } else if n < self.len() { let n = n - self.before.len(); let ret = self.after.remove(n); if n == 0 { self.up(); } ret } else { panic!("Out of bound"); } } fn insert_line(&mut self, n: usize, line: String) { if n < self.before.len() { self.before.insert(n, line); } else if n <= self.len() { let n = self.len() - n; self.after.insert(n, line); } else { panic!("Out of 
bound"); } } fn from_lines(ln: &[String]) -> SplitBuffer { SplitBuffer { before: ln.to_owned(), after: Vec::new(), } } fn focus_hint_y(&mut self, y: usize) { if y < self.y() { for _ in 0..min(self.y() - y, self.before.len()) { self.up(); } } else if y > self.y() { for _ in 0..min(y - self.y(), self.after.len()) { self.down(); } } else if y >= self.len() { panic!("Out of bound"); } } fn focus_hint_x(&mut self, _: usize) {} fn len(&self) -> usize { self.before.len() + self.after.len() } fn lines(&'a self) -> SplitBufIter<'a> { SplitBufIter { buffer: self, line: 0, } } fn lines_from(&'a self, from: usize) -> SplitBufIter<'a> { SplitBufIter { buffer: self, line: from, } } fn get_indent(&self, n: usize) -> &str { if let Some(ln) = self.get_line(n) { let mut len = 0; for c in ln.chars() { match c { '\t' | ' ' => len += 1, _ => break, } } &ln[..len] } else { "" } } } impl Index<usize> for SplitBuffer { type Output = String; fn index<'a>(&'a self, index: usize) -> &'a String { self.get_line(index).expect("Out of bound") } } impl IndexMut<usize> for SplitBuffer { fn index_mut<'a>(&'a mut self, index: usize) -> &'a mut String { #[cfg(debug)] fn debug_check(b: &mut SplitBuffer) { if b._hinted_since_edit { b._hinted_since_edit = false; } else { panic!("No focus hint given since last edit!"); } } #[cfg(not(debug))] fn debug_check(_: &mut SplitBuffer) {} debug_check(&mut *self); self.get_line_mut(index).expect("Out of bound") } } /// A iterator over the lines of a split buffer pub struct SplitBufIter<'a> { buffer: &'a SplitBuffer, line: usize, } impl<'a> Iterator for SplitBufIter<'a> { type Item = &'a String; fn next(&mut self) -> Option<&'a String> { self.nth(1) } fn nth(&mut self, n: usize) -> Option<&'a String> { let res = self.buffer.get_line(self.line); self.line += n; res } fn count(self) -> usize { self.buffer.len() } } impl<'a> DoubleEndedIterator for SplitBufIter<'a> { fn next_back(&mut self) -> Option<&'a String> { if self.line == 0 { None } else { self.line -= 1; 
self.buffer.get_line(self.line) } } }
25.656463
98
0.517433
fb55ca7c300065c424309301b32f8066032de517
11,717
//! TCP stream for communicating with shadowsocks' proxy server use std::{ io::{self, ErrorKind}, pin::Pin, task::{self, Poll}, }; use bytes::{BufMut, BytesMut}; use cfg_if::cfg_if; use futures::ready; use log::trace; use once_cell::sync::Lazy; use pin_project::pin_project; use tokio::{ io::{AsyncRead, AsyncWrite, ReadBuf}, time, }; #[cfg(feature = "aead-cipher-2022")] use crate::relay::get_aead_2022_padding_size; use crate::{ config::ServerConfig, context::SharedContext, crypto::CipherKind, net::{ConnectOpts, TcpStream as OutboundTcpStream}, relay::{ socks5::Address, tcprelay::crypto_io::{CryptoRead, CryptoStream, CryptoWrite, StreamType}, }, }; enum ProxyClientStreamWriteState { Connect(Address), Connecting(BytesMut), Connected, } enum ProxyClientStreamReadState { #[cfg(feature = "aead-cipher-2022")] CheckRequestNonce, Established, } /// A stream for sending / receiving data stream from remote server via shadowsocks' proxy server #[pin_project] pub struct ProxyClientStream<S> { #[pin] stream: CryptoStream<S>, writer_state: ProxyClientStreamWriteState, reader_state: ProxyClientStreamReadState, context: SharedContext, } static DEFAULT_CONNECT_OPTS: Lazy<ConnectOpts> = Lazy::new(Default::default); impl ProxyClientStream<OutboundTcpStream> { /// Connect to target `addr` via shadowsocks' server configured by `svr_cfg` pub async fn connect<A>( context: SharedContext, svr_cfg: &ServerConfig, addr: A, ) -> io::Result<ProxyClientStream<OutboundTcpStream>> where A: Into<Address>, { ProxyClientStream::connect_with_opts(context, svr_cfg, addr, &DEFAULT_CONNECT_OPTS).await } /// Connect to target `addr` via shadowsocks' server configured by `svr_cfg` pub async fn connect_with_opts<A>( context: SharedContext, svr_cfg: &ServerConfig, addr: A, opts: &ConnectOpts, ) -> io::Result<ProxyClientStream<OutboundTcpStream>> where A: Into<Address>, { ProxyClientStream::connect_with_opts_map(context, svr_cfg, addr, opts, |s| s).await } } impl<S> ProxyClientStream<S> where S: AsyncRead + 
AsyncWrite + Unpin, { /// Connect to target `addr` via shadowsocks' server configured by `svr_cfg`, maps `TcpStream` to customized stream with `map_fn` pub async fn connect_map<A, F>( context: SharedContext, svr_cfg: &ServerConfig, addr: A, map_fn: F, ) -> io::Result<ProxyClientStream<S>> where A: Into<Address>, F: FnOnce(OutboundTcpStream) -> S, { ProxyClientStream::connect_with_opts_map(context, svr_cfg, addr, &DEFAULT_CONNECT_OPTS, map_fn).await } /// Connect to target `addr` via shadowsocks' server configured by `svr_cfg`, maps `TcpStream` to customized stream with `map_fn` pub async fn connect_with_opts_map<A, F>( context: SharedContext, svr_cfg: &ServerConfig, addr: A, opts: &ConnectOpts, map_fn: F, ) -> io::Result<ProxyClientStream<S>> where A: Into<Address>, F: FnOnce(OutboundTcpStream) -> S, { let stream = match svr_cfg.timeout() { Some(d) => { match time::timeout( d, OutboundTcpStream::connect_server_with_opts(&context, svr_cfg.external_addr(), opts), ) .await { Ok(Ok(s)) => s, Ok(Err(e)) => return Err(e), Err(..) 
=> { return Err(io::Error::new( ErrorKind::TimedOut, format!("connect {} timeout", svr_cfg.addr()), )) } } } None => OutboundTcpStream::connect_server_with_opts(&context, svr_cfg.external_addr(), opts).await?, }; trace!( "connected tcp remote {} (outbound: {}) with {:?}", svr_cfg.addr(), svr_cfg.external_addr(), opts ); Ok(ProxyClientStream::from_stream(context, map_fn(stream), svr_cfg, addr)) } /// Create a `ProxyClientStream` with a connected `stream` to a shadowsocks' server /// /// NOTE: `stream` must be connected to the server with the same configuration as `svr_cfg`, otherwise strange errors would occurs pub fn from_stream<A>(context: SharedContext, stream: S, svr_cfg: &ServerConfig, addr: A) -> ProxyClientStream<S> where A: Into<Address>, { let addr = addr.into(); let stream = CryptoStream::from_stream(&context, stream, StreamType::Client, svr_cfg.method(), svr_cfg.key()); #[cfg(not(feature = "aead-cipher-2022"))] let reader_state = ProxyClientStreamReadState::Established; #[cfg(feature = "aead-cipher-2022")] let reader_state = if svr_cfg.method().is_aead_2022() { // AEAD 2022 has a respond header ProxyClientStreamReadState::CheckRequestNonce } else { ProxyClientStreamReadState::Established }; ProxyClientStream { stream, writer_state: ProxyClientStreamWriteState::Connect(addr), reader_state, context, } } /// Get reference to the underlying stream pub fn get_ref(&self) -> &S { self.stream.get_ref() } /// Get mutable reference to the underlying stream pub fn get_mut(&mut self) -> &mut S { self.stream.get_mut() } /// Consumes the `ProxyClientStream` and return the underlying stream pub fn into_inner(self) -> S { self.stream.into_inner() } } impl<S> AsyncRead for ProxyClientStream<S> where S: AsyncRead + AsyncWrite + Unpin, { #[inline] fn poll_read(self: Pin<&mut Self>, cx: &mut task::Context<'_>, buf: &mut ReadBuf<'_>) -> Poll<io::Result<()>> { #[allow(unused_mut)] let mut this = self.project(); #[allow(clippy::never_loop)] loop { match this.reader_state { 
ProxyClientStreamReadState::Established => { return this.stream.poll_read_decrypted(cx, this.context, buf); } #[cfg(feature = "aead-cipher-2022")] ProxyClientStreamReadState::CheckRequestNonce => { ready!(this.stream.as_mut().poll_read_decrypted(cx, this.context, buf))?; // REQUEST_NONCE should be in the respond packet (header) of AEAD-2022. // // If received_request_nonce() is None, then: // 1. method.salt_len() == 0, no checking required. // 2. TCP stream read() returns EOF before receiving the header, no checking required. // // poll_read_decrypted will wait until the first non-zero size data chunk. let (data_chunk_count, _) = this.stream.current_data_chunk_remaining(); if data_chunk_count > 0 { // data_chunk_count > 0, so the reader received at least 1 data chunk. let sent_nonce = this.stream.sent_nonce(); let sent_nonce = if sent_nonce.is_empty() { None } else { Some(sent_nonce) }; if sent_nonce != this.stream.received_request_nonce() { return Err(io::Error::new( ErrorKind::Other, "received TCP response header with unmatched salt", )) .into(); } *(this.reader_state) = ProxyClientStreamReadState::Established; } return Ok(()).into(); } } } } } #[inline] fn make_first_packet_buffer(method: CipherKind, addr: &Address, buf: &[u8]) -> BytesMut { // Target Address should be sent with the first packet together, // which would prevent from being detected. let addr_length = addr.serialized_len(); let mut buffer = BytesMut::new(); cfg_if! 
{ if #[cfg(feature = "aead-cipher-2022")] { let padding_size = get_aead_2022_padding_size(buf); let header_length = if method.is_aead_2022() { addr_length + 2 + padding_size + buf.len() } else { addr_length + buf.len() }; } else { let _ = method; let header_length = addr_length + buf.len(); } } buffer.reserve(header_length); // STREAM / AEAD / AEAD2022 protocol, append the Address before payload addr.write_to_buf(&mut buffer); #[cfg(feature = "aead-cipher-2022")] if method.is_aead_2022() { buffer.put_u16(padding_size as u16); if padding_size > 0 { unsafe { buffer.advance_mut(padding_size); } } } buffer.put_slice(buf); buffer } impl<S> AsyncWrite for ProxyClientStream<S> where S: AsyncRead + AsyncWrite + Unpin, { fn poll_write(self: Pin<&mut Self>, cx: &mut task::Context<'_>, buf: &[u8]) -> Poll<Result<usize, io::Error>> { let this = self.project(); loop { match this.writer_state { ProxyClientStreamWriteState::Connect(ref addr) => { let buffer = make_first_packet_buffer(this.stream.method(), addr, buf); // Save the concatenated buffer before it is written successfully. // APIs require buffer to be kept alive before Poll::Ready // // Proactor APIs like IOCP on Windows, pointers of buffers have to be kept alive // before IO completion. *(this.writer_state) = ProxyClientStreamWriteState::Connecting(buffer); } ProxyClientStreamWriteState::Connecting(ref buffer) => { let n = ready!(this.stream.poll_write_encrypted(cx, buffer))?; // In general, poll_write_encrypted should perform like write_all. debug_assert!(n == buffer.len()); *(this.writer_state) = ProxyClientStreamWriteState::Connected; // NOTE: // poll_write will return Ok(0) if buf.len() == 0 // But for the first call, this function will eventually send the handshake packet (IV/Salt + ADDR) to the remote address. 
// // https://github.com/shadowsocks/shadowsocks-rust/issues/232 // // For protocols that requires *Server Hello* message, like FTP, clients won't send anything to the server until server sends handshake messages. // This could be achieved by calling poll_write with an empty input buffer. return Ok(buf.len()).into(); } ProxyClientStreamWriteState::Connected => { return this.stream.poll_write_encrypted(cx, buf); } } } } #[inline] fn poll_flush(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Result<(), io::Error>> { self.project().stream.poll_flush(cx) } #[inline] fn poll_shutdown(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Result<(), io::Error>> { self.project().stream.poll_shutdown(cx) } }
34.768546
165
0.569941
e532573ceb9c0a8e285b55347316f4b30f570c2a
400
#[doc = "Reader of register PSELSCL"] pub type R = crate::R<u32, super::PSELSCL>; #[doc = "Writer for register PSELSCL"] pub type W = crate::W<u32, super::PSELSCL>; #[doc = "Register PSELSCL `reset()`'s with value 0xffff_ffff"] impl crate::ResetValue for super::PSELSCL { type Type = u32; #[inline(always)] fn reset_value() -> Self::Type { 0xffff_ffff } } impl R {} impl W {}
26.666667
62
0.635
4a9fa418405bb0be4886d4768e2e3de7bc97f108
5,280
use std::io::BufRead; #[derive(Debug, PartialEq)] enum Token { OpenParen, CloseParen, OpenSquare, CloseSquare, OpenCurly, CloseCurly, OpenAngle, CloseAngle, } #[derive(Debug, PartialEq)] enum LineStatus { Valid, Corrupt { expected: Token, illegal: Token }, Incomplete { expected: Token }, } impl LineStatus { fn score(&self) -> usize { match self { LineStatus::Corrupt { illegal, .. } => match illegal { Token::CloseParen => 3, Token::CloseSquare => 57, Token::CloseCurly => 1197, Token::CloseAngle => 25137, token => panic!("Encountered invalid token: {:?}", token), }, _ => 0, } } } fn lex(s: &str) -> Vec<Token> { let s = s.trim(); s.chars() .map(|c| match c { '(' => Token::OpenParen, ')' => Token::CloseParen, '[' => Token::OpenSquare, ']' => Token::CloseSquare, '{' => Token::OpenCurly, '}' => Token::CloseCurly, '<' => Token::OpenAngle, '>' => Token::CloseAngle, _ => panic!("Invalid character"), }) .collect() } fn parse(tokens: &[Token]) -> LineStatus { let mut counter = 0; let mut expected = vec![]; for token in tokens { match token { Token::OpenParen => { expected.push(Token::CloseParen); counter += 1; } Token::CloseParen => { if let Some(t) = expected.pop() { if !matches!(t, Token::CloseParen) { return LineStatus::Corrupt { expected: t, illegal: Token::CloseParen, }; } } counter -= 1; } Token::OpenSquare => { expected.push(Token::CloseSquare); counter += 1; } Token::CloseSquare => { if let Some(t) = expected.pop() { if !matches!(t, Token::CloseSquare) { return LineStatus::Corrupt { expected: t, illegal: Token::CloseSquare, }; } } counter -= 1; } Token::OpenCurly => { expected.push(Token::CloseCurly); counter += 1; } Token::CloseCurly => { if let Some(t) = expected.pop() { if !matches!(t, Token::CloseCurly) { return LineStatus::Corrupt { expected: t, illegal: Token::CloseCurly, }; } } counter -= 1; } Token::OpenAngle => { expected.push(Token::CloseAngle); counter += 1; } Token::CloseAngle => { if let Some(t) = expected.pop() { if !matches!(t, Token::CloseAngle) { 
return LineStatus::Corrupt { expected: t, illegal: Token::CloseAngle, }; } } counter -= 1; } } } if counter != 0 { return LineStatus::Incomplete { expected: expected.pop().unwrap(), }; } LineStatus::Valid } fn main() { let input = std::fs::File::open("inputs/10.txt").unwrap(); let lines = std::io::BufReader::new(input).lines(); let file_lines = lines.map(|line| line.unwrap()); let mut corrupt_score = 0; for line in file_lines { let lexed = lex(&line); let line_status = parse(&lexed); corrupt_score += line_status.score(); } dbg!(corrupt_score); } #[cfg(test)] mod tests { use super::*; #[test] fn simple_valid() { let input = "(())"; let lexed = lex(input); let parsed = parse(&lexed); assert_eq!(parsed, LineStatus::Valid) } #[test] fn simple_corrupt() { let input = "(()]"; let lexed = lex(input); let parsed = parse(&lexed); assert_eq!( parsed, LineStatus::Corrupt { expected: Token::CloseParen, illegal: Token::CloseSquare } ) } #[test] fn more_complex_corrupt() { let input = "(()[]{}<><<>>}"; let lexed = lex(input); let parsed = parse(&lexed); assert_eq!( parsed, LineStatus::Corrupt { expected: Token::CloseParen, illegal: Token::CloseCurly } ) } #[test] fn incomplete() { let input = "(()"; let lexed = lex(input); let parsed = parse(&lexed); assert_eq!( parsed, LineStatus::Incomplete { expected: Token::CloseParen, } ) } }
26.138614
74
0.417045
38da3113a5c99c79898b3b1b7c5e34277e637c2f
27
fn main() { print!("\"); }
13.5
26
0.407407
fed0158b82fa5b12176f15ba6fa93c2e40cd7321
3,710
use crate::errors::*; use crate::types::*; use uuid::Uuid; /// Edits the content of a live location in an inline message sent via a bot; for bots only #[derive(Debug, Clone, Default, Serialize, Deserialize)] pub struct EditInlineMessageLiveLocation { #[doc(hidden)] #[serde(rename(serialize = "@extra", deserialize = "@extra"))] extra: Option<String>, #[serde(rename(serialize = "@client_id", deserialize = "@client_id"))] client_id: Option<i32>, /// Inline message identifier inline_message_id: String, /// The new message reply markup #[serde(skip_serializing_if = "ReplyMarkup::_is_default")] reply_markup: ReplyMarkup, /// New location content of the message; may be null. Pass null to stop sharing the live location location: Option<Location>, /// The new direction in which the location moves, in degrees; 1-360. Pass 0 if unknown heading: i32, /// The new maximum distance for proximity alerts, in meters (0-100000). Pass 0 if the notification is disabled proximity_alert_radius: i32, #[serde(rename(serialize = "@type"))] td_type: String, } impl RObject for EditInlineMessageLiveLocation { #[doc(hidden)] fn extra(&self) -> Option<&str> { self.extra.as_deref() } #[doc(hidden)] fn client_id(&self) -> Option<i32> { self.client_id } } impl RFunction for EditInlineMessageLiveLocation {} impl EditInlineMessageLiveLocation { pub fn from_json<S: AsRef<str>>(json: S) -> RTDResult<Self> { Ok(serde_json::from_str(json.as_ref())?) 
} pub fn builder() -> RTDEditInlineMessageLiveLocationBuilder { let mut inner = EditInlineMessageLiveLocation::default(); inner.extra = Some(Uuid::new_v4().to_string()); inner.td_type = "editInlineMessageLiveLocation".to_string(); RTDEditInlineMessageLiveLocationBuilder { inner } } pub fn inline_message_id(&self) -> &String { &self.inline_message_id } pub fn reply_markup(&self) -> &ReplyMarkup { &self.reply_markup } pub fn location(&self) -> &Option<Location> { &self.location } pub fn heading(&self) -> i32 { self.heading } pub fn proximity_alert_radius(&self) -> i32 { self.proximity_alert_radius } } #[doc(hidden)] pub struct RTDEditInlineMessageLiveLocationBuilder { inner: EditInlineMessageLiveLocation, } impl RTDEditInlineMessageLiveLocationBuilder { pub fn build(&self) -> EditInlineMessageLiveLocation { self.inner.clone() } pub fn inline_message_id<T: AsRef<str>>(&mut self, inline_message_id: T) -> &mut Self { self.inner.inline_message_id = inline_message_id.as_ref().to_string(); self } pub fn reply_markup<T: AsRef<ReplyMarkup>>(&mut self, reply_markup: T) -> &mut Self { self.inner.reply_markup = reply_markup.as_ref().clone(); self } pub fn location<T: AsRef<Location>>(&mut self, location: T) -> &mut Self { self.inner.location = Some(location.as_ref().clone()); self } pub fn heading(&mut self, heading: i32) -> &mut Self { self.inner.heading = heading; self } pub fn proximity_alert_radius(&mut self, proximity_alert_radius: i32) -> &mut Self { self.inner.proximity_alert_radius = proximity_alert_radius; self } } impl AsRef<EditInlineMessageLiveLocation> for EditInlineMessageLiveLocation { fn as_ref(&self) -> &EditInlineMessageLiveLocation { self } } impl AsRef<EditInlineMessageLiveLocation> for RTDEditInlineMessageLiveLocationBuilder { fn as_ref(&self) -> &EditInlineMessageLiveLocation { &self.inner } }
29.919355
115
0.666846
d504af5f1e2e6670c41016e449da51f56ad699be
1,765
use num_traits::Signed; use super::{is_triangle_convex, point_in_triangle}; #[inline] pub fn triangulate<T>(points: &[[T; 2]]) -> Vec<usize> where T: Copy + Signed + PartialOrd, { let len = points.len(); let mut tgs = Vec::new(); if len < 3 { tgs } else { let mut avl = Vec::with_capacity(len); for i in 0..len { avl.push(i); } let mut i = 0; let mut al = len; while al > 3 { let i0 = avl[i % al]; let i1 = avl[(i + 1) % al]; let i2 = avl[(i + 2) % al]; let a = &points[i0]; let b = &points[i1]; let c = &points[i2]; let mut ear_found = false; if is_triangle_convex(a, b, c) { ear_found = true; for j in 0..al { let vi = avl[j]; if vi != i0 && vi != i1 && vi != i2 { if point_in_triangle(&points[vi], a, b, c) { ear_found = false; break; } } } } if ear_found { tgs.push(i0); tgs.push(i1); tgs.push(i2); avl.remove((i + 1) % al); al -= 1; i = 0; } else if i > 3 * al { break; } else { i += 1; } } tgs.push(avl[0]); tgs.push(avl[1]); tgs.push(avl[2]); tgs } } #[test] fn test_triangulate() { let points = [[1, -1], [1, 1], [-1, 1], [-1, -1]]; let tgs = triangulate(&points); assert_eq!(tgs, [0, 1, 2, 0, 2, 3]); }
22.922078
68
0.364873
fe52a894682e5a02983d52103d242a869ed50e58
9,333
use std::future::Future; use std::pin::Pin; use std::rc::Rc; use std::task::{Context, Poll}; use super::{Service, ServiceFactory}; /// Service for the `and_then` combinator, chaining a computation onto the end /// of another service which completes successfully. /// /// This is created by the `ServiceExt::and_then` method. pub(crate) struct AndThenService<A, B>(Rc<(A, B)>); impl<A, B> AndThenService<A, B> { /// Create new `AndThen` combinator pub(crate) fn new(a: A, b: B) -> Self where A: Service, B: Service<Request = A::Response, Error = A::Error>, { Self(Rc::new((a, b))) } } impl<A, B> Clone for AndThenService<A, B> { fn clone(&self) -> Self { AndThenService(self.0.clone()) } } impl<A, B> Service for AndThenService<A, B> where A: Service, B: Service<Request = A::Response, Error = A::Error>, { type Request = A::Request; type Response = B::Response; type Error = A::Error; type Future = AndThenServiceResponse<A, B>; fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { let srv = self.0.as_ref(); let not_ready = !srv.0.poll_ready(cx)?.is_ready(); if !srv.1.poll_ready(cx)?.is_ready() || not_ready { Poll::Pending } else { Poll::Ready(Ok(())) } } fn poll_shutdown(&self, cx: &mut Context<'_>, is_error: bool) -> Poll<()> { let srv = self.0.as_ref(); if srv.0.poll_shutdown(cx, is_error).is_ready() && srv.1.poll_shutdown(cx, is_error).is_ready() { Poll::Ready(()) } else { Poll::Pending } } #[inline] fn call(&self, req: A::Request) -> Self::Future { AndThenServiceResponse { state: State::A(self.0.as_ref().0.call(req), Some(self.0.clone())), } } } pin_project_lite::pin_project! 
{ pub(crate) struct AndThenServiceResponse<A, B> where A: Service, B: Service<Request = A::Response, Error = A::Error>, { #[pin] state: State<A, B>, } } #[pin_project::pin_project(project = StateProject)] enum State<A, B> where A: Service, B: Service<Request = A::Response, Error = A::Error>, { A(#[pin] A::Future, Option<Rc<(A, B)>>), B(#[pin] B::Future), Empty, } impl<A, B> Future for AndThenServiceResponse<A, B> where A: Service, B: Service<Request = A::Response, Error = A::Error>, { type Output = Result<B::Response, A::Error>; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { let mut this = self.as_mut().project(); match this.state.as_mut().project() { StateProject::A(fut, b) => match fut.poll(cx)? { Poll::Ready(res) => { let b = b.take().unwrap(); this.state.set(State::Empty); // drop fut A let fut = b.as_ref().1.call(res); this.state.set(State::B(fut)); self.poll(cx) } Poll::Pending => Poll::Pending, }, StateProject::B(fut) => fut.poll(cx).map(|r| { this.state.set(State::Empty); r }), StateProject::Empty => { panic!("future must not be polled after it returned `Poll::Ready`") } } } } /// `.and_then()` service factory combinator pub(crate) struct AndThenServiceFactory<A, B> where A: ServiceFactory, A::Config: Clone, B: ServiceFactory< Config = A::Config, Request = A::Response, Error = A::Error, InitError = A::InitError, >, { inner: Rc<(A, B)>, } impl<A, B> AndThenServiceFactory<A, B> where A: ServiceFactory, A::Config: Clone, B: ServiceFactory< Config = A::Config, Request = A::Response, Error = A::Error, InitError = A::InitError, >, { /// Create new `AndThenFactory` combinator pub(crate) fn new(a: A, b: B) -> Self { Self { inner: Rc::new((a, b)), } } } impl<A, B> ServiceFactory for AndThenServiceFactory<A, B> where A: ServiceFactory, A::Config: Clone, B: ServiceFactory< Config = A::Config, Request = A::Response, Error = A::Error, InitError = A::InitError, >, { type Request = A::Request; type Response = B::Response; type Error = 
A::Error; type Config = A::Config; type Service = AndThenService<A::Service, B::Service>; type InitError = A::InitError; type Future = AndThenServiceFactoryResponse<A, B>; fn new_service(&self, cfg: A::Config) -> Self::Future { let inner = &*self.inner; AndThenServiceFactoryResponse::new( inner.0.new_service(cfg.clone()), inner.1.new_service(cfg), ) } } impl<A, B> Clone for AndThenServiceFactory<A, B> where A: ServiceFactory, A::Config: Clone, B: ServiceFactory< Config = A::Config, Request = A::Response, Error = A::Error, InitError = A::InitError, >, { fn clone(&self) -> Self { Self { inner: self.inner.clone(), } } } pin_project_lite::pin_project! { pub(crate) struct AndThenServiceFactoryResponse<A, B> where A: ServiceFactory, B: ServiceFactory<Request = A::Response>, { #[pin] fut_a: A::Future, #[pin] fut_b: B::Future, a: Option<A::Service>, b: Option<B::Service>, } } impl<A, B> AndThenServiceFactoryResponse<A, B> where A: ServiceFactory, B: ServiceFactory<Request = A::Response>, { fn new(fut_a: A::Future, fut_b: B::Future) -> Self { AndThenServiceFactoryResponse { fut_a, fut_b, a: None, b: None, } } } impl<A, B> Future for AndThenServiceFactoryResponse<A, B> where A: ServiceFactory, B: ServiceFactory<Request = A::Response, Error = A::Error, InitError = A::InitError>, { type Output = Result<AndThenService<A::Service, B::Service>, A::InitError>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { let this = self.project(); if this.a.is_none() { if let Poll::Ready(service) = this.fut_a.poll(cx)? { *this.a = Some(service); } } if this.b.is_none() { if let Poll::Ready(service) = this.fut_b.poll(cx)? 
{ *this.b = Some(service); } } if this.a.is_some() && this.b.is_some() { Poll::Ready(Ok(AndThenService::new( this.a.take().unwrap(), this.b.take().unwrap(), ))) } else { Poll::Pending } } } #[cfg(test)] mod tests { use std::cell::Cell; use std::rc::Rc; use std::task::{Context, Poll}; use futures_util::future::{lazy, ok, ready, Ready}; use crate::{fn_factory, pipeline, pipeline_factory, Service, ServiceFactory}; struct Srv1(Rc<Cell<usize>>); impl Service for Srv1 { type Request = &'static str; type Response = &'static str; type Error = (); type Future = Ready<Result<Self::Response, ()>>; fn poll_ready(&self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { self.0.set(self.0.get() + 1); Poll::Ready(Ok(())) } fn call(&self, req: &'static str) -> Self::Future { ok(req) } } #[derive(Clone)] struct Srv2(Rc<Cell<usize>>); impl Service for Srv2 { type Request = &'static str; type Response = (&'static str, &'static str); type Error = (); type Future = Ready<Result<Self::Response, ()>>; fn poll_ready(&self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { self.0.set(self.0.get() + 1); Poll::Ready(Ok(())) } fn call(&self, req: &'static str) -> Self::Future { ok((req, "srv2")) } } #[ntex_rt::test] async fn test_poll_ready() { let cnt = Rc::new(Cell::new(0)); let srv = pipeline(Srv1(cnt.clone())) .and_then(Srv2(cnt.clone())) .clone(); let res = lazy(|cx| srv.poll_ready(cx)).await; assert_eq!(res, Poll::Ready(Ok(()))); assert_eq!(cnt.get(), 2); let res = lazy(|cx| srv.poll_shutdown(cx, false)).await; assert_eq!(res, Poll::Ready(())); } #[ntex_rt::test] async fn test_call() { let cnt = Rc::new(Cell::new(0)); let srv = pipeline(Srv1(cnt.clone())).and_then(Srv2(cnt)); let res = srv.call("srv1").await; assert!(res.is_ok()); assert_eq!(res.unwrap(), ("srv1", "srv2")); } #[ntex_rt::test] async fn test_factory() { let cnt = Rc::new(Cell::new(0)); let cnt2 = cnt.clone(); let new_srv = pipeline_factory(fn_factory(move || ready(Ok::<_, ()>(Srv1(cnt2.clone()))))) 
.and_then(move || ready(Ok(Srv2(cnt.clone())))) .clone(); let srv = new_srv.new_service(()).await.unwrap(); let res = srv.call("srv1").await; assert!(res.is_ok()); assert_eq!(res.unwrap(), ("srv1", "srv2")); } }
26.439093
89
0.534126
5b83f5c9bdbc2d083faf6f941c080cac024e7057
24,859
/// A span represents a single operation within a trace. Spans can be /// nested to form a trace tree. Spans may also be linked to other spans /// from the same or different trace. And form graphs. Often, a trace /// contains a root span that describes the end-to-end latency, and one /// or more subspans for its sub-operations. A trace can also contain /// multiple root spans, or none at all. Spans do not need to be /// contiguous - there may be gaps or overlaps between spans in a trace. /// /// The next id is 17. /// TODO(bdrutu): Add an example. #[derive(Clone, PartialEq, ::prost::Message)] pub struct Span { /// A unique identifier for a trace. All spans from the same trace share /// the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes /// is considered invalid. /// /// This field is semantically required. Receiver should generate new /// random trace_id if empty or invalid trace_id was received. /// /// This field is required. #[prost(bytes="vec", tag="1")] pub trace_id: ::prost::alloc::vec::Vec<u8>, /// A unique identifier for a span within a trace, assigned when the span /// is created. The ID is an 8-byte array. An ID with all zeroes is considered /// invalid. /// /// This field is semantically required. Receiver should generate new /// random span_id if empty or invalid span_id was received. /// /// This field is required. #[prost(bytes="vec", tag="2")] pub span_id: ::prost::alloc::vec::Vec<u8>, /// The Tracestate on the span. #[prost(message, optional, tag="15")] pub tracestate: ::core::option::Option<span::Tracestate>, /// The `span_id` of this span's parent span. If this is a root span, then this /// field must be empty. The ID is an 8-byte array. #[prost(bytes="vec", tag="3")] pub parent_span_id: ::prost::alloc::vec::Vec<u8>, /// A description of the span's operation. /// /// For example, the name can be a qualified method name or a file name /// and a line number where the operation is called. 
A best practice is to use /// the same display name at the same call point in an application. /// This makes it easier to correlate spans in different traces. /// /// This field is semantically required to be set to non-empty string. /// When null or empty string received - receiver may use string "name" /// as a replacement. There might be smarted algorithms implemented by /// receiver to fix the empty span name. /// /// This field is required. #[prost(message, optional, tag="4")] pub name: ::core::option::Option<TruncatableString>, /// Distinguishes between spans generated in a particular context. For example, /// two spans with the same name may be distinguished using `CLIENT` (caller) /// and `SERVER` (callee) to identify queueing latency associated with the span. #[prost(enumeration="span::SpanKind", tag="14")] pub kind: i32, /// The start time of the span. On the client side, this is the time kept by /// the local machine where the span execution starts. On the server side, this /// is the time when the server's application handler starts running. /// /// This field is semantically required. When not set on receive - /// receiver should set it to the value of end_time field if it was /// set. Or to the current time if neither was set. It is important to /// keep end_time > start_time for consistency. /// /// This field is required. #[prost(message, optional, tag="5")] pub start_time: ::core::option::Option<::prost_types::Timestamp>, /// The end time of the span. On the client side, this is the time kept by /// the local machine where the span execution ends. On the server side, this /// is the time when the server application handler stops running. /// /// This field is semantically required. When not set on receive - /// receiver should set it to start_time value. It is important to /// keep end_time > start_time for consistency. /// /// This field is required. 
#[prost(message, optional, tag="6")] pub end_time: ::core::option::Option<::prost_types::Timestamp>, /// A set of attributes on the span. #[prost(message, optional, tag="7")] pub attributes: ::core::option::Option<span::Attributes>, /// A stack trace captured at the start of the span. #[prost(message, optional, tag="8")] pub stack_trace: ::core::option::Option<StackTrace>, /// The included time events. #[prost(message, optional, tag="9")] pub time_events: ::core::option::Option<span::TimeEvents>, /// The included links. #[prost(message, optional, tag="10")] pub links: ::core::option::Option<span::Links>, /// An optional final status for this span. Semantically when Status /// wasn't set it is means span ended without errors and assume /// Status.Ok (code = 0). #[prost(message, optional, tag="11")] pub status: ::core::option::Option<Status>, /// An optional resource that is associated with this span. If not set, this span /// should be part of a batch that does include the resource information, unless resource /// information is unknown. #[prost(message, optional, tag="16")] pub resource: ::core::option::Option<super::super::resource::v1::Resource>, /// A highly recommended but not required flag that identifies when a /// trace crosses a process boundary. True when the parent_span belongs /// to the same process as the current span. This flag is most commonly /// used to indicate the need to adjust time as clocks in different /// processes may not be synchronized. #[prost(message, optional, tag="12")] pub same_process_as_parent_span: ::core::option::Option<bool>, /// An optional number of child spans that were generated while this span /// was active. If set, allows an implementation to detect missing child spans. #[prost(message, optional, tag="13")] pub child_span_count: ::core::option::Option<u32>, } /// Nested message and enum types in `Span`. pub mod span { /// This field conveys information about request position in multiple distributed tracing graphs. 
/// It is a list of Tracestate.Entry with a maximum of 32 members in the list. /// /// See the <https://github.com/w3c/distributed-tracing> for more details about this field. #[derive(Clone, PartialEq, ::prost::Message)] pub struct Tracestate { /// A list of entries that represent the Tracestate. #[prost(message, repeated, tag="1")] pub entries: ::prost::alloc::vec::Vec<tracestate::Entry>, } /// Nested message and enum types in `Tracestate`. pub mod tracestate { #[derive(Clone, PartialEq, ::prost::Message)] pub struct Entry { /// The key must begin with a lowercase letter, and can only contain /// lowercase letters 'a'-'z', digits '0'-'9', underscores '_', dashes /// '-', asterisks '*', and forward slashes '/'. #[prost(string, tag="1")] pub key: ::prost::alloc::string::String, /// The value is opaque string up to 256 characters printable ASCII /// RFC0020 characters (i.e., the range 0x20 to 0x7E) except ',' and '='. /// Note that this also excludes tabs, newlines, carriage returns, etc. #[prost(string, tag="2")] pub value: ::prost::alloc::string::String, } } /// A set of attributes, each with a key and a value. #[derive(Clone, PartialEq, ::prost::Message)] pub struct Attributes { /// The set of attributes. The value can be a string, an integer, a double /// or the Boolean values `true` or `false`. Note, global attributes like /// server name can be set as tags using resource API. Examples of attributes: /// /// "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" /// "/http/server_latency": 300 /// "abc.com/myattribute": true /// "abc.com/score": 10.239 #[prost(map="string, message", tag="1")] pub attribute_map: ::std::collections::HashMap<::prost::alloc::string::String, super::AttributeValue>, /// The number of attributes that were discarded. Attributes can be discarded /// because their keys are too long or because there are too many attributes. 
/// If this value is 0, then no attributes were dropped. #[prost(int32, tag="2")] pub dropped_attributes_count: i32, } /// A time-stamped annotation or message event in the Span. #[derive(Clone, PartialEq, ::prost::Message)] pub struct TimeEvent { /// The time the event occurred. #[prost(message, optional, tag="1")] pub time: ::core::option::Option<::prost_types::Timestamp>, /// A `TimeEvent` can contain either an `Annotation` object or a /// `MessageEvent` object, but not both. #[prost(oneof="time_event::Value", tags="2, 3")] pub value: ::core::option::Option<time_event::Value>, } /// Nested message and enum types in `TimeEvent`. pub mod time_event { /// A text annotation with a set of attributes. #[derive(Clone, PartialEq, ::prost::Message)] pub struct Annotation { /// A user-supplied message describing the event. #[prost(message, optional, tag="1")] pub description: ::core::option::Option<super::super::TruncatableString>, /// A set of attributes on the annotation. #[prost(message, optional, tag="2")] pub attributes: ::core::option::Option<super::Attributes>, } /// An event describing a message sent/received between Spans. #[derive(Clone, PartialEq, ::prost::Message)] pub struct MessageEvent { /// The type of MessageEvent. Indicates whether the message was sent or /// received. #[prost(enumeration="message_event::Type", tag="1")] pub r#type: i32, /// An identifier for the MessageEvent's message that can be used to match /// SENT and RECEIVED MessageEvents. For example, this field could /// represent a sequence ID for a streaming RPC. It is recommended to be /// unique within a Span. #[prost(uint64, tag="2")] pub id: u64, /// The number of uncompressed bytes sent or received. #[prost(uint64, tag="3")] pub uncompressed_size: u64, /// The number of compressed bytes sent or received. If zero, assumed to /// be the same size as uncompressed. #[prost(uint64, tag="4")] pub compressed_size: u64, } /// Nested message and enum types in `MessageEvent`. 
pub mod message_event { /// Indicates whether the message was sent or received. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum Type { /// Unknown event type. Unspecified = 0, /// Indicates a sent message. Sent = 1, /// Indicates a received message. Received = 2, } } /// A `TimeEvent` can contain either an `Annotation` object or a /// `MessageEvent` object, but not both. #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Value { /// A text annotation with a set of attributes. #[prost(message, tag="2")] Annotation(Annotation), /// An event describing a message sent/received between Spans. #[prost(message, tag="3")] MessageEvent(MessageEvent), } } /// A collection of `TimeEvent`s. A `TimeEvent` is a time-stamped annotation /// on the span, consisting of either user-supplied key-value pairs, or /// details of a message sent/received between Spans. #[derive(Clone, PartialEq, ::prost::Message)] pub struct TimeEvents { /// A collection of `TimeEvent`s. #[prost(message, repeated, tag="1")] pub time_event: ::prost::alloc::vec::Vec<TimeEvent>, /// The number of dropped annotations in all the included time events. /// If the value is 0, then no annotations were dropped. #[prost(int32, tag="2")] pub dropped_annotations_count: i32, /// The number of dropped message events in all the included time events. /// If the value is 0, then no message events were dropped. #[prost(int32, tag="3")] pub dropped_message_events_count: i32, } /// A pointer from the current span to another span in the same trace or in a /// different trace. For example, this can be used in batching operations, /// where a single batch handler processes multiple requests from different /// traces or when the handler receives a request from a different project. #[derive(Clone, PartialEq, ::prost::Message)] pub struct Link { /// A unique identifier of a trace that this linked span is part of. The ID is a /// 16-byte array. 
#[prost(bytes="vec", tag="1")] pub trace_id: ::prost::alloc::vec::Vec<u8>, /// A unique identifier for the linked span. The ID is an 8-byte array. #[prost(bytes="vec", tag="2")] pub span_id: ::prost::alloc::vec::Vec<u8>, /// The relationship of the current span relative to the linked span. #[prost(enumeration="link::Type", tag="3")] pub r#type: i32, /// A set of attributes on the link. #[prost(message, optional, tag="4")] pub attributes: ::core::option::Option<Attributes>, /// The Tracestate associated with the link. #[prost(message, optional, tag="5")] pub tracestate: ::core::option::Option<Tracestate>, } /// Nested message and enum types in `Link`. pub mod link { /// The relationship of the current span relative to the linked span: child, /// parent, or unspecified. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum Type { /// The relationship of the two spans is unknown, or known but other /// than parent-child. Unspecified = 0, /// The linked span is a child of the current span. ChildLinkedSpan = 1, /// The linked span is a parent of the current span. ParentLinkedSpan = 2, } } /// A collection of links, which are references from this span to a span /// in the same or different trace. #[derive(Clone, PartialEq, ::prost::Message)] pub struct Links { /// A collection of links. #[prost(message, repeated, tag="1")] pub link: ::prost::alloc::vec::Vec<Link>, /// The number of dropped links after the maximum size was enforced. If /// this value is 0, then no links were dropped. #[prost(int32, tag="2")] pub dropped_links_count: i32, } /// Type of span. Can be used to specify additional relationships between spans /// in addition to a parent/child relationship. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum SpanKind { /// Unspecified. 
Unspecified = 0, /// Indicates that the span covers server-side handling of an RPC or other /// remote network request. Server = 1, /// Indicates that the span covers the client-side wrapper around an RPC or /// other remote request. Client = 2, } } /// The `Status` type defines a logical error model that is suitable for different /// programming environments, including REST APIs and RPC APIs. This proto's fields /// are a subset of those of /// \[google.rpc.Status\](<https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto>), /// which is used by \[gRPC\](<https://github.com/grpc>). #[derive(Clone, PartialEq, ::prost::Message)] pub struct Status { /// The status code. This is optional field. It is safe to assume 0 (OK) /// when not set. #[prost(int32, tag="1")] pub code: i32, /// A developer-facing error message, which should be in English. #[prost(string, tag="2")] pub message: ::prost::alloc::string::String, } /// The value of an Attribute. #[derive(Clone, PartialEq, ::prost::Message)] pub struct AttributeValue { /// The type of the value. #[prost(oneof="attribute_value::Value", tags="1, 2, 3, 4")] pub value: ::core::option::Option<attribute_value::Value>, } /// Nested message and enum types in `AttributeValue`. pub mod attribute_value { /// The type of the value. #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Value { /// A string up to 256 bytes long. #[prost(message, tag="1")] StringValue(super::TruncatableString), /// A 64-bit signed integer. #[prost(int64, tag="2")] IntValue(i64), /// A Boolean value represented by `true` or `false`. #[prost(bool, tag="3")] BoolValue(bool), /// A double value. #[prost(double, tag="4")] DoubleValue(f64), } } /// The call stack which originated this span. #[derive(Clone, PartialEq, ::prost::Message)] pub struct StackTrace { /// Stack frames in this stack trace. 
#[prost(message, optional, tag="1")] pub stack_frames: ::core::option::Option<stack_trace::StackFrames>, /// The hash ID is used to conserve network bandwidth for duplicate /// stack traces within a single trace. /// /// Often multiple spans will have identical stack traces. /// The first occurrence of a stack trace should contain both /// `stack_frames` and a value in `stack_trace_hash_id`. /// /// Subsequent spans within the same request can refer /// to that stack trace by setting only `stack_trace_hash_id`. /// /// TODO: describe how to deal with the case where stack_trace_hash_id is /// zero because it was not set. #[prost(uint64, tag="2")] pub stack_trace_hash_id: u64, } /// Nested message and enum types in `StackTrace`. pub mod stack_trace { /// A single stack frame in a stack trace. #[derive(Clone, PartialEq, ::prost::Message)] pub struct StackFrame { /// The fully-qualified name that uniquely identifies the function or /// method that is active in this frame. #[prost(message, optional, tag="1")] pub function_name: ::core::option::Option<super::TruncatableString>, /// An un-mangled function name, if `function_name` is /// \[mangled\](<http://www.avabodh.com/cxxin/namemangling.html>). The name can /// be fully qualified. #[prost(message, optional, tag="2")] pub original_function_name: ::core::option::Option<super::TruncatableString>, /// The name of the source file where the function call appears. #[prost(message, optional, tag="3")] pub file_name: ::core::option::Option<super::TruncatableString>, /// The line number in `file_name` where the function call appears. #[prost(int64, tag="4")] pub line_number: i64, /// The column number where the function call appears, if available. /// This is important in JavaScript because of its anonymous functions. #[prost(int64, tag="5")] pub column_number: i64, /// The binary module from where the code was loaded. 
#[prost(message, optional, tag="6")] pub load_module: ::core::option::Option<super::Module>, /// The version of the deployed source code. #[prost(message, optional, tag="7")] pub source_version: ::core::option::Option<super::TruncatableString>, } /// A collection of stack frames, which can be truncated. #[derive(Clone, PartialEq, ::prost::Message)] pub struct StackFrames { /// Stack frames in this call stack. #[prost(message, repeated, tag="1")] pub frame: ::prost::alloc::vec::Vec<StackFrame>, /// The number of stack frames that were dropped because there /// were too many stack frames. /// If this value is 0, then no stack frames were dropped. #[prost(int32, tag="2")] pub dropped_frames_count: i32, } } /// A description of a binary module. #[derive(Clone, PartialEq, ::prost::Message)] pub struct Module { /// TODO: document the meaning of this field. /// For example: main binary, kernel modules, and dynamic libraries /// such as libc.so, sharedlib.so. #[prost(message, optional, tag="1")] pub module: ::core::option::Option<TruncatableString>, /// A unique identifier for the module, usually a hash of its /// contents. #[prost(message, optional, tag="2")] pub build_id: ::core::option::Option<TruncatableString>, } /// A string that might be shortened to a specified length. #[derive(Clone, PartialEq, ::prost::Message)] pub struct TruncatableString { /// The shortened string. For example, if the original string was 500 bytes long and /// the limit of the string was 128 bytes, then this value contains the first 128 /// bytes of the 500-byte string. Note that truncation always happens on a /// character boundary, to ensure that a truncated string is still valid UTF-8. /// Because it may contain multi-byte characters, the size of the truncated string /// may be less than the truncation limit. #[prost(string, tag="1")] pub value: ::prost::alloc::string::String, /// The number of bytes removed from the original string. 
If this /// value is 0, then the string was not shortened. #[prost(int32, tag="2")] pub truncated_byte_count: i32, } /// Global configuration of the trace service. All fields must be specified, or /// the default (zero) values will be used for each type. #[derive(Clone, PartialEq, ::prost::Message)] pub struct TraceConfig { /// The global default max number of attributes per span. #[prost(int64, tag="4")] pub max_number_of_attributes: i64, /// The global default max number of annotation events per span. #[prost(int64, tag="5")] pub max_number_of_annotations: i64, /// The global default max number of message events per span. #[prost(int64, tag="6")] pub max_number_of_message_events: i64, /// The global default max number of link entries per span. #[prost(int64, tag="7")] pub max_number_of_links: i64, /// The global default sampler used to make decisions on span sampling. #[prost(oneof="trace_config::Sampler", tags="1, 2, 3")] pub sampler: ::core::option::Option<trace_config::Sampler>, } /// Nested message and enum types in `TraceConfig`. pub mod trace_config { /// The global default sampler used to make decisions on span sampling. #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Sampler { #[prost(message, tag="1")] ProbabilitySampler(super::ProbabilitySampler), #[prost(message, tag="2")] ConstantSampler(super::ConstantSampler), #[prost(message, tag="3")] RateLimitingSampler(super::RateLimitingSampler), } } /// Sampler that tries to uniformly sample traces with a given probability. /// The probability of sampling a trace is equal to that of the specified probability. #[derive(Clone, PartialEq, ::prost::Message)] pub struct ProbabilitySampler { /// The desired probability of sampling. Must be within [0.0, 1.0]. #[prost(double, tag="1")] pub sampling_probability: f64, } /// Sampler that always makes a constant decision on span sampling. 
#[derive(Clone, PartialEq, ::prost::Message)] pub struct ConstantSampler { #[prost(enumeration="constant_sampler::ConstantDecision", tag="1")] pub decision: i32, } /// Nested message and enum types in `ConstantSampler`. pub mod constant_sampler { /// How spans should be sampled: /// - Always off /// - Always on /// - Always follow the parent Span's decision (off if no parent). #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum ConstantDecision { AlwaysOff = 0, AlwaysOn = 1, AlwaysParent = 2, } } /// Sampler that tries to sample with a rate per time window. #[derive(Clone, PartialEq, ::prost::Message)] pub struct RateLimitingSampler { /// Rate per second. #[prost(int64, tag="1")] pub qps: i64, }
47.805769
158
0.637435
f4eb2b28f688453089125f96ca58b5e689f9c646
3,823
// Copyright 2018 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use std::fs::{self, File}; /// Wrappers around the `zip-rs` library to compress and decompress zip archives. use std::io; use std::panic; use std::path::Path; use walkdir::WalkDir; use self::zip_rs::result::{ZipError, ZipResult}; use self::zip_rs::write::FileOptions; use zip as zip_rs; /// Compress a source directory recursively into a zip file. /// Permissions are set to 644 by default to avoid any /// unwanted execution bits. pub fn compress(src_dir: &Path, dst_file: &File) -> ZipResult<()> { if !Path::new(src_dir).is_dir() { return Err(ZipError::Io(io::Error::new( io::ErrorKind::Other, "Source must be a directory.", ))); } let options = FileOptions::default() .compression_method(zip_rs::CompressionMethod::Stored) .unix_permissions(0o644); let mut zip = zip_rs::ZipWriter::new(dst_file); let walkdir = WalkDir::new(src_dir.to_str().unwrap()); let it = walkdir.into_iter(); for dent in it.filter_map(|e| e.ok()) { let path = dent.path(); let name = path .strip_prefix(Path::new(src_dir)) .unwrap() .to_str() .unwrap(); if path.is_file() { zip.start_file(name, options)?; let mut f = File::open(path)?; io::copy(&mut f, &mut zip)?; } } zip.finish()?; dst_file.sync_all()?; Ok(()) } /// Decompress a source file into the provided destination path. 
pub fn decompress<R, F>(src_file: R, dest: &Path, expected: F) -> ZipResult<usize> where R: io::Read + io::Seek + panic::UnwindSafe, F: Fn(&Path) -> bool + panic::UnwindSafe, { let mut decompressed = 0; // catch the panic to avoid the thread quit panic::set_hook(Box::new(|panic_info| { error!( "panic occurred: {:?}", panic_info.payload().downcast_ref::<&str>().unwrap() ); })); let result = panic::catch_unwind(move || { let mut archive = zip_rs::ZipArchive::new(src_file)?; for i in 0..archive.len() { let mut file = archive.by_index(i)?; let san_name = file.sanitized_name(); if san_name.to_str().unwrap_or("").replace("\\", "/") != file.name().replace("\\", "/") || !expected(&san_name) { info!( "ignoring a suspicious file: {}, got {:?}", file.name(), san_name.to_str() ); continue; } let file_path = dest.join(san_name); if (&*file.name()).ends_with('/') { fs::create_dir_all(&file_path)?; } else { if let Some(p) = file_path.parent() { if !p.exists() { fs::create_dir_all(&p)?; } } let res = fs::File::create(&file_path); let mut outfile = match res { Err(e) => { error!("{:?}", e); return Err(zip::result::ZipError::Io(e)); } Ok(r) => r, }; io::copy(&mut file, &mut outfile)?; decompressed += 1; } // Get and Set permissions #[cfg(unix)] { use std::os::unix::fs::PermissionsExt; if let Some(mode) = file.unix_mode() { fs::set_permissions( &file_path.to_str().unwrap(), PermissionsExt::from_mode(mode), )?; } } } Ok(decompressed) }); match result { Ok(res) => match res { Err(e) => Err(e.into()), Ok(_) => res, }, Err(_) => { error!("panic occurred on zip::decompress!"); Err(zip::result::ZipError::InvalidArchive( "panic occurred on zip::decompress", )) } } }
26.365517
90
0.629349
2280933106ed5cbbdc14598e1f3580b8786d5335
5,149
/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ //! Load retry configuration properties from an AWS profile use std::str::FromStr; use aws_smithy_types::retry::{RetryConfigBuilder, RetryConfigErr, RetryMode}; use aws_types::os_shim_internal::{Env, Fs}; use crate::provider_config::ProviderConfig; /// Load retry configuration properties from a profile file /// /// This provider will attempt to load AWS shared configuration, then read retry configuration properties /// from the active profile. /// /// # Examples /// /// **Loads 2 as the `max_attempts` to make when sending a request** /// ```ini /// [default] /// max_attempts = 2 /// ``` /// /// **Loads `standard` as the `retry_mode` _if and only if_ the `other` profile is selected.** /// /// ```ini /// [profile other] /// retry_mode = standard /// ``` /// /// This provider is part of the [default retry_config provider chain](crate::default_provider::retry_config). #[derive(Debug, Default)] pub struct ProfileFileRetryConfigProvider { fs: Fs, env: Env, profile_override: Option<String>, } /// Builder for [ProfileFileRetryConfigProvider] #[derive(Default)] pub struct Builder { config: Option<ProviderConfig>, profile_override: Option<String>, } impl Builder { /// Override the configuration for this provider pub fn configure(mut self, config: &ProviderConfig) -> Self { self.config = Some(config.clone()); self } /// Override the profile name used by the [ProfileFileRetryConfigProvider] pub fn profile_name(mut self, profile_name: impl Into<String>) -> Self { self.profile_override = Some(profile_name.into()); self } /// Build a [ProfileFileRetryConfigProvider] from this builder pub fn build(self) -> ProfileFileRetryConfigProvider { let conf = self.config.unwrap_or_default(); ProfileFileRetryConfigProvider { env: conf.env(), fs: conf.fs(), profile_override: self.profile_override, } } } impl ProfileFileRetryConfigProvider { /// Create a new 
[ProfileFileRetryConfigProvider] /// /// To override the selected profile, set the `AWS_PROFILE` environment variable or use the [Builder]. pub fn new() -> Self { Self { fs: Fs::real(), env: Env::real(), profile_override: None, } } /// [Builder] to construct a [ProfileFileRetryConfigProvider] pub fn builder() -> Builder { Builder::default() } /// Attempt to create a new RetryConfigBuilder from a profile file. pub async fn retry_config_builder(&self) -> Result<RetryConfigBuilder, RetryConfigErr> { let profile = match super::parser::load(&self.fs, &self.env).await { Ok(profile) => profile, Err(err) => { tracing::warn!(err = %err, "failed to parse profile"); // return an empty builder return Ok(RetryConfigBuilder::new()); } }; let selected_profile = self .profile_override .as_deref() .unwrap_or_else(|| profile.selected_profile()); let selected_profile = match profile.get_profile(selected_profile) { Some(profile) => profile, None => { // Only warn if the user specified a profile name to use. 
if self.profile_override.is_some() { tracing::warn!("failed to get selected '{}' profile", selected_profile); } // return an empty builder return Ok(RetryConfigBuilder::new()); } }; let max_attempts = match selected_profile.get("max_attempts") { Some(max_attempts) => match max_attempts.parse::<u32>() { Ok(max_attempts) if max_attempts == 0 => { return Err(RetryConfigErr::MaxAttemptsMustNotBeZero { set_by: "aws profile".into(), }); } Ok(max_attempts) => Some(max_attempts), Err(source) => { return Err(RetryConfigErr::FailedToParseMaxAttempts { set_by: "aws profile".into(), source, }); } }, None => None, }; let retry_mode = match selected_profile.get("retry_mode") { Some(retry_mode) => match RetryMode::from_str(retry_mode) { Ok(retry_mode) => Some(retry_mode), Err(retry_mode_err) => { return Err(RetryConfigErr::InvalidRetryMode { set_by: "aws profile".into(), source: retry_mode_err, }); } }, None => None, }; let mut retry_config_builder = RetryConfigBuilder::new(); retry_config_builder .set_max_attempts(max_attempts) .set_mode(retry_mode); Ok(retry_config_builder) } }
32.796178
110
0.580695
3ae70ca854bdbf5c6083978bdf41923cd0d5200a
1,380
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use target::TargetOptions; use std::default::Default; pub fn opts() -> TargetOptions { TargetOptions { dynamic_linking: true, executables: true, morestack: true, linker_is_gnu: true, has_rpath: true, pre_link_args: vec![ // We want to be able to strip as much executable code as possible // from the linker command line, and this flag indicates to the // linker that it can avoid linking in dynamic libraries that don't // actually satisfy any symbols up to that point (as with many other // resolutions the linker does). This option only applies to all // following libraries so we're sure to pass it as one of the first // arguments. "-Wl,--as-needed".to_string(), ], position_independent_executables: true, .. Default::default() } }
39.428571
80
0.654348
564adfa68208f82b0c2cf91c5cade2f22979a24a
1,728
//! A helper that wraps a request to overwrite the method. use std::{io::Read, net::SocketAddr}; use conduit::{Method, RequestExt}; type RequestMutRef<'a> = &'a mut (dyn RequestExt + 'a); pub struct RequestProxy<'a> { other: RequestMutRef<'a>, method: conduit::Method, } impl<'a> RequestProxy<'a> { /// Wrap a request and overwrite the method with the provided value. pub(crate) fn rewrite_method(req: RequestMutRef<'a>, method: Method) -> Self { RequestProxy { other: req, method } } } impl<'a> RequestExt for RequestProxy<'a> { fn method(&self) -> &conduit::Method { &self.method } fn path(&self) -> &str { self.other.path() } fn path_mut(&mut self) -> &mut String { self.other.path_mut() } // Pass-through fn http_version(&self) -> conduit::Version { self.other.http_version() } fn scheme(&self) -> conduit::Scheme { self.other.scheme() } fn host(&self) -> conduit::Host<'_> { self.other.host() } fn virtual_root(&self) -> Option<&str> { self.other.virtual_root() } fn query_string(&self) -> Option<&str> { self.other.query_string() } fn remote_addr(&self) -> SocketAddr { self.other.remote_addr() } fn content_length(&self) -> Option<u64> { self.other.content_length() } fn headers(&self) -> &conduit::HeaderMap { self.other.headers() } fn body(&mut self) -> &mut dyn Read { self.other.body() } fn extensions(&self) -> &conduit::Extensions { self.other.extensions() } fn mut_extensions(&mut self) -> &mut conduit::Extensions { self.other.mut_extensions() } }
25.043478
82
0.586227
f8206daf2033e06d832b6842dc7acd790701369b
11,635
/// A module for calculating IBU using Tinseth Formula: /// /// IBUs = decimal alpha acid utilization * mg/l of added alpha acids /// /// /// See: /// https://www.realbeer.com/hops/research.html /// http://www.backtoschoolbrewing.com/blog/2016/9/5/how-to-calculate-ibus /// https://straighttothepint.com/ibu-calculator/ /// https://www.brewersfriend.com/2010/02/27/hops-alpha-acid-table-2009/ /// use crate::conversions::RelativeDensity; use measurements::{Mass, Volume}; /// Internal function to calculate Aplha Acid Utilization (Tinseth formula) /// given Boil Time and Wort Original Gravity /// # Arguments /// /// * `wort_gravity`: wort Original Gravity /// * `time_mins`: boil time (min) /// fn _calculate_utilization(wort_gravity: &RelativeDensity, time_mins: u32) -> f64 { let bigness_factor = 1.65 * f64::powf(0.000125, wort_gravity.as_specific_gravity() - 1.0); let boil_time_factor = (1.0 - f64::exp(-0.04 * (time_mins as f64))) / 4.15; bigness_factor * boil_time_factor } /// Internal function to calculate IBU contributed by single hop addition, /// /// # Arguments /// /// * `weight`: weight of the hop addition /// * `alpha_acid_percentage`: AA% of the hop variety /// * `time_mins`: boil time (min) /// * `finished_volume_liters`: volume of the final wort (liters) /// * `gravity_boil`: the wort original gravity /// fn _calculate_ibu_single_hop( weight: &Mass, alpha_acid_percentage: f64, time_mins: u32, finished_volume: &Volume, gravity_boil: &RelativeDensity, utilization_multiplier: f64, ) -> f64 { let mg_per_liter_added_aa = (alpha_acid_percentage * weight.as_grams() * 1000.0) / finished_volume.as_liters(); let decimal_alpha_acid_utilization = _calculate_utilization(gravity_boil, time_mins) * utilization_multiplier; mg_per_liter_added_aa * decimal_alpha_acid_utilization } /// An enum of hop types #[derive(Debug, Copy, Clone)] pub enum HopAdditionType { /// Whole, default Whole, // Plugs, same utilization as whole hops Plug, /// Pellets, 10% higher utilization Pellet, } 
impl Default for HopAdditionType { fn default() -> Self { HopAdditionType::Whole } } /// A representation of one hop addition /// /// Example: /// ``` /// use rustybeer::calculators::ibu::{HopAddition, HopAdditionType}; /// use rustybeer::measurements::Mass; /// // Centennial (8.5% AA) Pellets: 7g - 60 min /// HopAddition { /// weight: Mass::from_grams(7.), /// alpha_acid_percentage: 0.085, /// time_mins: 60, /// hop_type: HopAdditionType::Pellet /// }; ///``` /// #[derive(Debug, Copy, Clone)] // TODO: YAML/JSON serialization pub struct HopAddition { /// the weight of the hop addition pub weight: Mass, /// AA% of the hop variety pub alpha_acid_percentage: f64, /// boil time (min) pub time_mins: u32, /// type of hop added: whole or pellets. [default() = HopAdditionType::Whole] pub hop_type: HopAdditionType, } impl HopAddition { pub fn new( weight: Mass, alpha_acid_percentage: f64, time_mins: u32, hop_type: HopAdditionType, ) -> Self { Self { weight, alpha_acid_percentage, time_mins, hop_type, } } } #[derive(Debug, Clone, PartialEq, Eq)] pub struct NegativeIbuError; /// Calculates IBU contributed by hop additions /// /// # Arguments /// /// * `hop_additions`: the added hops weights (g), AA%, and boil time (min) /// * `finished_volume_liters`: volume of the final wort (liters) /// * `gravity_boil`: wort original gravity /// /// # Examples /// /// * Target Batch Size: 20 liters /// * Original Gravity: 1.050 /// * Cascade (6.4% AA): 28g - 45 mins /// /// ``` /// use rustybeer::calculators::ibu::{HopAddition, calculate_ibu}; /// use rustybeer::assert_approx; /// use rustybeer::measurements::{Mass, Volume}; /// use rustybeer::conversions::RelativeDensity; /// /// assert_approx!( /// 18.9723, /// calculate_ibu( /// vec![HopAddition::new(Mass::from_grams(28.0), 0.064, 45, Default::default())], /// &Volume::from_liters(20.0), /// &RelativeDensity::from_specific_gravity(1.050) /// ) /// ); /// ``` /// pub fn calculate_ibu( hop_additions: Vec<HopAddition>, finished_volume: 
&Volume, gravity_boil: &RelativeDensity, ) -> f64 { hop_additions .into_iter() .map(|h| { _calculate_ibu_single_hop( &h.weight, h.alpha_acid_percentage, h.time_mins, finished_volume, gravity_boil, match h.hop_type { HopAdditionType::Whole | HopAdditionType::Plug => 1., HopAdditionType::Pellet => 1.1, }, ) }) .sum() } /// Calculates the needed amount of bittering hop to reach a target IBU for given variety alpha /// acid percentage and boil time of the hop /// /// # Arguments /// /// * `hop_additions`: Optional other flavor or aroma hops additions /// * `bittering_alpha_acid_percentage`: the alpha acid percentage of the bittering hop variety /// * `bittering_time_mins`: Optional boil time of the bittering hop (min) /// * `finished_volume_liters`: volume of the final wort (liters) /// * `gravity_boil`: wort original gravity /// * `target_ibu`: target IBU /// /// # Examples /// /// * Target Batch Size: 22 liters /// * Original Gravity: 1.058 /// * Target IBU: 17 /// * Centennial (8.5% AA) hops to be added for 60min boil /// * No other hops additions /// ``` /// use rustybeer::calculators::ibu::calculate_bittering_weight; /// use rustybeer::assert_approx; /// use rustybeer::measurements::Volume; /// use rustybeer::conversions::RelativeDensity; /// /// let bittering = calculate_bittering_weight( /// None, /// 0.085, /// None, /// &Volume::from_liters(22.), /// &RelativeDensity::from_specific_gravity(1.058), /// 17. 
/// ); /// assert_approx!( 20.4973, bittering.unwrap()); /// ``` /// /// With addition of 20gm of Centennial (8.5% AA) for 60min boil, /// can't get IBU down to just 10 /// /// ```{.should_panic} /// use rustybeer::calculators::ibu::calculate_bittering_weight; /// use rustybeer::calculators::ibu::HopAddition; /// use rustybeer::measurements::{Mass, Volume}; /// use rustybeer::conversions::RelativeDensity; /// /// let bittering = calculate_bittering_weight(Some(vec![ /// HopAddition { /// weight: Mass::from_grams(20.), /// alpha_acid_percentage: 0.085, /// time_mins: 60, /// hop_type: Default::default() /// }]), /// 0.085, /// None, /// &Volume::from_liters(22.), /// &RelativeDensity::from_specific_gravity(1.058), /// 10. /// ); /// /// bittering.expect("Too low IBU target"); /// ``` /// pub fn calculate_bittering_weight( hop_additions: Option<Vec<HopAddition>>, bittering_alpha_acid_percentage: f64, bittering_time_mins: Option<u32>, finished_volume: &Volume, gravity_boil: &RelativeDensity, target_ibu: f64, ) -> Result<f64, NegativeIbuError> { let bittering_ibu = match hop_additions { Some(h) => target_ibu - calculate_ibu(h, finished_volume, gravity_boil), None => target_ibu, }; match bittering_ibu.is_sign_positive() { true => { let bittering_time = bittering_time_mins.unwrap_or(60); let bittering_alpha_acid_utilization = _calculate_utilization(gravity_boil, bittering_time); let bittering_weight = (bittering_ibu * finished_volume.as_liters()) / (bittering_alpha_acid_utilization * bittering_alpha_acid_percentage) / 1000.0; Ok(bittering_weight) } false => Err(NegativeIbuError), } } #[cfg(test)] pub mod tests { use super::{ calculate_bittering_weight, calculate_ibu, HopAddition, HopAdditionType, NegativeIbuError, _calculate_ibu_single_hop, _calculate_utilization, }; use crate::assert_approx; use crate::conversions::RelativeDensity; use measurements::{Mass, Volume}; #[test] fn utilization() { let test_vector = 
crate::calculators::test_vectors::utilization_test_vector::get_vector(); for (og_idx, og) in test_vector.og.iter().enumerate() { for (boiling_time_idx, boiling_time) in test_vector.boiling_time.iter().enumerate() { let ut = _calculate_utilization( &RelativeDensity::from_specific_gravity(*og), *boiling_time, ); // Only three decimals provided in test vector approx::assert_relative_eq!( test_vector.utilization[boiling_time_idx][og_idx], ut, epsilon = 1e-3 ); } } } #[test] fn single_hop_ibu() { assert_approx!( 2.8808, _calculate_ibu_single_hop( &Mass::from_grams(7.0), 0.085, 15, &Volume::from_liters(22.0), &RelativeDensity::from_specific_gravity(1.058), 1. ) ); } #[test] fn multiple_hops_ibu() { assert_approx!( 5.7615, calculate_ibu( vec![ HopAddition::new(Mass::from_grams(7.0), 0.085, 15, HopAdditionType::Whole), HopAddition::new(Mass::from_grams(7.0), 0.085, 15, HopAdditionType::Whole) ], &Volume::from_liters(22.0), &RelativeDensity::from_specific_gravity(1.058) ), ); } #[test] fn pellet_hops_ibu() { // 6.336 = 5.76 * 1.1 assert_approx!( 6.3376, calculate_ibu( vec![ HopAddition::new(Mass::from_grams(7.0), 0.085, 15, HopAdditionType::Pellet), HopAddition::new(Mass::from_grams(7.0), 0.085, 15, HopAdditionType::Pellet) ], &Volume::from_liters(22.0), &RelativeDensity::from_specific_gravity(1.058) ), ); } #[test] #[should_panic] fn negative_ibu() { calculate_bittering_weight( Some(vec![HopAddition::new( Mass::from_grams(20.0), 0.085, 60, HopAdditionType::Whole, )]), 0.085, None, &Volume::from_liters(22.0), &RelativeDensity::from_specific_gravity(1.058), 10., ) .expect("too low IBU"); } #[test] fn bitter_hops_weight() -> Result<(), NegativeIbuError> { assert_approx!( 13.2611, calculate_bittering_weight( Some(vec![ HopAddition::new(Mass::from_grams(7.0), 0.085, 15, HopAdditionType::Whole), HopAddition::new(Mass::from_grams(7.0), 0.085, 15, HopAdditionType::Plug) ]), 0.085, Some(60), &Volume::from_liters(22.0), &RelativeDensity::from_specific_gravity(1.058), 16.76, )?, ); 
Ok(()) } #[test] fn zero_hops_ibu() { assert_approx!( 0., calculate_ibu( vec![], &Volume::from_liters(22.0), &RelativeDensity::from_specific_gravity(1.058) ) ); } }
30.142487
98
0.58728
481153efc646a0d0413b43bf9f2970ccb08810da
1,629
fn main() { // In general, the `{}` will be automatically replaced with any // arguments. These will be stringified. println!("{} days", 31); // Without a suffix, 31 becomes an i32. You can change what type 31 is // by providing a suffix. The number 31i64 for example has the type i64. // There are various optional patterns this works with. Positional // arguments can be used. println!("{0}, this is {1}. {1}, this is {0}", "Alice", "Bob"); // As can named arguments. println!("{subject} {verb} {object}", object="the lazy dog", subject="the quick brown fox", verb="jumps over"); // Special formatting can be specified after a `:`. println!("{} of {:b} people know binary, the other half doesn't", 1, 2); // You can right-align text with a specified width. This will output // " 1". 5 white spaces and a "1". println!("{number:>width$}", number=1, width=6); // You can pad numbers with extra zeroes. This will output "000001". println!("{number:>0width$}", number=1, width=6); // Rust even checks to make sure the correct number of arguments are // used. println!("My name is {0}, {1} {0}", "Bond","James"); // FIXME ^ Add the missing argument: "James" // Create a structure named `Structure` which contains an `i32`. #[allow(dead_code)] struct Structure(i32); // However, custom types such as this structure require more complicated // handling. This will not work. // println!("This struct {} won't print...", Structure(3)); // FIXME ^ Comment out this line. }
37.883721
76
0.617557
acd95f53b364f3350c1df71d95fe19f492d9cd0c
74,505
//! `matchingrules` module includes all the classes to deal with V3/V4 spec matchers

use std::{fmt, mem};
use std::cmp::Ordering;
use std::collections::{HashMap, HashSet};
#[cfg(test)] use std::collections::hash_map::DefaultHasher;
use std::fmt::{Display, Formatter};
use std::hash::{Hash, Hasher};
use std::str::FromStr;

use anyhow::{anyhow, Context as _};
#[cfg(test)] use expectest::prelude::*;
use itertools::Either;
use log::*;
use maplit::hashmap;
use serde_json::{json, Map, Value};

use crate::{HttpStatus, PactSpecification};
use crate::generators::{Generator, GeneratorCategory, Generators};
use crate::json_utils::{json_to_num, json_to_string};
use crate::matchingrules::expressions::{MatchingReference, MatchingRuleDefinition, ValueType};
use crate::path_exp::DocPath;

pub mod expressions;

/// Extracts an optional generator from the `"generator"` attribute of a matcher's
/// JSON object. Returns `None` when the attribute is missing, is not a JSON
/// object, has no `"type"` key, or the type value is not a JSON string.
fn generator_from_json(json: &Map<String, Value>) -> Option<Generator> {
  if let Some(generator_json) = json.get("generator") {
    match generator_json {
      Value::Object(attributes) => if let Some(generator_type) = attributes.get("type") {
        match generator_type {
          // Delegate construction to Generator::from_map, which may itself return None
          Value::String(generator_type) => Generator::from_map(generator_type.as_str(), attributes),
          _ => None
        }
      } else {
        None
      }
      _ => None
    }
  } else {
    None
  }
}

/// Parses the `"rules"` attribute (used by the eachKey/eachValue matchers) into a
/// list of matching rules. A missing attribute yields an empty list; a non-array
/// value, or any entry that fails to parse, yields an error.
/// NOTE(review): matching references (`Either::Right`) are never produced here —
/// every successfully parsed entry is wrapped as `Either::Left`.
fn rules_from_json(attributes: &Map<String, Value>) -> anyhow::Result<Vec<Either<MatchingRule, MatchingReference>>> {
  match attributes.get("rules") {
    Some(rules) => match rules {
      Value::Array(rules) => {
        let rules = rules.iter()
          .map(|rule| MatchingRule::from_json(rule));
        // The lazy iterator is cloned so it can first be scanned for a parse
        // failure and then, if none was found, consumed again to collect the rules
        if let Some(err) = rules.clone().find(|rule| rule.is_err()) {
          Err(anyhow!("Matching rule configuration is not correct - {}", err.unwrap_err()))
        } else {
          Ok(rules.map(|rule| Either::Left(rule.unwrap())).collect())
        }
      }
      _ => Err(anyhow!("EachKey matcher config is not valid. Was expected an array but got {}", rules))
    }
    None => Ok(vec![])
  }
}

/// Set of all matching rules
#[derive(Debug, Clone, Eq)]
pub enum MatchingRule {
  /// Matcher using equals
  Equality,
  /// Match using a regular expression
  Regex(String),
  /// Match using the type of the value
  Type,
  /// Match using the type of the value and a minimum length for collections
  MinType(usize),
  /// Match using the type of the value and a maximum length for collections
  MaxType(usize),
  /// Match using the type of the value and a minimum and maximum length for collections
  MinMaxType(usize, usize),
  /// Match the value using a timestamp pattern
  Timestamp(String),
  /// Match the value using a time pattern
  Time(String),
  /// Match the value using a date pattern
  Date(String),
  /// Match if the value includes the given value
  Include(String),
  /// Match if the value is a number
  Number,
  /// Match if the value is an integer number
  Integer,
  /// Match if the value is a decimal number
  Decimal,
  /// Match if the value is a null value (this is content specific, for JSON will match a JSON null)
  Null,
  /// Match binary data by its content type (magic file check)
  ContentType(String),
  /// Match array items in any order against a list of variants
  ArrayContains(Vec<(usize, MatchingRuleCategory, HashMap<DocPath, Generator>)>),
  /// Matcher for values in a map, ignoring the keys
  Values,
  /// Matches boolean values (booleans and the string values `true` and `false`)
  Boolean,
  /// Request status code matcher
  StatusCode(HttpStatus),
  /// Value must be the same type and not empty
  NotEmpty,
  /// Value must a semantic version
  Semver,
  /// Matcher for keys in a map
  EachKey(MatchingRuleDefinition),
  /// Matcher for values in a collection. This delegates to the Values matcher for maps.
EachValue(MatchingRuleDefinition) } impl MatchingRule { /// Builds a `MatchingRule` from a `Value` struct pub fn from_json(value: &Value) -> anyhow::Result<MatchingRule> { match value { Value::Object(m) => match m.get("match") { Some(match_val) => { let val = json_to_string(match_val); MatchingRule::create(val.as_str(), value) } None => if let Some(val) = m.get("regex") { Ok(MatchingRule::Regex(json_to_string(val))) } else if let Some(val) = json_to_num(m.get("min").cloned()) { Ok(MatchingRule::MinType(val)) } else if let Some(val) = json_to_num(m.get("max").cloned()) { Ok(MatchingRule::MaxType(val)) } else if let Some(val) = m.get("timestamp") { Ok(MatchingRule::Timestamp(json_to_string(val))) } else if let Some(val) = m.get("time") { Ok(MatchingRule::Time(json_to_string(val))) } else if let Some(val) = m.get("date") { Ok(MatchingRule::Date(json_to_string(val))) } else { Err(anyhow!("Matching rule missing 'match' field and unable to guess its type")) } }, _ => Err(anyhow!("Matching rule JSON is not an Object")), } } /// Converts this `MatchingRule` to a `Value` struct pub fn to_json(&self) -> Value { match self { MatchingRule::Equality => json!({ "match": "equality" }), MatchingRule::Regex(ref r) => json!({ "match": "regex", "regex": r.clone() }), MatchingRule::Type => json!({ "match": "type" }), MatchingRule::MinType(min) => json!({ "match": "type", "min": json!(*min as u64) }), MatchingRule::MaxType(max) => json!({ "match": "type", "max": json!(*max as u64) }), MatchingRule::MinMaxType(min, max) => json!({ "match": "type", "min": json!(*min as u64), "max": json!(*max as u64) }), MatchingRule::Timestamp(ref t) => json!({ "match": "timestamp", "timestamp": Value::String(t.clone()) }), MatchingRule::Time(ref t) => json!({ "match": "time", "time": Value::String(t.clone()) }), MatchingRule::Date(ref d) => json!({ "match": "date", "date": Value::String(d.clone()) }), MatchingRule::Include(ref s) => json!({ "match": "include", "value": Value::String(s.clone()) }), 
MatchingRule::Number => json!({ "match": "number" }), MatchingRule::Integer => json!({ "match": "integer" }), MatchingRule::Decimal => json!({ "match": "decimal" }), MatchingRule::Boolean => json!({ "match": "boolean" }), MatchingRule::Null => json!({ "match": "null" }), MatchingRule::ContentType(ref r) => json!({ "match": "contentType", "value": Value::String(r.clone()) }), MatchingRule::ArrayContains(variants) => json!({ "match": "arrayContains", "variants": variants.iter().map(|(index, rules, generators)| { let mut json = json!({ "index": index, "rules": rules.to_v3_json() }); if !generators.is_empty() { json["generators"] = Value::Object(generators.iter() .map(|(k, gen)| { if let Some(json) = gen.to_json() { Some((String::from(k), json)) } else { None } }) .filter(|item| item.is_some()) .map(|item| item.unwrap()) .collect()) } json }).collect::<Vec<Value>>() }), MatchingRule::Values => json!({ "match": "values" }), MatchingRule::StatusCode(status) => json!({ "match": "statusCode", "status": status.to_json() }), MatchingRule::NotEmpty => json!({ "match": "notEmpty" }), MatchingRule::Semver => json!({ "match": "semver" }), MatchingRule::EachKey(definition) => { let mut json = json!({ "match": "eachKey", "rules": definition.rules.iter() .map(|rule| rule.as_ref().expect_left("Expected a matching rule, found an unresolved reference").to_json()) .collect::<Vec<Value>>() }); let map = json.as_object_mut().unwrap(); if !definition.value.is_empty() { map.insert("value".to_string(), Value::String(definition.value.clone())); } if let Some(generator) = &definition.generator { map.insert("generator".to_string(), generator.to_json().unwrap_or_default()); } Value::Object(map.clone()) } MatchingRule::EachValue(definition) => { let mut json = json!({ "match": "eachValue", "rules": definition.rules.iter() .map(|rule| rule.as_ref().expect_left("Expected a matching rule, found an unresolved reference").to_json()) .collect::<Vec<Value>>() }); let map = 
json.as_object_mut().unwrap(); if !definition.value.is_empty() { map.insert("value".to_string(), Value::String(definition.value.clone())); } if let Some(generator) = &definition.generator { map.insert("generator".to_string(), generator.to_json().unwrap_or_default()); } Value::Object(map.clone()) } } } /// If there are any generators associated with this matching rule pub fn has_generators(&self) -> bool { match self { MatchingRule::ArrayContains(variants) => variants.iter() .any(|(_, _, generators)| !generators.is_empty()), _ => false } } /// Return the generators for this rule pub fn generators(&self) -> Vec<Generator> { match self { MatchingRule::ArrayContains(variants) => vec![Generator::ArrayContains(variants.clone())], _ => vec![] } } /// Returns the type name of this matching rule pub fn name(&self) -> String { match self { MatchingRule::Equality => "equality", MatchingRule::Regex(_) => "regex", MatchingRule::Type => "type", MatchingRule::MinType(_) => "min-type", MatchingRule::MaxType(_) => "max-type", MatchingRule::MinMaxType(_, _) => "min-max-type", MatchingRule::Timestamp(_) => "datetime", MatchingRule::Time(_) => "time", MatchingRule::Date(_) => "date", MatchingRule::Include(_) => "include", MatchingRule::Number => "number", MatchingRule::Integer => "integer", MatchingRule::Decimal => "decimal", MatchingRule::Null => "null", MatchingRule::ContentType(_) => "content-type", MatchingRule::ArrayContains(_) => "array-contains", MatchingRule::Values => "values", MatchingRule::Boolean => "boolean", MatchingRule::StatusCode(_) => "status-code", MatchingRule::NotEmpty => "not-empty", MatchingRule::Semver => "semver", MatchingRule::EachKey(_) => "each-key", MatchingRule::EachValue(_) => "each-value" }.to_string() } /// Returns the type name of this matching rule pub fn values(&self) -> HashMap<&'static str, Value> { let empty = hashmap!{}; match self { MatchingRule::Equality => empty, MatchingRule::Regex(r) => hashmap!{ "regex" => Value::String(r.clone()) }, 
MatchingRule::Type => empty, MatchingRule::MinType(min) => hashmap!{ "min" => json!(min) }, MatchingRule::MaxType(max) => hashmap!{ "max" => json!(max) }, MatchingRule::MinMaxType(min, max) => hashmap!{ "min" => json!(min), "max" => json!(max) }, MatchingRule::Timestamp(f) => hashmap!{ "format" => Value::String(f.clone()) }, MatchingRule::Time(f) => hashmap!{ "format" => Value::String(f.clone()) }, MatchingRule::Date(f) => hashmap!{ "format" => Value::String(f.clone()) }, MatchingRule::Include(s) => hashmap!{ "value" => Value::String(s.clone()) }, MatchingRule::Number => empty, MatchingRule::Integer => empty, MatchingRule::Decimal => empty, MatchingRule::Null => empty, MatchingRule::ContentType(ct) => hashmap!{ "value" => Value::String(ct.clone()) }, MatchingRule::ArrayContains(variants) => hashmap! { "variants" => variants.iter().map(|(variant, rules, gens)| { Value::Array(vec![json!(variant), rules.to_v3_json(), Value::Object(gens.iter().map(|(key, gen)| { (key.to_string(), gen.to_json().unwrap()) }).collect())]) }).collect() }, MatchingRule::Values => empty, MatchingRule::Boolean => empty, MatchingRule::StatusCode(sc) => hashmap!{ "status" => sc.to_json() }, MatchingRule::NotEmpty => empty, MatchingRule::Semver => empty, MatchingRule::EachKey(definition) | MatchingRule::EachValue(definition) => { let mut map = hashmap! 
{ "rules" => Value::Array(definition.rules.iter() .map(|rule| rule.as_ref().expect_left("Expected a matching rule, found an unresolved reference").to_json()) .collect()) }; if !definition.value.is_empty() { map.insert("value", Value::String(definition.value.clone())); } if let Some(generator) = &definition.generator { map.insert("generator", generator.to_json().unwrap_or_default()); } map } } } /// Creates a `MatchingRule` from a type and a map of attributes pub fn create(rule_type: &str, attributes: &Value) -> anyhow::Result<MatchingRule> { trace!("rule_type: {}, attributes: {}", rule_type, attributes); let attributes = match attributes { Value::Object(values) => values, _ => { error!("Matching rule attributes {} are not valid", attributes); return Err(anyhow!("Matching rule attributes {} are not valid", attributes)); } }; match rule_type { "regex" => match attributes.get(rule_type) { Some(s) => Ok(MatchingRule::Regex(json_to_string(s))), None => Err(anyhow!("Regex matcher missing 'regex' field")), }, "equality" => Ok(MatchingRule::Equality), "include" => match attributes.get("value") { Some(s) => Ok(MatchingRule::Include(json_to_string(s))), None => Err(anyhow!("Include matcher missing 'value' field")), }, "type" => match (json_to_num(attributes.get("min").cloned()), json_to_num(attributes.get("max").cloned())) { (Some(min), Some(max)) => Ok(MatchingRule::MinMaxType(min, max)), (Some(min), None) => Ok(MatchingRule::MinType(min)), (None, Some(max)) => Ok(MatchingRule::MaxType(max)), _ => Ok(MatchingRule::Type) }, "number" => Ok(MatchingRule::Number), "integer" => Ok(MatchingRule::Integer), "decimal" => Ok(MatchingRule::Decimal), "real" => Ok(MatchingRule::Decimal), "boolean" => Ok(MatchingRule::Boolean), "min" => match json_to_num(attributes.get(rule_type).cloned()) { Some(min) => Ok(MatchingRule::MinType(min)), None => Err(anyhow!("Min matcher missing 'min' field")), }, "max" => match json_to_num(attributes.get(rule_type).cloned()) { Some(max) => 
Ok(MatchingRule::MaxType(max)), None => Err(anyhow!("Max matcher missing 'max' field")), }, "timestamp" | "datetime" => match attributes.get("format").or_else(|| attributes.get(rule_type)) { Some(s) => Ok(MatchingRule::Timestamp(json_to_string(s))), None => Err(anyhow!("Timestamp matcher missing 'timestamp' or 'format' field")), }, "date" => match attributes.get("format").or_else(|| attributes.get(rule_type)) { Some(s) => Ok(MatchingRule::Date(json_to_string(s))), None => Err(anyhow!("Date matcher missing 'date' or 'format' field")), }, "time" => match attributes.get("format").or_else(|| attributes.get(rule_type)) { Some(s) => Ok(MatchingRule::Time(json_to_string(s))), None => Err(anyhow!("Time matcher missing 'time' or 'format' field")), }, "null" => Ok(MatchingRule::Null), "contentType" | "content-type" => match attributes.get("value") { Some(s) => Ok(MatchingRule::ContentType(json_to_string(s))), None => Err(anyhow!("ContentType matcher missing 'value' field")), }, "arrayContains" | "array-contains" => match attributes.get("variants") { Some(variants) => match variants { Value::Array(variants) => { let mut values = Vec::new(); for variant in variants { let index = json_to_num(variant.get("index").cloned()).unwrap_or_default(); let mut category = MatchingRuleCategory::empty("body"); if let Some(rules) = variant.get("rules") { category.add_rules_from_json(rules) .with_context(|| format!("Unable to parse matching rules: {:?}", rules))?; } else { category.add_rule( DocPath::empty(), MatchingRule::Equality, RuleLogic::And); } let generators = if let Some(generators_json) = variant.get("generators") { let mut g = Generators::default(); let cat = GeneratorCategory::BODY; if let Value::Object(map) = generators_json { for (k, v) in map { if let Value::Object(ref map) = v { let path = DocPath::new(k)?; g.parse_generator_from_map(&cat, map, Some(path)); } } } g.categories.get(&cat).cloned().unwrap_or_default() } else { HashMap::default() }; values.push((index, category, 
generators)); } Ok(MatchingRule::ArrayContains(values)) } _ => Err(anyhow!("ArrayContains matcher 'variants' field is not an Array")), } None => Err(anyhow!("ArrayContains matcher missing 'variants' field")), } "values" => Ok(MatchingRule::Values), "statusCode" | "status-code" => match attributes.get("status") { Some(s) => { let status = HttpStatus::from_json(s) .context("Unable to parse status code for StatusCode matcher")?; Ok(MatchingRule::StatusCode(status)) }, None => Ok(MatchingRule::StatusCode(HttpStatus::Success)) }, "notEmpty" | "not-empty" => Ok(MatchingRule::NotEmpty), "semver" => Ok(MatchingRule::Semver), "eachKey" | "each-key" => { let generator = generator_from_json(&attributes); let value = attributes.get("value").cloned().unwrap_or_default(); let rules = rules_from_json(attributes)?; let definition = MatchingRuleDefinition { value: json_to_string(&value), value_type: ValueType::Unknown, rules, generator }; Ok(MatchingRule::EachKey(definition)) } "eachValue" | "each-value" => { let generator = generator_from_json(&attributes); let value = attributes.get("value").cloned().unwrap_or_default(); let rules = rules_from_json(attributes)?; let definition = MatchingRuleDefinition { value: json_to_string(&value), value_type: ValueType::Unknown, rules, generator }; Ok(MatchingRule::EachValue(definition)) } _ => Err(anyhow!("{} is not a valid matching rule type", rule_type)), } } } impl Hash for MatchingRule { fn hash<H: Hasher>(&self, state: &mut H) { mem::discriminant(self).hash(state); match self { MatchingRule::Regex(s) => s.hash(state), MatchingRule::MinType(min) => min.hash(state), MatchingRule::MaxType(max) => max.hash(state), MatchingRule::MinMaxType(min, max) => { min.hash(state); max.hash(state); } MatchingRule::Timestamp(format) => format.hash(state), MatchingRule::Time(format) => format.hash(state), MatchingRule::Date(format) => format.hash(state), MatchingRule::Include(str) => str.hash(state), MatchingRule::ContentType(str) => str.hash(state), 
MatchingRule::ArrayContains(variants) => { for (index, rules, generators) in variants { index.hash(state); rules.hash(state); for (s, g) in generators { s.hash(state); g.hash(state); } } } _ => () } } } impl PartialEq for MatchingRule { fn eq(&self, other: &Self) -> bool { match (self, other) { (MatchingRule::Regex(s1), MatchingRule::Regex(s2)) => s1 == s2, (MatchingRule::MinType(min1), MatchingRule::MinType(min2)) => min1 == min2, (MatchingRule::MaxType(max1), MatchingRule::MaxType(max2)) => max1 == max2, (MatchingRule::MinMaxType(min1, max1), MatchingRule::MinMaxType(min2, max2)) => min1 == min2 && max1 == max2, (MatchingRule::Timestamp(format1), MatchingRule::Timestamp(format2)) => format1 == format2, (MatchingRule::Time(format1), MatchingRule::Time(format2)) => format1 == format2, (MatchingRule::Date(format1), MatchingRule::Date(format2)) => format1 == format2, (MatchingRule::Include(str1), MatchingRule::Include(str2)) => str1 == str2, (MatchingRule::ContentType(str1), MatchingRule::ContentType(str2)) => str1 == str2, (MatchingRule::ArrayContains(variants1), MatchingRule::ArrayContains(variants2)) => variants1 == variants2, _ => mem::discriminant(self) == mem::discriminant(other) } } } #[cfg(test)] fn h(rule: &MatchingRule) -> u64 { let mut hasher = DefaultHasher::new(); rule.hash(&mut hasher); hasher.finish() } #[test] fn hash_and_partial_eq_for_matching_rule() { expect!(h(&MatchingRule::Equality)).to(be_equal_to(h(&MatchingRule::Equality))); expect!(MatchingRule::Equality).to(be_equal_to(MatchingRule::Equality)); expect!(MatchingRule::Equality).to_not(be_equal_to(MatchingRule::Type)); expect!(h(&MatchingRule::Type)).to(be_equal_to(h(&MatchingRule::Type))); expect!(MatchingRule::Type).to(be_equal_to(MatchingRule::Type)); expect!(h(&MatchingRule::Number)).to(be_equal_to(h(&MatchingRule::Number))); expect!(MatchingRule::Number).to(be_equal_to(MatchingRule::Number)); expect!(h(&MatchingRule::Integer)).to(be_equal_to(h(&MatchingRule::Integer))); 
expect!(MatchingRule::Integer).to(be_equal_to(MatchingRule::Integer)); expect!(h(&MatchingRule::Decimal)).to(be_equal_to(h(&MatchingRule::Decimal))); expect!(MatchingRule::Decimal).to(be_equal_to(MatchingRule::Decimal)); expect!(h(&MatchingRule::Null)).to(be_equal_to(h(&MatchingRule::Null))); expect!(MatchingRule::Null).to(be_equal_to(MatchingRule::Null)); let regex1 = MatchingRule::Regex("\\d+".into()); let regex2 = MatchingRule::Regex("\\w+".into()); expect!(h(&regex1)).to(be_equal_to(h(&regex1))); expect!(&regex1).to(be_equal_to(&regex1)); expect!(h(&regex1)).to_not(be_equal_to(h(&regex2))); expect!(&regex1).to_not(be_equal_to(&regex2)); let min1 = MatchingRule::MinType(100); let min2 = MatchingRule::MinType(200); expect!(h(&min1)).to(be_equal_to(h(&min1))); expect!(&min1).to(be_equal_to(&min1)); expect!(h(&min1)).to_not(be_equal_to(h(&min2))); expect!(&min1).to_not(be_equal_to(&min2)); let max1 = MatchingRule::MaxType(100); let max2 = MatchingRule::MaxType(200); expect!(h(&max1)).to(be_equal_to(h(&max1))); expect!(&max1).to(be_equal_to(&max1)); expect!(h(&max1)).to_not(be_equal_to(h(&max2))); expect!(&max1).to_not(be_equal_to(&max2)); let minmax1 = MatchingRule::MinMaxType(100, 200); let minmax2 = MatchingRule::MinMaxType(200, 200); expect!(h(&minmax1)).to(be_equal_to(h(&minmax1))); expect!(&minmax1).to(be_equal_to(&minmax1)); expect!(h(&minmax1)).to_not(be_equal_to(h(&minmax2))); expect!(&minmax1).to_not(be_equal_to(&minmax2)); let datetime1 = MatchingRule::Timestamp("yyyy-MM-dd HH:mm:ss".into()); let datetime2 = MatchingRule::Timestamp("yyyy-MM-ddTHH:mm:ss".into()); expect!(h(&datetime1)).to(be_equal_to(h(&datetime1))); expect!(&datetime1).to(be_equal_to(&datetime1)); expect!(h(&datetime1)).to_not(be_equal_to(h(&datetime2))); expect!(&datetime1).to_not(be_equal_to(&datetime2)); let date1 = MatchingRule::Date("yyyy-MM-dd".into()); let date2 = MatchingRule::Date("yy-MM-dd".into()); expect!(h(&date1)).to(be_equal_to(h(&date1))); 
expect!(&date1).to(be_equal_to(&date1)); expect!(h(&date1)).to_not(be_equal_to(h(&date2))); expect!(&date1).to_not(be_equal_to(&date2)); let time1 = MatchingRule::Time("HH:mm:ss".into()); let time2 = MatchingRule::Time("hh:mm:ss".into()); expect!(h(&time1)).to(be_equal_to(h(&time1))); expect!(&time1).to(be_equal_to(&time1)); expect!(h(&time1)).to_not(be_equal_to(h(&time2))); expect!(&time1).to_not(be_equal_to(&time2)); let inc1 = MatchingRule::Include("string one".into()); let inc2 = MatchingRule::Include("string two".into()); expect!(h(&inc1)).to(be_equal_to(h(&inc1))); expect!(&inc1).to(be_equal_to(&inc1)); expect!(h(&inc1)).to_not(be_equal_to(h(&inc2))); expect!(&inc1).to_not(be_equal_to(&inc2)); let content1 = MatchingRule::ContentType("one".into()); let content2 = MatchingRule::ContentType("two".into()); expect!(h(&content1)).to(be_equal_to(h(&content1))); expect!(&content1).to(be_equal_to(&content1)); expect!(h(&content1)).to_not(be_equal_to(h(&content2))); expect!(&content1).to_not(be_equal_to(&content2)); let ac1 = MatchingRule::ArrayContains(vec![]); let ac2 = MatchingRule::ArrayContains(vec![(0, MatchingRuleCategory::empty("body"), hashmap!{})]); let ac3 = MatchingRule::ArrayContains(vec![(1, MatchingRuleCategory::empty("body"), hashmap!{})]); let ac4 = MatchingRule::ArrayContains(vec![(0, MatchingRuleCategory::equality("body"), hashmap!{})]); let ac5 = MatchingRule::ArrayContains(vec![(0, MatchingRuleCategory::empty("body"), hashmap!{ DocPath::new_unwrap("A") => Generator::RandomBoolean })]); let ac6 = MatchingRule::ArrayContains(vec![ (0, MatchingRuleCategory::empty("body"), hashmap!{ DocPath::new_unwrap("A") => Generator::RandomBoolean }), (1, MatchingRuleCategory::empty("body"), hashmap!{ DocPath::new_unwrap("A") => Generator::RandomDecimal(10) }) ]); let ac7 = MatchingRule::ArrayContains(vec![ (0, MatchingRuleCategory::empty("body"), hashmap!{ DocPath::new_unwrap("A") => Generator::RandomBoolean }), (1, MatchingRuleCategory::equality("body"), 
hashmap!{ DocPath::new_unwrap("A") => Generator::RandomDecimal(10) }) ]); expect!(h(&ac1)).to(be_equal_to(h(&ac1))); expect!(h(&ac1)).to_not(be_equal_to(h(&ac2))); expect!(h(&ac1)).to_not(be_equal_to(h(&ac3))); expect!(h(&ac1)).to_not(be_equal_to(h(&ac4))); expect!(h(&ac1)).to_not(be_equal_to(h(&ac5))); expect!(h(&ac1)).to_not(be_equal_to(h(&ac6))); expect!(h(&ac1)).to_not(be_equal_to(h(&ac7))); expect!(h(&ac2)).to(be_equal_to(h(&ac2))); expect!(h(&ac2)).to_not(be_equal_to(h(&ac1))); expect!(h(&ac2)).to_not(be_equal_to(h(&ac3))); expect!(h(&ac2)).to_not(be_equal_to(h(&ac4))); expect!(h(&ac2)).to_not(be_equal_to(h(&ac5))); expect!(h(&ac2)).to_not(be_equal_to(h(&ac6))); expect!(h(&ac2)).to_not(be_equal_to(h(&ac7))); expect!(h(&ac3)).to(be_equal_to(h(&ac3))); expect!(h(&ac3)).to_not(be_equal_to(h(&ac2))); expect!(h(&ac3)).to_not(be_equal_to(h(&ac1))); expect!(h(&ac3)).to_not(be_equal_to(h(&ac4))); expect!(h(&ac3)).to_not(be_equal_to(h(&ac5))); expect!(h(&ac3)).to_not(be_equal_to(h(&ac6))); expect!(h(&ac3)).to_not(be_equal_to(h(&ac7))); expect!(h(&ac4)).to(be_equal_to(h(&ac4))); expect!(h(&ac4)).to_not(be_equal_to(h(&ac2))); expect!(h(&ac4)).to_not(be_equal_to(h(&ac3))); expect!(h(&ac4)).to_not(be_equal_to(h(&ac1))); expect!(h(&ac4)).to_not(be_equal_to(h(&ac5))); expect!(h(&ac4)).to_not(be_equal_to(h(&ac6))); expect!(h(&ac4)).to_not(be_equal_to(h(&ac7))); expect!(h(&ac5)).to(be_equal_to(h(&ac5))); expect!(h(&ac5)).to_not(be_equal_to(h(&ac2))); expect!(h(&ac5)).to_not(be_equal_to(h(&ac3))); expect!(h(&ac5)).to_not(be_equal_to(h(&ac4))); expect!(h(&ac5)).to_not(be_equal_to(h(&ac1))); expect!(h(&ac5)).to_not(be_equal_to(h(&ac6))); expect!(h(&ac5)).to_not(be_equal_to(h(&ac7))); expect!(h(&ac6)).to(be_equal_to(h(&ac6))); expect!(h(&ac6)).to_not(be_equal_to(h(&ac2))); expect!(h(&ac6)).to_not(be_equal_to(h(&ac3))); expect!(h(&ac6)).to_not(be_equal_to(h(&ac4))); expect!(h(&ac6)).to_not(be_equal_to(h(&ac5))); expect!(h(&ac6)).to_not(be_equal_to(h(&ac1))); 
expect!(h(&ac6)).to_not(be_equal_to(h(&ac7))); expect!(h(&ac7)).to(be_equal_to(h(&ac7))); expect!(h(&ac7)).to_not(be_equal_to(h(&ac2))); expect!(h(&ac7)).to_not(be_equal_to(h(&ac3))); expect!(h(&ac7)).to_not(be_equal_to(h(&ac4))); expect!(h(&ac7)).to_not(be_equal_to(h(&ac5))); expect!(h(&ac7)).to_not(be_equal_to(h(&ac6))); expect!(h(&ac7)).to_not(be_equal_to(h(&ac1))); expect!(&ac1).to(be_equal_to(&ac1)); expect!(&ac1).to_not(be_equal_to(&ac2)); expect!(&ac1).to_not(be_equal_to(&ac3)); expect!(&ac1).to_not(be_equal_to(&ac4)); expect!(&ac1).to_not(be_equal_to(&ac5)); expect!(&ac1).to_not(be_equal_to(&ac6)); expect!(&ac1).to_not(be_equal_to(&ac7)); expect!(&ac2).to(be_equal_to(&ac2)); expect!(&ac2).to_not(be_equal_to(&ac1)); expect!(&ac2).to_not(be_equal_to(&ac3)); expect!(&ac2).to_not(be_equal_to(&ac4)); expect!(&ac2).to_not(be_equal_to(&ac5)); expect!(&ac2).to_not(be_equal_to(&ac6)); expect!(&ac2).to_not(be_equal_to(&ac7)); expect!(&ac3).to(be_equal_to(&ac3)); expect!(&ac3).to_not(be_equal_to(&ac2)); expect!(&ac3).to_not(be_equal_to(&ac1)); expect!(&ac3).to_not(be_equal_to(&ac4)); expect!(&ac3).to_not(be_equal_to(&ac5)); expect!(&ac3).to_not(be_equal_to(&ac6)); expect!(&ac3).to_not(be_equal_to(&ac7)); expect!(&ac4).to(be_equal_to(&ac4)); expect!(&ac4).to_not(be_equal_to(&ac2)); expect!(&ac4).to_not(be_equal_to(&ac3)); expect!(&ac4).to_not(be_equal_to(&ac1)); expect!(&ac4).to_not(be_equal_to(&ac5)); expect!(&ac4).to_not(be_equal_to(&ac6)); expect!(&ac4).to_not(be_equal_to(&ac7)); expect!(&ac5).to(be_equal_to(&ac5)); expect!(&ac5).to_not(be_equal_to(&ac2)); expect!(&ac5).to_not(be_equal_to(&ac3)); expect!(&ac5).to_not(be_equal_to(&ac4)); expect!(&ac5).to_not(be_equal_to(&ac1)); expect!(&ac5).to_not(be_equal_to(&ac6)); expect!(&ac5).to_not(be_equal_to(&ac7)); expect!(&ac6).to(be_equal_to(&ac6)); expect!(&ac6).to_not(be_equal_to(&ac2)); expect!(&ac6).to_not(be_equal_to(&ac3)); expect!(&ac6).to_not(be_equal_to(&ac4)); expect!(&ac6).to_not(be_equal_to(&ac5)); 
expect!(&ac6).to_not(be_equal_to(&ac1)); expect!(&ac6).to_not(be_equal_to(&ac7)); expect!(&ac7).to(be_equal_to(&ac7)); expect!(&ac7).to_not(be_equal_to(&ac2)); expect!(&ac7).to_not(be_equal_to(&ac3)); expect!(&ac7).to_not(be_equal_to(&ac4)); expect!(&ac7).to_not(be_equal_to(&ac5)); expect!(&ac7).to_not(be_equal_to(&ac6)); expect!(&ac7).to_not(be_equal_to(&ac1)); } /// Enumeration to define how to combine rules #[derive(PartialEq, Debug, Clone, Copy, Eq, Hash, PartialOrd, Ord)] pub enum RuleLogic { /// All rules must match And, /// At least one rule must match Or } impl RuleLogic { fn to_json(&self) -> Value { Value::String(match self { RuleLogic::And => "AND", RuleLogic::Or => "OR" }.into()) } } /// Data structure for representing a list of rules and the logic needed to combine them #[derive(Debug, Clone, Eq)] pub struct RuleList { /// List of rules to apply pub rules: Vec<MatchingRule>, /// Rule logic to use to evaluate multiple rules pub rule_logic: RuleLogic, /// If this rule list has matched the exact path or if it has cascaded (i.e. 
is a parent) pub cascaded: bool } impl RuleList { /// Creates a new empty rule list pub fn empty(rule_logic: RuleLogic) -> RuleList { RuleList { rules: Vec::new(), rule_logic, cascaded: false } } /// Creates a default rule list with an equality matcher pub fn equality() -> RuleList { RuleList { rules: vec![ MatchingRule::Equality ], rule_logic: RuleLogic::And, cascaded: false } } /// Creates a new rule list with the single matching rule pub fn new(rule: MatchingRule) -> RuleList { RuleList { rules: vec![ rule ], rule_logic: RuleLogic::And, cascaded: false } } /// If the rule list is empty (has no matchers) pub fn is_empty(&self) -> bool { self.rules.is_empty() } fn to_v3_json(&self) -> Value { json!({ "combine": self.rule_logic.to_json(), "matchers": Value::Array(self.rules.iter().map(|matcher| matcher.to_json()).collect()) }) } fn to_v2_json(&self) -> Value { match self.rules.get(0) { Some(rule) => rule.to_json(), None => json!({}) } } /// If there is a type matcher defined for the rule list pub fn type_matcher_defined(&self) -> bool { self.rules.iter().any(|rule| match rule { MatchingRule::Type => true, MatchingRule::MinType(_) => true, MatchingRule::MaxType(_) => true, MatchingRule::MinMaxType(_, _) => true, _ => false }) } /// If the values matcher is defined for the rule list pub fn values_matcher_defined(&self) -> bool { self.rules.iter().any(|rule| match rule { MatchingRule::Values => true, _ => false }) } /// Add a matching rule to the rule list pub fn add_rule(&mut self, rule: &MatchingRule) { self.rules.push(rule.clone()) } /// If this rule list has matched the exact path or if it has cascaded (i.e. is a parent) pub fn as_cascaded(&self, b: bool) -> RuleList { RuleList { cascaded: b, .. 
self.clone() } } /// Add all the rules from the list to this list pub fn add_rules(&mut self, rules: &RuleList) { for rule in &rules.rules { self.add_rule(rule); } } } impl Hash for RuleList { fn hash<H: Hasher>(&self, state: &mut H) { self.rule_logic.hash(state); for rule in &self.rules { rule.hash(state); } } } impl PartialEq for RuleList { fn eq(&self, other: &Self) -> bool { self.rule_logic == other.rule_logic && self.rules == other.rules } } impl Default for RuleList { fn default() -> Self { RuleList::empty(RuleLogic::And) } } /// Category that the matching rule is applied to #[derive(PartialEq, Debug, Clone, Eq, Hash, PartialOrd, Ord)] pub enum Category { /// Request Method METHOD, /// Request Path PATH, /// Request/Response Header HEADER, /// Request Query Parameter QUERY, /// Body BODY, /// Response Status STATUS, /// Message contents (body) CONTENTS, /// Message metadata METADATA } impl FromStr for Category { type Err = String; fn from_str(s: &str) -> Result<Self, Self::Err> { match s.to_lowercase().as_str() { "method" => Ok(Category::METHOD), "path" => Ok(Category::PATH), "header" => Ok(Category::HEADER), "query" => Ok(Category::QUERY), "body" => Ok(Category::BODY), "status" => Ok(Category::STATUS), "contents" => Ok(Category::CONTENTS), "metadata" => Ok(Category::METADATA), _ => Err(format!("'{}' is not a valid Category", s)) } } } impl <'a> Into<&'a str> for Category { fn into(self) -> &'a str { match self { Category::METHOD => "method", Category::PATH => "path", Category::HEADER => "header", Category::QUERY => "query", Category::BODY => "body", Category::STATUS => "status", Category::CONTENTS => "contents", Category::METADATA => "metadata" } } } impl Into<String> for Category { fn into(self) -> String { self.to_string() } } impl <'a> From<&'a str> for Category { fn from(s: &'a str) -> Self { Category::from_str(s).unwrap_or_default() } } impl From<String> for Category { fn from(s: String) -> Self { Category::from_str(&s).unwrap_or_default() } } impl 
Default for Category { fn default() -> Self { Category::BODY } } impl Display for Category { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { let s: &str = self.clone().into(); write!(f, "{}", s) } } /// Data structure for representing a category of matching rules #[derive(Debug, Clone, Eq, Default)] pub struct MatchingRuleCategory { /// Name of the category pub name: Category, /// Matching rules for this category pub rules: HashMap<DocPath, RuleList> } impl MatchingRuleCategory { /// Creates an empty category pub fn empty<S>(name: S) -> MatchingRuleCategory where S: Into<Category> { MatchingRuleCategory { name: name.into(), rules: hashmap! {}, } } /// Creates a default category pub fn equality<S>(name: S) -> MatchingRuleCategory where S: Into<Category> { MatchingRuleCategory { name: name.into(), rules: hashmap! { DocPath::empty() => RuleList::equality() } } } /// If the matching rules in the category are empty pub fn is_empty(&self) -> bool { self.rules.is_empty() } /// If the matching rules in the category are not empty pub fn is_not_empty(&self) -> bool { !self.rules.is_empty() } /// Adds a rule from the Value representation pub fn rule_from_json( &mut self, key: DocPath, matcher_json: &Value, rule_logic: RuleLogic, ) -> anyhow::Result<()> { let matching_rule = MatchingRule::from_json(matcher_json) .with_context(|| format!("Could not parse matcher JSON {:?}", matcher_json))?; let rules = self.rules.entry(key) .or_insert_with(|| RuleList::empty(rule_logic)); rules.rules.push(matching_rule); Ok(()) } /// Adds a rule to this category pub fn add_rule( &mut self, key: DocPath, matcher: MatchingRule, rule_logic: RuleLogic, ) { let rules = self.rules.entry(key).or_insert_with(|| RuleList::empty(rule_logic)); rules.rules.push(matcher); } /// Filters the matchers in the category by the predicate, and returns a new category pub fn filter<F>(&self, predicate: F) -> MatchingRuleCategory where F : Fn(&(&DocPath, &RuleList)) -> bool { MatchingRuleCategory { name: 
self.name.clone(), rules: self.rules.iter().filter(predicate) .map(|(path, rules)| (path.clone(), rules.clone())).collect() } } fn max_by_path(&self, path: &[&str]) -> RuleList { self.rules.iter().map(|(k, v)| (k, v, k.path_weight(path))) .filter(|&(_, _, (w, _))| w > 0) .max_by_key(|&(_, _, (w, t))| w * t) .map(|(_, v, (_, t))| v.as_cascaded(t != path.len())) .unwrap_or_default() } /// Returns a JSON Value representation in V3 format pub fn to_v3_json(&self) -> Value { Value::Object(self.rules.iter().fold(serde_json::Map::new(), |mut map, (category, rulelist)| { map.insert(String::from(category), rulelist.to_v3_json()); map })) } /// Returns a JSON Value representation in V2 format pub fn to_v2_json(&self) -> HashMap<String, Value> { let mut map = hashmap!{}; match &self.name { Category::PATH => for (_, v) in self.rules.clone() { map.insert("$.path".to_string(), v.to_v2_json()); } Category::BODY => for (k, v) in self.rules.clone() { map.insert(String::from(k).replace("$", "$.body"), v.to_v2_json()); } _ => for (k, v) in &self.rules { map.insert(format!("$.{}.{}", self.name, k), v.to_v2_json()); } }; map } /// If there is a type matcher defined for the category pub fn type_matcher_defined(&self) -> bool { self.rules.values().any(|rule_list| rule_list.type_matcher_defined()) } /// If there is a values matcher defined in the rules pub fn values_matcher_defined(&self) -> bool { self.rules.values().any(|rule_list| rule_list.values_matcher_defined()) } /// If there is a matcher defined for the path pub fn matcher_is_defined(&self, path: &[&str]) -> bool { let result = !self.resolve_matchers_for_path(path).is_empty(); trace!("matcher_is_defined: for category {} and path {:?} -> {}", self.name.to_string(), path, result); result } /// filters this category with all rules that match the given path for categories that contain /// collections (eg. bodies, headers, query parameters). Returns self otherwise. 
pub fn resolve_matchers_for_path(&self, path: &[&str]) -> MatchingRuleCategory { match self.name { Category::HEADER| Category::QUERY | Category::BODY | Category::CONTENTS | Category::METADATA => self.filter(|(val, _)| { val.matches_path(path) }), _ => self.clone() } } /// Selects the best matcher for the given path by calculating a weighting for each one pub fn select_best_matcher(&self, path: &[&str]) -> RuleList { match self.name { Category::BODY | Category::METADATA => self.max_by_path(path), _ => self.resolve_matchers_for_path(path).as_rule_list() } } /// Returns this category as a matching rule list. Returns a None if there are no rules pub fn as_rule_list(&self) -> RuleList { self.rules.values().next().cloned().unwrap_or_default() } /// Adds the rules to the category from the provided JSON pub fn add_rules_from_json(&mut self, rules: &Value) -> anyhow::Result<()> { if self.name == Category::PATH && rules.get("matchers").is_some() { let rule_logic = match rules.get("combine") { Some(val) => if json_to_string(val).to_uppercase() == "OR" { RuleLogic::Or } else { RuleLogic::And }, None => RuleLogic::And }; if let Some(matchers) = rules.get("matchers") { if let Value::Array(array) = matchers { for matcher in array { self.rule_from_json(DocPath::empty(), &matcher, rule_logic)?; } } } } else if let Value::Object(m) = rules { if m.contains_key("matchers") { self.add_rule_list(DocPath::empty(), rules)?; } else { for (k, v) in m { self.add_rule_list(DocPath::new(k)?, v)?; } } } Ok(()) } fn add_rule_list(&mut self, k: DocPath, v: &Value) -> anyhow::Result<()> { let rule_logic = match v.get("combine") { Some(val) => if json_to_string(val).to_uppercase() == "OR" { RuleLogic::Or } else { RuleLogic::And }, None => RuleLogic::And }; if let Some(&Value::Array(ref array)) = v.get("matchers") { for matcher in array { self.rule_from_json(k.clone(), &matcher, rule_logic)?; } } Ok(()) } /// Returns any generators associated with these matching rules pub fn generators(&self) -> 
HashMap<DocPath, Generator> { let mut generators = hashmap!{}; for (base_path, rules) in &self.rules { for rule in &rules.rules { if rule.has_generators() { for generator in rule.generators() { generators.insert(base_path.clone(), generator); } } } } generators } /// Clones this category with the new name pub fn rename<S>(&self, name: S) -> Self where S: Into<Category> { MatchingRuleCategory { name: name.into(), .. self.clone() } } /// Add all the rules from the provided rules pub fn add_rules(&mut self, category: MatchingRuleCategory) { for (path, rules) in &category.rules { if self.rules.contains_key(path) { self.rules.get_mut(path).unwrap().add_rules(rules) } else { self.rules.insert(path.clone(), rules.clone()); } } } } impl Hash for MatchingRuleCategory { fn hash<H: Hasher>(&self, state: &mut H) { self.name.hash(state); for (k, v) in self.rules.clone() { k.hash(state); v.hash(state); } } } impl PartialEq for MatchingRuleCategory { fn eq(&self, other: &Self) -> bool { self.name == other.name && self.rules == other.rules } fn ne(&self, other: &Self) -> bool { self.name != other.name || self.rules != other.rules } } impl PartialOrd for MatchingRuleCategory { fn partial_cmp(&self, other: &Self) -> Option<Ordering> { self.name.partial_cmp(&other.name) } } impl Ord for MatchingRuleCategory { fn cmp(&self, other: &Self) -> Ordering { self.name.cmp(&other.name) } } /// Data structure for representing a collection of matchers #[derive(Debug, Clone, Eq)] pub struct MatchingRules { /// Categories of matching rules pub rules: HashMap<Category, MatchingRuleCategory> } impl MatchingRules { /// If the matching rules are empty (that is there are no rules assigned to any categories) pub fn is_empty(&self) -> bool { self.rules.values().all(|category| category.is_empty()) } /// If the matching rules are not empty (that is there is at least one rule assigned to a category) pub fn is_not_empty(&self) -> bool { self.rules.values().any(|category| category.is_not_empty()) } /// Adds 
the category to the map of rules pub fn add_category<S>(&mut self, category: S) -> &mut MatchingRuleCategory where S: Into<Category> + Clone { let category = category.into(); if !self.rules.contains_key(&category) { self.rules.insert(category.clone(), MatchingRuleCategory::empty(category.clone())); } self.rules.get_mut(&category).unwrap() } /// Returns all the category names in this rule set pub fn categories(&self) -> HashSet<Category> { self.rules.keys().cloned().collect() } /// Returns the category of rules for a given category name pub fn rules_for_category<S>(&self, category: S) -> Option<MatchingRuleCategory> where S: Into<Category> { self.rules.get(&category.into()).cloned() } /// If there is a matcher defined for the category and path pub fn matcher_is_defined<S>(&self, category: S, path: &Vec<&str>) -> bool where S: Into<Category> + Clone { let result = match self.resolve_matchers(category.clone().into(), path) { Some(ref rules) => !rules.is_empty(), None => false }; trace!("matcher_is_defined for category {} and path {:?} -> {}", category.into(), path, result); result } /// If there is a wildcard matcher defined for the category and path pub fn wildcard_matcher_is_defined<S>(&self, category: S, path: &Vec<&str>) -> bool where S: Into<Category> + Clone { match self.resolve_wildcard_matchers(category, path) { Some(ref rules) => !rules.filter(|&(val, _)| val.is_wildcard()).is_empty(), None => false } } /// If there is a type matcher defined for the category and path pub fn type_matcher_defined<S>(&self, category: S, path: &Vec<&str>) -> bool where S: Into<Category> + Display + Clone { let result = match self.resolve_matchers(category.clone(), path) { Some(ref rules) => rules.type_matcher_defined(), None => false }; trace!("type_matcher_defined for category {} and path {:?} -> {}", category.into(), path, result); result } /// Returns a `Category` filtered with all rules that match the given path. 
pub fn resolve_matchers<S>(&self, category: S, path: &Vec<&str>) -> Option<MatchingRuleCategory> where S: Into<Category> { self.rules_for_category(category) .map(|rules| rules.resolve_matchers_for_path(path)) } /// Returns a list of rules from the body category that match the given path pub fn resolve_body_matchers_by_path(&self, path: &Vec<&str>) -> RuleList { match self.rules_for_category("body") { Some(category) => category.max_by_path(path), None => RuleList::default() } } fn resolve_wildcard_matchers<S>(&self, category: S, path: &Vec<&str>) -> Option<MatchingRuleCategory> where S: Into<Category> + Clone { let category = category.into(); match category { Category::BODY => self.rules_for_category(Category::BODY).map(|category| category.filter(|&(val, _)| { val.matches_path_exactly(path) })), Category::HEADER | Category::QUERY => self.rules_for_category(category.clone()).map(|category| category.filter(|&(val, _)| { path.len() == 1 && Some(path[0]) == val.first_field() })), _ => self.rules_for_category(category) } } fn load_from_v2_map(&mut self, map: &serde_json::Map<String, Value> ) -> anyhow::Result<()> { for (key, v) in map { let path = key.split('.').collect::<Vec<&str>>(); if key.starts_with("$.body") { if key == "$.body" { self.add_v2_rule("body", DocPath::root(), v)?; } else { self.add_v2_rule("body", DocPath::new(format!("${}", &key[6..]))?, v)?; } } else if key.starts_with("$.headers") { self.add_v2_rule("header", DocPath::new(path[2])?, v)?; } else { self.add_v2_rule( path[1], if path.len() > 2 { DocPath::new(path[2])? 
} else { DocPath::empty() }, v, )?; } } Ok(()) } fn load_from_v3_map(&mut self, map: &serde_json::Map<String, Value> ) -> anyhow::Result<()> { for (k, v) in map { self.add_rules_private(k, v)?; } Ok(()) } fn add_rules_private<S: Into<String>>(&mut self, category_name: S, rules: &Value ) -> anyhow::Result<()> { let category = self.add_category(category_name.into()); category.add_rules_from_json(rules) } fn add_v2_rule<S: Into<String>>( &mut self, category_name: S, sub_category: DocPath, rule: &Value, ) -> anyhow::Result<()> { let category = self.add_category(category_name.into()); category.rule_from_json(sub_category, rule, RuleLogic::And) } fn to_v3_json(&self) -> Value { Value::Object(self.rules.iter().fold(serde_json::Map::new(), |mut map, (name, sub_category)| { match name { Category::PATH => if let Some(rules) = sub_category.rules.get(&DocPath::empty()) { map.insert(name.to_string(), rules.to_v3_json()); } _ => { map.insert(name.to_string(), sub_category.to_v3_json()); } } map })) } fn to_v2_json(&self) -> Value { Value::Object(self.rules.iter().fold(serde_json::Map::new(), |mut map, (_, category)| { for (key, value) in category.to_v2_json() { map.insert(key.clone(), value); } map })) } /// Clones the matching rules, renaming the category pub fn rename<S>(&self, old_name: S, new_name: S) -> Self where S: Into<Category> { let old = old_name.into(); let new = new_name.into(); MatchingRules { rules: self.rules.iter().map(|(key, value)| { if key == &old { (new.clone(), value.rename(new.clone())) } else { (key.clone(), value.clone()) } }).collect() } } /// Add the rules to the category pub fn add_rules<S>(&mut self, category: S, rules: MatchingRuleCategory) where S: Into<Category> { let category = category.into(); let entry = self.rules.entry(category.clone()) .or_insert_with(|| MatchingRuleCategory::empty(category.clone())); entry.add_rules(rules); } } impl Hash for MatchingRules { fn hash<H: Hasher>(&self, state: &mut H) { for (k, v) in self.rules.iter() { 
k.hash(state); v.hash(state); } } } impl PartialEq for MatchingRules { fn eq(&self, other: &Self) -> bool { self.rules == other.rules } fn ne(&self, other: &Self) -> bool { self.rules != other.rules } } impl Default for MatchingRules { fn default() -> Self { MatchingRules { rules: hashmap!{} } } } /// Parses the matching rules from the Value structure pub fn matchers_from_json(value: &Value, deprecated_name: &Option<String> ) -> anyhow::Result<MatchingRules> { let matchers_json = match (value.get("matchingRules"), deprecated_name.clone().and_then(|name| value.get(&name))) { (Some(v), _) => Some(v), (None, Some(v)) => Some(v), (None, None) => None }; let mut matching_rules = MatchingRules::default(); match matchers_json { Some(value) => match value { &Value::Object(ref m) => { if m.keys().next().unwrap_or(&String::default()).starts_with("$") { matching_rules.load_from_v2_map(m)? } else { matching_rules.load_from_v3_map(m)? } }, _ => () }, None => () } Ok(matching_rules) } /// Generates a Value structure for the provided matching rules pub fn matchers_to_json(matchers: &MatchingRules, spec_version: &PactSpecification) -> Value { match spec_version { &PactSpecification::V3 | &PactSpecification::V4 => matchers.to_v3_json(), _ => matchers.to_v2_json() } } /// Macro to ease constructing matching rules /// Example usage: /// ```ignore /// matchingrules! { /// "query" => { "user_id" => [ MatchingRule::Regex(s!("^[0-9]+$")) ] } /// } /// ``` #[macro_export] macro_rules! matchingrules { ( $( $name:expr => { $( $subname:expr => [ $( $matcher:expr ), * ] ),* }), * ) => {{ let mut _rules = $crate::matchingrules::MatchingRules::default(); $({ let mut _category = _rules.add_category($name); $({ $({ _category.add_rule( $crate::path_exp::DocPath::new_unwrap($subname), $matcher, $crate::matchingrules::RuleLogic::And, ); })* })* })* _rules }}; } /// Macro to ease constructing matching rules /// Example usage: /// ```ignore /// matchingrules_list! 
{ /// "body"; "user_id" => [ MatchingRule::Regex(s!("^[0-9]+$")) ] /// } /// ``` #[macro_export] macro_rules! matchingrules_list { ( $name:expr ; $( $subname:expr => [ $( $matcher:expr ), * ] ),* ) => {{ let mut _category = $crate::matchingrules::MatchingRuleCategory::empty($name); $( $( _category.add_rule( $crate::path_exp::DocPath::new_unwrap($subname), $matcher, $crate::matchingrules::RuleLogic::And, ); )* )* _category }}; ( $name:expr ; [ $( $matcher:expr ), * ] ) => {{ let mut _category = $crate::matchingrules::MatchingRuleCategory::empty($name); $( _category.add_rule( $crate::path_exp::DocPath::empty(), $matcher, $crate::matchingrules::RuleLogic::And, ); )* _category }}; } #[cfg(test)] mod tests { use expectest::prelude::*; use maplit::hashset; use serde_json::Value; use speculate::speculate; use crate::generators::*; use super::*; use super::super::*; #[test] fn rules_are_empty_when_there_are_no_categories() { expect!(MatchingRules::default().is_empty()).to(be_true()); } #[test] fn rules_are_empty_when_there_are_only_empty_categories() { expect!(MatchingRules { rules: hashmap!{ "body".into() => MatchingRuleCategory::empty("body"), "header".into() => MatchingRuleCategory::empty("header"), "query".into() => MatchingRuleCategory::empty("query") } }.is_empty()).to(be_true()); } #[test] fn rules_are_not_empty_when_there_is_a_nonempty_category() { expect!(MatchingRules { rules: hashmap!{ "body".into() => MatchingRuleCategory::empty("body"), "header".into() => MatchingRuleCategory::empty("headers"), "query".into() => MatchingRuleCategory { name: "query".into(), rules: hashmap!{ DocPath::empty() => RuleList { rules: vec![ MatchingRule::Equality ], rule_logic: RuleLogic::And, cascaded: false } } }, } }.is_empty()).to(be_false()); } #[test] fn matchers_from_json_test() { let matching_rules = matchers_from_json(&Value::Null, &None); let matching_rules = matching_rules.unwrap(); expect!(matching_rules.rules.iter()).to(be_empty()); } #[test] fn loads_v2_matching_rules() 
{ let matching_rules_json = Value::from_str(r#"{"matchingRules": { "$.path": { "match": "regex", "regex": "\\w+" }, "$.query.Q1": { "match": "regex", "regex": "\\d+" }, "$.header.HEADERY": {"match": "include", "value": "ValueA"}, "$.body.animals": {"min": 1, "match": "type"}, "$.body.animals[*].*": {"match": "type"}, "$.body.animals[*].children": {"min": 1}, "$.body.animals[*].children[*].*": {"match": "type"} }}"#).unwrap(); let matching_rules = matchers_from_json(&matching_rules_json, &None); let matching_rules = matching_rules.unwrap(); expect!(matching_rules.rules.iter()).to_not(be_empty()); expect!(matching_rules.categories()).to(be_equal_to(hashset!{ Category::PATH, Category::QUERY, Category::HEADER, Category::BODY })); expect!(matching_rules.rules_for_category("path")).to(be_some().value(MatchingRuleCategory { name: "path".into(), rules: hashmap! { DocPath::empty() => RuleList { rules: vec![ MatchingRule::Regex("\\w+".to_string()) ], rule_logic: RuleLogic::And, cascaded: false } } })); expect!(matching_rules.rules_for_category("query")).to(be_some().value(MatchingRuleCategory { name: "query".into(), rules: hashmap!{ DocPath::new_unwrap("Q1") => RuleList { rules: vec![ MatchingRule::Regex("\\d+".to_string()) ], rule_logic: RuleLogic::And, cascaded: false } } })); expect!(matching_rules.rules_for_category("header")).to(be_some().value(MatchingRuleCategory { name: "header".into(), rules: hashmap!{ DocPath::new_unwrap("HEADERY") => RuleList { rules: vec![ MatchingRule::Include("ValueA".to_string()) ], rule_logic: RuleLogic::And, cascaded: false } } })); expect!(matching_rules.rules_for_category("body")).to(be_some().value(MatchingRuleCategory { name: "body".into(), rules: hashmap!{ DocPath::new_unwrap("$.animals") => RuleList { rules: vec![ MatchingRule::MinType(1) ], rule_logic: RuleLogic::And, cascaded: false }, DocPath::new_unwrap("$.animals[*].*") => RuleList { rules: vec![ MatchingRule::Type ], rule_logic: RuleLogic::And, cascaded: false }, 
DocPath::new_unwrap("$.animals[*].children") => RuleList { rules: vec![ MatchingRule::MinType(1) ], rule_logic: RuleLogic::And, cascaded: false }, DocPath::new_unwrap("$.animals[*].children[*].*") => RuleList { rules: vec![ MatchingRule::Type ], rule_logic: RuleLogic::And, cascaded: false } } })); } #[test] fn loads_v3_matching_rules() { let matching_rules_json = Value::from_str(r#"{"matchingRules": { "path": { "matchers": [ { "match": "regex", "regex": "\\w+" } ] }, "query": { "Q1": { "matchers": [ { "match": "regex", "regex": "\\d+" } ] } }, "header": { "HEADERY": { "combine": "OR", "matchers": [ {"match": "include", "value": "ValueA"}, {"match": "include", "value": "ValueB"} ] } }, "body": { "$.animals": { "matchers": [{"min": 1, "match": "type"}] }, "$.animals[*].*": { "matchers": [{"match": "type"}] }, "$.animals[*].children": { "matchers": [{"min": 1}] }, "$.animals[*].children[*].*": { "matchers": [{"match": "type"}] } } }}"#).unwrap(); let matching_rules = matchers_from_json(&matching_rules_json, &None); let matching_rules = matching_rules.unwrap(); expect!(matching_rules.rules.iter()).to_not(be_empty()); expect!(matching_rules.categories()).to(be_equal_to(hashset!{ Category::PATH, Category::QUERY, Category::HEADER, Category::BODY })); expect!(matching_rules.rules_for_category("path")).to(be_some().value(MatchingRuleCategory { name: "path".into(), rules: hashmap! 
{ DocPath::empty() => RuleList { rules: vec![ MatchingRule::Regex("\\w+".to_string()) ], rule_logic: RuleLogic::And, cascaded: false } } })); expect!(matching_rules.rules_for_category("query")).to(be_some().value(MatchingRuleCategory { name: "query".into(), rules: hashmap!{ DocPath::new_unwrap("Q1") => RuleList { rules: vec![ MatchingRule::Regex("\\d+".to_string()) ], rule_logic: RuleLogic::And, cascaded: false } } })); expect!(matching_rules.rules_for_category("header")).to(be_some().value(MatchingRuleCategory { name: "header".into(), rules: hashmap!{ DocPath::new_unwrap("HEADERY") => RuleList { rules: vec![ MatchingRule::Include("ValueA".to_string()), MatchingRule::Include("ValueB".to_string()) ], rule_logic: RuleLogic::Or, cascaded: false } } })); expect!(matching_rules.rules_for_category("body")).to(be_some().value(MatchingRuleCategory { name: "body".into(), rules: hashmap!{ DocPath::new_unwrap("$.animals") => RuleList { rules: vec![ MatchingRule::MinType(1) ], rule_logic: RuleLogic::And, cascaded: false }, DocPath::new_unwrap("$.animals[*].*") => RuleList { rules: vec![ MatchingRule::Type ], rule_logic: RuleLogic::And, cascaded: false }, DocPath::new_unwrap("$.animals[*].children") => RuleList { rules: vec![ MatchingRule::MinType(1) ], rule_logic: RuleLogic::And, cascaded: false }, DocPath::new_unwrap("$.animals[*].children[*].*") => RuleList { rules: vec![ MatchingRule::Type ], rule_logic: RuleLogic::And, cascaded: false } } })); } #[test] fn correctly_loads_v3_matching_rules_with_incorrect_path_format() { let matching_rules_json = Value::from_str(r#"{"matchingRules": { "path": { "": { "matchers": [ { "match": "regex", "regex": "\\w+" } ] } } }}"#).unwrap(); let matching_rules = matchers_from_json(&matching_rules_json, &None); let matching_rules = matching_rules.unwrap(); expect!(matching_rules.rules.iter()).to_not(be_empty()); expect!(matching_rules.categories()).to(be_equal_to(hashset!{ Category::PATH })); 
expect!(matching_rules.rules_for_category("path")).to(be_some().value(MatchingRuleCategory { name: "path".into(), rules: hashmap! { DocPath::empty() => RuleList { rules: vec![ MatchingRule::Regex("\\w+".to_string()) ], rule_logic: RuleLogic::And, cascaded: false } } })); } speculate! { describe "generating matcher JSON" { before { let matchers = matchingrules!{ "body" => { "$.a.b" => [ MatchingRule::Type ] }, "path" => { "" => [ MatchingRule::Regex("/path/\\d+".to_string()) ] }, "query" => { "a" => [ MatchingRule::Regex("\\w+".to_string()) ] }, "header" => { "item1" => [ MatchingRule::Regex("5".to_string()) ] } }; } it "generates V2 matcher format" { expect!(matchers.to_v2_json().to_string()).to(be_equal_to( "{\"$.body.a.b\":{\"match\":\"type\"},\ \"$.header.item1\":{\"match\":\"regex\",\"regex\":\"5\"},\ \"$.path\":{\"match\":\"regex\",\"regex\":\"/path/\\\\d+\"},\ \"$.query.a\":{\"match\":\"regex\",\"regex\":\"\\\\w+\"}}" )); } it "generates V3 matcher format" { expect!(matchers.to_v3_json().to_string()).to(be_equal_to( "{\"body\":{\"$.a.b\":{\"combine\":\"AND\",\"matchers\":[{\"match\":\"type\"}]}},\ \"header\":{\"item1\":{\"combine\":\"AND\",\"matchers\":[{\"match\":\"regex\",\"regex\":\"5\"}]}},\ \"path\":{\"combine\":\"AND\",\"matchers\":[{\"match\":\"regex\",\"regex\":\"/path/\\\\d+\"}]},\ \"query\":{\"a\":{\"combine\":\"AND\",\"matchers\":[{\"match\":\"regex\",\"regex\":\"\\\\w+\"}]}}}" )); } } } #[test] fn matching_rule_from_json_test() { expect!(MatchingRule::from_json(&Value::from_str("\"test string\"").unwrap())).to(be_err()); expect!(MatchingRule::from_json(&Value::from_str("null").unwrap())).to(be_err()); expect!(MatchingRule::from_json(&Value::from_str("{}").unwrap())).to(be_err()); expect!(MatchingRule::from_json(&Value::from_str("[]").unwrap())).to(be_err()); expect!(MatchingRule::from_json(&Value::from_str("true").unwrap())).to(be_err()); expect!(MatchingRule::from_json(&Value::from_str("false").unwrap())).to(be_err()); 
expect!(MatchingRule::from_json(&Value::from_str("100").unwrap())).to(be_err()); expect!(MatchingRule::from_json(&Value::from_str("100.10").unwrap())).to(be_err()); expect!(MatchingRule::from_json(&Value::from_str("{\"stuff\": 100}").unwrap())).to(be_err()); expect!(MatchingRule::from_json(&Value::from_str("{\"match\": \"stuff\"}").unwrap())).to(be_err()); expect!(MatchingRule::from_json(&Value::from_str("{\"regex\": \"[0-9]\"}").unwrap())).to( be_ok().value(MatchingRule::Regex("[0-9]".to_string()))); expect!(MatchingRule::from_json(&Value::from_str("{\"min\": 100}").unwrap())).to( be_ok().value(MatchingRule::MinType(100))); expect!(MatchingRule::from_json(&Value::from_str("{\"max\": 100}").unwrap())).to( be_ok().value(MatchingRule::MaxType(100))); expect!(MatchingRule::from_json(&Value::from_str("{\"timestamp\": \"yyyy\"}").unwrap())).to( be_ok().value(MatchingRule::Timestamp("yyyy".to_string()))); expect!(MatchingRule::from_json(&Value::from_str("{\"date\": \"yyyy\"}").unwrap())).to( be_ok().value(MatchingRule::Date("yyyy".to_string()))); expect!(MatchingRule::from_json(&Value::from_str("{\"time\": \"hh:mm\"}").unwrap())).to( be_ok().value(MatchingRule::Time("hh:mm".to_string()))); expect!(MatchingRule::from_json(&Value::from_str("{\"match\": \"regex\", \"regex\": \"[0-9]\"}").unwrap())).to( be_ok().value(MatchingRule::Regex("[0-9]".to_string()))); expect!(MatchingRule::from_json(&Value::from_str("{\"match\": \"regex\"}").unwrap())).to(be_err()); expect!(MatchingRule::from_json(&Value::from_str("{\"match\": \"equality\"}").unwrap())).to( be_ok().value(MatchingRule::Equality)); expect!(MatchingRule::from_json(&Value::from_str("{\"match\": \"include\", \"value\": \"A\"}").unwrap())).to( be_ok().value(MatchingRule::Include("A".to_string()))); expect!(MatchingRule::from_json(&Value::from_str("{\"match\": \"include\"}").unwrap())).to(be_err()); expect!(MatchingRule::from_json(&Value::from_str("{\"match\": \"type\", \"min\": 1}").unwrap())).to( 
be_ok().value(MatchingRule::MinType(1))); expect!(MatchingRule::from_json(&Value::from_str("{\"match\": \"type\", \"max\": \"1\"}").unwrap())).to( be_ok().value(MatchingRule::MaxType(1))); expect!(MatchingRule::from_json(&Value::from_str("{\"match\": \"type\", \"min\": 1, \"max\": \"1\"}").unwrap())).to( be_ok().value(MatchingRule::MinMaxType(1, 1))); expect!(MatchingRule::from_json(&Value::from_str("{\"match\": \"type\"}").unwrap())).to( be_ok().value(MatchingRule::Type)); expect!(MatchingRule::from_json(&Value::from_str("{\"match\": \"type\", \"value\": 100}").unwrap())).to( be_ok().value(MatchingRule::Type)); expect!(MatchingRule::from_json(&Value::from_str("{\"match\": \"min\", \"min\": 1}").unwrap())).to( be_ok().value(MatchingRule::MinType(1))); expect!(MatchingRule::from_json(&Value::from_str("{\"match\": \"max\", \"max\": \"1\"}").unwrap())).to( be_ok().value(MatchingRule::MaxType(1))); expect!(MatchingRule::from_json(&Value::from_str("{\"match\": \"min\"}").unwrap())).to(be_err()); expect!(MatchingRule::from_json(&Value::from_str("{\"match\": \"max\"}").unwrap())).to(be_err()); expect!(MatchingRule::from_json(&Value::from_str("{\"match\": \"number\"}").unwrap())).to( be_ok().value(MatchingRule::Number)); expect!(MatchingRule::from_json(&Value::from_str("{\"match\": \"integer\"}").unwrap())).to( be_ok().value(MatchingRule::Integer)); expect!(MatchingRule::from_json(&Value::from_str("{\"match\": \"decimal\"}").unwrap())).to( be_ok().value(MatchingRule::Decimal)); expect!(MatchingRule::from_json(&Value::from_str("{\"match\": \"real\"}").unwrap())).to( be_ok().value(MatchingRule::Decimal)); expect!(MatchingRule::from_json(&Value::from_str("{\"match\": \"boolean\"}").unwrap())).to( be_ok().value(MatchingRule::Boolean)); expect!(MatchingRule::from_json(&Value::from_str("{\"match\": \"timestamp\", \"timestamp\": \"A\"}").unwrap())).to( be_ok().value(MatchingRule::Timestamp("A".to_string()))); expect!(MatchingRule::from_json(&Value::from_str("{\"match\": 
\"timestamp\"}").unwrap())).to(be_err()); expect!(MatchingRule::from_json(&Value::from_str("{\"match\": \"time\", \"time\": \"A\"}").unwrap())).to( be_ok().value(MatchingRule::Time("A".to_string()))); expect!(MatchingRule::from_json(&Value::from_str("{\"match\": \"time\"}").unwrap())).to(be_err()); expect!(MatchingRule::from_json(&Value::from_str("{\"match\": \"date\", \"date\": \"A\"}").unwrap())).to( be_ok().value(MatchingRule::Date("A".to_string()))); expect!(MatchingRule::from_json(&Value::from_str("{\"match\": \"date\"}").unwrap())).to(be_err()); expect!(MatchingRule::from_json(&Value::from_str("{\"match\": \"null\"}").unwrap())).to( be_ok().value(MatchingRule::Null)); let json = json!({ "match": "arrayContains", "variants": [] }); expect!(MatchingRule::from_json(&json)).to(be_ok().value(MatchingRule::ArrayContains(vec![]))); let json = json!({ "match": "arrayContains", "variants": [ { "index": 0, "rules": { "matchers": [ { "match": "equality" } ] } } ] }); expect!(MatchingRule::from_json(&json)).to(be_ok().value( MatchingRule::ArrayContains( vec![ (0, matchingrules_list! { "body"; [ MatchingRule::Equality ] }, HashMap::default()) ]) )); let json = json!({ "match": "arrayContains", "variants": [ { "index": 0, "rules": { "matchers": [ { "match": "equality" } ] }, "generators": { "a": { "type": "Uuid" } } } ] }); let generators = hashmap!{ DocPath::new_unwrap("a") => Generator::Uuid(None) }; expect!(MatchingRule::from_json(&json)).to(be_ok().value( MatchingRule::ArrayContains( vec![ (0, matchingrules_list! 
{ "body"; [ MatchingRule::Equality ] }, generators) ]) )); let json = json!({ "match": "statusCode", "status": "success" }); expect!(MatchingRule::from_json(&json)).to(be_ok().value( MatchingRule::StatusCode(HttpStatus::Success) )); let json = json!({ "match": "statusCode", "status": [200, 201, 204] }); expect!(MatchingRule::from_json(&json)).to(be_ok().value( MatchingRule::StatusCode(HttpStatus::StatusCodes(vec![200, 201, 204])) )); } #[test] fn matching_rule_to_json_test() { expect!(MatchingRule::StatusCode(HttpStatus::ClientError).to_json()).to( be_equal_to(json!({ "match": "statusCode", "status": "clientError" }))); expect!(MatchingRule::StatusCode(HttpStatus::StatusCodes(vec![400, 401, 404])).to_json()).to( be_equal_to(json!({ "match": "statusCode", "status": [400, 401, 404] }))); } #[test] fn matcher_is_defined_returns_false_when_there_are_no_matchers() { let matchers = matchingrules!{}; expect!(matchers.matcher_is_defined("body", &vec!["$", "a", "b"])).to(be_false()); } #[test] fn matcher_is_defined_returns_false_when_the_path_does_not_have_a_matcher_entry() { let matchers = matchingrules!{ "body" => { } }; expect!(matchers.matcher_is_defined("body", &vec!["$", "a", "b"])).to(be_false()); } #[test] fn matcher_is_defined_returns_true_when_the_path_does_have_a_matcher_entry() { let matchers = matchingrules! { "body" => { "$.a.b" => [ MatchingRule::Type ] } }; expect!(matchers.matcher_is_defined("body", &vec!["$", "a", "b"])).to(be_true()); } #[test] fn matcher_is_defined_returns_false_when_the_path_is_empty() { let matchers = matchingrules! 
{ "body" => { "$.a.b" => [ MatchingRule::Type ] } }; expect!(matchers.matcher_is_defined("body", &vec![])).to(be_false()); } #[test] fn matcher_is_defined_returns_true_when_the_parent_of_the_path_does_have_a_matcher_entry() { let matchers = matchingrules!{ "body" => { "$.a.b" => [ MatchingRule::Type ] } }; expect!(matchers.matcher_is_defined("body", &vec!["$", "a", "b", "c"])).to(be_true()); } #[test] fn wildcard_matcher_is_defined_returns_false_when_there_are_no_matchers() { let matchers = matchingrules!{}; expect!(matchers.wildcard_matcher_is_defined("body", &vec!["$", "a", "b"])).to(be_false()); } #[test] fn wildcard_matcher_is_defined_returns_false_when_the_path_does_not_have_a_matcher_entry() { let matchers = matchingrules!{ "body" => { } }; expect!(matchers.wildcard_matcher_is_defined("body", &vec!["$", "a", "b"])).to(be_false()); } #[test] fn wildcard_matcher_is_defined_returns_false_when_the_path_does_have_a_matcher_entry_and_it_is_not_a_wildcard() { let matchers = matchingrules!{ "body" => { "$.a.b" => [ MatchingRule::Type ], "$.*" => [ MatchingRule::Type ] } }; expect!(matchers.wildcard_matcher_is_defined("body", &vec!["$", "a", "b"])).to(be_false()); } #[test] fn wildcard_matcher_is_defined_returns_true_when_the_path_does_have_a_matcher_entry_and_it_is_a_widcard() { let matchers = matchingrules!{ "body" => { "$.a.*" => [ MatchingRule::Type ] } }; expect!(matchers.wildcard_matcher_is_defined("body", &vec!["$", "a", "b"])).to(be_true()); } #[test] fn wildcard_matcher_is_defined_returns_false_when_the_parent_of_the_path_does_have_a_matcher_entry() { let matchers = matchingrules!{ "body" => { "$.a.*" => [ MatchingRule::Type ] } }; expect!(matchers.wildcard_matcher_is_defined("body", &vec!["$", "a", "b", "c"])).to(be_false()); } #[test] fn min_and_max_values_get_serialised_to_json_as_numbers() { expect!(MatchingRule::MinType(1).to_json().to_string()).to(be_equal_to("{\"match\":\"type\",\"min\":1}")); 
expect!(MatchingRule::MaxType(1).to_json().to_string()).to(be_equal_to("{\"match\":\"type\",\"max\":1}")); expect!(MatchingRule::MinMaxType(1, 10).to_json().to_string()).to(be_equal_to("{\"match\":\"type\",\"max\":10,\"min\":1}")); } }
36.665846
166
0.603007
db4a1701e976b9ec4f9ff1506c05265986157675
25,471
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![allow(non_snake_case)] register_long_diagnostics! { E0373: r##" This error occurs when an attempt is made to use data captured by a closure, when that data may no longer exist. It's most commonly seen when attempting to return a closure: ```compile_fail,E0373 fn foo() -> Box<Fn(u32) -> u32> { let x = 0u32; Box::new(|y| x + y) } ``` Notice that `x` is stack-allocated by `foo()`. By default, Rust captures closed-over data by reference. This means that once `foo()` returns, `x` no longer exists. An attempt to access `x` within the closure would thus be unsafe. Another situation where this might be encountered is when spawning threads: ```compile_fail,E0373 fn foo() { let x = 0u32; let y = 1u32; let thr = std::thread::spawn(|| { x + y }); } ``` Since our new thread runs in parallel, the stack frame containing `x` and `y` may well have disappeared by the time we try to use them. Even if we call `thr.join()` within foo (which blocks until `thr` has completed, ensuring the stack frame won't disappear), we will not succeed: the compiler cannot prove that this behaviour is safe, and so won't let us do it. The solution to this problem is usually to switch to using a `move` closure. This approach moves (or copies, where possible) data into the closure, rather than taking references to it. For example: ``` fn foo() -> Box<Fn(u32) -> u32> { let x = 0u32; Box::new(move |y| x + y) } ``` Now that the closure has its own copy of the data, there's no need to worry about safety. 
"##, E0381: r##" It is not allowed to use or capture an uninitialized variable. For example: ```compile_fail,E0381 fn main() { let x: i32; let y = x; // error, use of possibly uninitialized variable } ``` To fix this, ensure that any declared variables are initialized before being used. Example: ``` fn main() { let x: i32 = 0; let y = x; // ok! } ``` "##, E0382: r##" This error occurs when an attempt is made to use a variable after its contents have been moved elsewhere. For example: ```compile_fail,E0382 struct MyStruct { s: u32 } fn main() { let mut x = MyStruct{ s: 5u32 }; let y = x; x.s = 6; println!("{}", x.s); } ``` Since `MyStruct` is a type that is not marked `Copy`, the data gets moved out of `x` when we set `y`. This is fundamental to Rust's ownership system: outside of workarounds like `Rc`, a value cannot be owned by more than one variable. If we own the type, the easiest way to address this problem is to implement `Copy` and `Clone` on it, as shown below. This allows `y` to copy the information in `x`, while leaving the original version owned by `x`. Subsequent changes to `x` will not be reflected when accessing `y`. ``` #[derive(Copy, Clone)] struct MyStruct { s: u32 } fn main() { let mut x = MyStruct{ s: 5u32 }; let y = x; x.s = 6; println!("{}", x.s); } ``` Alternatively, if we don't control the struct's definition, or mutable shared ownership is truly required, we can use `Rc` and `RefCell`: ``` use std::cell::RefCell; use std::rc::Rc; struct MyStruct { s: u32 } fn main() { let mut x = Rc::new(RefCell::new(MyStruct{ s: 5u32 })); let y = x.clone(); x.borrow_mut().s = 6; println!("{}", x.borrow().s); } ``` With this approach, x and y share ownership of the data via the `Rc` (reference count type). `RefCell` essentially performs runtime borrow checking: ensuring that at most one writer or multiple readers can access the data at any one time. 
If you wish to learn more about ownership in Rust, start with the chapter in the Book: https://doc.rust-lang.org/book/ownership.html "##, E0383: r##" This error occurs when an attempt is made to partially reinitialize a structure that is currently uninitialized. For example, this can happen when a drop has taken place: ```ignore struct Foo { a: u32, } let mut x = Foo { a: 1 }; drop(x); // `x` is now uninitialized x.a = 2; // error, partial reinitialization of uninitialized structure `t` ``` This error can be fixed by fully reinitializing the structure in question: ``` struct Foo { a: u32, } let mut x = Foo { a: 1 }; drop(x); x = Foo { a: 2 }; ``` "##, E0384: r##" This error occurs when an attempt is made to reassign an immutable variable. For example: ```compile_fail,E0384 fn main() { let x = 3; x = 5; // error, reassignment of immutable variable } ``` By default, variables in Rust are immutable. To fix this error, add the keyword `mut` after the keyword `let` when declaring the variable. For example: ``` fn main() { let mut x = 3; x = 5; } ``` "##, E0386: r##" This error occurs when an attempt is made to mutate the target of a mutable reference stored inside an immutable container. For example, this can happen when storing a `&mut` inside an immutable `Box`: ```compile_fail,E0386 let mut x: i64 = 1; let y: Box<_> = Box::new(&mut x); **y = 2; // error, cannot assign to data in an immutable container ``` This error can be fixed by making the container mutable: ``` let mut x: i64 = 1; let mut y: Box<_> = Box::new(&mut x); **y = 2; ``` It can also be fixed by using a type with interior mutability, such as `Cell` or `RefCell`: ``` use std::cell::Cell; let x: i64 = 1; let y: Box<Cell<_>> = Box::new(Cell::new(x)); y.set(2); ``` "##, E0387: r##" This error occurs when an attempt is made to mutate or mutably reference data that a closure has captured immutably. 
Examples of this error are shown below: ```compile_fail,E0387 // Accepts a function or a closure that captures its environment immutably. // Closures passed to foo will not be able to mutate their closed-over state. fn foo<F: Fn()>(f: F) { } // Attempts to mutate closed-over data. Error message reads: // `cannot assign to data in a captured outer variable...` fn mutable() { let mut x = 0u32; foo(|| x = 2); } // Attempts to take a mutable reference to closed-over data. Error message // reads: `cannot borrow data mutably in a captured outer variable...` fn mut_addr() { let mut x = 0u32; foo(|| { let y = &mut x; }); } ``` The problem here is that foo is defined as accepting a parameter of type `Fn`. Closures passed into foo will thus be inferred to be of type `Fn`, meaning that they capture their context immutably. If the definition of `foo` is under your control, the simplest solution is to capture the data mutably. This can be done by defining `foo` to take FnMut rather than Fn: ``` fn foo<F: FnMut()>(f: F) { } ``` Alternatively, we can consider using the `Cell` and `RefCell` types to achieve interior mutability through a shared reference. Our example's `mutable` function could be redefined as below: ``` use std::cell::Cell; fn foo<F: Fn()>(f: F) { } fn mutable() { let x = Cell::new(0u32); foo(|| x.set(2)); } ``` You can read more about cell types in the API documentation: https://doc.rust-lang.org/std/cell/ "##, E0388: r##" E0388 was removed and is no longer issued. "##, E0389: r##" An attempt was made to mutate data using a non-mutable reference. This commonly occurs when attempting to assign to a non-mutable reference of a mutable reference (`&(&mut T)`). 
Example of erroneous code: ```compile_fail,E0389 struct FancyNum { num: u8, } fn main() { let mut fancy = FancyNum{ num: 5 }; let fancy_ref = &(&mut fancy); fancy_ref.num = 6; // error: cannot assign to data in a `&` reference println!("{}", fancy_ref.num); } ``` Here, `&mut fancy` is mutable, but `&(&mut fancy)` is not. Creating an immutable reference to a value borrows it immutably. There can be multiple references of type `&(&mut T)` that point to the same value, so they must be immutable to prevent multiple mutable references to the same value. To fix this, either remove the outer reference: ``` struct FancyNum { num: u8, } fn main() { let mut fancy = FancyNum{ num: 5 }; let fancy_ref = &mut fancy; // `fancy_ref` is now &mut FancyNum, rather than &(&mut FancyNum) fancy_ref.num = 6; // No error! println!("{}", fancy_ref.num); } ``` Or make the outer reference mutable: ``` struct FancyNum { num: u8 } fn main() { let mut fancy = FancyNum{ num: 5 }; let fancy_ref = &mut (&mut fancy); // `fancy_ref` is now &mut(&mut FancyNum), rather than &(&mut FancyNum) fancy_ref.num = 6; // No error! println!("{}", fancy_ref.num); } ``` "##, E0499: r##" A variable was borrowed as mutable more than once. Erroneous code example: ```compile_fail,E0499 let mut i = 0; let mut x = &mut i; let mut a = &mut i; // error: cannot borrow `i` as mutable more than once at a time ``` Please note that in rust, you can either have many immutable references, or one mutable reference. Take a look at https://doc.rust-lang.org/stable/book/references-and-borrowing.html for more information. Example: ``` let mut i = 0; let mut x = &mut i; // ok! // or: let mut i = 0; let a = &i; // ok! let b = &i; // still ok! let c = &i; // still ok! ``` "##, E0500: r##" A borrowed variable was used in another closure. 
Example of erroneous code: ```compile_fail fn you_know_nothing(jon_snow: &mut i32) { let nights_watch = || { *jon_snow = 2; }; let starks = || { *jon_snow = 3; // error: closure requires unique access to `jon_snow` // but it is already borrowed }; } ``` In here, `jon_snow` is already borrowed by the `nights_watch` closure, so it cannot be borrowed by the `starks` closure at the same time. To fix this issue, you can put the closure in its own scope: ``` fn you_know_nothing(jon_snow: &mut i32) { { let nights_watch = || { *jon_snow = 2; }; } // At this point, `jon_snow` is free. let starks = || { *jon_snow = 3; }; } ``` Or, if the type implements the `Clone` trait, you can clone it between closures: ``` fn you_know_nothing(jon_snow: &mut i32) { let mut jon_copy = jon_snow.clone(); let nights_watch = || { jon_copy = 2; }; let starks = || { *jon_snow = 3; }; } ``` "##, E0501: r##" This error indicates that a mutable variable is being used while it is still captured by a closure. Because the closure has borrowed the variable, it is not available for use until the closure goes out of scope. Note that a capture will either move or borrow a variable, but in this situation, the closure is borrowing the variable. Take a look at http://rustbyexample.com/fn/closures/capture.html for more information about capturing. Example of erroneous code: ```compile_fail,E0501 fn inside_closure(x: &mut i32) { // Actions which require unique access } fn outside_closure(x: &mut i32) { // Actions which require unique access } fn foo(a: &mut i32) { let bar = || { inside_closure(a) }; outside_closure(a); // error: cannot borrow `*a` as mutable because previous // closure requires unique access. } ``` To fix this error, you can place the closure in its own scope: ``` fn inside_closure(x: &mut i32) {} fn outside_closure(x: &mut i32) {} fn foo(a: &mut i32) { { let bar = || { inside_closure(a) }; } // borrow on `a` ends. outside_closure(a); // ok! 
} ``` Or you can pass the variable as a parameter to the closure: ``` fn inside_closure(x: &mut i32) {} fn outside_closure(x: &mut i32) {} fn foo(a: &mut i32) { let bar = |s: &mut i32| { inside_closure(s) }; outside_closure(a); bar(a); } ``` It may be possible to define the closure later: ``` fn inside_closure(x: &mut i32) {} fn outside_closure(x: &mut i32) {} fn foo(a: &mut i32) { outside_closure(a); let bar = || { inside_closure(a) }; } ``` "##, E0502: r##" This error indicates that you are trying to borrow a variable as mutable when it has already been borrowed as immutable. Example of erroneous code: ```compile_fail,E0502 fn bar(x: &mut i32) {} fn foo(a: &mut i32) { let ref y = a; // a is borrowed as immutable. bar(a); // error: cannot borrow `*a` as mutable because `a` is also borrowed // as immutable } ``` To fix this error, ensure that you don't have any other references to the variable before trying to access it mutably: ``` fn bar(x: &mut i32) {} fn foo(a: &mut i32) { bar(a); let ref y = a; // ok! } ``` For more information on the rust ownership system, take a look at https://doc.rust-lang.org/stable/book/references-and-borrowing.html. "##, E0503: r##" A value was used after it was mutably borrowed. Example of erroneous code: ```compile_fail,E0503 fn main() { let mut value = 3; // Create a mutable borrow of `value`. This borrow // lives until the end of this function. let _borrow = &mut value; let _sum = value + 1; // error: cannot use `value` because // it was mutably borrowed } ``` In this example, `value` is mutably borrowed by `borrow` and cannot be used to calculate `sum`. This is not possible because this would violate Rust's mutability rules. You can fix this error by limiting the scope of the borrow: ``` fn main() { let mut value = 3; // By creating a new block, you can limit the scope // of the reference. { let _borrow = &mut value; // Use `_borrow` inside this block. } // The block has ended and with it the borrow. 
// You can now use `value` again. let _sum = value + 1; } ``` Or by cloning `value` before borrowing it: ``` fn main() { let mut value = 3; // We clone `value`, creating a copy. let value_cloned = value.clone(); // The mutable borrow is a reference to `value` and // not to `value_cloned`... let _borrow = &mut value; // ... which means we can still use `value_cloned`, let _sum = value_cloned + 1; // even though the borrow only ends here. } ``` You can find more information about borrowing in the rust-book: http://doc.rust-lang.org/stable/book/references-and-borrowing.html "##, E0504: r##" This error occurs when an attempt is made to move a borrowed variable into a closure. Example of erroneous code: ```compile_fail,E0504 struct FancyNum { num: u8, } fn main() { let fancy_num = FancyNum { num: 5 }; let fancy_ref = &fancy_num; let x = move || { println!("child function: {}", fancy_num.num); // error: cannot move `fancy_num` into closure because it is borrowed }; x(); println!("main function: {}", fancy_ref.num); } ``` Here, `fancy_num` is borrowed by `fancy_ref` and so cannot be moved into the closure `x`. There is no way to move a value into a closure while it is borrowed, as that would invalidate the borrow. 
If the closure can't outlive the value being moved, try using a reference rather than moving: ``` struct FancyNum { num: u8, } fn main() { let fancy_num = FancyNum { num: 5 }; let fancy_ref = &fancy_num; let x = move || { // fancy_ref is usable here because it doesn't move `fancy_num` println!("child function: {}", fancy_ref.num); }; x(); println!("main function: {}", fancy_num.num); } ``` If the value has to be borrowed and then moved, try limiting the lifetime of the borrow using a scoped block: ``` struct FancyNum { num: u8, } fn main() { let fancy_num = FancyNum { num: 5 }; { let fancy_ref = &fancy_num; println!("main function: {}", fancy_ref.num); // `fancy_ref` goes out of scope here } let x = move || { // `fancy_num` can be moved now (no more references exist) println!("child function: {}", fancy_num.num); }; x(); } ``` If the lifetime of a reference isn't enough, such as in the case of threading, consider using an `Arc` to create a reference-counted value: ``` use std::sync::Arc; use std::thread; struct FancyNum { num: u8, } fn main() { let fancy_ref1 = Arc::new(FancyNum { num: 5 }); let fancy_ref2 = fancy_ref1.clone(); let x = thread::spawn(move || { // `fancy_ref1` can be moved and has a `'static` lifetime println!("child thread: {}", fancy_ref1.num); }); x.join().expect("child thread should finish"); println!("main thread: {}", fancy_ref2.num); } ``` "##, E0505: r##" A value was moved out while it was still borrowed. Erroneous code example: ```compile_fail,E0505 struct Value {} fn eat(val: Value) {} fn main() { let x = Value{}; { let _ref_to_val: &Value = &x; eat(x); } } ``` Here, the function `eat` takes the ownership of `x`. However, `x` cannot be moved because it was borrowed to `_ref_to_val`. To fix that you can do few different things: * Try to avoid moving the variable. * Release borrow before move. * Implement the `Copy` trait on the type. 
Examples: ``` struct Value {} fn eat(val: &Value) {} fn main() { let x = Value{}; { let _ref_to_val: &Value = &x; eat(&x); // pass by reference, if it's possible } } ``` Or: ``` struct Value {} fn eat(val: Value) {} fn main() { let x = Value{}; { let _ref_to_val: &Value = &x; } eat(x); // release borrow and then move it. } ``` Or: ``` #[derive(Clone, Copy)] // implement Copy trait struct Value {} fn eat(val: Value) {} fn main() { let x = Value{}; { let _ref_to_val: &Value = &x; eat(x); // it will be copied here. } } ``` You can find more information about borrowing in the rust-book: http://doc.rust-lang.org/stable/book/references-and-borrowing.html "##, E0506: r##" This error occurs when an attempt is made to assign to a borrowed value. Example of erroneous code: ```compile_fail,E0506 struct FancyNum { num: u8, } fn main() { let mut fancy_num = FancyNum { num: 5 }; let fancy_ref = &fancy_num; fancy_num = FancyNum { num: 6 }; // error: cannot assign to `fancy_num` because it is borrowed println!("Num: {}, Ref: {}", fancy_num.num, fancy_ref.num); } ``` Because `fancy_ref` still holds a reference to `fancy_num`, `fancy_num` can't be assigned to a new value as it would invalidate the reference. 
Alternatively, we can move out of `fancy_num` into a second `fancy_num`: ``` struct FancyNum { num: u8, } fn main() { let mut fancy_num = FancyNum { num: 5 }; let moved_num = fancy_num; fancy_num = FancyNum { num: 6 }; println!("Num: {}, Moved num: {}", fancy_num.num, moved_num.num); } ``` If the value has to be borrowed, try limiting the lifetime of the borrow using a scoped block: ``` struct FancyNum { num: u8, } fn main() { let mut fancy_num = FancyNum { num: 5 }; { let fancy_ref = &fancy_num; println!("Ref: {}", fancy_ref.num); } // Works because `fancy_ref` is no longer in scope fancy_num = FancyNum { num: 6 }; println!("Num: {}", fancy_num.num); } ``` Or by moving the reference into a function: ``` struct FancyNum { num: u8, } fn main() { let mut fancy_num = FancyNum { num: 5 }; print_fancy_ref(&fancy_num); // Works because function borrow has ended fancy_num = FancyNum { num: 6 }; println!("Num: {}", fancy_num.num); } fn print_fancy_ref(fancy_ref: &FancyNum){ println!("Ref: {}", fancy_ref.num); } ``` "##, E0507: r##" You tried to move out of a value which was borrowed. Erroneous code example: ```compile_fail,E0507 use std::cell::RefCell; struct TheDarkKnight; impl TheDarkKnight { fn nothing_is_true(self) {} } fn main() { let x = RefCell::new(TheDarkKnight); x.borrow().nothing_is_true(); // error: cannot move out of borrowed content } ``` Here, the `nothing_is_true` method takes the ownership of `self`. However, `self` cannot be moved because `.borrow()` only provides an `&TheDarkKnight`, which is a borrow of the content owned by the `RefCell`. To fix this error, you have three choices: * Try to avoid moving the variable. * Somehow reclaim the ownership. * Implement the `Copy` trait on the type. Examples: ``` use std::cell::RefCell; struct TheDarkKnight; impl TheDarkKnight { fn nothing_is_true(&self) {} // First case, we don't take ownership } fn main() { let x = RefCell::new(TheDarkKnight); x.borrow().nothing_is_true(); // ok! 
} ``` Or: ``` use std::cell::RefCell; struct TheDarkKnight; impl TheDarkKnight { fn nothing_is_true(self) {} } fn main() { let x = RefCell::new(TheDarkKnight); let x = x.into_inner(); // we get back ownership x.nothing_is_true(); // ok! } ``` Or: ``` use std::cell::RefCell; #[derive(Clone, Copy)] // we implement the Copy trait struct TheDarkKnight; impl TheDarkKnight { fn nothing_is_true(self) {} } fn main() { let x = RefCell::new(TheDarkKnight); x.borrow().nothing_is_true(); // ok! } ``` Moving out of a member of a mutably borrowed struct is fine if you put something back. `mem::replace` can be used for that: ```ignore struct TheDarkKnight; impl TheDarkKnight { fn nothing_is_true(self) {} } struct Batcave { knight: TheDarkKnight } fn main() { use std::mem; let mut cave = Batcave { knight: TheDarkKnight }; let borrowed = &mut cave; borrowed.knight.nothing_is_true(); // E0507 mem::replace(&mut borrowed.knight, TheDarkKnight).nothing_is_true(); // ok! } ``` You can find more information about borrowing in the rust-book: http://doc.rust-lang.org/stable/book/references-and-borrowing.html "##, E0508: r##" A value was moved out of a non-copy fixed-size array. Example of erroneous code: ```compile_fail,E0508 struct NonCopy; fn main() { let array = [NonCopy; 1]; let _value = array[0]; // error: cannot move out of type `[NonCopy; 1]`, // a non-copy fixed-size array } ``` The first element was moved out of the array, but this is not possible because `NonCopy` does not implement the `Copy` trait. Consider borrowing the element instead of moving it: ``` struct NonCopy; fn main() { let array = [NonCopy; 1]; let _value = &array[0]; // Borrowing is allowed, unlike moving. } ``` Alternatively, if your type implements `Clone` and you need to own the value, consider borrowing and then cloning: ``` #[derive(Clone)] struct NonCopy; fn main() { let array = [NonCopy; 1]; // Now you can clone the array element. 
let _value = array[0].clone(); } ``` "##, E0509: r##" This error occurs when an attempt is made to move out of a value whose type implements the `Drop` trait. Example of erroneous code: ```compile_fail,E0509 struct FancyNum { num: usize } struct DropStruct { fancy: FancyNum } impl Drop for DropStruct { fn drop(&mut self) { // Destruct DropStruct, possibly using FancyNum } } fn main() { let drop_struct = DropStruct{fancy: FancyNum{num: 5}}; let fancy_field = drop_struct.fancy; // Error E0509 println!("Fancy: {}", fancy_field.num); // implicit call to `drop_struct.drop()` as drop_struct goes out of scope } ``` Here, we tried to move a field out of a struct of type `DropStruct` which implements the `Drop` trait. However, a struct cannot be dropped if one or more of its fields have been moved. Structs implementing the `Drop` trait have an implicit destructor that gets called when they go out of scope. This destructor may use the fields of the struct, so moving out of the struct could make it impossible to run the destructor. Therefore, we must think of all values whose type implements the `Drop` trait as single units whose fields cannot be moved. This error can be fixed by creating a reference to the fields of a struct, enum, or tuple using the `ref` keyword: ``` struct FancyNum { num: usize } struct DropStruct { fancy: FancyNum } impl Drop for DropStruct { fn drop(&mut self) { // Destruct DropStruct, possibly using FancyNum } } fn main() { let drop_struct = DropStruct{fancy: FancyNum{num: 5}}; let ref fancy_field = drop_struct.fancy; // No more errors! 
println!("Fancy: {}", fancy_field.num); // implicit call to `drop_struct.drop()` as drop_struct goes out of scope } ``` Note that this technique can also be used in the arms of a match expression: ``` struct FancyNum { num: usize } enum DropEnum { Fancy(FancyNum) } impl Drop for DropEnum { fn drop(&mut self) { // Destruct DropEnum, possibly using FancyNum } } fn main() { // Creates and enum of type `DropEnum`, which implements `Drop` let drop_enum = DropEnum::Fancy(FancyNum{num: 10}); match drop_enum { // Creates a reference to the inside of `DropEnum::Fancy` DropEnum::Fancy(ref fancy_field) => // No error! println!("It was fancy-- {}!", fancy_field.num), } // implicit call to `drop_enum.drop()` as drop_enum goes out of scope } ``` "##, } register_diagnostics! { E0385, // {} in an aliasable location E0524, // two closures require unique access to `..` at the same time }
22.681211
80
0.654195
e2d997a93fe0410b980c68d67c51c71a9fb0bdfe
18,806
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![allow(non_camel_case_types)] //! Validates all used crates and extern libraries and loads their metadata use back::svh::Svh; use driver::session::Session; use driver::{driver, config}; use metadata::cstore; use metadata::cstore::{CStore, CrateSource}; use metadata::decoder; use metadata::loader; use metadata::loader::CratePaths; use plugin::load::PluginMetadata; use std::rc::Rc; use std::collections::HashMap; use std::collections::hashmap::{Occupied, Vacant}; use syntax::ast; use syntax::abi; use syntax::attr; use syntax::attr::AttrMetaMethods; use syntax::codemap::{Span}; use syntax::diagnostic::SpanHandler; use syntax::parse::token::InternedString; use syntax::parse::token; use syntax::visit; use util::fs; struct Env<'a> { sess: &'a Session, next_crate_num: ast::CrateNum, } // Traverses an AST, reading all the information about use'd crates and extern // libraries necessary for later resolving, typechecking, linking, etc. 
pub fn read_crates(sess: &Session, krate: &ast::Crate) { let mut e = Env { sess: sess, next_crate_num: sess.cstore.next_crate_num(), }; visit_crate(&e, krate); visit::walk_crate(&mut e, krate); dump_crates(&sess.cstore); warn_if_multiple_versions(sess.diagnostic(), &sess.cstore) } impl<'a, 'v> visit::Visitor<'v> for Env<'a> { fn visit_view_item(&mut self, a: &ast::ViewItem) { visit_view_item(self, a); visit::walk_view_item(self, a); } fn visit_item(&mut self, a: &ast::Item) { visit_item(self, a); visit::walk_item(self, a); } } fn dump_crates(cstore: &CStore) { debug!("resolved crates:"); cstore.iter_crate_data_origins(|_, data, opt_source| { debug!(" name: {}", data.name()); debug!(" cnum: {}", data.cnum); debug!(" hash: {}", data.hash()); opt_source.map(|cs| { let CrateSource { dylib, rlib, cnum: _ } = cs; dylib.map(|dl| debug!(" dylib: {}", dl.display())); rlib.map(|rl| debug!(" rlib: {}", rl.display())); }); }) } fn warn_if_multiple_versions(diag: &SpanHandler, cstore: &CStore) { let mut map = HashMap::new(); cstore.iter_crate_data(|cnum, data| { match map.entry(data.name()) { Vacant(entry) => { entry.set(vec![cnum]); }, Occupied(mut entry) => { entry.get_mut().push(cnum); }, } }); for (name, dupes) in map.into_iter() { if dupes.len() == 1 { continue } diag.handler().warn( format!("using multiple versions of crate `{}`", name).as_slice()); for dupe in dupes.into_iter() { let data = cstore.get_crate_data(dupe); diag.span_note(data.span, "used here"); loader::note_crate_name(diag, data.name().as_slice()); } } } fn visit_crate(e: &Env, c: &ast::Crate) { for a in c.attrs.iter().filter(|m| m.name().equiv(&("link_args"))) { match a.value_str() { Some(ref linkarg) => e.sess.cstore.add_used_link_args(linkarg.get()), None => { /* fallthrough */ } } } } fn should_link(i: &ast::ViewItem) -> bool { i.attrs.iter().all(|attr| { attr.name().get() != "phase" || attr.meta_item_list().map_or(false, |phases| { attr::contains_name(phases.as_slice(), "link") }) }) } fn 
visit_view_item(e: &mut Env, i: &ast::ViewItem) { if !should_link(i) { return; } match extract_crate_info(e, i) { Some(info) => { let (cnum, _, _) = resolve_crate(e, &None, info.ident.as_slice(), info.name.as_slice(), None, i.span); e.sess.cstore.add_extern_mod_stmt_cnum(info.id, cnum); } None => () } } struct CrateInfo { ident: String, name: String, id: ast::NodeId, should_link: bool, } fn extract_crate_info(e: &Env, i: &ast::ViewItem) -> Option<CrateInfo> { match i.node { ast::ViewItemExternCrate(ident, ref path_opt, id) => { let ident = token::get_ident(ident); debug!("resolving extern crate stmt. ident: {} path_opt: {}", ident, path_opt); let name = match *path_opt { Some((ref path_str, _)) => { let name = path_str.get().to_string(); validate_crate_name(Some(e.sess), name.as_slice(), Some(i.span)); name } None => ident.get().to_string(), }; Some(CrateInfo { ident: ident.get().to_string(), name: name, id: id, should_link: should_link(i), }) } _ => None } } pub fn validate_crate_name(sess: Option<&Session>, s: &str, sp: Option<Span>) { let err = |s: &str| { match (sp, sess) { (_, None) => fail!("{}", s), (Some(sp), Some(sess)) => sess.span_err(sp, s), (None, Some(sess)) => sess.err(s), } }; if s.len() == 0 { err("crate name must not be empty"); } for c in s.chars() { if c.is_alphanumeric() { continue } if c == '_' || c == '-' { continue } err(format!("invalid character `{}` in crate name: `{}`", c, s).as_slice()); } match sess { Some(sess) => sess.abort_if_errors(), None => {} } } fn visit_item(e: &Env, i: &ast::Item) { match i.node { ast::ItemForeignMod(ref fm) => { if fm.abi == abi::Rust || fm.abi == abi::RustIntrinsic { return; } // First, add all of the custom link_args attributes let link_args = i.attrs.iter() .filter_map(|at| if at.name().equiv(&("link_args")) { Some(at) } else { None }) .collect::<Vec<&ast::Attribute>>(); for m in link_args.iter() { match m.value_str() { Some(linkarg) => e.sess.cstore.add_used_link_args(linkarg.get()), None => { /* 
fallthrough */ } } } // Next, process all of the #[link(..)]-style arguments let link_args = i.attrs.iter() .filter_map(|at| if at.name().equiv(&("link")) { Some(at) } else { None }) .collect::<Vec<&ast::Attribute>>(); for m in link_args.iter() { match m.meta_item_list() { Some(items) => { let kind = items.iter().find(|k| { k.name().equiv(&("kind")) }).and_then(|a| a.value_str()); let kind = match kind { Some(k) => { if k.equiv(&("static")) { cstore::NativeStatic } else if (e.sess.targ_cfg.os == abi::OsMacos || e.sess.targ_cfg.os == abi::OsiOS) && k.equiv(&("framework")) { cstore::NativeFramework } else if k.equiv(&("framework")) { e.sess.span_err(m.span, "native frameworks are only available \ on OSX targets"); cstore::NativeUnknown } else { e.sess.span_err(m.span, format!("unknown kind: `{}`", k).as_slice()); cstore::NativeUnknown } } None => cstore::NativeUnknown }; let n = items.iter().find(|n| { n.name().equiv(&("name")) }).and_then(|a| a.value_str()); let n = match n { Some(n) => n, None => { e.sess.span_err(m.span, "#[link(...)] specified without \ `name = \"foo\"`"); InternedString::new("foo") } }; if n.get().is_empty() { e.sess.span_err(m.span, "#[link(name = \"\")] given with \ empty name"); } else { e.sess .cstore .add_used_library(n.get().to_string(), kind); } } None => {} } } } _ => { } } } fn existing_match(e: &Env, name: &str, hash: Option<&Svh>) -> Option<ast::CrateNum> { let mut ret = None; e.sess.cstore.iter_crate_data(|cnum, data| { if data.name.as_slice() != name { return } match hash { Some(hash) if *hash == data.hash() => { ret = Some(cnum); return } Some(..) => return, None => {} } // When the hash is None we're dealing with a top-level dependency in // which case we may have a specification on the command line for this // library. Even though an upstream library may have loaded something of // the same name, we have to make sure it was loaded from the exact same // location as well. 
// // We're also sure to compare *paths*, not actual byte slices. The // `source` stores paths which are normalized which may be different // from the strings on the command line. let source = e.sess.cstore.get_used_crate_source(cnum).unwrap(); match e.sess.opts.externs.find_equiv(&name) { Some(locs) => { let found = locs.iter().any(|l| { let l = fs::realpath(&Path::new(l.as_slice())).ok(); l == source.dylib || l == source.rlib }); if found { ret = Some(cnum); } } None => ret = Some(cnum), } }); return ret; } fn register_crate<'a>(e: &mut Env, root: &Option<CratePaths>, ident: &str, name: &str, span: Span, lib: loader::Library) -> (ast::CrateNum, Rc<cstore::crate_metadata>, cstore::CrateSource) { // Claim this crate number and cache it let cnum = e.next_crate_num; e.next_crate_num += 1; // Stash paths for top-most crate locally if necessary. let crate_paths = if root.is_none() { Some(CratePaths { ident: ident.to_string(), dylib: lib.dylib.clone(), rlib: lib.rlib.clone(), }) } else { None }; // Maintain a reference to the top most crate. 
let root = if root.is_some() { root } else { &crate_paths }; let cnum_map = resolve_crate_deps(e, root, lib.metadata.as_slice(), span); let loader::Library{ dylib, rlib, metadata } = lib; let cmeta = Rc::new( cstore::crate_metadata { name: name.to_string(), data: metadata, cnum_map: cnum_map, cnum: cnum, span: span, }); let source = cstore::CrateSource { dylib: dylib, rlib: rlib, cnum: cnum, }; e.sess.cstore.set_crate_data(cnum, cmeta.clone()); e.sess.cstore.add_used_crate_source(source.clone()); (cnum, cmeta, source) } fn resolve_crate<'a>(e: &mut Env, root: &Option<CratePaths>, ident: &str, name: &str, hash: Option<&Svh>, span: Span) -> (ast::CrateNum, Rc<cstore::crate_metadata>, cstore::CrateSource) { match existing_match(e, name, hash) { None => { let mut load_ctxt = loader::Context { sess: e.sess, span: span, ident: ident, crate_name: name, hash: hash.map(|a| &*a), filesearch: e.sess.target_filesearch(), os: e.sess.targ_cfg.os, triple: e.sess.targ_cfg.target_strs.target_triple.as_slice(), root: root, rejected_via_hash: vec!(), rejected_via_triple: vec!(), should_match_name: true, }; let library = load_ctxt.load_library_crate(); register_crate(e, root, ident, name, span, library) } Some(cnum) => (cnum, e.sess.cstore.get_crate_data(cnum), e.sess.cstore.get_used_crate_source(cnum).unwrap()) } } // Go through the crate metadata and load any crates that it references fn resolve_crate_deps(e: &mut Env, root: &Option<CratePaths>, cdata: &[u8], span : Span) -> cstore::cnum_map { debug!("resolving deps of external crate"); // The map from crate numbers in the crate we're resolving to local crate // numbers decoder::get_crate_deps(cdata).iter().map(|dep| { debug!("resolving dep crate {} hash: `{}`", dep.name, dep.hash); let (local_cnum, _, _) = resolve_crate(e, root, dep.name.as_slice(), dep.name.as_slice(), Some(&dep.hash), span); (dep.cnum, local_cnum) }).collect() } pub struct PluginMetadataReader<'a> { env: Env<'a>, } impl<'a> PluginMetadataReader<'a> { pub fn 
new(sess: &'a Session) -> PluginMetadataReader<'a> { PluginMetadataReader { env: Env { sess: sess, next_crate_num: sess.cstore.next_crate_num(), } } } pub fn read_plugin_metadata(&mut self, krate: &ast::ViewItem) -> PluginMetadata { let info = extract_crate_info(&self.env, krate).unwrap(); let target_triple = self.env.sess.targ_cfg.target_strs.target_triple.as_slice(); let is_cross = target_triple != driver::host_triple(); let mut should_link = info.should_link && !is_cross; let os = config::get_os(driver::host_triple()).unwrap(); let mut load_ctxt = loader::Context { sess: self.env.sess, span: krate.span, ident: info.ident.as_slice(), crate_name: info.name.as_slice(), hash: None, filesearch: self.env.sess.host_filesearch(), triple: driver::host_triple(), os: os, root: &None, rejected_via_hash: vec!(), rejected_via_triple: vec!(), should_match_name: true, }; let library = match load_ctxt.maybe_load_library_crate() { Some(l) => l, None if is_cross => { // try loading from target crates (only valid if there are // no syntax extensions) load_ctxt.triple = target_triple; load_ctxt.os = self.env.sess.targ_cfg.os; load_ctxt.filesearch = self.env.sess.target_filesearch(); let lib = load_ctxt.load_library_crate(); if decoder::get_plugin_registrar_fn(lib.metadata.as_slice()).is_some() { let message = format!("crate `{}` contains a plugin_registrar fn but \ only a version for triple `{}` could be found (need {})", info.ident, target_triple, driver::host_triple()); self.env.sess.span_err(krate.span, message.as_slice()); // need to abort now because the syntax expansion // code will shortly attempt to load and execute // code from the found library. 
self.env.sess.abort_if_errors(); } should_link = info.should_link; lib } None => { load_ctxt.report_load_errs(); unreachable!() }, }; let macros = decoder::get_exported_macros(library.metadata.as_slice()); let registrar = decoder::get_plugin_registrar_fn(library.metadata.as_slice()).map(|id| { decoder::get_symbol(library.metadata.as_slice(), id) }); if library.dylib.is_none() && registrar.is_some() { let message = format!("plugin crate `{}` only found in rlib format, \ but must be available in dylib format", info.ident); self.env.sess.span_err(krate.span, message.as_slice()); // No need to abort because the loading code will just ignore this // empty dylib. } let pc = PluginMetadata { lib: library.dylib.clone(), macros: macros, registrar_symbol: registrar, }; if should_link && existing_match(&self.env, info.name.as_slice(), None).is_none() { // register crate now to avoid double-reading metadata register_crate(&mut self.env, &None, info.ident.as_slice(), info.name.as_slice(), krate.span, library); } pc } }
37.166008
96
0.481761
2295cd9075789208e93a82fbcf77f19a6790f414
1,777
/// Yew component that renders the Material Design "phone forwarded" icon
/// as an inline SVG.
///
/// NOTE(review): this looks like generated icon boilerplate; appearance
/// (size, colour, stroke, …) comes entirely from `crate::Props`.
pub struct IconPhoneForwarded {
    props: crate::Props,
}

impl yew::Component for IconPhoneForwarded {
    type Properties = crate::Props;
    type Message = ();

    // Store the incoming props; no component link callbacks are needed.
    fn create(props: Self::Properties, _: yew::prelude::ComponentLink<Self>) -> Self {
        Self { props }
    }

    // The component never sends messages; returning `true` here simply
    // re-renders if one ever arrives.
    fn update(&mut self, _: Self::Message) -> yew::prelude::ShouldRender {
        true
    }

    // Props are never expected to change after creation, so never re-render.
    fn change(&mut self, _: Self::Properties) -> yew::prelude::ShouldRender {
        false
    }

    // Outer <svg> applies the user-configurable presentation attributes;
    // the inner <svg> is the verbatim Material Design icon path data.
    fn view(&self) -> yew::prelude::Html {
        yew::prelude::html! {
            <svg
                class=self.props.class.unwrap_or("")
                width=self.props.size.unwrap_or(24).to_string()
                height=self.props.size.unwrap_or(24).to_string()
                viewBox="0 0 24 24"
                fill=self.props.fill.unwrap_or("none")
                stroke=self.props.color.unwrap_or("currentColor")
                stroke-width=self.props.stroke_width.unwrap_or(2).to_string()
                stroke-linecap=self.props.stroke_linecap.unwrap_or("round")
                stroke-linejoin=self.props.stroke_linejoin.unwrap_or("round")
            >
            <svg xmlns="http://www.w3.org/2000/svg" height="24" viewBox="0 0 24 24" width="24"><path d="M0 0h24v24H0V0z" fill="none"/><path d="M22.65 5.65l-3.79-3.79c-.32-.32-.86-.1-.86.35V4h-3.5c-.28 0-.5.22-.5.5v3c0 .28.22.5.5.5H18v1.79c0 .45.54.67.85.35l3.79-3.79c.2-.19.2-.51.01-.7zm-3.42 9.61l-2.54-.29c-.61-.07-1.21.14-1.64.57l-1.84 1.84c-2.83-1.44-5.15-3.75-6.59-6.59l1.85-1.85c.43-.43.64-1.03.57-1.64l-.29-2.52c-.12-1.01-.97-1.77-1.99-1.77H5.03c-1.13 0-2.07.94-2 2.07.53 8.54 7.36 15.36 15.89 15.89 1.13.07 2.07-.87 2.07-2v-1.73c.01-1.01-.75-1.86-1.76-1.98z"/></svg>
            </svg>
        }
    }
}
38.630435
574
0.579629
6792b5df3f021463d49bff1515b940aa045f1a04
1,932
use amethyst_core::specs::prelude::{Component, DenseVecStorage};

use crate::{
    event::{UiEvent, UiEventType},
    event_retrigger::{EventRetrigger, EventRetriggerSystem},
    EventReceiver, UiButtonAction,
};

/// Provides an `EventRetriggerSystem` that will handle incoming `UiEvents`
/// and trigger `UiButtonAction`s for `UiButton`s with an attached
/// `UiButtonActionRetrigger` component.
pub type UiButtonActionRetriggerSystem = EventRetriggerSystem<UiButtonActionRetrigger>;

/// Attach this to an entity with a `UiButton` attached to it to
/// trigger specific events when a user interaction happens.
#[derive(Debug)]
pub struct UiButtonActionRetrigger {
    /// The `UiButtonAction`s that should happen when the user begins a click
    /// on the `UiButton`
    pub on_click_start: Vec<UiButtonAction>,
    /// The `UiButtonAction`s that should happen when the user ends a click
    /// on the `UiButton`
    pub on_click_stop: Vec<UiButtonAction>,
    /// The `UiButtonAction`s that should happen when the user start hovering
    /// over the `UiButton`
    pub on_hover_start: Vec<UiButtonAction>,
    /// The `UiButtonAction`s that should happen when the user stops hovering
    /// over the `UiButton`
    pub on_hover_stop: Vec<UiButtonAction>,
}

// Dense storage: most UI entities are expected to carry this component,
// so a dense vec avoids the indirection of sparse storage.
impl Component for UiButtonActionRetrigger {
    type Storage = DenseVecStorage<Self>;
}

impl EventRetrigger for UiButtonActionRetrigger {
    type In = UiEvent;
    type Out = UiButtonAction;

    // Map each incoming UI interaction onto the list of actions registered
    // for that interaction kind. Other `UiEventType`s (e.g. value changes)
    // are deliberately ignored.
    fn apply<R>(&self, event: &Self::In, out: &mut R)
    where
        R: EventReceiver<Self::Out>,
    {
        match event.event_type {
            UiEventType::ClickStart => out.receive(&self.on_click_start),
            UiEventType::ClickStop => out.receive(&self.on_click_stop),
            UiEventType::HoverStart => out.receive(&self.on_hover_start),
            UiEventType::HoverStop => out.receive(&self.on_hover_stop),
            _ => {}
        };
    }
}
36.45283
87
0.700311
03c2ea8391a534c79db2a7590779d4c1d42b7dc8
9,832
// NOTE(review): svd2rust-generated register API for the UART1 FCR (FIFO
// Control Register). Do not hand-edit this pattern — regenerate from the
// device SVD instead. Each field gets a value enum (`…W`), a write proxy
// (`_…W<'a>`), and an accessor on `W` that masks/shifts the raw bits.
#[doc = r" Value to write to the register"]
pub struct W {
    bits: u32,
}
impl super::FCR {
    #[doc = r" Writes to the register"]
    #[inline]
    pub fn write<F>(&self, f: F)
    where
        F: FnOnce(&mut W) -> &mut W,
    {
        // Start from the reset value, let the closure set fields, then
        // commit the assembled bits in a single register store.
        let mut w = W::reset_value();
        f(&mut w);
        self.register.set(w.bits);
    }
}
#[doc = "Values that can be written to the field `FIFOEN`"]
pub enum FIFOENW {
    #[doc = "Must not be used in the application."]
    MUST_NOT_BE_USED_IN_,
    #[doc = "Active high enable for both UART1 Rx and TX FIFOs and FCR\\[7:1\\] access. This bit must be set for proper UART1 operation. Any transition on this bit will automatically clear the UART1 FIFOs."]
    ACTIVE_HIGH_ENABLE_F,
}
impl FIFOENW {
    #[allow(missing_docs)]
    #[doc(hidden)]
    #[inline]
    pub fn _bits(&self) -> bool {
        match *self {
            FIFOENW::MUST_NOT_BE_USED_IN_ => false,
            FIFOENW::ACTIVE_HIGH_ENABLE_F => true,
        }
    }
}
#[doc = r" Proxy"]
pub struct _FIFOENW<'a> {
    w: &'a mut W,
}
impl<'a> _FIFOENW<'a> {
    #[doc = r" Writes `variant` to the field"]
    #[inline]
    pub fn variant(self, variant: FIFOENW) -> &'a mut W {
        {
            self.bit(variant._bits())
        }
    }
    #[doc = "Must not be used in the application."]
    #[inline]
    pub fn must_not_be_used_in_(self) -> &'a mut W {
        self.variant(FIFOENW::MUST_NOT_BE_USED_IN_)
    }
    #[doc = "Active high enable for both UART1 Rx and TX FIFOs and FCR\\[7:1\\] access. This bit must be set for proper UART1 operation. 
Any transition on this bit will automatically clear the UART1 FIFOs."]
    #[inline]
    pub fn active_high_enable_f(self) -> &'a mut W {
        self.variant(FIFOENW::ACTIVE_HIGH_ENABLE_F)
    }
    #[doc = r" Sets the field bit"]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r" Clears the field bit"]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r" Writes raw bits to the field"]
    #[inline]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Read-modify-write of bit 0 inside the staged `W.bits` value.
        const MASK: bool = true;
        const OFFSET: u8 = 0;
        self.w.bits &= !((MASK as u32) << OFFSET);
        self.w.bits |= ((value & MASK) as u32) << OFFSET;
        self.w
    }
}
#[doc = "Values that can be written to the field `RXFIFORES`"]
pub enum RXFIFORESW {
    #[doc = "No impact on either of UART1 FIFOs."]
    NO_IMPACT_ON_EITHER_,
    #[doc = "Writing a logic 1 to FCR\\[1\\] will clear all bytes in UART1 Rx FIFO, reset the pointer logic. This bit is self-clearing."]
    WRITING_A_LOGIC_1_TO,
}
impl RXFIFORESW {
    #[allow(missing_docs)]
    #[doc(hidden)]
    #[inline]
    pub fn _bits(&self) -> bool {
        match *self {
            RXFIFORESW::NO_IMPACT_ON_EITHER_ => false,
            RXFIFORESW::WRITING_A_LOGIC_1_TO => true,
        }
    }
}
#[doc = r" Proxy"]
pub struct _RXFIFORESW<'a> {
    w: &'a mut W,
}
impl<'a> _RXFIFORESW<'a> {
    #[doc = r" Writes `variant` to the field"]
    #[inline]
    pub fn variant(self, variant: RXFIFORESW) -> &'a mut W {
        {
            self.bit(variant._bits())
        }
    }
    #[doc = "No impact on either of UART1 FIFOs."]
    #[inline]
    pub fn no_impact_on_either_(self) -> &'a mut W {
        self.variant(RXFIFORESW::NO_IMPACT_ON_EITHER_)
    }
    #[doc = "Writing a logic 1 to FCR\\[1\\] will clear all bytes in UART1 Rx FIFO, reset the pointer logic. 
This bit is self-clearing."]
    #[inline]
    pub fn writing_a_logic_1_to(self) -> &'a mut W {
        self.variant(RXFIFORESW::WRITING_A_LOGIC_1_TO)
    }
    #[doc = r" Sets the field bit"]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r" Clears the field bit"]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r" Writes raw bits to the field"]
    #[inline]
    pub fn bit(self, value: bool) -> &'a mut W {
        const MASK: bool = true;
        const OFFSET: u8 = 1;
        self.w.bits &= !((MASK as u32) << OFFSET);
        self.w.bits |= ((value & MASK) as u32) << OFFSET;
        self.w
    }
}
#[doc = "Values that can be written to the field `TXFIFORES`"]
pub enum TXFIFORESW {
    #[doc = "No impact on either of UART1 FIFOs."]
    NO_IMPACT_ON_EITHER_,
    #[doc = "Writing a logic 1 to FCR\\[2\\] will clear all bytes in UART1 TX FIFO, reset the pointer logic. This bit is self-clearing."]
    WRITING_A_LOGIC_1_TO,
}
impl TXFIFORESW {
    #[allow(missing_docs)]
    #[doc(hidden)]
    #[inline]
    pub fn _bits(&self) -> bool {
        match *self {
            TXFIFORESW::NO_IMPACT_ON_EITHER_ => false,
            TXFIFORESW::WRITING_A_LOGIC_1_TO => true,
        }
    }
}
#[doc = r" Proxy"]
pub struct _TXFIFORESW<'a> {
    w: &'a mut W,
}
impl<'a> _TXFIFORESW<'a> {
    #[doc = r" Writes `variant` to the field"]
    #[inline]
    pub fn variant(self, variant: TXFIFORESW) -> &'a mut W {
        {
            self.bit(variant._bits())
        }
    }
    #[doc = "No impact on either of UART1 FIFOs."]
    #[inline]
    pub fn no_impact_on_either_(self) -> &'a mut W {
        self.variant(TXFIFORESW::NO_IMPACT_ON_EITHER_)
    }
    #[doc = "Writing a logic 1 to FCR\\[2\\] will clear all bytes in UART1 TX FIFO, reset the pointer logic. 
This bit is self-clearing."]
    #[inline]
    pub fn writing_a_logic_1_to(self) -> &'a mut W {
        self.variant(TXFIFORESW::WRITING_A_LOGIC_1_TO)
    }
    #[doc = r" Sets the field bit"]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r" Clears the field bit"]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r" Writes raw bits to the field"]
    #[inline]
    pub fn bit(self, value: bool) -> &'a mut W {
        const MASK: bool = true;
        const OFFSET: u8 = 2;
        self.w.bits &= !((MASK as u32) << OFFSET);
        self.w.bits |= ((value & MASK) as u32) << OFFSET;
        self.w
    }
}
// DMAMODE has no value enum in the SVD, only a raw bit proxy.
#[doc = r" Proxy"]
pub struct _DMAMODEW<'a> {
    w: &'a mut W,
}
impl<'a> _DMAMODEW<'a> {
    #[doc = r" Sets the field bit"]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r" Clears the field bit"]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r" Writes raw bits to the field"]
    #[inline]
    pub fn bit(self, value: bool) -> &'a mut W {
        const MASK: bool = true;
        const OFFSET: u8 = 3;
        self.w.bits &= !((MASK as u32) << OFFSET);
        self.w.bits |= ((value & MASK) as u32) << OFFSET;
        self.w
    }
}
#[doc = "Values that can be written to the field `RXTRIGLVL`"]
pub enum RXTRIGLVLW {
    #[doc = "Trigger level 0 (1 character or 0x01)."]
    TRIGGER_LEVEL_0_1_C,
    #[doc = "Trigger level 1 (4 characters or 0x04)."]
    TRIGGER_LEVEL_1_4_C,
    #[doc = "Trigger level 2 (8 characters or 0x08)."]
    TRIGGER_LEVEL_2_8_C,
    #[doc = "Trigger level 3 (14 characters or 0x0E)."]
    TRIGGER_LEVEL_3_14_,
}
impl RXTRIGLVLW {
    #[allow(missing_docs)]
    #[doc(hidden)]
    #[inline]
    pub fn _bits(&self) -> u8 {
        match *self {
            RXTRIGLVLW::TRIGGER_LEVEL_0_1_C => 0,
            RXTRIGLVLW::TRIGGER_LEVEL_1_4_C => 1,
            RXTRIGLVLW::TRIGGER_LEVEL_2_8_C => 2,
            RXTRIGLVLW::TRIGGER_LEVEL_3_14_ => 3,
        }
    }
}
#[doc = r" Proxy"]
pub struct _RXTRIGLVLW<'a> {
    w: &'a mut W,
}
impl<'a> _RXTRIGLVLW<'a> {
    #[doc = r" Writes `variant` to the field"]
    #[inline]
    pub fn variant(self, variant: RXTRIGLVLW) -> &'a mut W {
        {
            self.bits(variant._bits())
        }
    }
    #[doc = "Trigger level 0 (1 character or 0x01)."]
    #[inline]
    pub fn trigger_level_0_1_c(self) -> &'a mut W {
        self.variant(RXTRIGLVLW::TRIGGER_LEVEL_0_1_C)
    }
    #[doc = "Trigger level 1 (4 characters or 0x04)."]
    #[inline]
    pub fn trigger_level_1_4_c(self) -> &'a mut W {
        self.variant(RXTRIGLVLW::TRIGGER_LEVEL_1_4_C)
    }
    #[doc = "Trigger level 2 (8 characters or 0x08)."]
    #[inline]
    pub fn trigger_level_2_8_c(self) -> &'a mut W {
        self.variant(RXTRIGLVLW::TRIGGER_LEVEL_2_8_C)
    }
    #[doc = "Trigger level 3 (14 characters or 0x0E)."]
    #[inline]
    pub fn trigger_level_3_14_(self) -> &'a mut W {
        self.variant(RXTRIGLVLW::TRIGGER_LEVEL_3_14_)
    }
    #[doc = r" Writes raw bits to the field"]
    #[inline]
    pub fn bits(self, value: u8) -> &'a mut W {
        // Two-bit field at bits 7:6.
        const MASK: u8 = 0x03;
        const OFFSET: u8 = 6;
        self.w.bits &= !((MASK as u32) << OFFSET);
        self.w.bits |= ((value & MASK) as u32) << OFFSET;
        self.w
    }
}
impl W {
    #[doc = r" Reset value of the register"]
    #[inline]
    pub fn reset_value() -> W {
        W { bits: 0 }
    }
    #[doc = r" Writes raw bits to the register"]
    #[inline]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
    #[doc = "Bit 0 - FIFO enable."]
    #[inline]
    pub fn fifoen(&mut self) -> _FIFOENW {
        _FIFOENW { w: self }
    }
    #[doc = "Bit 1 - RX FIFO Reset."]
    #[inline]
    pub fn rxfifores(&mut self) -> _RXFIFORESW {
        _RXFIFORESW { w: self }
    }
    #[doc = "Bit 2 - TX FIFO Reset."]
    #[inline]
    pub fn txfifores(&mut self) -> _TXFIFORESW {
        _TXFIFORESW { w: self }
    }
    #[doc = "Bit 3 - DMA Mode Select. When the FIFO enable bit (bit 0 of this register) is set, this bit selects the DMA mode. See Section 36.6.6.1."]
    #[inline]
    pub fn dmamode(&mut self) -> _DMAMODEW {
        _DMAMODEW { w: self }
    }
    #[doc = "Bits 6:7 - RX Trigger Level. These two bits determine how many receiver UART1 FIFO characters must be written before an interrupt is activated."]
    #[inline]
    pub fn rxtriglvl(&mut self) -> _RXTRIGLVLW {
        _RXTRIGLVLW { w: self }
    }
}
30.918239
207
0.569162
268ca70f9f3b6628ff5b6744ae594607a4c886d5
12,743
#[macro_use] extern crate bitflags; extern crate byteorder; extern crate rustc_serialize; extern crate minifb; extern crate clock_ticks; use std::fs::File; use std::io::Read; use rustc_serialize::hex::ToHex; use byteorder::{ByteOrder, LittleEndian}; #[derive(Debug)] struct CH16Header { magic: String, reserved: u8, version: u8, size: u32, start: u16, crc32: u32, } impl<'a> From<&'a [u8]> for CH16Header { fn from(val: &[u8]) -> CH16Header { CH16Header { magic: String::from_utf8(val[..0x04].to_vec()).unwrap(), reserved: val[0x04], version: val[0x05], size: LittleEndian::read_u32(&val[0x06..0x0A]), start: LittleEndian::read_u16(&val[0x0A..0x0C]), crc32: LittleEndian::read_u32(&val[0x0C..0x10]), } } } #[allow(dead_code)] bitflags! { flags Flags: u8 { const CLEAR = 0b00000000, const CARRY = 0b00000010, const ZERO = 0b00000100, const OVERFLOW = 0b01000000, const NEGATIVE = 0b10000000, } } const STACK_START: u16 = 0xFDF0; #[allow(dead_code)] const IO_ADDR: u16 = 0xFFF0; const MEMORY: usize = 0xFFFF; #[allow(dead_code)] struct CHIP16 { memory: [u8; MEMORY], pc: u16, sp: u16, regs: [i16; 16], flags: Flags, bg: Color, fg: Color, spritew: u8, spriteh: u8, vblank: bool, } #[allow(dead_code)] #[derive(PartialEq)] pub enum State { Continue, Stop, } #[derive(Clone)] pub enum Color { Transparent, Black, Gray, Red, Pink, DarkBrown, Brown, Orange, Yelow, Green, LightGreen, DarkBlue, Blue, LightBlue, SkyBlue, White, } impl From<u8> for Color { fn from(val: u8) -> Color { match val { 0xF => Color::White, _ => Color::Transparent, } } } impl Into<u32> for Color { fn into(self: Color) -> u32 { match self { Color::White => 0xFFFFFFFF, _ => 0x00000000, } } } impl CHIP16 { fn new(header: &CH16Header, cart: &[u8]) -> CHIP16 { let mut ret = CHIP16 { memory: [0; MEMORY], pc: header.start, sp: STACK_START, regs: [0; 16], flags: CLEAR, bg: Color::Transparent, fg: Color::Transparent, spritew: 0, spriteh: 0, vblank: false, }; for i in 0..header.size { ret.memory[i as usize] = cart[i as usize]; } 
ret } fn cycle(&mut self, screen: &Arc<Mutex<Vec<u32>>>) -> State { // print!("{:#X}: ", self.pc); let instr = &self.memory[self.pc as usize..self.pc as usize + 4]; let opcode = instr[0]; let ll: u16 = instr[2] as u16; let hh: u16 = instr[3] as u16; let hhll: u16 = hh << 8 | ll; let val = self.memory[hhll as usize]; let x = instr[1] & 0x0F; let y = (instr[1] & 0xF0) >> 4; let z = instr[2] & 0x0F; let rx: i16 = self.regs[x as usize]; let ry: i16 = self.regs[y as usize]; self.pc += 4; match opcode { 0x01 => { self.fg = Color::Transparent; self.bg = Color::Transparent; let mut buff = screen.lock().unwrap(); for i in buff.iter_mut() { *i = self.bg.clone().into(); } // println!("CLS"); } 0x02 => { if !self.vblank { self.pc -= 4; } else { self.vblank = false; } // println!("VBLNK"); } 0x03 => { let c = instr[2] & 0x0F; self.bg = c.into(); // println!("BGC {}", c); } 0x04 => { let w = instr[2]; let h = instr[3]; self.spritew = w; self.spriteh = h; // println!("SPR w:{} h:{}", w, h); } 0x05 => { let mut buff = screen.lock().unwrap(); let mut xpos = rx as i16; let mut ypos = ry as i16; let mut addr = hhll as usize; // println!("DRW R{:X}, R{:X}, {:#X}", x, y, hhll); for j in 0..self.spriteh { ypos += j as i16; for i in 0..self.spritew { let color = self.memory[addr]; let left: Color = ((color & 0xF0) >> 4 as u8).into(); let right: Color = ((color & 0x0F) as u8).into(); let pos = (xpos as i64 + ypos as i64 * WIDTH as i64) as usize; buff[pos + 0] = left.into(); buff[pos + 1] = right.into(); addr += (i * j + self.spritew as u8) as usize; xpos += i as i16 * 2; // TODO: Check collision } } } 0x10 => { self.pc = hhll; // println!("JMP {:#X}", hhll); } 0x12 => { match x { 0x00 => { if (self.flags & ZERO) == ZERO { self.pc = hhll; } // println!("JZ {:#X}", hhll) } 0x09 => { if (self.flags & CARRY) == CARRY { self.pc = hhll; } // println!("JB {:#X}", hhll) } _ => panic!("J{:x} {:#X}", x, hhll), } } 0x13 => { if rx == ry { self.pc = hhll; } // println!("JME R{:X}, R{:X}, {:#X}", x, 
y, hhll); } 0x20 => { self.regs[x as usize] = hhll as i16; // println!("LDI R{:X}, {:#X}", x, hhll); } 0x24 => { self.regs[x as usize] = ry; // println!("MOV R{:X}, R{:X}", x, y); } 0x41 => { let (res, of) = rx.overflowing_add(ry); self.regs[x as usize] = res; // println!("ADD R{:X}, R{:X}", x, y); if of { self.flags |= CARRY; } else { self.flags |= CARRY; } if (rx < 0 && ry < 0 && res >= 0) || (rx >= 0 && ry >= 0 && res < 0) { self.flags |= OVERFLOW; } else { self.flags &= !OVERFLOW; } if res == 0 { self.flags |= ZERO; } else { self.flags &= !ZERO; } if res < 0 { self.flags |= NEGATIVE; } else { self.flags &= !NEGATIVE; } } 0x50 => { let (res, of) = rx.overflowing_sub(hhll as i16); self.regs[x as usize] = res; // println!("SUB R{:X}, {:#X}", x, hhll as i16); if rx < hhll as i16 { self.flags |= CARRY; } else { self.flags &= !CARRY; } if of { self.flags |= OVERFLOW; } else { self.flags &= !OVERFLOW; } if res < 0 { self.flags |= NEGATIVE; } else { self.flags &= !NEGATIVE; } if res == 0 { self.flags |= ZERO; } else { self.flags &= !ZERO; } } 0x51 => { let (res, of) = rx.overflowing_sub(ry); self.regs[x as usize] = res; // println!("SUB R{:X}, R{:X}", x, y); if rx < ry { self.flags |= CARRY; } else { self.flags &= !CARRY; } if of { self.flags |= OVERFLOW; } else { self.flags &= !OVERFLOW; } if res < 0 { self.flags |= NEGATIVE; } else { self.flags &= !NEGATIVE; } if res == 0 { self.flags |= ZERO; } else { self.flags &= !ZERO; } } 0x52 => { let (res, of) = rx.overflowing_sub(ry); self.regs[z as usize] = res; // println!("SUB R{:X}, R{:X}, R{:X}", x, y, z); if rx < ry { self.flags |= CARRY; } else { self.flags &= !CARRY; } if of { self.flags |= OVERFLOW; } else { self.flags &= !OVERFLOW; } if res < 0 { self.flags |= NEGATIVE; } else { self.flags &= !NEGATIVE; } if res == 0 { self.flags |= ZERO; } else { self.flags &= !ZERO; } } _ => { panic!("Unknown opcode: {:#x} instr: 0x{}", opcode, (*instr).to_hex().to_uppercase()) } } State::Continue } } const WIDTH: usize = 320; 
const HEIGHT: usize = 240;

use minifb::{Key, Scale, WindowOptions};
use std::thread;
use std::time::Duration;
use std::sync::{Arc, Mutex};

/// Runs `callback` on the current thread at roughly `rate` Hz until it
/// returns `State::Stop`.
pub fn draw_loop<F>(rate: u64, mut callback: F)
    where F: FnMut() -> State
{
    let mut accumulator = 0;
    let mut previous_clock = clock_ticks::precise_time_ns();
    // Convert Hz into a period in nanoseconds (shadows the parameter).
    let rate = 1_000_000_000 / rate;
    loop {
        match callback() {
            State::Stop => break,
            State::Continue => (),
        };
        let now = clock_ticks::precise_time_ns();
        accumulator += now - previous_clock;
        previous_clock = now;
        // Drop whole periods already consumed; afterwards accumulator < rate,
        // so the subtraction in the sleep below cannot underflow.
        while accumulator >= rate {
            accumulator -= rate;
        }
        thread::sleep(Duration::from_millis(((rate - accumulator) / 1000000) as u64));
    }
}

/// Same pacing scheme as `draw_loop`, but runs `callback` on a freshly
/// spawned background thread and returns immediately (the JoinHandle is
/// dropped, i.e. the thread is detached).
pub fn cpu_loop<F>(rate: u64, mut callback: F)
    where F: FnMut() -> State + Send + 'static
{
    thread::spawn(move || {
        let mut accumulator = 0;
        let mut previous_clock = clock_ticks::precise_time_ns();
        let rate = 1_000_000_000 / rate;
        loop {
            match callback() {
                State::Stop => break,
                State::Continue => (),
            };
            let now = clock_ticks::precise_time_ns();
            accumulator += now - previous_clock;
            previous_clock = now;
            while accumulator >= rate {
                accumulator -= rate;
            }
            thread::sleep(Duration::from_millis(((rate - accumulator) / 1000000) as u64));
        }
    });
}

fn main() {
    // Cartridge path is hard-coded; TODO(review): take it from argv.
    let mut file = File::open("Ball.c16").unwrap();
    let mut cartridge: Vec<u8> = Vec::new();
    file.read_to_end(&mut cartridge).unwrap();
    // First 16 bytes are the header, the remainder is the ROM image.
    let header: &CH16Header = &cartridge[..16].into();
    let cart = &cartridge[16..];
    // Shared framebuffer: written by the CPU thread, presented here.
    let buffer = Arc::new(Mutex::new(vec![0; WIDTH * HEIGHT]));
    let mut window = match minifb::Window::new("chip-16 emulator in Rust",
                                               WIDTH,
                                               HEIGHT,
                                               WindowOptions {
                                                   scale: Scale::X2,
                                                   ..WindowOptions::default()
                                               }) {
        Ok(win) => win,
        Err(err) => panic!("Unable to create window {}", err),
    };
    let chip16 = Arc::new(Mutex::new(CHIP16::new(header, cart)));
    let cpu_arc = buffer.clone();
    let c_a = chip16.clone();
    // Emulated CPU at 1 MHz on a detached background thread.
    cpu_loop(1_000_000, move || c_a.lock().unwrap().cycle(&cpu_arc));
    let c_b = chip16.clone();
    // Present at 60 Hz on the main thread and raise vblank once per frame
    // so the VBLNK instruction in the CPU thread can proceed.
    draw_loop(60, || {
        if window.is_open() && !window.is_key_down(Key::Escape) {
            window.update_with_buffer(&buffer.lock().unwrap());
            c_b.lock().unwrap().vblank = true;
            State::Continue
        } else {
            State::Stop
        }
    });
}
25.743434
99
0.398493
c19b457990fcfddc8cf963c08ac48ded4c7021ff
1,028
use std::collections::HashMap; pub fn post_process(input_vector: Vec<HashMap<String, i32>>) { //Generate final mapping let mut final_map = HashMap::new(); for mapping in input_vector.iter() { final_map.extend(mapping); } //Generate sorted vector: let mut final_map_vec: Vec<(&String, &i32)> = final_map.into_iter().collect(); final_map_vec.sort_by(|a, b| b.1.cmp(&a.1)); //Handle cases where result is less than 100: let final_map_vec_length = final_map_vec.len(); if final_map_vec_length >= 100 { print_metrics(final_map_vec, 100); } else { print_metrics(final_map_vec, final_map_vec_length); } } //print vector fn print_metrics(datavec: Vec<(&String, &i32)>, count: usize) { for (k, v) in &datavec[..count] { println!("{} - {}", k, v); } } #[test] fn test_print_metrics() { let test_string = "print metrics testing is happening here"; let count: usize = 1; print_metrics(vec![(&test_string.to_string(), &20)], count); }
27.783784
82
0.643969
56eefb8cc80d62825c9d34c6df427ed9b3043154
5,662
// NOTE(review): svd2rust-generated register API for the SCT CAPCTRL9
// register. Do not hand-edit — regenerate from the device SVD. The file
// follows the standard R/W newtype pattern: `R`/`W` wrap the generic
// reader/writer and field accessors mask/shift the 32-bit value.
#[doc = "Register `CAPCTRL9` reader"]
pub struct R(crate::R<CAPCTRL9_SPEC>);
impl core::ops::Deref for R {
    type Target = crate::R<CAPCTRL9_SPEC>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl From<crate::R<CAPCTRL9_SPEC>> for R {
    #[inline(always)]
    fn from(reader: crate::R<CAPCTRL9_SPEC>) -> Self {
        R(reader)
    }
}
#[doc = "Register `CAPCTRL9` writer"]
pub struct W(crate::W<CAPCTRL9_SPEC>);
impl core::ops::Deref for W {
    type Target = crate::W<CAPCTRL9_SPEC>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl core::ops::DerefMut for W {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
impl From<crate::W<CAPCTRL9_SPEC>> for W {
    #[inline(always)]
    fn from(writer: crate::W<CAPCTRL9_SPEC>) -> Self {
        W(writer)
    }
}
#[doc = "Field `CAPCONn_L` reader - If bit m is one, event m causes the CAPn_L (UNIFY = 0) or the CAPn (UNIFY = 1) register to be loaded (event 0 = bit 0, event 1 = bit 1, etc.). The number of bits = number of match/captures in this SCT."]
pub struct CAPCONN_L_R(crate::FieldReader<u16, u16>);
impl CAPCONN_L_R {
    #[inline(always)]
    pub(crate) fn new(bits: u16) -> Self {
        CAPCONN_L_R(crate::FieldReader::new(bits))
    }
}
impl core::ops::Deref for CAPCONN_L_R {
    type Target = crate::FieldReader<u16, u16>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
#[doc = "Field `CAPCONn_L` writer - If bit m is one, event m causes the CAPn_L (UNIFY = 0) or the CAPn (UNIFY = 1) register to be loaded (event 0 = bit 0, event 1 = bit 1, etc.). 
The number of bits = number of match/captures in this SCT."]
pub struct CAPCONN_L_W<'a> {
    w: &'a mut W,
}
impl<'a> CAPCONN_L_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u16) -> &'a mut W {
        // Low half-word: bits 15:0.
        self.w.bits = (self.w.bits & !0xffff) | (value as u32 & 0xffff);
        self.w
    }
}
#[doc = "Field `CAPCONn_H` reader - If bit m is one, event m causes the CAPn_H (UNIFY = 0) register to be loaded (event 0 = bit 16, event 1 = bit 17, etc.). The number of bits = number of match/captures in this SCT."]
pub struct CAPCONN_H_R(crate::FieldReader<u16, u16>);
impl CAPCONN_H_R {
    #[inline(always)]
    pub(crate) fn new(bits: u16) -> Self {
        CAPCONN_H_R(crate::FieldReader::new(bits))
    }
}
impl core::ops::Deref for CAPCONN_H_R {
    type Target = crate::FieldReader<u16, u16>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
#[doc = "Field `CAPCONn_H` writer - If bit m is one, event m causes the CAPn_H (UNIFY = 0) register to be loaded (event 0 = bit 16, event 1 = bit 17, etc.). The number of bits = number of match/captures in this SCT."]
pub struct CAPCONN_H_W<'a> {
    w: &'a mut W,
}
impl<'a> CAPCONN_H_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u16) -> &'a mut W {
        // High half-word: bits 31:16.
        self.w.bits = (self.w.bits & !(0xffff << 16)) | ((value as u32 & 0xffff) << 16);
        self.w
    }
}
impl R {
    #[doc = "Bits 0:15 - If bit m is one, event m causes the CAPn_L (UNIFY = 0) or the CAPn (UNIFY = 1) register to be loaded (event 0 = bit 0, event 1 = bit 1, etc.). The number of bits = number of match/captures in this SCT."]
    #[inline(always)]
    pub fn capconn_l(&self) -> CAPCONN_L_R {
        CAPCONN_L_R::new((self.bits & 0xffff) as u16)
    }
    #[doc = "Bits 16:31 - If bit m is one, event m causes the CAPn_H (UNIFY = 0) register to be loaded (event 0 = bit 16, event 1 = bit 17, etc.). 
The number of bits = number of match/captures in this SCT."]
    #[inline(always)]
    pub fn capconn_h(&self) -> CAPCONN_H_R {
        CAPCONN_H_R::new(((self.bits >> 16) & 0xffff) as u16)
    }
}
impl W {
    #[doc = "Bits 0:15 - If bit m is one, event m causes the CAPn_L (UNIFY = 0) or the CAPn (UNIFY = 1) register to be loaded (event 0 = bit 0, event 1 = bit 1, etc.). The number of bits = number of match/captures in this SCT."]
    #[inline(always)]
    pub fn capconn_l(&mut self) -> CAPCONN_L_W {
        CAPCONN_L_W { w: self }
    }
    #[doc = "Bits 16:31 - If bit m is one, event m causes the CAPn_H (UNIFY = 0) register to be loaded (event 0 = bit 16, event 1 = bit 17, etc.). The number of bits = number of match/captures in this SCT."]
    #[inline(always)]
    pub fn capconn_h(&mut self) -> CAPCONN_H_W {
        CAPCONN_H_W { w: self }
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.0.bits(bits);
        self
    }
}
#[doc = "SCT capture control register\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [capctrl9](index.html) module"]
pub struct CAPCTRL9_SPEC;
impl crate::RegisterSpec for CAPCTRL9_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [capctrl9::R](R) reader structure"]
impl crate::Readable for CAPCTRL9_SPEC {
    type Reader = R;
}
#[doc = "`write(|w| ..)` method takes [capctrl9::W](W) writer structure"]
impl crate::Writable for CAPCTRL9_SPEC {
    type Writer = W;
}
#[doc = "`reset()` method sets CAPCTRL9 to value 0"]
impl crate::Resettable for CAPCTRL9_SPEC {
    #[inline(always)]
    fn reset_value() -> Self::Ux {
        0
    }
}
40.156028
417
0.621159
33f99a4932420825762bf948fa768569c7073220
5,500
use anyhow::{anyhow, Context, Result}; use hyper::{net::HttpsConnector, Client, Url}; use hyper_native_tls::NativeTlsClient; use semver_rs::{Range, Version}; use serde_json::Value; use std::{ collections::{HashSet}, io::Read, path::{Path, PathBuf}, }; use tar::Archive; mod pack; use crate::pack::{gunzip, unpack_archive}; mod cache; pub use crate::cache::{cache, get_cache_dir, PATH_SEGMENT_ENCODE_SET}; pub mod deps; pub use deps::{calculate_depends, path_to_root_dependency, path_to_dependencies, Dependency}; use percent_encoding::utf8_percent_encode; use static_init::{dynamic}; #[dynamic] static CLIENT_CONNECTOR: Client = Client::with_connector(HttpsConnector::new(NativeTlsClient::new().unwrap())); static REGISTRY: &'static str = "https://registry.npmjs.org"; // static REGISTRY: &'static str = "http://127.0.0.1:5080"; pub fn install_dep(path: &Path, dep: &Dependency) -> Result<()> { let required_version = Range::new(&dep.version) .parse() .with_context(|| format!("Version {} of {} didn't parse", dep.version, dep.name))?; if dep.version.starts_with("git://") { use git2::Repository; let mut path = path.clone().to_path_buf(); path.push(dep.name.clone()); if let Some(x) = dep.version.rfind('#') { let (repo, hash) = dep.version.split_at(x); let repo_cloned = Repository::clone(repo, &path)?; let mut hash = hash.clone().to_string(); hash.remove(0); println!("hash: {}", hash); let obj = repo_cloned.revparse_single(&hash)?; repo_cloned.checkout_tree(&obj, None)?; } else { Repository::clone(&dep.version, &path)?; } return Ok(()) } let metadata = fetch_package_root_metadata(&dep)?; let versions = &metadata["versions"] .as_object() .ok_or(anyhow!("Versions was not a JSON object"))?; let mut next_paths: HashSet<PathBuf> = HashSet::new(); for version in versions.iter().rev() { if required_version.test( &Version::new(version.0.as_str()) .parse() .with_context(|| format!("{} didn't parse", version.0))?, ) { let dist = &version.1["dist"]; let tarball_url = Url::parse( 
&dist["tarball"] .as_str() .ok_or(anyhow!("tarball URL didn't convert to string"))?, ) .context("Couldn't parse URL")?; let tarball = gunzip(cache(&dep.name, &version.0, &tarball_url)?, &tarball_url)?; let mut archive = Archive::new(tarball.as_slice()); let mut path = path.to_path_buf(); path.push(&dep.name); unpack_archive(&mut archive, &path, &tarball_url)?; next_paths.insert(path); break; } } Ok(()) } /// Metadata for a specific version of a package pub fn fetch_package_version_metadata(dep: &Dependency, version: &String) -> Result<serde_json::Value> { let ssl = NativeTlsClient::new().context("Unable to create a NativeTlsClient")?; let connector = HttpsConnector::new(ssl); let client = Client::with_connector(connector); let url = format!("{}/{}/{}", REGISTRY, utf8_percent_encode(&dep.name, PATH_SEGMENT_ENCODE_SET), utf8_percent_encode(&version, PATH_SEGMENT_ENCODE_SET)); let mut body = String::new(); client .get(&url) .send() .with_context(|| format!("Couldn't GET URL: {}", url))? .read_to_string(&mut body) .with_context(|| format!("Couldn't ready body of: {}", url))?; let metadata: Value = serde_json::from_str(&body) .with_context(|| format!("Couldn't JSON parse metadata from {}", url))?; Ok(metadata) } /// Metadata for all versions pub fn fetch_package_root_metadata(dep: &Dependency) -> Result<serde_json::Value> { let url = format!("{}/{}", REGISTRY, utf8_percent_encode(&dep.name, PATH_SEGMENT_ENCODE_SET)); let mut body = String::new(); CLIENT_CONNECTOR .get(&url) .send() .with_context(|| format!("Couldn't GET URL: {}", url))? 
.read_to_string(&mut body) .with_context(|| format!("Couldn't ready body of: {}", url))?; let body: Value = serde_json::from_str(&body) .with_context(|| format!("Couldn't JSON parse metadata from {}", url))?; Ok(body) } pub fn fetch_matching_version_metadata<'a>(dep: &'a Dependency, root_metadata: &'a serde_json::Value) -> Result<(&'a String, &'a Value)> { let required_version = Range::new(&dep.version) .parse() .with_context(|| format!("Version {} of {} didn't parse", dep.version, dep.name))?; let versions = &root_metadata["versions"] .as_object() .ok_or(anyhow!("Versions was not a JSON object"))?; for version in versions.iter().rev() { if required_version.test( &Version::new(version.0.as_str()) .parse() .with_context(|| format!("{} didn't parse", version.0))?, ) { // let dist = &version.1["dist"]; // let tarball_url = Url::parse( // &dist["tarball"] // .as_str() // .ok_or(anyhow!("tarball URL didn't convert to string"))?, // ) // .context("Couldn't parse URL")?; return Ok(version); } } Err(anyhow!("ho matching version")) }
32.934132
138
0.587091
fe24c3f74901576daa1f251c2eed7242a90763c8
3,518
#[doc = "Reader of register FLASH_P_R_PV"] pub type R = crate::R<u32, super::FLASH_P_R_PV>; #[doc = "Writer for register FLASH_P_R_PV"] pub type W = crate::W<u32, super::FLASH_P_R_PV>; #[doc = "Register FLASH_P_R_PV `reset()`'s with value 0x02c1_0200"] impl crate::ResetValue for super::FLASH_P_R_PV { type Type = u32; #[inline(always)] fn reset_value() -> Self::Type { 0x02c1_0200 } } #[doc = "Reader of field `PH`"] pub type PH_R = crate::R<u8, u8>; #[doc = "Write proxy for field `PH`"] pub struct PH_W<'a> { w: &'a mut W, } impl<'a> PH_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0xff << 24)) | (((value as u32) & 0xff) << 24); self.w } } #[doc = "Reader of field `RH`"] pub type RH_R = crate::R<u8, u8>; #[doc = "Write proxy for field `RH`"] pub struct RH_W<'a> { w: &'a mut W, } impl<'a> RH_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0xff << 16)) | (((value as u32) & 0xff) << 16); self.w } } #[doc = "Reader of field `PVH`"] pub type PVH_R = crate::R<u8, u8>; #[doc = "Write proxy for field `PVH`"] pub struct PVH_W<'a> { w: &'a mut W, } impl<'a> PVH_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0xff << 8)) | (((value as u32) & 0xff) << 8); self.w } } #[doc = "Reader of field `PVH2`"] pub type PVH2_R = crate::R<u8, u8>; #[doc = "Write proxy for field `PVH2`"] pub struct PVH2_W<'a> { w: &'a mut W, } impl<'a> PVH2_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !0xff) | ((value as u32) & 0xff); self.w } } impl R { #[doc = "Bits 24:31 - 31:24\\] Internal. 
Only to be used through TI provided API."] #[inline(always)] pub fn ph(&self) -> PH_R { PH_R::new(((self.bits >> 24) & 0xff) as u8) } #[doc = "Bits 16:23 - 23:16\\] Internal. Only to be used through TI provided API."] #[inline(always)] pub fn rh(&self) -> RH_R { RH_R::new(((self.bits >> 16) & 0xff) as u8) } #[doc = "Bits 8:15 - 15:8\\] Internal. Only to be used through TI provided API."] #[inline(always)] pub fn pvh(&self) -> PVH_R { PVH_R::new(((self.bits >> 8) & 0xff) as u8) } #[doc = "Bits 0:7 - 7:0\\] Internal. Only to be used through TI provided API."] #[inline(always)] pub fn pvh2(&self) -> PVH2_R { PVH2_R::new((self.bits & 0xff) as u8) } } impl W { #[doc = "Bits 24:31 - 31:24\\] Internal. Only to be used through TI provided API."] #[inline(always)] pub fn ph(&mut self) -> PH_W { PH_W { w: self } } #[doc = "Bits 16:23 - 23:16\\] Internal. Only to be used through TI provided API."] #[inline(always)] pub fn rh(&mut self) -> RH_W { RH_W { w: self } } #[doc = "Bits 8:15 - 15:8\\] Internal. Only to be used through TI provided API."] #[inline(always)] pub fn pvh(&mut self) -> PVH_W { PVH_W { w: self } } #[doc = "Bits 0:7 - 7:0\\] Internal. Only to be used through TI provided API."] #[inline(always)] pub fn pvh2(&mut self) -> PVH2_W { PVH2_W { w: self } } }
29.07438
86
0.548039
ddca790142b803c6934a2515a58b0b5fd4c6ec19
750
// Copyright (c) The XPeer Core Contributors // SPDX-License-Identifier: Apache-2.0 use admission_control_service::admission_control_node; use executable_helpers::helpers::{ setup_executable, ARG_CONFIG_PATH, ARG_DISABLE_LOGGING, ARG_PEER_ID, }; /// Run a Admission Control service in its own process. /// It will also setup global logger and initialize config. fn main() { let (config, _logger, _args) = setup_executable( "XPeer AdmissionControl node".to_string(), vec![ARG_PEER_ID, ARG_CONFIG_PATH, ARG_DISABLE_LOGGING], ); let admission_control_node = admission_control_node::AdmissionControlNode::new(config); admission_control_node .run() .expect("Unable to run AdmissionControl node"); }
32.608696
91
0.737333
6a3c8a362b260690905659b87c925526fe141309
856
/* automatically generated by rust-bindgen */ #![allow(dead_code, non_snake_case, non_camel_case_types, non_upper_case_globals)] pub const foo: _bindgen_ty_1 = _bindgen_ty_1::foo; pub const bar: _bindgen_ty_1 = _bindgen_ty_1::bar; #[repr(u32)] #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] pub enum _bindgen_ty_1 { foo = 4, bar = 8, } pub type EasyToOverflow = ::std::os::raw::c_ulonglong; pub const k: EasyToOverflow = 2147483648; pub const k_expr: EasyToOverflow = 0; extern "C" { #[link_name = "\u{1}wow"] pub static wow: EasyToOverflow; } pub const BAZ: ::std::os::raw::c_longlong = 24; pub const fuzz: f64 = 51.0; pub const BAZZ: ::std::os::raw::c_char = 53; pub const WAT: ::std::os::raw::c_char = 0; pub const bytestring: &'static [u8; 4usize] = b"Foo\0"; pub const NOT_UTF8: [u8; 5usize] = [240u8, 40u8, 140u8, 40u8, 0u8];
30.571429
82
0.688084
09098c18f9924f658bc89b53bd440466ee6827cf
2,516
/** * [98] Validate Binary Search Tree * * Given a binary tree, determine if it is a valid binary search tree (BST). * * Assume a BST is defined as follows: * * * The left subtree of a node contains only nodes with keys less than the node's key. * The right subtree of a node contains only nodes with keys greater than the node's key. * Both the left and right subtrees must also be binary search trees. * * * Example 1: * * * Input: * 2 * / \ * 1 3 * Output: true * * * Example 2: * * * 5 * / \ * 1 4 * / \ * 3 6 * Output: false * Explanation: The input is: [5,1,4,null,null,3,6]. The root node's value * is 5 but its right child's value is 4. * * */ pub struct Solution {} // problem: https://leetcode.com/problems/validate-binary-search-tree/ // discuss: https://leetcode.com/problems/validate-binary-search-tree/discuss/?currentPage=1&orderBy=most_votes&query= // submission codes start here // Definition for a binary tree node. use crate::util::tree::{to_tree, TreeNode}; use std::cell::RefCell; use std::rc::Rc; impl Solution { pub fn is_valid_bst(root: Option<Rc<RefCell<TreeNode>>>) -> bool { let mut vec = vec![]; Solution::preorder_traverse(root.as_ref(), &mut vec) } fn preorder_traverse( root: Option<&Rc<RefCell<TreeNode>>>, formers: &mut Vec<(i32, i32)>, ) -> bool { if let Some(node) = root { let root_val = root.as_ref().unwrap().borrow().val; for former in formers.iter() { if (former.0 < 0 && root_val >= former.1) || (former.0 > 0 && root_val <= former.1) { return false; } } let mut to_right = formers.clone(); formers.push((-1, root_val)); to_right.push((1, root_val)); Solution::preorder_traverse(node.borrow().left.as_ref(), formers) && Solution::preorder_traverse(node.borrow().right.as_ref(), &mut to_right) } else { true } } } // submission codes end #[cfg(test)] mod tests { use super::*; #[test] fn test_98() { assert_eq!( Solution::is_valid_bst(tree![5, 1, 4, null, null, 3, 6]), false ); assert_eq!(Solution::is_valid_bst(tree![2, 1, 3]), true); assert_eq!( 
Solution::is_valid_bst(tree![10, 5, 15, null, null, 6, 20]), false ); } }
25.673469
118
0.558029
760a6681c79e670b8c25b5e14633b9bf6447d1dc
21,744
// Generated from definition io.k8s.api.storage.v1beta1.CSINode /// DEPRECATED - This group version of CSINode is deprecated by storage/v1/CSINode. See the release notes for more information. CSINode holds information about all CSI drivers installed on a node. CSI drivers do not need to create the CSINode object directly. As long as they use the node-driver-registrar sidecar container, the kubelet will automatically populate the CSINode object for the CSI driver as part of kubelet plugin registration. CSINode has the same name as a node. If the object is missing, it means either there are no CSI Drivers available on the node, or the Kubelet version is low enough that it doesn't create this object. CSINode has an OwnerReference that points to the corresponding node object. #[derive(Clone, Debug, Default, PartialEq)] pub struct CSINode { /// metadata.name must be the Kubernetes node name. pub metadata: crate::apimachinery::pkg::apis::meta::v1::ObjectMeta, /// spec is the specification of CSINode pub spec: crate::api::storage::v1beta1::CSINodeSpec, } // Begin storage.k8s.io/v1beta1/CSINode // Generated from operation createStorageV1beta1CSINode impl CSINode { /// create a CSINode /// /// Use the returned [`crate::ResponseBody`]`<`[`crate::CreateResponse`]`<Self>>` constructor, or [`crate::CreateResponse`]`<Self>` directly, to parse the HTTP response. /// /// # Arguments /// /// * `body` /// /// * `optional` /// /// Optional parameters. Use `Default::default()` to not pass any. 
#[cfg(feature = "api")] pub fn create_csi_node( body: &crate::api::storage::v1beta1::CSINode, optional: crate::CreateOptional<'_>, ) -> Result<(crate::http::Request<Vec<u8>>, fn(crate::http::StatusCode) -> crate::ResponseBody<crate::CreateResponse<Self>>), crate::RequestError> { let __url = "/apis/storage.k8s.io/v1beta1/csinodes?".to_owned(); let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url); optional.__serialize(&mut __query_pairs); let __url = __query_pairs.finish(); let __request = crate::http::Request::post(__url); let __body = crate::serde_json::to_vec(body).map_err(crate::RequestError::Json)?; let __request = __request.header(crate::http::header::CONTENT_TYPE, crate::http::header::HeaderValue::from_static("application/json")); match __request.body(__body) { Ok(request) => Ok((request, crate::ResponseBody::new)), Err(err) => Err(crate::RequestError::Http(err)), } } } // Generated from operation deleteStorageV1beta1CSINode impl CSINode { /// delete a CSINode /// /// Use the returned [`crate::ResponseBody`]`<`[`crate::DeleteResponse`]`<Self>>` constructor, or [`crate::DeleteResponse`]`<Self>` directly, to parse the HTTP response. /// /// # Arguments /// /// * `name` /// /// name of the CSINode /// /// * `optional` /// /// Optional parameters. Use `Default::default()` to not pass any. 
#[cfg(feature = "api")] pub fn delete_csi_node( name: &str, optional: crate::DeleteOptional<'_>, ) -> Result<(crate::http::Request<Vec<u8>>, fn(crate::http::StatusCode) -> crate::ResponseBody<crate::DeleteResponse<Self>>), crate::RequestError> { let __url = format!("/apis/storage.k8s.io/v1beta1/csinodes/{name}", name = crate::percent_encoding::percent_encode(name.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET), ); let __request = crate::http::Request::delete(__url); let __body = crate::serde_json::to_vec(&optional).map_err(crate::RequestError::Json)?; let __request = __request.header(crate::http::header::CONTENT_TYPE, crate::http::header::HeaderValue::from_static("application/json")); match __request.body(__body) { Ok(request) => Ok((request, crate::ResponseBody::new)), Err(err) => Err(crate::RequestError::Http(err)), } } } // Generated from operation deleteStorageV1beta1CollectionCSINode impl CSINode { /// delete collection of CSINode /// /// Use the returned [`crate::ResponseBody`]`<`[`crate::DeleteResponse`]`<`[`crate::List`]`<Self>>>` constructor, or [`crate::DeleteResponse`]`<`[`crate::List`]`<Self>>` directly, to parse the HTTP response. /// /// # Arguments /// /// * `delete_optional` /// /// Delete options. Use `Default::default()` to not pass any. /// /// * `list_optional` /// /// List options. Use `Default::default()` to not pass any. 
#[cfg(feature = "api")] pub fn delete_collection_csi_node( delete_optional: crate::DeleteOptional<'_>, list_optional: crate::ListOptional<'_>, ) -> Result<(crate::http::Request<Vec<u8>>, fn(crate::http::StatusCode) -> crate::ResponseBody<crate::DeleteResponse<crate::List<Self>>>), crate::RequestError> { let __url = "/apis/storage.k8s.io/v1beta1/csinodes?".to_owned(); let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url); list_optional.__serialize(&mut __query_pairs); let __url = __query_pairs.finish(); let __request = crate::http::Request::delete(__url); let __body = crate::serde_json::to_vec(&delete_optional).map_err(crate::RequestError::Json)?; let __request = __request.header(crate::http::header::CONTENT_TYPE, crate::http::header::HeaderValue::from_static("application/json")); match __request.body(__body) { Ok(request) => Ok((request, crate::ResponseBody::new)), Err(err) => Err(crate::RequestError::Http(err)), } } } // Generated from operation listStorageV1beta1CSINode impl CSINode { /// list or watch objects of kind CSINode /// /// This operation only supports listing all items of this type. /// /// Use the returned [`crate::ResponseBody`]`<`[`crate::ListResponse`]`<Self>>` constructor, or [`crate::ListResponse`]`<Self>` directly, to parse the HTTP response. /// /// # Arguments /// /// * `optional` /// /// Optional parameters. Use `Default::default()` to not pass any. 
#[cfg(feature = "api")] pub fn list_csi_node( optional: crate::ListOptional<'_>, ) -> Result<(crate::http::Request<Vec<u8>>, fn(crate::http::StatusCode) -> crate::ResponseBody<crate::ListResponse<Self>>), crate::RequestError> { let __url = "/apis/storage.k8s.io/v1beta1/csinodes?".to_owned(); let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url); optional.__serialize(&mut __query_pairs); let __url = __query_pairs.finish(); let __request = crate::http::Request::get(__url); let __body = vec![]; match __request.body(__body) { Ok(request) => Ok((request, crate::ResponseBody::new)), Err(err) => Err(crate::RequestError::Http(err)), } } } // Generated from operation patchStorageV1beta1CSINode impl CSINode { /// partially update the specified CSINode /// /// Use the returned [`crate::ResponseBody`]`<`[`crate::PatchResponse`]`<Self>>` constructor, or [`crate::PatchResponse`]`<Self>` directly, to parse the HTTP response. /// /// # Arguments /// /// * `name` /// /// name of the CSINode /// /// * `body` /// /// * `optional` /// /// Optional parameters. Use `Default::default()` to not pass any. 
#[cfg(feature = "api")] pub fn patch_csi_node( name: &str, body: &crate::apimachinery::pkg::apis::meta::v1::Patch, optional: crate::PatchOptional<'_>, ) -> Result<(crate::http::Request<Vec<u8>>, fn(crate::http::StatusCode) -> crate::ResponseBody<crate::PatchResponse<Self>>), crate::RequestError> { let __url = format!("/apis/storage.k8s.io/v1beta1/csinodes/{name}?", name = crate::percent_encoding::percent_encode(name.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET), ); let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url); optional.__serialize(&mut __query_pairs); let __url = __query_pairs.finish(); let __request = crate::http::Request::patch(__url); let __body = crate::serde_json::to_vec(body).map_err(crate::RequestError::Json)?; let __request = __request.header(crate::http::header::CONTENT_TYPE, crate::http::header::HeaderValue::from_static(match body { crate::apimachinery::pkg::apis::meta::v1::Patch::Json(_) => "application/json-patch+json", crate::apimachinery::pkg::apis::meta::v1::Patch::Merge(_) => "application/merge-patch+json", crate::apimachinery::pkg::apis::meta::v1::Patch::StrategicMerge(_) => "application/strategic-merge-patch+json", })); match __request.body(__body) { Ok(request) => Ok((request, crate::ResponseBody::new)), Err(err) => Err(crate::RequestError::Http(err)), } } } // Generated from operation readStorageV1beta1CSINode impl CSINode { /// read the specified CSINode /// /// Use the returned [`crate::ResponseBody`]`<`[`ReadCSINodeResponse`]`>` constructor, or [`ReadCSINodeResponse`] directly, to parse the HTTP response. /// /// # Arguments /// /// * `name` /// /// name of the CSINode /// /// * `optional` /// /// Optional parameters. Use `Default::default()` to not pass any. 
#[cfg(feature = "api")] pub fn read_csi_node( name: &str, optional: ReadCSINodeOptional<'_>, ) -> Result<(crate::http::Request<Vec<u8>>, fn(crate::http::StatusCode) -> crate::ResponseBody<ReadCSINodeResponse>), crate::RequestError> { let ReadCSINodeOptional { exact, export, pretty, } = optional; let __url = format!("/apis/storage.k8s.io/v1beta1/csinodes/{name}?", name = crate::percent_encoding::percent_encode(name.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET), ); let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url); if let Some(exact) = exact { __query_pairs.append_pair("exact", &exact.to_string()); } if let Some(export) = export { __query_pairs.append_pair("export", &export.to_string()); } if let Some(pretty) = pretty { __query_pairs.append_pair("pretty", pretty); } let __url = __query_pairs.finish(); let __request = crate::http::Request::get(__url); let __body = vec![]; match __request.body(__body) { Ok(request) => Ok((request, crate::ResponseBody::new)), Err(err) => Err(crate::RequestError::Http(err)), } } } /// Optional parameters of [`CSINode::read_csi_node`] #[cfg(feature = "api")] #[derive(Clone, Copy, Debug, Default)] pub struct ReadCSINodeOptional<'a> { /// Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18. pub exact: Option<bool>, /// Should this value be exported. Export strips fields that a user can not specify. Deprecated. Planned for removal in 1.18. pub export: Option<bool>, /// If 'true', then the output is pretty printed. 
pub pretty: Option<&'a str>, } /// Use `<ReadCSINodeResponse as Response>::try_from_parts` to parse the HTTP response body of [`CSINode::read_csi_node`] #[cfg(feature = "api")] #[derive(Debug)] pub enum ReadCSINodeResponse { Ok(crate::api::storage::v1beta1::CSINode), Other(Result<Option<crate::serde_json::Value>, crate::serde_json::Error>), } #[cfg(feature = "api")] impl crate::Response for ReadCSINodeResponse { fn try_from_parts(status_code: crate::http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> { match status_code { crate::http::StatusCode::OK => { let result = match crate::serde_json::from_slice(buf) { Ok(value) => value, Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData), Err(err) => return Err(crate::ResponseError::Json(err)), }; Ok((ReadCSINodeResponse::Ok(result), buf.len())) }, _ => { let (result, read) = if buf.is_empty() { (Ok(None), 0) } else { match crate::serde_json::from_slice(buf) { Ok(value) => (Ok(Some(value)), buf.len()), Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData), Err(err) => (Err(err), 0), } }; Ok((ReadCSINodeResponse::Other(result), read)) }, } } } // Generated from operation replaceStorageV1beta1CSINode impl CSINode { /// replace the specified CSINode /// /// Use the returned [`crate::ResponseBody`]`<`[`crate::ReplaceResponse`]`<Self>>` constructor, or [`crate::ReplaceResponse`]`<Self>` directly, to parse the HTTP response. /// /// # Arguments /// /// * `name` /// /// name of the CSINode /// /// * `body` /// /// * `optional` /// /// Optional parameters. Use `Default::default()` to not pass any. 
#[cfg(feature = "api")] pub fn replace_csi_node( name: &str, body: &crate::api::storage::v1beta1::CSINode, optional: crate::ReplaceOptional<'_>, ) -> Result<(crate::http::Request<Vec<u8>>, fn(crate::http::StatusCode) -> crate::ResponseBody<crate::ReplaceResponse<Self>>), crate::RequestError> { let __url = format!("/apis/storage.k8s.io/v1beta1/csinodes/{name}?", name = crate::percent_encoding::percent_encode(name.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET), ); let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url); optional.__serialize(&mut __query_pairs); let __url = __query_pairs.finish(); let __request = crate::http::Request::put(__url); let __body = crate::serde_json::to_vec(body).map_err(crate::RequestError::Json)?; let __request = __request.header(crate::http::header::CONTENT_TYPE, crate::http::header::HeaderValue::from_static("application/json")); match __request.body(__body) { Ok(request) => Ok((request, crate::ResponseBody::new)), Err(err) => Err(crate::RequestError::Http(err)), } } } // Generated from operation watchStorageV1beta1CSINode impl CSINode { /// list or watch objects of kind CSINode /// /// This operation only supports watching one item, or a list of items, of this type for changes. /// /// Use the returned [`crate::ResponseBody`]`<`[`crate::WatchResponse`]`<Self>>` constructor, or [`crate::WatchResponse`]`<Self>` directly, to parse the HTTP response. /// /// # Arguments /// /// * `optional` /// /// Optional parameters. Use `Default::default()` to not pass any. 
#[cfg(feature = "api")] pub fn watch_csi_node( optional: crate::WatchOptional<'_>, ) -> Result<(crate::http::Request<Vec<u8>>, fn(crate::http::StatusCode) -> crate::ResponseBody<crate::WatchResponse<Self>>), crate::RequestError> { let __url = "/apis/storage.k8s.io/v1beta1/csinodes?".to_owned(); let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url); optional.__serialize(&mut __query_pairs); let __url = __query_pairs.finish(); let __request = crate::http::Request::get(__url); let __body = vec![]; match __request.body(__body) { Ok(request) => Ok((request, crate::ResponseBody::new)), Err(err) => Err(crate::RequestError::Http(err)), } } } // End storage.k8s.io/v1beta1/CSINode impl crate::Resource for CSINode { const API_VERSION: &'static str = "storage.k8s.io/v1beta1"; const GROUP: &'static str = "storage.k8s.io"; const KIND: &'static str = "CSINode"; const VERSION: &'static str = "v1beta1"; } impl crate::ListableResource for CSINode { const LIST_KIND: &'static str = concat!("CSINode", "List"); } impl crate::Metadata for CSINode { type Ty = crate::apimachinery::pkg::apis::meta::v1::ObjectMeta; fn metadata(&self) -> &<Self as crate::Metadata>::Ty { &self.metadata } fn metadata_mut(&mut self) -> &mut<Self as crate::Metadata>::Ty { &mut self.metadata } } impl<'de> crate::serde::Deserialize<'de> for CSINode { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: crate::serde::Deserializer<'de> { #[allow(non_camel_case_types)] enum Field { Key_api_version, Key_kind, Key_metadata, Key_spec, Other, } impl<'de> crate::serde::Deserialize<'de> for Field { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: crate::serde::Deserializer<'de> { struct Visitor; impl<'de> crate::serde::de::Visitor<'de> for Visitor { type Value = Field; fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.write_str("field identifier") } fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: crate::serde::de::Error 
{ Ok(match v { "apiVersion" => Field::Key_api_version, "kind" => Field::Key_kind, "metadata" => Field::Key_metadata, "spec" => Field::Key_spec, _ => Field::Other, }) } } deserializer.deserialize_identifier(Visitor) } } struct Visitor; impl<'de> crate::serde::de::Visitor<'de> for Visitor { type Value = CSINode; fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.write_str(<Self::Value as crate::Resource>::KIND) } fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error> where A: crate::serde::de::MapAccess<'de> { let mut value_metadata: Option<crate::apimachinery::pkg::apis::meta::v1::ObjectMeta> = None; let mut value_spec: Option<crate::api::storage::v1beta1::CSINodeSpec> = None; while let Some(key) = crate::serde::de::MapAccess::next_key::<Field>(&mut map)? { match key { Field::Key_api_version => { let value_api_version: String = crate::serde::de::MapAccess::next_value(&mut map)?; if value_api_version != <Self::Value as crate::Resource>::API_VERSION { return Err(crate::serde::de::Error::invalid_value(crate::serde::de::Unexpected::Str(&value_api_version), &<Self::Value as crate::Resource>::API_VERSION)); } }, Field::Key_kind => { let value_kind: String = crate::serde::de::MapAccess::next_value(&mut map)?; if value_kind != <Self::Value as crate::Resource>::KIND { return Err(crate::serde::de::Error::invalid_value(crate::serde::de::Unexpected::Str(&value_kind), &<Self::Value as crate::Resource>::KIND)); } }, Field::Key_metadata => value_metadata = Some(crate::serde::de::MapAccess::next_value(&mut map)?), Field::Key_spec => value_spec = Some(crate::serde::de::MapAccess::next_value(&mut map)?), Field::Other => { let _: crate::serde::de::IgnoredAny = crate::serde::de::MapAccess::next_value(&mut map)?; }, } } Ok(CSINode { metadata: value_metadata.ok_or_else(|| crate::serde::de::Error::missing_field("metadata"))?, spec: value_spec.ok_or_else(|| crate::serde::de::Error::missing_field("spec"))?, }) } } deserializer.deserialize_struct( <Self 
as crate::Resource>::KIND, &[ "apiVersion", "kind", "metadata", "spec", ], Visitor, ) } } impl crate::serde::Serialize for CSINode { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: crate::serde::Serializer { let mut state = serializer.serialize_struct( <Self as crate::Resource>::KIND, 4, )?; crate::serde::ser::SerializeStruct::serialize_field(&mut state, "apiVersion", <Self as crate::Resource>::API_VERSION)?; crate::serde::ser::SerializeStruct::serialize_field(&mut state, "kind", <Self as crate::Resource>::KIND)?; crate::serde::ser::SerializeStruct::serialize_field(&mut state, "metadata", &self.metadata)?; crate::serde::ser::SerializeStruct::serialize_field(&mut state, "spec", &self.spec)?; crate::serde::ser::SerializeStruct::end(state) } }
43.662651
718
0.592715
d95a0c23d59a7110327f94e059445c55305b04d3
5,966
use std::fmt::{Debug, Formatter}; use bitflags::_core::cmp::Ordering; use crate::codec::{Decode, Encode, ReadExt, WriteExt}; use crate::stream::Stream; use crate::{BuildJobError, Data}; /// `Merge` is a [`Binary`] operator that merge the data of the two input streams into one /// output stream. /// /// [`Binary`]: crate::api::binary::Binary pub trait Merge<D: Data> { /// Merge two input streams of the **same** data type. /// /// # Example /// ``` /// # use pegasus::{JobConf}; /// # use pegasus::api::{Sink, Map, Merge, Collect}; /// /// # let conf = JobConf::new("reduce_example"); /// let mut results = pegasus::run(conf, || { /// let id = pegasus::get_current_worker().index; /// move |input, output| { /// let src1 = input.input_from(vec![1, 3, 5, 7, 9])?; /// let (src1, src2) = src1.copied()?; /// src1 /// .merge(src2.map(|d| Ok(d + 1))?)? /// .collect::<Vec<u32>>()? /// .sink_into(output) /// } /// }) /// .expect("build job failure"); /// /// let mut expected = results.next().unwrap().unwrap(); /// expected.sort(); /// assert_eq!(expected, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]); /// ``` fn merge(self, other: Stream<D>) -> Result<Stream<D>, BuildJobError>; /// Merge two input streams of different data types `D` and `T` into one output stream. /// The output stream carries the data of type [`Either<D, T>`]. If it takes a data /// from the `D` side, it outputs a data of type `Either::A<D>`, otherwise, it outputs /// a data of type `Either::B<T>`. /// /// [`Either<D, T>`]: crate::api::merge::Either /// /// # Example /// ``` /// /* /// # use pegasus::{JobConf}; /// # use pegasus::api::{Sink, Map, Merge, Either}; /// /// # let mut conf = JobConf::new("reduce_example"); /// # conf.plan_print = false; /// let mut results = pegasus::run(conf, || { /// let id = pegasus::get_current_worker().index; /// move |input, output| { /// let src1 = input.input_from(vec![1_u32, 3])?; /// let (src1, src2) = src1.copied()?; /// src1 /// .merge_isomer(src2.map(|d| Ok(d as u64 - 1))?)? 
/// .sink_into(output) /// } /// }) /// .expect("build job failure") /// .map(|x| x.unwrap_or(Either::A(0_u32))) /// .collect::<Vec<Either<u32, u64>>>(); /// /// results.sort(); /// assert_eq!(results, [Either::A(1_u32), Either::A(3_u32), Either::B(0_u64), Either::B(2_u64)]); /// */ /// ``` fn merge_isomer<T: Data>(self, isomer: Stream<T>) -> Result<Stream<Either<D, T>>, BuildJobError>; } pub enum Either<A, B> { A(A), B(B), } impl<A: PartialEq, B: PartialEq> PartialEq for Either<A, B> { fn eq(&self, other: &Self) -> bool { match self { Either::A(a1) => { if let Either::A(a2) = other { a1 == a2 } else { false } } Either::B(b1) => { if let Either::B(b2) = other { b1 == b2 } else { false } } } } } impl<A: PartialEq + Eq, B: PartialEq + Eq> Eq for Either<A, B> {} impl<A: PartialOrd, B: PartialOrd> PartialOrd for Either<A, B> { fn partial_cmp(&self, other: &Self) -> Option<Ordering> { match self { Either::A(a1) => { if let Either::A(a2) = other { a1.partial_cmp(a2) } else { None } } Either::B(b1) => { if let Either::B(b2) = other { b1.partial_cmp(b2) } else { None } } } } } impl<A: PartialOrd + Ord, B: PartialOrd + Ord> Ord for Either<A, B> { fn cmp(&self, other: &Self) -> Ordering { match self { Either::A(a1) => { if let Either::A(a2) = other { a1.cmp(a2) } else { // A is by default less than B Ordering::Less } } Either::B(b1) => { if let Either::B(b2) = other { b1.cmp(b2) } else { Ordering::Greater } } } } } impl<A: Clone, B: Clone> Clone for Either<A, B> { fn clone(&self) -> Self { match self { Either::A(a) => Either::A(a.clone()), Either::B(b) => Either::B(b.clone()), } } } impl<A: Debug, B: Debug> Debug for Either<A, B> { fn fmt(&self, f: &mut Formatter) -> std::fmt::Result { match self { Either::A(a) => write!(f, "A({:?})", a), Either::B(b) => write!(f, "B({:?})", b), } } } impl<A: Encode, B: Encode> Encode for Either<A, B> { fn write_to<W: WriteExt>(&self, writer: &mut W) -> std::io::Result<()> { match self { Either::A(a) => { writer.write_u8(0)?; 
a.write_to(writer) } Either::B(b) => { writer.write_u8(1)?; b.write_to(writer) } } } } impl<A: Decode, B: Decode> Decode for Either<A, B> { fn read_from<R: ReadExt>(reader: &mut R) -> std::io::Result<Self> { let m = reader.read_u8()?; let e = if m == 0 { let a = A::read_from(reader)?; Either::A(a) } else { let b = B::read_from(reader)?; Either::B(b) }; Ok(e) } }
30.752577
106
0.433121
3a46cd65c08727ecb27ecf9db8bd344e20d906eb
16,554
#[cfg(test)] #[path = "../../../tests/unit/format/solution/writer_test.rs"] mod writer_test; use crate::format::coord_index::CoordIndex; use crate::format::solution::model::Timing; use crate::format::solution::*; use crate::format::*; use crate::format_time; use std::io::{BufWriter, Write}; use vrp_core::construction::constraints::route_intervals; use vrp_core::models::common::*; use vrp_core::models::problem::Multi; use vrp_core::models::solution::{Activity, Route}; use vrp_core::models::{Problem, Solution}; use vrp_core::solver::Metrics; type ApiActivity = crate::format::solution::model::Activity; type ApiSolution = crate::format::solution::model::Solution; type ApiSchedule = crate::format::solution::model::Schedule; type ApiMetrics = crate::format::solution::model::Metrics; type ApiGeneration = crate::format::solution::model::Generation; type AppPopulation = crate::format::solution::model::Population; type ApiIndividual = crate::format::solution::model::Individual; type DomainSchedule = vrp_core::models::common::Schedule; type DomainLocation = vrp_core::models::common::Location; type DomainExtras = vrp_core::models::Extras; /// A trait to serialize solution in pragmatic format. pub trait PragmaticSolution<W: Write> { /// Serializes solution in pragmatic json format. fn write_pragmatic_json(&self, problem: &Problem, writer: BufWriter<W>) -> Result<(), String>; /// Serializes solution in pragmatic geo json format. 
fn write_geo_json(&self, problem: &Problem, writer: BufWriter<W>) -> Result<(), String>; } impl<W: Write> PragmaticSolution<W> for Solution { fn write_pragmatic_json(&self, problem: &Problem, writer: BufWriter<W>) -> Result<(), String> { let solution = create_solution(problem, &self, None); serialize_solution(writer, &solution).map_err(|err| err.to_string())?; Ok(()) } fn write_geo_json(&self, problem: &Problem, writer: BufWriter<W>) -> Result<(), String> { let solution = create_solution(problem, &self, None); serialize_solution_as_geojson(writer, problem, &solution).map_err(|err| err.to_string())?; Ok(()) } } impl<W: Write> PragmaticSolution<W> for (Solution, Metrics) { fn write_pragmatic_json(&self, problem: &Problem, writer: BufWriter<W>) -> Result<(), String> { let solution = create_solution(problem, &self.0, Some(&self.1)); serialize_solution(writer, &solution).map_err(|err| err.to_string())?; Ok(()) } fn write_geo_json(&self, problem: &Problem, writer: BufWriter<W>) -> Result<(), String> { self.0.write_geo_json(problem, writer) } } struct Leg { pub last_detail: Option<(DomainLocation, Timestamp)>, pub load: Option<MultiDimLoad>, pub statistic: Statistic, } impl Leg { fn new(last_detail: Option<(DomainLocation, Timestamp)>, load: Option<MultiDimLoad>, statistic: Statistic) -> Self { Self { last_detail, load, statistic } } fn empty() -> Self { Self { last_detail: None, load: None, statistic: Statistic::default() } } } /// Creates solution. 
pub fn create_solution(problem: &Problem, solution: &Solution, metrics: Option<&Metrics>) -> ApiSolution { let coord_index = get_coord_index(problem); let tours = solution.routes.iter().map(|r| create_tour(problem, r, coord_index)).collect::<Vec<Tour>>(); let statistic = tours.iter().fold(Statistic::default(), |acc, tour| acc + tour.statistic.clone()); let unassigned = create_unassigned(solution); let violations = create_violations(solution); let extras = create_extras(solution, metrics); ApiSolution { statistic, tours, unassigned, violations, extras } } fn create_tour(problem: &Problem, route: &Route, coord_index: &CoordIndex) -> Tour { let is_multi_dimen = has_multi_dimensional_capacity(problem.extras.as_ref()); let actor = route.actor.as_ref(); let vehicle = actor.vehicle.as_ref(); let mut tour = Tour { vehicle_id: vehicle.dimens.get_id().unwrap().clone(), type_id: vehicle.dimens.get_value::<String>("type_id").unwrap().to_string(), shift_index: *vehicle.dimens.get_value::<usize>("shift_index").unwrap(), stops: vec![], statistic: Statistic::default(), }; let intervals = route_intervals(route, Box::new(|a| get_activity_type(a).map_or(false, |t| t == "reload"))); let mut leg = intervals.into_iter().fold(Leg::empty(), |leg, (start_idx, end_idx)| { let (start_delivery, end_pickup) = route.tour.activities_slice(start_idx, end_idx).iter().fold( (leg.load.unwrap_or_else(MultiDimLoad::default), MultiDimLoad::default()), |acc, activity| { let (delivery, pickup) = activity .job .as_ref() .and_then(|job| get_capacity(&job.dimens, is_multi_dimen).map(|d| (d.delivery.0, d.pickup.0))) .unwrap_or((MultiDimLoad::default(), MultiDimLoad::default())); (acc.0 + delivery, acc.1 + pickup) }, ); let (start_idx, start) = if start_idx == 0 { let start = route.tour.start().unwrap(); let (has_dispatch, is_same_location) = route.tour.get(1).map_or((false, false), |activity| { let has_dispatch = activity .retrieve_job() .and_then(|job| job.dimens().get_value::<String>("type").cloned()) 
.map_or(false, |job_type| job_type == "dispatch"); let is_same_location = start.place.location == activity.place.location; (has_dispatch, is_same_location) }); tour.stops.push(Stop { location: coord_index.get_by_idx(start.place.location).unwrap(), time: format_schedule(&start.schedule), load: if has_dispatch { vec![0] } else { start_delivery.as_vec() }, distance: 0, activities: vec![ApiActivity { job_id: "departure".to_string(), activity_type: "departure".to_string(), location: None, time: if is_same_location { Some(Interval { start: format_time(start.schedule.arrival), end: format_time(start.schedule.departure), }) } else { None }, job_tag: None, }], }); (start_idx + 1, start) } else { (start_idx, route.tour.get(start_idx - 1).unwrap()) }; let mut leg = route.tour.activities_slice(start_idx, end_idx).iter().fold( Leg::new(Some((start.place.location, start.schedule.departure)), Some(start_delivery), leg.statistic), |leg, act| { let activity_type = get_activity_type(act).cloned(); let (prev_location, prev_departure) = leg.last_detail.unwrap(); let prev_load = if activity_type.is_some() { leg.load.unwrap() } else { // NOTE arrival must have zero load let dimen_size = leg.load.unwrap().size; MultiDimLoad::new(vec![0; dimen_size]) }; let activity_type = activity_type.unwrap_or_else(|| "arrival".to_string()); let is_break = activity_type == "break"; let job_tag = act.job.as_ref().and_then(|job| job.dimens.get_value::<String>("tag").cloned()); let job_id = match activity_type.as_str() { "pickup" | "delivery" | "replacement" | "service" => { let single = act.job.as_ref().unwrap(); let id = single.dimens.get_id().cloned(); id.unwrap_or_else(|| Multi::roots(&single).unwrap().dimens.get_id().unwrap().clone()) } _ => activity_type.clone(), }; let driving = problem.transport.duration(&vehicle.profile, prev_location, act.place.location, prev_departure); let arrival = prev_departure + driving; let start = act.schedule.arrival.max(act.place.time.start); let waiting = start - 
act.schedule.arrival; let serving = act.place.duration; let departure = start + serving; // total cost and distance let cost = leg.statistic.cost + problem.activity.cost(actor, act, act.schedule.arrival) + problem.transport.cost(actor, prev_location, act.place.location, prev_departure); let distance = leg.statistic.distance + problem.transport.distance(&vehicle.profile, prev_location, act.place.location, prev_departure) as i64; let is_new_location = prev_location != act.place.location; if is_new_location { tour.stops.push(Stop { location: coord_index.get_by_idx(act.place.location).unwrap(), time: format_as_schedule(&(arrival, departure)), load: prev_load.as_vec(), distance, activities: vec![], }); } let load = calculate_load(prev_load, act, is_multi_dimen); let last = tour.stops.len() - 1; let mut last = tour.stops.get_mut(last).unwrap(); last.time.departure = format_time(departure); last.load = load.as_vec(); last.activities.push(ApiActivity { job_id, activity_type: activity_type.clone(), location: if !is_new_location && activity_type == "dispatch" { None } else { Some(coord_index.get_by_idx(act.place.location).unwrap()) }, time: Some(Interval { start: format_time(arrival), end: format_time(departure) }), job_tag, }); Leg { last_detail: Some((act.place.location, act.schedule.departure)), statistic: Statistic { cost, distance, duration: leg.statistic.duration + departure as i64 - prev_departure as i64, times: Timing { driving: leg.statistic.times.driving + driving as i64, serving: leg.statistic.times.serving + (if is_break { 0 } else { serving as i64 }), waiting: leg.statistic.times.waiting + waiting as i64, break_time: leg.statistic.times.break_time + (if is_break { serving as i64 } else { 0 }), }, }, load: Some(load), } }, ); leg.load = Some(leg.load.unwrap() - end_pickup); leg }); // NOTE remove redundant info tour.stops .iter_mut() .filter(|stop| stop.activities.len() == 1) .flat_map(|stop| stop.activities.iter_mut()) .for_each(|activity| { activity.location = 
None; activity.time = None; }); leg.statistic.cost += vehicle.costs.fixed; tour.vehicle_id = vehicle.dimens.get_id().unwrap().clone(); tour.type_id = vehicle.dimens.get_value::<String>("type_id").unwrap().clone(); tour.statistic = leg.statistic; tour } fn format_schedule(schedule: &DomainSchedule) -> ApiSchedule { ApiSchedule { arrival: format_time(schedule.arrival), departure: format_time(schedule.departure) } } fn format_as_schedule(schedule: &(f64, f64)) -> ApiSchedule { format_schedule(&DomainSchedule::new(schedule.0, schedule.1)) } fn calculate_load(current: MultiDimLoad, act: &Activity, is_multi_dimen: bool) -> MultiDimLoad { let job = act.job.as_ref(); let demand = job.and_then(|job| get_capacity(&job.dimens, is_multi_dimen)).unwrap_or_default(); current - demand.delivery.0 - demand.delivery.1 + demand.pickup.0 + demand.pickup.1 } fn create_unassigned(solution: &Solution) -> Option<Vec<UnassignedJob>> { let unassigned = solution .unassigned .iter() .filter(|(job, _)| job.dimens().get_value::<String>("vehicle_id").is_none()) .map(|(job, code)| { let (code, reason) = map_code_reason(*code); UnassignedJob { job_id: job.dimens().get_id().expect("job id expected").clone(), reasons: vec![UnassignedJobReason { code: code.to_string(), description: reason.to_string() }], } }) .collect::<Vec<_>>(); if unassigned.is_empty() { None } else { Some(unassigned) } } fn create_violations(solution: &Solution) -> Option<Vec<Violation>> { // NOTE at the moment only break violation is mapped let violations = solution .unassigned .iter() .filter(|(job, _)| job.dimens().get_value::<String>("type").map_or(false, |t| t == "break")) .map(|(job, _)| Violation::Break { vehicle_id: job.dimens().get_value::<String>("vehicle_id").expect("vehicle id").clone(), shift_index: *job.dimens().get_value::<usize>("shift_index").expect("shift index"), }) .collect::<Vec<_>>(); if violations.is_empty() { None } else { Some(violations) } } fn get_activity_type(activity: &Activity) -> Option<&String> { 
activity.job.as_ref().and_then(|single| single.dimens.get_value::<String>("type")) } fn get_capacity(dimens: &Dimensions, is_multi_dimen: bool) -> Option<Demand<MultiDimLoad>> { if is_multi_dimen { dimens.get_demand().cloned() } else { let create_capacity = |capacity: SingleDimLoad| { if capacity.value == 0 { MultiDimLoad::default() } else { MultiDimLoad::new(vec![capacity.value]) } }; dimens.get_demand().map(|demand: &Demand<SingleDimLoad>| Demand { pickup: (create_capacity(demand.pickup.0), create_capacity(demand.pickup.1)), delivery: (create_capacity(demand.delivery.0), create_capacity(demand.delivery.1)), }) } } fn has_multi_dimensional_capacity(extras: &DomainExtras) -> bool { let capacity_type = extras .get("capacity_type") .and_then(|s| s.downcast_ref::<String>()) .unwrap_or_else(|| panic!("Cannot get capacity type!")); match capacity_type.as_str() { "multi" => true, "single" => false, _ => panic!("Unknown capacity type: '{}'", capacity_type), } } fn create_extras(_solution: &Solution, metrics: Option<&Metrics>) -> Option<Extras> { if let Some(metrics) = metrics { Some(Extras { metrics: Some(ApiMetrics { duration: metrics.duration, generations: metrics.generations, speed: metrics.speed, evolution: metrics .evolution .iter() .map(|g| ApiGeneration { number: g.number, timestamp: g.timestamp, i_all_ratio: g.i_all_ratio, i_1000_ratio: g.i_1000_ratio, is_improvement: g.is_improvement, population: AppPopulation { individuals: g .population .individuals .iter() .map(|i| ApiIndividual { tours: i.tours, unassigned: i.unassigned, cost: i.cost, improvement: i.improvement, fitness: i.fitness.clone(), }) .collect(), }, }) .collect(), }), }) } else { None } }
40.975248
120
0.557448
4a0c612020194e57e2a4925cdf4940980efbbccb
502
// ignore-tidy-linelength // aux-build:two_macros.rs // compile-flags:--extern non_existent mod n { extern crate two_macros; } mod m { fn check() { two_macros::m!(); //~ ERROR failed to resolve: use of undeclared crate or module `two_macros` } } macro_rules! define_std_as_non_existent { () => { extern crate std as non_existent; //~^ ERROR `extern crate` items cannot shadow names passed with `--extern` } } define_std_as_non_existent!(); fn main() {}
20.08
101
0.649402
711550c27d16ff916350a2399f776fb2872d6f0b
10,952
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use rustc::dep_graph::{DepGraph, DepKind}; use rustc::hir::def_id::DefId; use rustc::hir::svh::Svh; use rustc::ich::Fingerprint; use rustc::middle::cstore::EncodedMetadataHashes; use rustc::session::Session; use rustc::ty::TyCtxt; use rustc::util::common::time; use rustc::util::nodemap::DefIdMap; use rustc_data_structures::fx::FxHashMap; use rustc_serialize::Encodable as RustcEncodable; use rustc_serialize::opaque::Encoder; use std::io::{self, Cursor, Write}; use std::fs::{self, File}; use std::path::PathBuf; use super::data::*; use super::fs::*; use super::dirty_clean; use super::file_format; use super::work_product; use super::load::load_prev_metadata_hashes; pub fn save_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, metadata_hashes: &EncodedMetadataHashes, svh: Svh) { debug!("save_dep_graph()"); let _ignore = tcx.dep_graph.in_ignore(); let sess = tcx.sess; if sess.opts.incremental.is_none() { return; } // We load the previous metadata hashes now before overwriting the file // (if we need them for testing). 
let prev_metadata_hashes = if tcx.sess.opts.debugging_opts.query_dep_graph { load_prev_metadata_hashes(tcx) } else { DefIdMap() }; let mut current_metadata_hashes = FxHashMap(); if sess.opts.debugging_opts.incremental_cc || sess.opts.debugging_opts.query_dep_graph { save_in(sess, metadata_hash_export_path(sess), |e| encode_metadata_hashes(tcx, svh, metadata_hashes, &mut current_metadata_hashes, e)); } time(sess.time_passes(), "persist query result cache", || { save_in(sess, query_cache_path(sess), |e| encode_query_cache(tcx, e)); }); time(sess.time_passes(), "persist dep-graph", || { save_in(sess, dep_graph_path(sess), |e| encode_dep_graph(tcx, e)); }); dirty_clean::check_dirty_clean_annotations(tcx); dirty_clean::check_dirty_clean_metadata(tcx, &prev_metadata_hashes, &current_metadata_hashes); } pub fn save_work_products(sess: &Session, dep_graph: &DepGraph) { if sess.opts.incremental.is_none() { return; } debug!("save_work_products()"); let _ignore = dep_graph.in_ignore(); let path = work_products_path(sess); save_in(sess, path, |e| encode_work_products(dep_graph, e)); // We also need to clean out old work-products, as not all of them are // deleted during invalidation. Some object files don't change their // content, they are just not needed anymore. 
let new_work_products = dep_graph.work_products(); let previous_work_products = dep_graph.previous_work_products(); for (id, wp) in previous_work_products.iter() { if !new_work_products.contains_key(id) { work_product::delete_workproduct_files(sess, wp); debug_assert!(wp.saved_files.iter().all(|&(_, ref file_name)| { !in_incr_comp_dir_sess(sess, file_name).exists() })); } } // Check that we did not delete one of the current work-products: debug_assert!({ new_work_products.iter() .flat_map(|(_, wp)| wp.saved_files .iter() .map(|&(_, ref name)| name)) .map(|name| in_incr_comp_dir_sess(sess, name)) .all(|path| path.exists()) }); } fn save_in<F>(sess: &Session, path_buf: PathBuf, encode: F) where F: FnOnce(&mut Encoder) -> io::Result<()> { debug!("save: storing data in {}", path_buf.display()); // delete the old dep-graph, if any // Note: It's important that we actually delete the old file and not just // truncate and overwrite it, since it might be a shared hard-link, the // underlying data of which we don't want to modify if path_buf.exists() { match fs::remove_file(&path_buf) { Ok(()) => { debug!("save: remove old file"); } Err(err) => { sess.err(&format!("unable to delete old dep-graph at `{}`: {}", path_buf.display(), err)); return; } } } // generate the data in a memory buffer let mut wr = Cursor::new(Vec::new()); file_format::write_file_header(&mut wr).unwrap(); match encode(&mut Encoder::new(&mut wr)) { Ok(()) => {} Err(err) => { sess.err(&format!("could not encode dep-graph to `{}`: {}", path_buf.display(), err)); return; } } // write the data out let data = wr.into_inner(); match File::create(&path_buf).and_then(|mut file| file.write_all(&data)) { Ok(_) => { debug!("save: data written to disk successfully"); } Err(err) => { sess.err(&format!("failed to write dep-graph to `{}`: {}", path_buf.display(), err)); return; } } } fn encode_dep_graph(tcx: TyCtxt, encoder: &mut Encoder) -> io::Result<()> { // First encode the commandline arguments hash 
tcx.sess.opts.dep_tracking_hash().encode(encoder)?; // Encode the graph data. let serialized_graph = tcx.dep_graph.serialize(); if tcx.sess.opts.debugging_opts.incremental_info { #[derive(Clone)] struct Stat { kind: DepKind, node_counter: u64, edge_counter: u64, } let total_node_count = serialized_graph.nodes.len(); let total_edge_count = serialized_graph.edge_list_data.len(); let mut counts: FxHashMap<_, Stat> = FxHashMap(); for (i, &(node, _)) in serialized_graph.nodes.iter_enumerated() { let stat = counts.entry(node.kind).or_insert(Stat { kind: node.kind, node_counter: 0, edge_counter: 0, }); stat.node_counter += 1; let (edge_start, edge_end) = serialized_graph.edge_list_indices[i]; stat.edge_counter += (edge_end - edge_start) as u64; } let mut counts: Vec<_> = counts.values().cloned().collect(); counts.sort_by_key(|s| -(s.node_counter as i64)); let percentage_of_all_nodes: Vec<f64> = counts.iter().map(|s| { (100.0 * (s.node_counter as f64)) / (total_node_count as f64) }).collect(); let average_edges_per_kind: Vec<f64> = counts.iter().map(|s| { (s.edge_counter as f64) / (s.node_counter as f64) }).collect(); println!("[incremental]"); println!("[incremental] DepGraph Statistics"); const SEPARATOR: &str = "[incremental] --------------------------------\ ----------------------------------------------\ ------------"; println!("{}", SEPARATOR); println!("[incremental]"); println!("[incremental] Total Node Count: {}", total_node_count); println!("[incremental] Total Edge Count: {}", total_edge_count); println!("[incremental]"); println!("[incremental] {:<36}| {:<17}| {:<12}| {:<17}|", "Node Kind", "Node Frequency", "Node Count", "Avg. 
Edge Count"); println!("[incremental] -------------------------------------\ |------------------\ |-------------\ |------------------|"); for (i, stat) in counts.iter().enumerate() { println!("[incremental] {:<36}|{:>16.1}% |{:>12} |{:>17.1} |", format!("{:?}", stat.kind), percentage_of_all_nodes[i], stat.node_counter, average_edges_per_kind[i]); } println!("{}", SEPARATOR); println!("[incremental]"); } serialized_graph.encode(encoder)?; Ok(()) } fn encode_metadata_hashes(tcx: TyCtxt, svh: Svh, metadata_hashes: &EncodedMetadataHashes, current_metadata_hashes: &mut FxHashMap<DefId, Fingerprint>, encoder: &mut Encoder) -> io::Result<()> { assert_eq!(metadata_hashes.hashes.len(), metadata_hashes.hashes.iter().map(|x| (x.def_index, ())).collect::<FxHashMap<_,_>>().len()); let mut serialized_hashes = SerializedMetadataHashes { entry_hashes: metadata_hashes.hashes.to_vec(), index_map: FxHashMap() }; if tcx.sess.opts.debugging_opts.query_dep_graph { for serialized_hash in &serialized_hashes.entry_hashes { let def_id = DefId::local(serialized_hash.def_index); // Store entry in the index_map let def_path_hash = tcx.def_path_hash(def_id); serialized_hashes.index_map.insert(def_id.index, def_path_hash); // Record hash in current_metadata_hashes current_metadata_hashes.insert(def_id, serialized_hash.hash); } debug!("save: stored index_map (len={}) for serialized hashes", serialized_hashes.index_map.len()); } // Encode everything. svh.encode(encoder)?; serialized_hashes.encode(encoder)?; Ok(()) } fn encode_work_products(dep_graph: &DepGraph, encoder: &mut Encoder) -> io::Result<()> { let work_products: Vec<_> = dep_graph .work_products() .iter() .map(|(id, work_product)| { SerializedWorkProduct { id: id.clone(), work_product: work_product.clone(), } }) .collect(); work_products.encode(encoder) } fn encode_query_cache(tcx: TyCtxt, encoder: &mut Encoder) -> io::Result<()> { tcx.serialize_query_result_cache(encoder) }
34.990415
100
0.543188
1d56ecee868a1576a0605a03a6c169ff51e81d1e
126
// run-rustfix fn func() -> u8 { 0 } fn main() { match () { () => func() //~ ERROR mismatched types } }
10.5
47
0.428571
fe0215b4528fcf5bbf084970c539d1e035748d4b
347
use crate::neat::unit::resolve_unit; use crate::neat::Namespace; use cjc_parser::parser::parse_program; pub fn program(input: &str, _filename: &str, namespace: &mut Namespace) { let parse_ast = parse_program(input); match parse_ast { Ok(unit) => { resolve_unit(unit, namespace); } Err(_) => {} } }
24.785714
73
0.613833
f940880b0973a90d2c51c7770b87f29e80f939a0
2,525
#[doc = "Reader of register TMR"] pub type R = crate::R<u32, super::TMR>; #[doc = "Writer for register TMR"] pub type W = crate::W<u32, super::TMR>; #[doc = "Register TMR `reset()`'s with value 0"] impl crate::ResetValue for super::TMR { type Type = u32; #[inline(always)] fn reset_value() -> Self::Type { 0 } } #[doc = "Transmit Word Mask\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] #[repr(u32)] pub enum TWM_A { #[doc = "0: Word N is enabled."] _0 = 0, #[doc = "1: Word N is masked. The transmit data pins are tri-stated when masked."] _1 = 1, } impl From<TWM_A> for u32 { #[inline(always)] fn from(variant: TWM_A) -> Self { variant as _ } } #[doc = "Reader of field `TWM`"] pub type TWM_R = crate::R<u32, TWM_A>; impl TWM_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> crate::Variant<u32, TWM_A> { use crate::Variant::*; match self.bits { 0 => Val(TWM_A::_0), 1 => Val(TWM_A::_1), i => Res(i), } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { *self == TWM_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { *self == TWM_A::_1 } } #[doc = "Write proxy for field `TWM`"] pub struct TWM_W<'a> { w: &'a mut W, } impl<'a> TWM_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: TWM_A) -> &'a mut W { unsafe { self.bits(variant.into()) } } #[doc = "Word N is enabled."] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(TWM_A::_0) } #[doc = "Word N is masked. 
The transmit data pins are tri-stated when masked."] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(TWM_A::_1) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u32) -> &'a mut W { self.w.bits = (self.w.bits & !0xffff_ffff) | ((value as u32) & 0xffff_ffff); self.w } } impl R { #[doc = "Bits 0:31 - Transmit Word Mask"] #[inline(always)] pub fn twm(&self) -> TWM_R { TWM_R::new((self.bits & 0xffff_ffff) as u32) } } impl W { #[doc = "Bits 0:31 - Transmit Word Mask"] #[inline(always)] pub fn twm(&mut self) -> TWM_W { TWM_W { w: self } } }
27.150538
86
0.542574
793bb919a72973ef6fc9f0d626aa65173da3da43
3,027
use aorist_primitives::AVec; use crate::code::Preamble; #[cfg(feature = "python")] use crate::python::PythonPreamble; use crate::r::r_import::RImport; use extendr_api::prelude::*; use std::hash::Hash; #[derive(Clone, PartialEq, Hash, Eq)] pub struct RPreamble { pub libraries: AVec<RImport>, pub body: AString, } impl Preamble for RPreamble { type ImportType = RImport; fn get_imports(&self) -> AVec<Self::ImportType> { self.libraries.clone() } } impl<'a> RPreamble { // Assumes R has already been started pub fn new(body: AString) -> RPreamble { eval_string( r#" to.preamble <- function(body) { x <- as.list(parse(text=body)) is.library <- sapply(x, function(y) { if (class(y) == "call") { return(y[[1]] == "library") } return(FALSE) }) call.idx <- which(is.library) calls <- x[call.idx] not.calls <- x[which(!is.library)] body <- paste(sapply( not.calls, function(x) paste(deparse(x), collapse="\n") ), collapse="\n\n") libraries <- sapply(calls, function(x) x[[2]]) list(body=body, libraries=libraries) } "#, ) .unwrap(); let res = call!("to.preamble", body).unwrap(); let body_no_imports = res.index(1).unwrap(); let libraries = res.index(2).unwrap(); Self { libraries: match libraries.as_string_vector() { Some(v) => v.into_iter().map(|x| RImport::new(x)).collect(), None => AVec::new(), }, body: body_no_imports.as_str().unwrap().to_string(), } } #[cfg(feature = "python")] pub fn from_python(var_name: AString, body: AString) -> RPreamble { let python_preamble = PythonPreamble::new(body); let formatted = python_preamble.to_string().replace("'", "\\'"); Self { libraries: vec![RImport::new("reticulate".into())], body: format!("{} <- '\n{}'", var_name, formatted).to_string(), } } pub fn get_body(&self) -> String { self.body.clone() } } #[allow(unused_imports)] mod r_test_preamble { use crate::r::preamble::RPreamble; use extendr_api::prelude::*; #[test] fn test_basic_preamble() { test! 
{ let body = r#" library('ggplot2') library('igraph') c(1) f <- function(a, b) { a + b } "#; let preamble = RPreamble::new(body.to_string()); assert_eq!(preamble.libraries.get(0).unwrap().library, "ggplot2"); assert_eq!(preamble.libraries.get(1).unwrap().library, "igraph"); assert_eq!(preamble.body, r#"c(1) f <- function(a, b) { a + b }"#); } } }
30.27
78
0.502147
e44b166ae381bff782b92f4a29e9cb5f4e07b496
1,309
use super::app_config::CKBAppConfig; use ckb_chain_spec::consensus::Consensus; use ckb_instrument::Format; use ckb_miner::MinerConfig; use ckb_pow::PowEngine; use std::path::PathBuf; use std::sync::Arc; pub struct ExportArgs { pub config: Box<CKBAppConfig>, pub consensus: Consensus, pub format: Format, pub target: PathBuf, } pub struct ImportArgs { pub config: Box<CKBAppConfig>, pub consensus: Consensus, pub format: Format, pub source: PathBuf, } pub struct RunArgs { pub config: Box<CKBAppConfig>, pub consensus: Consensus, } pub struct ProfArgs { pub config: Box<CKBAppConfig>, pub consensus: Consensus, pub from: u64, pub to: u64, } pub struct MinerArgs { pub config: MinerConfig, pub pow_engine: Arc<dyn PowEngine>, } pub struct StatsArgs { pub config: Box<CKBAppConfig>, pub consensus: Consensus, pub from: Option<u64>, pub to: Option<u64>, } pub struct InitArgs { pub root_dir: PathBuf, pub chain: String, pub rpc_port: String, pub p2p_port: String, pub log_to_file: bool, pub log_to_stdout: bool, pub list_chains: bool, pub force: bool, pub block_assembler_code_hash: Option<String>, pub block_assembler_args: Vec<String>, pub block_assembler_data: Option<String>, }
21.816667
50
0.693659
081f64bed7a60631d783faf59f4703a2bdf451d5
1,851
use std::cell::RefCell; use std::time::{SystemTime, UNIX_EPOCH}; /// Rolling Hash /// - 法: 2^61 - 1 /// - 基数: 実行時ランダム /// # Example /// ``` /// use my_library_rs::*; /// /// // 文字列 /// let s = "abrakadabra"; /// let hash = RollingHash::new(&s.as_bytes()); /// assert_eq!(hash.get(0, 4), hash.get(7, 11)); // abra == abra /// assert_ne!(hash.get(0, 4), hash.get(6, 10)); // abra != dabr /// /// // 数列 /// let a = vec![3,1,4,2,8,5,7,1,4,2,8]; /// let hash = RollingHash::new(&a); /// assert_eq!(hash.get(1, 5), hash.get(7, 11)); // [1,4,2,8] == [1,4,2,8] /// assert_ne!(hash.get(0, 5), hash.get(7, 11)); // [3,1,4,2] != [1,4,2,8] /// ``` pub struct RollingHash { hash: Vec<u64>, pow: Vec<u64>, } impl RollingHash { pub fn new(s: &[u8]) -> Self { let n = s.len(); let (mut hash, mut pow) = (Vec::with_capacity(n + 1), Vec::with_capacity(n + 1)); hash.push(0); pow.push(1); ROLLINGHASH_BASE.with(|b| { let base = *b.borrow(); for i in 0..n { hash.push(modulo(mul(hash[i], base) + s[i] as u64)); pow.push(mul(pow[i], base)); } }); Self { hash, pow } } pub fn get(&self, l: usize, r: usize) -> u64 { modulo(self.hash[r] + MOD - mul(self.hash[l], self.pow[r - l])) } } const MOD: u64 = (1 << 61) - 1; fn mul(x: u64, y: u64) -> u64 { let t = x as u128 * y as u128; let t = (t >> 61) + (t & MOD as u128); modulo(t as u64) } fn modulo(x: u64) -> u64 { assert!(x < 2 * MOD); if x >= MOD { x - MOD } else { x } } thread_local!(static ROLLINGHASH_BASE: RefCell<u64> = { let t = SystemTime::now() .duration_since(UNIX_EPOCH) .unwrap() .as_nanos(); let t = (t >> 61) + (t & MOD as u128); RefCell::new(modulo(t as u64)) });
26.442857
89
0.487844
016b38826e5324c0578ff090d675ca27633a6758
947
// ignore-fast // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. /* Test that exporting a class also exports its public fields and methods */ use kitty::cat; mod kitty { pub struct cat { meows: uint, name: ~str, } impl cat { pub fn get_name(&self) -> ~str { self.name.clone() } } pub fn cat(in_name: ~str) -> cat { cat { name: in_name, meows: 0u } } } pub fn main() { assert_eq!(cat(~"Spreckles").get_name(), ~"Spreckles"); }
24.282051
69
0.630412
21fbbf8cfb5a8d6c8f6654be893d1f9284127bd7
377
// compile-fail pub const unsafe fn fake_type<T>() -> T { hint_unreachable() } pub const unsafe fn hint_unreachable() -> ! { fake_type() //~ ERROR any use of this value will cause an error } trait Const { const CONSTANT: i32 = unsafe { fake_type() }; } impl <T> Const for T {} pub fn main() -> () { dbg!(i32::CONSTANT); //~ ERROR erroneous constant used }
18.85
67
0.628647
11455677759b8c255aa54e70e6eb4ba4117f5662
21,443
use super::*; use rand; use rand::seq::SliceRandom; use rand::Rng; use std::collections::{HashMap, VecDeque}; use std::sync::mpsc; use std::sync::{Arc, Mutex}; use std::thread; use std::time; #[derive(Debug)] pub struct Society { config: Arc<Config>, cities: Vec<Arc<Mutex<City>>>, //city_relations: Vec<Vec<u8>>, deaths: u32, recoveries: u32, infections: u32, new_infections: u32, } #[derive(Debug)] pub enum Event { Death, Recovery, Infection, } enum InternalEvent { Infection(usize), Encounter(usize, usize), } const THREAD_NUMBER: u8 = 7; impl Society { pub fn new(cfg: Config) -> Self { // TODO: Refactor. let begin = time::Instant::now(); let cities = Arc::new(Mutex::new(Vec::new())); let mut handles = Vec::new(); let size = cfg.population.size; let city_no = (size / cfg.population.city_size) as usize; eprintln!("{}", city_no); let (sq, rq) = mpsc::channel(); for i in 0..city_no { sq.send(i).unwrap(); } drop(sq); let receiver = Arc::new(Mutex::new(rq)); let cfg = Arc::new(cfg); for _ in 0..THREAD_NUMBER { let cities = Arc::clone(&cities); let receiver = Arc::clone(&receiver); let cfg = Arc::clone(&cfg); handles.push(thread::spawn(move || loop { let inst = (*receiver).lock().unwrap().recv(); if let Err(_) = inst { return; } let city_mut = Mutex::new(City::new(Arc::clone(&cfg))); (*cities).lock().unwrap().push(city_mut); })); } for h in handles { h.join().unwrap(); } eprintln!("Built all cities in {}s", begin.elapsed().as_secs_f32()); let mut cities_unlocked = Vec::new(); let it = Arc::try_unwrap(cities).unwrap().into_inner().unwrap(); for c in it.into_iter() { cities_unlocked.push(Arc::new(c)); } // TODO: Reintroduce city relations. 
//let begin2 = time::Instant::now(); //let mut city_relations = Vec::new(); //for _ in 0..city_no { // city_relations.push(vec![0; city_no as usize]); //} //eprintln!( // "Built city relations placeholder in {}s", // begin2.elapsed().as_secs_f32() //); //let begin2 = time::Instant::now(); //let mut rng = rand::thread_rng(); //for i in 1..city_no { // for j in 0..i { // let r: u8 = rng.gen(); // city_relations[i][j] = r; // city_relations[j][i] = r; // } //} //eprintln!( // "Built city relations in {}s", // begin2.elapsed().as_secs_f32() //); eprintln!("Built society in {}s", begin.elapsed().as_secs_f32()); Society { new_infections: 0, config: cfg, cities: cities_unlocked, //city_relations: city_relations, deaths: 0, infections: 0, recoveries: 0, } } pub fn init(&mut self) { (*self.cities)[0].lock().unwrap().infect(0); self.infections += 1; } pub fn next_day(&mut self) { let before = self.infections; self.calculate_internal_change(); self.calculate_national_infections(); self.new_infections = self.infections - before; } fn calculate_internal_change(&mut self) { let (sx, rx) = mpsc::channel(); let mut handles = Vec::new(); for c in self.cities.iter() { let c = Arc::clone(c); let sx = sx.clone(); handles.push(thread::spawn(move || { let mut lock = (*c).lock().unwrap(); (*lock).next_day(Some(&sx)); (*lock).calculate_infections(Some(&sx)); (*lock).update_hosptials(Some(&sx)); })); } drop(sx); self.handle_events(rx); } fn calculate_national_infections(&mut self) { let mut per_city = Vec::new(); for c in self.cities.iter() { let city = (*c).lock().unwrap(); per_city.push((*city).get_national_mobile()); } let mut all_con_c = Vec::new(); for (i, ps) in per_city.iter().enumerate() { let mut con_c = 0; let city = self.cities[i].lock().unwrap(); for p in ps { if (*city).people[*p].is_contagious(&self.config) { con_c += 1; } } all_con_c.push(con_c); } let mut rev_con_c = vec![0; all_con_c.len()]; for (i, con_c) in all_con_c.iter().enumerate() { for j in 0..rev_con_c.len() { if 
j != i { rev_con_c[j] += con_c; } } } let mut rng = rand::thread_rng(); let (sx, rx) = mpsc::channel(); let total = per_city.iter().map(|x| x.len()).sum::<usize>() as f32; for (i, c) in self.cities.iter().enumerate() { let (ev_s, ev_r) = mpsc::channel(); let mut city = (*c).lock().unwrap(); let p = rev_con_c[i] as f32 / (total - per_city[i].len() as f32); for j in 0..per_city[i].len() { if rng.gen::<f32>() < p { ev_s.send(InternalEvent::Infection(per_city[i][j])).unwrap(); } } drop(ev_s); (*city).handle_internal_events(ev_r, Some(&sx)); } drop(sx); self.handle_events(rx); } fn handle_events(&mut self, rx: mpsc::Receiver<Event>) { for e in rx { match e { Event::Infection => self.infections += 1, Event::Death => self.deaths += 1, Event::Recovery => self.recoveries += 1, } } } pub fn active(&self) -> u32 { self.infections - self.deaths - self.recoveries } pub fn to_string(&self) -> String { format!( "{},{},{},{},{},{},{}", self.infections, self.deaths, self.recoveries, self.active(), self.new_infections, self.in_treatment(), self.in_treatmet_queue(), ) } pub fn csv_header(&self) -> String { format!("Infections,Deaths,Recoveries,Active,New Infections,Hospital,Hospital Queue") } fn in_treatment(&self) -> usize { let mut total = 0; for c in self.cities.iter() { let lock = (*c).lock().unwrap(); total += (*lock).hospital.len(); } total } fn in_treatmet_queue(&self) -> usize { let mut total = 0; for c in self.cities.iter() { let lock = (*c).lock().unwrap(); total += (*lock).hospital_queue.len(); } total } } #[derive(Debug)] pub struct City { infections: u32, dead: u32, recovered: u32, active: u32, people: Vec<Person>, people_relations: HashMap<(u32, u32), u8>, //district_relations: Vec<Vec<u8>>, //household_relations: Vec<Vec<u8>>, config: Arc<Config>, hospital: VecDeque<usize>, hospital_queue: VecDeque<usize>, } impl City { pub fn new(cfg: Arc<Config>) -> Self { let begin = time::Instant::now(); let size = cfg.population.city_size; let mut people = Vec::new(); let mut age: 
i8 = -1; let mut left = 0; for _ in 0..size as i32 { if left == 0 { if age < 9 { age += 1; } left = (cfg.population.age_distribution[age as usize] * size as f32) as i32; } people.push(Person::new(age)); left -= 1; } people.shuffle(&mut rand::thread_rng()); let mut people_relations = HashMap::new(); let h_size = cfg.population.household_size; let h_no = size / h_size; for h in 0..h_no { for i in 1..h_size { for j in 0..i { let r = rand::random(); people_relations.insert((h * h_size + i, h * h_size + j), r); people_relations.insert((h * h_size + j, h * h_size + i), r); } } } // TODO: Reintroduce those relations. //let mut household_relations = Vec::new(); //let h_no = h_no as usize; //for _ in 0..h_no { // household_relations.push(vec![0; h_no]); //} //for i in 1..h_no as usize { // for j in 0..i { // let r = rand::random(); // household_relations[i][j] = r; // household_relations[j][i] = r; // } //} //let mut district_relations = Vec::new(); //let d_size = cfg.population.district_size; //let d_no = (size / d_size) as usize; //for _ in 0..d_no { // district_relations.push(vec![0; d_no]); //} //for i in 1..d_no as usize { // for j in 0..i { // let r = rand::random(); // district_relations[i][j] = r; // district_relations[j][i] = r; // } //} eprintln!("Built city in {}s", begin.elapsed().as_secs_f64()); City { infections: 0, active: 0, dead: 0, recovered: 0, people: people, people_relations: people_relations, //district_relations: district_relations, //household_relations: household_relations, config: cfg, hospital: VecDeque::new(), hospital_queue: VecDeque::new(), } } fn get_national_mobile(&self) -> Vec<usize> { let mut rng = rand::thread_rng(); let mut mobile = Vec::new(); for i in 0..self.people.len() { // We don't allow people to leave the city if they are in quarantine... 
if self.people[i].in_treatment { continue; } if rng.gen::<f32>() < self.config.population.mean_national_mobility { mobile.push(i); } } mobile } fn infect(&mut self, a: u32) { if a > self.config.population.city_size { panic!("Given index exceeds city size."); } self.people[a as usize].infect(&*self.config, None); } fn handle_internal_events( &mut self, ev_r: mpsc::Receiver<InternalEvent>, sx: Option<&mpsc::Sender<Event>>, ) { for e in ev_r { match e { InternalEvent::Encounter(a, b) => self.handle_encounter(a, b, sx), InternalEvent::Infection(a) => self.handle_infection(a, sx), } } } fn calculate_infections(&mut self, sx: Option<&mpsc::Sender<Event>>) { // We're using such a channel in case we want to make this concurrent later on. let (ev_s, ev_r) = mpsc::channel(); self.calculate_household_infections(&ev_s); self.calculate_district_infections(&ev_s); self.calculate_city_infections(&ev_s); drop(ev_s); self.handle_internal_events(ev_r, sx); } fn handle_infection(&mut self, a: usize, sx: Option<&mpsc::Sender<Event>>) { if self.people[a].infect(&self.config, sx) { //if self.infections == 0 { // eprintln!("First infected in new city"); //} self.infections += 1; self.hospital_queue.push_back(a); } } fn handle_encounter(&mut self, a: usize, b: usize, sx: Option<&mpsc::Sender<Event>>) { if !self.is_relevant_encounter(a, b) { return; } let res; match self.people_relations.get(&(a as u32, b as u32)) { Some(v) => res = *v, None => panic!("Relation between {} and {} does not exist!", a, b), } let r: u8 = rand::thread_rng().gen(); if r < res { // In households we're doing 1:1 infections. // This is unlike the model in the bigger partitions where we're using pools. 
self.handle_infection(a, sx); self.handle_infection(b, sx); } } fn calculate_household_infections(&mut self, ev_q: &mpsc::Sender<InternalEvent>) { let mut rng = rand::thread_rng(); let h_size = self.config.population.household_size as usize; let h_no = self.config.population.city_size as usize / h_size; for h in 0..h_no { let mut is_mobile = Vec::new(); for i in 0..h_size { let real_mobility = self.config.population.mean_household_mobility * { if self.people[h * h_size + i].in_treatment { 1.0 - self.config.virus.treatment_quarantine_efficiency } else { 1.0 } }; let m = real_mobility > rng.gen(); is_mobile.push(m); if !m { continue; } for j in 0..i { if !is_mobile[j] { continue; } let a = h * h_size + i; let b = h * h_size + j; ev_q.send(InternalEvent::Encounter(a, b)).unwrap(); } } } } fn calculate_district_infections(&mut self, ev_q: &mpsc::Sender<InternalEvent>) { let mut rng = rand::thread_rng(); let d_size = self.config.population.district_size as usize; let d_no = self.config.population.city_size as usize / d_size; for d in 0..d_no { let mut total_mobile = Vec::new(); for i in 0..d_size { let real_mobility = self.config.population.mean_district_mobility * { if self.people[d * d_size + i].in_treatment { 1.0 - self.config.virus.treatment_quarantine_efficiency } else { 1.0 } }; if real_mobility <= rng.gen() { continue; } total_mobile.push(i); } for i in total_mobile.iter() { let h_size = self.config.population.household_size as usize; let h = *i / h_size; let foreign_mobile: Vec<usize> = total_mobile .iter() .map(|x| *x) .filter(|x| h != *x / h_size) .collect(); let total = foreign_mobile.len(); let mut con_c = 0; for x in foreign_mobile { if self.people[x].is_contagious(&self.config) { con_c += 1; } } let p = con_c as f32 / total as f32; if p < rng.gen() { continue; } ev_q.send(InternalEvent::Infection(d * d_size + *i)) .unwrap(); } } } fn calculate_city_infections(&mut self, ev_q: &mpsc::Sender<InternalEvent>) { let mut rng = rand::thread_rng(); let c_size = 
self.config.population.city_size as usize; let mut total_mobile = Vec::new(); for i in 0..c_size { let real_mobility = self.config.population.mean_city_mobility * { if self.people[i].in_treatment { 1.0 - self.config.virus.treatment_quarantine_efficiency } else { 1.0 } }; if real_mobility <= rng.gen() { continue; } total_mobile.push(i); } for i in total_mobile.iter() { let d_size = self.config.population.district_size as usize; let d = *i / d_size; let foreign_mobile: Vec<usize> = total_mobile .iter() .map(|x| *x) .filter(|x| d != *x / d_size) .collect(); let total = foreign_mobile.len(); let mut con_c = 0; for x in foreign_mobile { if self.people[x].is_contagious(&self.config) { con_c += 1; } } let p = con_c as f32 / total as f32; if p < rng.gen() { continue; } ev_q.send(InternalEvent::Infection(*i)).unwrap(); } } fn update_hosptials(&mut self, _sx: Option<&mpsc::Sender<Event>>) { while let Some(p) = self.hospital.front() { if self.people[*p].recovered || self.people[*p].dead { self.hospital.pop_front(); } else { break; } } while self.hospital.len() < self.config.population.city_medical_capacity { if let Some(p) = self.hospital_queue.pop_front() { self.hospital.push_back(p); self.people[p].hospitalize(&self.config); } else { break; } } } fn is_relevant_encounter(&self, a: usize, b: usize) -> bool { let pa = &self.people[a]; let pb = &self.people[b]; if pb.dead || pa.dead { return false; } if pa.infected == pb.infected { return false; } if pa.is_contagious(&*self.config) == pb.is_contagious(&*self.config) { return false; } return true; } fn next_day(&mut self, sx: Option<&mpsc::Sender<Event>>) { for i in 0..self.people.len() { self.people[i].next_day(&self.config, sx); } } } #[derive(Debug)] pub struct Person { age: i8, infected: bool, infected_for: Option<i32>, recovered: bool, doomed: Option<bool>, severity: u8, dead: bool, in_treatment: bool, in_treatment_for: Option<i32>, } impl Person { pub fn new(mut age: i8) -> Self { // We only consider 90+, not 100+, 110+, 
etc. if age > 9 { age = 9; } Person { age: age, infected: false, infected_for: None, recovered: false, doomed: None, severity: 0, dead: false, in_treatment: false, in_treatment_for: None, } } fn recover(&mut self, sx: Option<&mpsc::Sender<Event>>) { if let Some(s) = sx { s.send(Event::Recovery).unwrap(); } self.recovered = true; } fn kill(&mut self, sx: Option<&mpsc::Sender<Event>>) { if let Some(s) = sx { s.send(Event::Death).unwrap(); } self.dead = true; } fn infect(&mut self, cfg: &Config, sx: Option<&mpsc::Sender<Event>>) -> bool { if self.infected { return false; } let mut rng = rand::thread_rng(); if rng.gen::<f32>() >= cfg.virus.contagiousness { return false; } if let Some(s) = sx { s.send(Event::Infection).unwrap(); } self.infected = true; self.infected_for = Some(0); self.severity = rng.gen(); true } fn hospitalize(&mut self, _cfg: &Config) { self.in_treatment = true; self.in_treatment_for = Some(0); } fn is_doomed(&mut self, cfg: &Config) -> bool { if let Some(b) = self.doomed { return b; } let t = if let Some(it) = self.in_treatment_for { let inf = self.infected_for.unwrap() as f32; let delay = inf - it as f32; 1.0 - cfg.virus.treatment_efficiency * (1.0 - cfg.virus.treatment_decay.powf(-delay)) } else { 1.0 }; let real_lethality = cfg.virus.lethality[self.age as usize] * t; let b = self.severity as f32 > std::u8::MAX as f32 * (1.0 - real_lethality); self.doomed = Some(b); b } pub fn next_day(&mut self, cfg: &Config, sx: Option<&mpsc::Sender<Event>>) { let inf; match self.infected_for { None => return, Some(t) => inf = t + 1, } self.infected_for = Some(inf); if let Some(t) = self.in_treatment_for { self.in_treatment_for = Some(t + 1); } if self.recovered || self.dead { return; } if inf > cfg.virus.sick_for { if self.is_doomed(cfg) { self.kill(sx); } else { //eprintln!( // "Recovered with severity {} ({}%)", // self.severity, // 100.0 * self.severity as f32 / std::u8::MAX as f32 //); self.recover(sx); } } } pub fn is_contagious(&self, cfg: &Config) -> 
bool { if let Some(time) = self.infected_for { time < cfg.virus.contagious_for } else { false } } }
32.489394
97
0.481882
09a415b39ba1a03ccd0237836ce6b7462a5f7966
23,384
use log::{debug, info, warn}; use crate::{ AudioBus, AudioBusBuffer, AudioDeviceInfo, AudioServerInfo, BufferSizeRange, Config, DeviceIndex, FatalErrorHandler, FatalStreamError, MidiController, MidiControllerBuffer, MidiDeviceInfo, MidiServerInfo, ProcessInfo, RtProcessHandler, SpawnRtThreadError, StreamInfo, }; pub fn refresh_audio_server(server: &mut AudioServerInfo) { info!("Refreshing list of available Jack audio devices..."); server.devices.clear(); match jack::Client::new("rustydaw_io_dummy_client", jack::ClientOptions::empty()) { Ok((client, _status)) => { let system_audio_in_ports: Vec<String> = client.ports( None, Some("32 bit float mono audio"), jack::PortFlags::IS_OUTPUT, ); let system_audio_out_ports: Vec<String> = client.ports( None, Some("32 bit float mono audio"), jack::PortFlags::IS_INPUT, ); if system_audio_out_ports.len() == 0 { // This crate only allows devices with playback. server.available = false; warn!("Jack server is unavailable: Jack system device has no available audio outputs."); } else { // Find index of default in ports. let mut default_in_port = 0; // Fallback to first available port. for (i, port) in system_audio_in_ports.iter().enumerate() { if port == "system:capture_1" { default_in_port = i; break; } } // Find index of default out left port. let mut default_out_port_left = 0; // Fallback to first available port. for (i, port) in system_audio_out_ports.iter().enumerate() { if port == "system:playback_1" { default_out_port_left = i; break; } } // Find index of default out right port. let mut default_out_port_right = 1.min(system_audio_out_ports.len() - 1); // Fallback to second available port if stereo, first if mono. for (i, port) in system_audio_out_ports.iter().enumerate() { if port == "system:playback_2" { default_out_port_right = i; break; } } // Jack only ever has one "device". 
server.devices.push(AudioDeviceInfo { name: String::from("Jack Device"), in_ports: system_audio_in_ports, out_ports: system_audio_out_ports, sample_rates: vec![client.sample_rate() as u32], // Only one sample rate is available. buffer_size_range: BufferSizeRange { // Only one buffer size is available. min: client.buffer_size() as u32, max: client.buffer_size() as u32, }, default_in_port, default_out_port_left, default_out_port_right, default_sample_rate_index: 0, // Only one sample rate is available. default_buffer_size: client.buffer_size() as u32, // Only one buffer size is available. }); server.available = true; } } Err(e) => { server.available = false; info!("Jack server is unavailable: {}", e); } } } pub fn refresh_midi_server(server: &mut MidiServerInfo) { info!("Refreshing list of available Jack MIDI devices..."); server.in_devices.clear(); server.out_devices.clear(); match jack::Client::new("rustydaw_io_dummy_client", jack::ClientOptions::empty()) { Ok((client, _status)) => { let system_midi_in_ports: Vec<String> = client.ports(None, Some("8 bit raw midi"), jack::PortFlags::IS_OUTPUT); let system_midi_out_ports: Vec<String> = client.ports(None, Some("8 bit raw midi"), jack::PortFlags::IS_INPUT); for system_port_name in system_midi_in_ports.iter() { server.in_devices.push(MidiDeviceInfo { name: system_port_name.clone(), }); } for system_port_name in system_midi_out_ports.iter() { server.out_devices.push(MidiDeviceInfo { name: system_port_name.clone(), }); } // Find index of default in port. let mut default_in_port = 0; // Fallback to first available port. for (i, port) in system_midi_in_ports.iter().enumerate() { // "system:midi_capture_1" is usually Jack's built-in `Midi-Through` device. // What we usually want is first available port of the user's hardware MIDI controller, which is // commonly mapped to "system:midi_capture_2". 
if port == "system:midi_capture_2" { default_in_port = i; break; } } server.default_in_port = default_in_port; server.available = true; } Err(e) => { server.available = false; info!("Jack server is unavailable: {}", e); } } } pub struct JackRtThreadHandle<P: RtProcessHandler, E: FatalErrorHandler> { _async_client: jack::AsyncClient<JackNotificationHandler<E>, JackProcessHandler<P>>, } pub fn spawn_rt_thread<P: RtProcessHandler, E: FatalErrorHandler>( config: &Config, mut rt_process_handler: P, fatal_error_handler: E, use_client_name: Option<String>, ) -> Result<(StreamInfo, JackRtThreadHandle<P, E>), SpawnRtThreadError> { info!("Spawning Jack thread..."); let client_name = use_client_name.unwrap_or(String::from("rusty-daw-io")); info!("Registering Jack client with name {}", &client_name); let (client, _status) = jack::Client::new(&client_name, jack::ClientOptions::empty())?; // Find system ports let system_audio_in_ports: Vec<String> = client.ports( None, Some("32 bit float mono audio"), jack::PortFlags::IS_OUTPUT, ); let system_audio_out_ports: Vec<String> = client.ports( None, Some("32 bit float mono audio"), jack::PortFlags::IS_INPUT, ); // Register new ports. 
let mut audio_in_ports = Vec::<jack::Port<jack::AudioIn>>::new(); let mut audio_in_port_names = Vec::<String>::new(); let mut audio_in_connected_port_names = Vec::<String>::new(); let mut audio_in_busses = Vec::<AudioBus>::new(); for (bus_i, bus) in config.audio_in_busses.iter().enumerate() { if bus.system_ports.len() == 0 { return Err(SpawnRtThreadError::NoSystemPortsGiven(bus.id.clone())); } audio_in_busses.push(AudioBus { id_name: bus.id.clone(), id_index: DeviceIndex::new(bus_i), system_device: String::from("Jack"), system_half_duplex_device: None, system_ports: bus.system_ports.clone(), channels: bus.system_ports.len() as u16, }); for (i, system_port) in bus.system_ports.iter().enumerate() { if !system_audio_in_ports.contains(&system_port) { return Err(SpawnRtThreadError::SystemPortNotFound( system_port.clone(), bus.id.clone(), )); } let user_port_name = format!("{}_{}", &bus.id, i + 1); let user_port = client.register_port(&user_port_name, jack::AudioIn::default())?; audio_in_port_names.push(user_port.name()?); audio_in_connected_port_names.push(system_port.clone()); audio_in_ports.push(user_port); } } let mut audio_out_ports = Vec::<jack::Port<jack::AudioOut>>::new(); let mut audio_out_port_names = Vec::<String>::new(); let mut audio_out_connected_port_names = Vec::<String>::new(); let mut audio_out_busses = Vec::<AudioBus>::new(); for (bus_i, bus) in config.audio_out_busses.iter().enumerate() { if bus.system_ports.len() == 0 { return Err(SpawnRtThreadError::NoSystemPortsGiven(bus.id.clone())); } audio_out_busses.push(AudioBus { id_name: bus.id.clone(), id_index: DeviceIndex::new(bus_i), system_device: String::from("Jack"), system_half_duplex_device: None, system_ports: bus.system_ports.clone(), channels: bus.system_ports.len() as u16, }); for (i, system_port) in bus.system_ports.iter().enumerate() { if !system_audio_out_ports.contains(&system_port) { return Err(SpawnRtThreadError::SystemPortNotFound( system_port.clone(), bus.id.clone(), )); } let 
user_port_name = format!("{}_{}", &bus.id, i + 1); let user_port = client.register_port(&user_port_name, jack::AudioOut::default())?; audio_out_port_names.push(user_port.name()?); audio_out_connected_port_names.push(system_port.clone()); audio_out_ports.push(user_port); } } let mut midi_in_ports = Vec::<jack::Port<jack::MidiIn>>::new(); let mut midi_in_port_names = Vec::<String>::new(); let mut midi_in_connected_port_names = Vec::<String>::new(); let mut midi_in_controllers = Vec::<MidiController>::new(); let mut midi_out_ports = Vec::<jack::Port<jack::MidiOut>>::new(); let mut midi_out_port_names = Vec::<String>::new(); let mut midi_out_connected_port_names = Vec::<String>::new(); let mut midi_out_controllers = Vec::<MidiController>::new(); if let Some(midi_server) = &config.midi_server { if midi_server == "Jack" { for (controller_i, controller) in config.midi_in_controllers.iter().enumerate() { let system_port_name = &controller.system_port; midi_in_controllers.push(MidiController { id_name: controller.id.clone(), id_index: DeviceIndex::new(controller_i), system_port: String::from(system_port_name), }); let port = client.register_port(&controller.id, jack::MidiIn::default())?; midi_in_port_names.push(port.name()?); midi_in_connected_port_names.push(String::from(system_port_name)); midi_in_ports.push(port); } for (controller_i, controller) in config.midi_out_controllers.iter().enumerate() { let system_port_name = &controller.system_port; midi_out_controllers.push(MidiController { id_name: controller.id.clone(), id_index: DeviceIndex::new(controller_i), system_port: String::from(system_port_name), }); let port = client.register_port(&controller.id, jack::MidiOut::default())?; midi_out_port_names.push(port.name()?); midi_out_connected_port_names.push(String::from(system_port_name)); midi_out_ports.push(port); } } } let sample_rate = client.sample_rate() as u32; let max_audio_buffer_size = client.buffer_size() as u32; let stream_info = StreamInfo { server_name: 
String::from("Jack"), audio_in: audio_in_busses, audio_out: audio_out_busses, midi_in: midi_in_controllers, midi_out: midi_out_controllers, sample_rate: sample_rate as u32, max_audio_buffer_size, }; rt_process_handler.init(&stream_info); let process = JackProcessHandler::new( rt_process_handler, audio_in_ports, audio_out_ports, midi_in_ports, midi_out_ports, stream_info.clone(), max_audio_buffer_size, ); info!("Activating Jack client..."); // Activate the client, which starts the processing. let async_client = client.activate_async( JackNotificationHandler { fatal_error_handler: Some(fatal_error_handler), }, process, )?; // Try to automatically connect to system inputs/outputs. for (in_port, system_in_port) in audio_in_port_names .iter() .zip(audio_in_connected_port_names) { async_client .as_client() .connect_ports_by_name(&system_in_port, in_port)?; } for (out_port, system_out_port) in audio_out_port_names .iter() .zip(audio_out_connected_port_names) { async_client .as_client() .connect_ports_by_name(out_port, &system_out_port)?; } for (in_port, system_in_port) in midi_in_port_names.iter().zip(midi_in_connected_port_names) { async_client .as_client() .connect_ports_by_name(&system_in_port, in_port)?; } for (out_port, system_out_port) in midi_out_port_names .iter() .zip(midi_out_connected_port_names) { async_client .as_client() .connect_ports_by_name(out_port, &system_out_port)?; } info!( "Successfully spawned Jack thread. 
Sample rate: {}, Max audio buffer size: {}", sample_rate, max_audio_buffer_size ); Ok(( stream_info, JackRtThreadHandle { _async_client: async_client, }, )) } struct JackProcessHandler<P: RtProcessHandler> { rt_process_handler: P, audio_in_ports: Vec<jack::Port<jack::AudioIn>>, audio_out_ports: Vec<jack::Port<jack::AudioOut>>, audio_in_buffers: Vec<AudioBusBuffer>, audio_out_buffers: Vec<AudioBusBuffer>, midi_in_ports: Vec<jack::Port<jack::MidiIn>>, midi_out_ports: Vec<jack::Port<jack::MidiOut>>, midi_in_buffers: Vec<MidiControllerBuffer>, midi_out_buffers: Vec<MidiControllerBuffer>, stream_info: StreamInfo, max_audio_buffer_size: usize, } impl<P: RtProcessHandler> JackProcessHandler<P> { fn new( rt_process_handler: P, audio_in_ports: Vec<jack::Port<jack::AudioIn>>, audio_out_ports: Vec<jack::Port<jack::AudioOut>>, midi_in_ports: Vec<jack::Port<jack::MidiIn>>, midi_out_ports: Vec<jack::Port<jack::MidiOut>>, stream_info: StreamInfo, max_audio_buffer_size: u32, ) -> Self { let mut audio_in_buffers = Vec::<AudioBusBuffer>::new(); let mut audio_out_buffers = Vec::<AudioBusBuffer>::new(); for bus in stream_info.audio_in.iter() { audio_in_buffers.push(AudioBusBuffer::new(bus.channels, max_audio_buffer_size)) } for bus in stream_info.audio_out.iter() { audio_out_buffers.push(AudioBusBuffer::new(bus.channels, max_audio_buffer_size)) } let mut midi_in_buffers = Vec::<MidiControllerBuffer>::new(); let mut midi_out_buffers = Vec::<MidiControllerBuffer>::new(); for _ in 0..stream_info.midi_in.len() { midi_in_buffers.push(MidiControllerBuffer::new()) } for _ in 0..stream_info.midi_out.len() { midi_out_buffers.push(MidiControllerBuffer::new()) } Self { rt_process_handler, audio_in_ports, audio_out_ports, audio_in_buffers, audio_out_buffers, midi_in_ports, midi_out_ports, midi_in_buffers, midi_out_buffers, stream_info, max_audio_buffer_size: max_audio_buffer_size as usize, } } } impl<P: RtProcessHandler> jack::ProcessHandler for JackProcessHandler<P> { fn process(&mut self, _: 
&jack::Client, ps: &jack::ProcessScope) -> jack::Control { let mut audio_frames = 0; // Collect Audio Inputs let mut port = 0; // Ports are in order. for audio_buffer in self.audio_in_buffers.iter_mut() { for channel in audio_buffer.channel_buffers.iter_mut() { let port_slice = self.audio_in_ports[port].as_slice(ps); audio_frames = port_slice.len(); // Sanity check. if audio_frames > self.max_audio_buffer_size { warn!("Warning: Jack sent a buffer size of {} when the max buffer size was said to be {}", audio_frames, self.max_audio_buffer_size); } // The compiler should in-theory optimize by not filling in zeros before copying // the slice. This should never allocate because each buffer was given a capacity of // the maximum buffer size that jack will send. channel.resize(audio_frames, 0.0); channel.copy_from_slice(port_slice); port += 1; } audio_buffer.frames = audio_frames; } if self.audio_in_buffers.len() == 0 { // Check outputs for number of frames instead. if let Some(out_port) = self.audio_out_ports.first_mut() { audio_frames = out_port.as_mut_slice(ps).len(); } } // Clear Audio Outputs for audio_buffer in self.audio_out_buffers.iter_mut() { audio_buffer.clear_and_resize(audio_frames); } // Collect MIDI Inputs for (midi_buffer, port) in self .midi_in_buffers .iter_mut() .zip(self.midi_in_ports.iter()) { midi_buffer.clear(); for event in port.iter(ps) { if let Err(e) = midi_buffer.push_raw(event.time, event.bytes) { warn!( "Warning: Dropping midi event because of the push error: {}", e ); } } } // Clear MIDI Outputs for midi_buffer in self.midi_out_buffers.iter_mut() { midi_buffer.clear(); } self.rt_process_handler.process(ProcessInfo { audio_in: self.audio_in_buffers.as_slice(), audio_out: self.audio_out_buffers.as_mut_slice(), audio_frames, midi_in: self.midi_in_buffers.as_slice(), midi_out: self.midi_out_buffers.as_mut_slice(), sample_rate: self.stream_info.sample_rate, }); // TODO: Properly mix outputs in the case where a system port is connected to more 
than one bus/controller. // Copy processed data to Audio Outputs let mut port = 0; // Ports are in order. for audio_buffer in self.audio_out_buffers.iter() { for channel in audio_buffer.channel_buffers.iter() { let port_slice = self.audio_out_ports[port].as_mut_slice(ps); // Just in case the user resized the output buffer for some reason. let len = channel.len().min(port_slice.len()); if len != audio_frames { warn!( "Warning: An audio output buffer was resized from {} to {} by the user", audio_frames, len ); } &mut port_slice[0..len].copy_from_slice(&channel[0..len]); port += 1; } } // Copy processed data to MIDI Outputs for (midi_buffer, port) in self .midi_out_buffers .iter() .zip(self.midi_out_ports.iter_mut()) { let mut port_writer = port.writer(ps); for event in midi_buffer.events() { if let Err(e) = port_writer.write(&jack::RawMidi { time: event.delta_frames, bytes: &event.data(), }) { warn!("Warning: Could not copy midi data to Jack output: {}", e); } } } jack::Control::Continue } } struct JackNotificationHandler<E: FatalErrorHandler> { fatal_error_handler: Option<E>, } impl<E: FatalErrorHandler> jack::NotificationHandler for JackNotificationHandler<E> { fn thread_init(&self, _: &jack::Client) { debug!("JACK: thread init"); } fn shutdown(&mut self, status: jack::ClientStatus, reason: &str) { let msg = format!( "JACK: shutdown with status {:?} because \"{}\"", status, reason ); info!("{}", msg); if let Some(fatal_error_handler) = self.fatal_error_handler.take() { fatal_error_handler.fatal_stream_error(FatalStreamError::AudioServerDisconnected(msg)) } } fn freewheel(&mut self, _: &jack::Client, is_enabled: bool) { debug!( "JACK: freewheel mode is {}", if is_enabled { "on" } else { "off" } ); } fn sample_rate(&mut self, _: &jack::Client, srate: jack::Frames) -> jack::Control { debug!("JACK: sample rate changed to {}", srate); jack::Control::Continue } fn client_registration(&mut self, _: &jack::Client, name: &str, is_reg: bool) { debug!( "JACK: {} client with 
name \"{}\"", if is_reg { "registered" } else { "unregistered" }, name ); } fn port_registration(&mut self, _: &jack::Client, port_id: jack::PortId, is_reg: bool) { debug!( "JACK: {} port with id {}", if is_reg { "registered" } else { "unregistered" }, port_id ); } fn port_rename( &mut self, _: &jack::Client, port_id: jack::PortId, old_name: &str, new_name: &str, ) -> jack::Control { debug!( "JACK: port with id {} renamed from {} to {}", port_id, old_name, new_name ); jack::Control::Continue } fn ports_connected( &mut self, _: &jack::Client, port_id_a: jack::PortId, port_id_b: jack::PortId, are_connected: bool, ) { debug!( "JACK: ports with id {} and {} are {}", port_id_a, port_id_b, if are_connected { "connected" } else { "disconnected" } ); } fn graph_reorder(&mut self, _: &jack::Client) -> jack::Control { debug!("JACK: graph reordered"); jack::Control::Continue } fn xrun(&mut self, _: &jack::Client) -> jack::Control { warn!("JACK: xrun occurred"); jack::Control::Continue } fn latency(&mut self, _: &jack::Client, mode: jack::LatencyType) { debug!( "JACK: {} latency has changed", match mode { jack::LatencyType::Capture => "capture", jack::LatencyType::Playback => "playback", } ); } } impl From<jack::Error> for SpawnRtThreadError { fn from(e: jack::Error) -> Self { SpawnRtThreadError::PlatformSpecific(Box::new(e)) } }
34.901493
153
0.569064
4af31f42061255dc1c0942696cdfc6d70f90717a
20,845
use super::methods::*; use crate::rpc::{ codec::{ base::{BaseInboundCodec, BaseOutboundCodec}, ssz_snappy::{SSZSnappyInboundCodec, SSZSnappyOutboundCodec}, InboundCodec, OutboundCodec, }, methods::{MaxErrorLen, ResponseTermination, MAX_ERROR_LEN}, MaxRequestBlocks, MAX_REQUEST_BLOCKS, }; use futures::future::BoxFuture; use futures::prelude::*; use futures::prelude::{AsyncRead, AsyncWrite}; use libp2p::core::{InboundUpgrade, OutboundUpgrade, ProtocolName, UpgradeInfo}; use ssz::Encode; use ssz_types::VariableList; use std::io; use std::marker::PhantomData; use std::time::Duration; use tokio_io_timeout::TimeoutStream; use tokio_util::{ codec::Framed, compat::{Compat, FuturesAsyncReadCompatExt}, }; use types::{BeaconBlock, EthSpec, Hash256, MainnetEthSpec, Signature, SignedBeaconBlock}; lazy_static! { // Note: Hardcoding the `EthSpec` type for `SignedBeaconBlock` as min/max values is // same across different `EthSpec` implementations. pub static ref SIGNED_BEACON_BLOCK_MIN: usize = SignedBeaconBlock::<MainnetEthSpec> { message: BeaconBlock::empty(&MainnetEthSpec::default_spec()), signature: Signature::empty(), } .as_ssz_bytes() .len(); pub static ref SIGNED_BEACON_BLOCK_MAX: usize = SignedBeaconBlock::<MainnetEthSpec> { message: BeaconBlock::full(&MainnetEthSpec::default_spec()), signature: Signature::empty(), } .as_ssz_bytes() .len(); pub static ref BLOCKS_BY_ROOT_REQUEST_MIN: usize = VariableList::<Hash256, MaxRequestBlocks>::from(Vec::<Hash256>::new()) .as_ssz_bytes() .len(); pub static ref BLOCKS_BY_ROOT_REQUEST_MAX: usize = VariableList::<Hash256, MaxRequestBlocks>::from(vec![ Hash256::zero(); MAX_REQUEST_BLOCKS as usize ]) .as_ssz_bytes() .len(); pub static ref ERROR_TYPE_MIN: usize = VariableList::<u8, MaxErrorLen>::from(Vec::<u8>::new()) .as_ssz_bytes() .len(); pub static ref ERROR_TYPE_MAX: usize = VariableList::<u8, MaxErrorLen>::from(vec![ 0u8; MAX_ERROR_LEN as usize ]) .as_ssz_bytes() .len(); } /// The maximum bytes that can be sent across the RPC. 
const MAX_RPC_SIZE: usize = 1_048_576; // 1M /// The protocol prefix the RPC protocol id. const PROTOCOL_PREFIX: &str = "/eth2/beacon_chain/req"; /// Time allowed for the first byte of a request to arrive before we time out (Time To First Byte). const TTFB_TIMEOUT: u64 = 5; /// The number of seconds to wait for the first bytes of a request once a protocol has been /// established before the stream is terminated. const REQUEST_TIMEOUT: u64 = 15; /// Protocol names to be used. #[derive(Debug, Clone, Copy)] pub enum Protocol { /// The Status protocol name. Status, /// The Goodbye protocol name. Goodbye, /// The `BlocksByRange` protocol name. BlocksByRange, /// The `BlocksByRoot` protocol name. BlocksByRoot, /// The `Ping` protocol name. Ping, /// The `MetaData` protocol name. MetaData, } /// RPC Versions #[derive(Debug, Clone, PartialEq, Eq)] pub enum Version { /// Version 1 of RPC V1, } /// RPC Encondings supported. #[derive(Debug, Clone, PartialEq, Eq)] pub enum Encoding { SSZSnappy, } impl std::fmt::Display for Protocol { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let repr = match self { Protocol::Status => "status", Protocol::Goodbye => "goodbye", Protocol::BlocksByRange => "beacon_blocks_by_range", Protocol::BlocksByRoot => "beacon_blocks_by_root", Protocol::Ping => "ping", Protocol::MetaData => "metadata", }; f.write_str(repr) } } impl std::fmt::Display for Encoding { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let repr = match self { Encoding::SSZSnappy => "ssz_snappy", }; f.write_str(repr) } } impl std::fmt::Display for Version { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let repr = match self { Version::V1 => "1", }; f.write_str(repr) } } #[derive(Debug, Clone)] pub struct RPCProtocol<TSpec: EthSpec> { pub phantom: PhantomData<TSpec>, } impl<TSpec: EthSpec> UpgradeInfo for RPCProtocol<TSpec> { type Info = ProtocolId; type InfoIter = Vec<Self::Info>; /// The list of supported RPC 
protocols for Lighthouse. fn protocol_info(&self) -> Self::InfoIter { vec![ ProtocolId::new(Protocol::Status, Version::V1, Encoding::SSZSnappy), ProtocolId::new(Protocol::Goodbye, Version::V1, Encoding::SSZSnappy), ProtocolId::new(Protocol::BlocksByRange, Version::V1, Encoding::SSZSnappy), ProtocolId::new(Protocol::BlocksByRoot, Version::V1, Encoding::SSZSnappy), ProtocolId::new(Protocol::Ping, Version::V1, Encoding::SSZSnappy), ProtocolId::new(Protocol::MetaData, Version::V1, Encoding::SSZSnappy), ] } } /// Represents the ssz length bounds for RPC messages. #[derive(Debug, PartialEq)] pub struct RpcLimits { pub min: usize, pub max: usize, } impl RpcLimits { pub fn new(min: usize, max: usize) -> Self { Self { min, max } } /// Returns true if the given length is out of bounds, false otherwise. pub fn is_out_of_bounds(&self, length: usize) -> bool { length > self.max || length < self.min } } /// Tracks the types in a protocol id. #[derive(Clone, Debug)] pub struct ProtocolId { /// The RPC message type/name. pub message_name: Protocol, /// The version of the RPC. pub version: Version, /// The encoding of the RPC. pub encoding: Encoding, /// The protocol id that is formed from the above fields. protocol_id: String, } impl ProtocolId { /// Returns min and max size for messages of given protocol id requests. 
pub fn rpc_request_limits(&self) -> RpcLimits { match self.message_name { Protocol::Status => RpcLimits::new( <StatusMessage as Encode>::ssz_fixed_len(), <StatusMessage as Encode>::ssz_fixed_len(), ), Protocol::Goodbye => RpcLimits::new( <GoodbyeReason as Encode>::ssz_fixed_len(), <GoodbyeReason as Encode>::ssz_fixed_len(), ), Protocol::BlocksByRange => RpcLimits::new( <BlocksByRangeRequest as Encode>::ssz_fixed_len(), <BlocksByRangeRequest as Encode>::ssz_fixed_len(), ), Protocol::BlocksByRoot => { RpcLimits::new(*BLOCKS_BY_ROOT_REQUEST_MIN, *BLOCKS_BY_ROOT_REQUEST_MAX) } Protocol::Ping => RpcLimits::new( <Ping as Encode>::ssz_fixed_len(), <Ping as Encode>::ssz_fixed_len(), ), Protocol::MetaData => RpcLimits::new(0, 0), // Metadata requests are empty } } /// Returns min and max size for messages of given protocol id responses. pub fn rpc_response_limits<T: EthSpec>(&self) -> RpcLimits { match self.message_name { Protocol::Status => RpcLimits::new( <StatusMessage as Encode>::ssz_fixed_len(), <StatusMessage as Encode>::ssz_fixed_len(), ), Protocol::Goodbye => RpcLimits::new(0, 0), // Goodbye request has no response Protocol::BlocksByRange => { RpcLimits::new(*SIGNED_BEACON_BLOCK_MIN, *SIGNED_BEACON_BLOCK_MAX) } Protocol::BlocksByRoot => { RpcLimits::new(*SIGNED_BEACON_BLOCK_MIN, *SIGNED_BEACON_BLOCK_MAX) } Protocol::Ping => RpcLimits::new( <Ping as Encode>::ssz_fixed_len(), <Ping as Encode>::ssz_fixed_len(), ), Protocol::MetaData => RpcLimits::new( <MetaData<T> as Encode>::ssz_fixed_len(), <MetaData<T> as Encode>::ssz_fixed_len(), ), } } } /// An RPC protocol ID. 
impl ProtocolId { pub fn new(message_name: Protocol, version: Version, encoding: Encoding) -> Self { let protocol_id = format!( "{}/{}/{}/{}", PROTOCOL_PREFIX, message_name, version, encoding ); ProtocolId { message_name, version, encoding, protocol_id, } } } impl ProtocolName for ProtocolId { fn protocol_name(&self) -> &[u8] { self.protocol_id.as_bytes() } } /* Inbound upgrade */ // The inbound protocol reads the request, decodes it and returns the stream to the protocol // handler to respond to once ready. pub type InboundOutput<TSocket, TSpec> = (RPCRequest<TSpec>, InboundFramed<TSocket, TSpec>); pub type InboundFramed<TSocket, TSpec> = Framed<TimeoutStream<Compat<TSocket>>, InboundCodec<TSpec>>; impl<TSocket, TSpec> InboundUpgrade<TSocket> for RPCProtocol<TSpec> where TSocket: AsyncRead + AsyncWrite + Unpin + Send + 'static, TSpec: EthSpec, { type Output = InboundOutput<TSocket, TSpec>; type Error = RPCError; type Future = BoxFuture<'static, Result<Self::Output, Self::Error>>; fn upgrade_inbound(self, socket: TSocket, protocol: ProtocolId) -> Self::Future { async move { let protocol_name = protocol.message_name; // convert the socket to tokio compatible socket let socket = socket.compat(); let codec = match protocol.encoding { Encoding::SSZSnappy => { let ssz_snappy_codec = BaseInboundCodec::new(SSZSnappyInboundCodec::new(protocol, MAX_RPC_SIZE)); InboundCodec::SSZSnappy(ssz_snappy_codec) } }; let mut timed_socket = TimeoutStream::new(socket); timed_socket.set_read_timeout(Some(Duration::from_secs(TTFB_TIMEOUT))); let socket = Framed::new(timed_socket, codec); // MetaData requests should be empty, return the stream match protocol_name { Protocol::MetaData => Ok((RPCRequest::MetaData(PhantomData), socket)), _ => { match tokio::time::timeout( Duration::from_secs(REQUEST_TIMEOUT), socket.into_future(), ) .await { Err(e) => Err(RPCError::from(e)), Ok((Some(Ok(request)), stream)) => Ok((request, stream)), Ok((Some(Err(e)), _)) => Err(e), Ok((None, _)) => 
Err(RPCError::IncompleteStream), } } } } .boxed() } } /* Outbound request */ // Combines all the RPC requests into a single enum to implement `UpgradeInfo` and // `OutboundUpgrade` #[derive(Debug, Clone, PartialEq)] pub enum RPCRequest<TSpec: EthSpec> { Status(StatusMessage), Goodbye(GoodbyeReason), BlocksByRange(BlocksByRangeRequest), BlocksByRoot(BlocksByRootRequest), Ping(Ping), MetaData(PhantomData<TSpec>), } impl<TSpec: EthSpec> UpgradeInfo for RPCRequest<TSpec> { type Info = ProtocolId; type InfoIter = Vec<Self::Info>; // add further protocols as we support more encodings/versions fn protocol_info(&self) -> Self::InfoIter { self.supported_protocols() } } /// Implements the encoding per supported protocol for `RPCRequest`. impl<TSpec: EthSpec> RPCRequest<TSpec> { pub fn supported_protocols(&self) -> Vec<ProtocolId> { match self { // add more protocols when versions/encodings are supported RPCRequest::Status(_) => vec![ProtocolId::new( Protocol::Status, Version::V1, Encoding::SSZSnappy, )], RPCRequest::Goodbye(_) => vec![ProtocolId::new( Protocol::Goodbye, Version::V1, Encoding::SSZSnappy, )], RPCRequest::BlocksByRange(_) => vec![ProtocolId::new( Protocol::BlocksByRange, Version::V1, Encoding::SSZSnappy, )], RPCRequest::BlocksByRoot(_) => vec![ProtocolId::new( Protocol::BlocksByRoot, Version::V1, Encoding::SSZSnappy, )], RPCRequest::Ping(_) => vec![ProtocolId::new( Protocol::Ping, Version::V1, Encoding::SSZSnappy, )], RPCRequest::MetaData(_) => vec![ProtocolId::new( Protocol::MetaData, Version::V1, Encoding::SSZSnappy, )], } } /* These functions are used in the handler for stream management */ /// Number of responses expected for this request. 
pub fn expected_responses(&self) -> u64 { match self { RPCRequest::Status(_) => 1, RPCRequest::Goodbye(_) => 0, RPCRequest::BlocksByRange(req) => req.count, RPCRequest::BlocksByRoot(req) => req.block_roots.len() as u64, RPCRequest::Ping(_) => 1, RPCRequest::MetaData(_) => 1, } } /// Gives the corresponding `Protocol` to this request. pub fn protocol(&self) -> Protocol { match self { RPCRequest::Status(_) => Protocol::Status, RPCRequest::Goodbye(_) => Protocol::Goodbye, RPCRequest::BlocksByRange(_) => Protocol::BlocksByRange, RPCRequest::BlocksByRoot(_) => Protocol::BlocksByRoot, RPCRequest::Ping(_) => Protocol::Ping, RPCRequest::MetaData(_) => Protocol::MetaData, } } /// Returns the `ResponseTermination` type associated with the request if a stream gets /// terminated. pub fn stream_termination(&self) -> ResponseTermination { match self { // this only gets called after `multiple_responses()` returns true. Therefore, only // variants that have `multiple_responses()` can have values. RPCRequest::BlocksByRange(_) => ResponseTermination::BlocksByRange, RPCRequest::BlocksByRoot(_) => ResponseTermination::BlocksByRoot, RPCRequest::Status(_) => unreachable!(), RPCRequest::Goodbye(_) => unreachable!(), RPCRequest::Ping(_) => unreachable!(), RPCRequest::MetaData(_) => unreachable!(), } } } /* RPC Response type - used for outbound upgrades */ /* Outbound upgrades */ pub type OutboundFramed<TSocket, TSpec> = Framed<Compat<TSocket>, OutboundCodec<TSpec>>; impl<TSocket, TSpec> OutboundUpgrade<TSocket> for RPCRequest<TSpec> where TSpec: EthSpec + Send + 'static, TSocket: AsyncRead + AsyncWrite + Unpin + Send + 'static, { type Output = OutboundFramed<TSocket, TSpec>; type Error = RPCError; type Future = BoxFuture<'static, Result<Self::Output, Self::Error>>; fn upgrade_outbound(self, socket: TSocket, protocol: Self::Info) -> Self::Future { // convert to a tokio compatible socket let socket = socket.compat(); let codec = match protocol.encoding { Encoding::SSZSnappy => { let 
ssz_snappy_codec = BaseOutboundCodec::new(SSZSnappyOutboundCodec::new(protocol, MAX_RPC_SIZE)); OutboundCodec::SSZSnappy(ssz_snappy_codec) } }; let mut socket = Framed::new(socket, codec); async { socket.send(self).await?; socket.close().await?; Ok(socket) } .boxed() } } /// Error in RPC Encoding/Decoding. #[derive(Debug, Clone, PartialEq)] pub enum RPCError { /// Error when decoding the raw buffer from ssz. // NOTE: in the future a ssz::DecodeError should map to an InvalidData error SSZDecodeError(ssz::DecodeError), /// IO Error. IoError(String), /// The peer returned a valid response but the response indicated an error. ErrorResponse(RPCResponseErrorCode, String), /// Timed out waiting for a response. StreamTimeout, /// Peer does not support the protocol. UnsupportedProtocol, /// Stream ended unexpectedly. IncompleteStream, /// Peer sent invalid data. InvalidData, /// An error occurred due to internal reasons. Ex: timer failure. InternalError(&'static str), /// Negotiation with this peer timed out. NegotiationTimeout, /// Handler rejected this request. 
HandlerRejected, } impl From<ssz::DecodeError> for RPCError { #[inline] fn from(err: ssz::DecodeError) -> Self { RPCError::SSZDecodeError(err) } } impl From<tokio::time::error::Elapsed> for RPCError { fn from(_: tokio::time::error::Elapsed) -> Self { RPCError::StreamTimeout } } impl From<io::Error> for RPCError { fn from(err: io::Error) -> Self { RPCError::IoError(err.to_string()) } } // Error trait is required for `ProtocolsHandler` impl std::fmt::Display for RPCError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match *self { RPCError::SSZDecodeError(ref err) => write!(f, "Error while decoding ssz: {:?}", err), RPCError::InvalidData => write!(f, "Peer sent unexpected data"), RPCError::IoError(ref err) => write!(f, "IO Error: {}", err), RPCError::ErrorResponse(ref code, ref reason) => write!( f, "RPC response was an error: {} with reason: {}", code, reason ), RPCError::StreamTimeout => write!(f, "Stream Timeout"), RPCError::UnsupportedProtocol => write!(f, "Peer does not support the protocol"), RPCError::IncompleteStream => write!(f, "Stream ended unexpectedly"), RPCError::InternalError(ref err) => write!(f, "Internal error: {}", err), RPCError::NegotiationTimeout => write!(f, "Negotiation timeout"), RPCError::HandlerRejected => write!(f, "Handler rejected the request"), } } } impl std::error::Error for RPCError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match *self { // NOTE: this does have a source RPCError::SSZDecodeError(_) => None, RPCError::IoError(_) => None, RPCError::StreamTimeout => None, RPCError::UnsupportedProtocol => None, RPCError::IncompleteStream => None, RPCError::InvalidData => None, RPCError::InternalError(_) => None, RPCError::ErrorResponse(_, _) => None, RPCError::NegotiationTimeout => None, RPCError::HandlerRejected => None, } } } impl<TSpec: EthSpec> std::fmt::Display for RPCRequest<TSpec> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { 
RPCRequest::Status(status) => write!(f, "Status Message: {}", status), RPCRequest::Goodbye(reason) => write!(f, "Goodbye: {}", reason), RPCRequest::BlocksByRange(req) => write!(f, "Blocks by range: {}", req), RPCRequest::BlocksByRoot(req) => write!(f, "Blocks by root: {:?}", req), RPCRequest::Ping(ping) => write!(f, "Ping: {}", ping.data), RPCRequest::MetaData(_) => write!(f, "MetaData request"), } } } impl RPCError { /// Get a `str` representation of the error. /// Used for metrics. pub fn as_static_str(&self) -> &'static str { match self { RPCError::SSZDecodeError { .. } => "decode_error", RPCError::IoError { .. } => "io_error", RPCError::ErrorResponse(ref code, ..) => match code { RPCResponseErrorCode::RateLimited => "rate_limited", RPCResponseErrorCode::InvalidRequest => "invalid_request", RPCResponseErrorCode::ServerError => "server_error", RPCResponseErrorCode::Unknown => "unknown_response_code", }, RPCError::StreamTimeout => "stream_timeout", RPCError::UnsupportedProtocol => "unsupported_protocol", RPCError::IncompleteStream => "incomplete_stream", RPCError::InvalidData => "invalid_data", RPCError::InternalError { .. } => "internal_error", RPCError::NegotiationTimeout => "negotiation_timeout", RPCError::HandlerRejected => "handler_rejected", } } }
35.211149
99
0.593716
e4938272dcd9f4f97467119cc24a6263aebc5b4b
1,175
use async_trait::async_trait; use ezsockets::ClientConfig; use std::io::BufRead; use url::Url; struct Client {} #[async_trait] impl ezsockets::ClientExt for Client { type Params = (); async fn text(&mut self, text: String) -> Result<(), ezsockets::Error> { tracing::info!("received message: {text}"); Ok(()) } async fn binary(&mut self, bytes: Vec<u8>) -> Result<(), ezsockets::Error> { tracing::info!("received bytes: {bytes:?}"); Ok(()) } async fn call(&mut self, params: Self::Params) -> Result<(), ezsockets::Error> { let () = params; Ok(()) } } #[tokio::main] async fn main() { tracing_subscriber::fmt::init(); let url = Url::parse("ws://localhost:8080/websocket").unwrap(); let config = ClientConfig::new(url); let (handle, future) = ezsockets::connect(|_client| Client {}, config).await; tokio::spawn(async move { future.await.unwrap(); }); let stdin = std::io::stdin(); let lines = stdin.lock().lines(); for line in lines { let line = line.unwrap(); tracing::info!("sending {line}"); handle.text(line).await; } }
26.111111
84
0.580426
9c736690085710027bba18e84e6610007de6a273
1,146
use std::fs::File; use pdb::{FallibleIterator, Result, PDB}; #[test] fn test_symbol_depth() -> Result<()> { let file = File::open("fixtures/self/foo.pdb")?; let mut pdb = PDB::open(file)?; let dbi = pdb.debug_information()?; let mut modules = dbi.modules()?; while let Some(module) = modules.next()? { let module_info = match pdb.module_info(&module)? { Some(module_info) => module_info, None => continue, }; let mut depth = 0isize; let mut symbols = module_info.symbols()?; while let Some(symbol) = symbols.next()? { if symbol.starts_scope() { depth += 1; } else if symbol.ends_scope() { depth -= 1; } // The most common case here will be that we forgot to add a raw kind to `starts_scope`. // PDBs seem to use `S_END` for most symbols with inline sites being the notable // exception. In case we forgot a start scope symbol, the depth will become negative. assert!(depth >= 0, "depth must not be negative"); } } Ok(()) }
30.972973
100
0.559337
0a4b5fc9223a62db80b3d2dab0de182e2757a150
8,432
//! # Pre shared keys. //! ```text //! enum { //! reserved(0), //! external(1), //! reinit(2), //! branch(3), //! (255) //! } PSKType; //! //! struct { //! PSKType psktype; //! select (PreSharedKeyID.psktype) { //! case external: //! opaque psk_id<0..255>; //! //! case reinit: //! opaque psk_group_id<0..255>; //! uint64 psk_epoch; //! //! case branch: //! opaque psk_group_id<0..255>; //! uint64 psk_epoch; //! } //! opaque psk_nonce<0..255>; //! } PreSharedKeyID; //! //! struct { //! PreSharedKeyID psks<0..2^16-1>; //! } PreSharedKeys; //! ``` use super::*; use crate::group::{GroupEpoch, GroupId}; use serde::{Deserialize, Serialize}; use std::convert::TryFrom; use tls_codec::{Serialize as TlsSerializeTrait, TlsByteVecU8, TlsVecU16}; /// Type of PSK. /// ```text /// enum { /// reserved(0), /// external(1), /// reinit(2), /// branch(3), /// (255) /// } PSKType; /// ``` #[derive( Debug, PartialEq, Clone, Copy, Serialize, Deserialize, TlsDeserialize, TlsSerialize, TlsSize, )] #[repr(u8)] pub enum PskType { External = 1, Reinit = 2, Branch = 3, } impl TryFrom<u8> for PskType { type Error = &'static str; fn try_from(value: u8) -> Result<Self, Self::Error> { match value { 1 => Ok(PskType::External), 2 => Ok(PskType::Reinit), 3 => Ok(PskType::Branch), _ => Err("Unknown PSK type."), } } } /// External PSK. #[derive( Debug, PartialEq, Clone, Serialize, Deserialize, TlsDeserialize, TlsSerialize, TlsSize, )] pub struct ExternalPsk { psk_id: TlsByteVecU8, } impl ExternalPsk { /// Create a new `ExternalPsk` from a PSK ID pub fn new(psk_id: Vec<u8>) -> Self { Self { psk_id: psk_id.into(), } } /// Return the PSK ID pub fn psk_id(&self) -> &[u8] { self.psk_id.as_slice() } } /// External PSK Bundle. This contains the secret part of the PSK as well as the /// public part that is used as a marker for injection into the key schedule. 
pub struct ExternalPskBundle { secret: Secret, nonce: Vec<u8>, external_psk: ExternalPsk, } impl ExternalPskBundle { /// Create a new bundle pub fn new(ciphersuite: &Ciphersuite, secret: Secret, psk_id: Vec<u8>) -> Self { Self { secret, nonce: ciphersuite.randombytes(ciphersuite.hash_length()), external_psk: ExternalPsk { psk_id: psk_id.into(), }, } } /// Return the `PreSharedKeyID` pub fn to_presharedkey_id(&self) -> PreSharedKeyId { PreSharedKeyId { psk_type: PskType::External, psk: Psk::External(self.external_psk.clone()), psk_nonce: self.nonce.clone().into(), } } /// Return the secret pub fn secret(&self) -> &Secret { &self.secret } } /// ReInit PSK. #[derive( Debug, PartialEq, Clone, Serialize, Deserialize, TlsDeserialize, TlsSerialize, TlsSize, )] pub struct ReinitPsk { pub(crate) psk_group_id: GroupId, pub(crate) psk_epoch: GroupEpoch, } impl ReinitPsk { /// Return the `GroupId` pub fn psk_group_id(&self) -> &GroupId { &self.psk_group_id } /// Return the `GroupEpoch` pub fn psk_epoch(&self) -> GroupEpoch { self.psk_epoch } } /// Branch PSK #[derive( Debug, PartialEq, Clone, Serialize, Deserialize, TlsDeserialize, TlsSerialize, TlsSize, )] pub struct BranchPsk { pub(crate) psk_group_id: GroupId, pub(crate) psk_epoch: GroupEpoch, } impl BranchPsk { /// Return the `GroupId` pub fn psk_group_id(&self) -> &GroupId { &self.psk_group_id } /// Return the `GroupEpoch` pub fn psk_epoch(&self) -> GroupEpoch { self.psk_epoch } } /// PSK enum that can contain the different PSK types #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] pub enum Psk { External(ExternalPsk), Reinit(ReinitPsk), Branch(BranchPsk), } /// A `PreSharedKeyID` is used to uniquely identify the PSKs that get injected /// in the key schedule. 
/// ```text /// struct { /// PSKType psktype; /// select (PreSharedKeyID.psktype) { /// case external: /// opaque psk_id<0..255>; /// /// case reinit: /// opaque psk_group_id<0..255>; /// uint64 psk_epoch; /// /// case branch: /// opaque psk_group_id<0..255>; /// uint64 psk_epoch; /// } /// opaque psk_nonce<0..255>; /// } PreSharedKeyID; /// ``` #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] pub struct PreSharedKeyId { pub(crate) psk_type: PskType, pub(crate) psk: Psk, pub(crate) psk_nonce: TlsByteVecU8, } impl PreSharedKeyId { /// Create a new `PreSharedKeyID` pub fn new(psk_type: PskType, psk: Psk, psk_nonce: Vec<u8>) -> Self { Self { psk_type, psk, psk_nonce: psk_nonce.into(), } } /// Return the type of the PSK pub fn psktype(&self) -> &PskType { &self.psk_type } /// Return the PSK pub fn psk(&self) -> &Psk { &self.psk } /// Return the PSK nonce pub fn psk_nonce(&self) -> &[u8] { self.psk_nonce.as_slice() } } /// `PreSharedKeys` is a vector of `PreSharedKeyID`s. /// struct { /// PreSharedKeyID psks<0..2^16-1>; /// } PreSharedKeys; #[derive(TlsDeserialize, TlsSerialize, TlsSize)] pub struct PreSharedKeys { pub(crate) psks: TlsVecU16<PreSharedKeyId>, } impl PreSharedKeys { /// Return the `PreSharedKeyID`s pub fn psks(&self) -> &[PreSharedKeyId] { self.psks.as_slice() } } /// `PskLabel` is used in the final concatentation of PSKs before they are /// injected in the key schedule. struct { /// PreSharedKeyID id; /// uint16 index; /// uint16 count; /// } PSKLabel; #[derive(TlsSerialize, TlsSize)] pub(crate) struct PskLabel<'a> { pub(crate) id: &'a PreSharedKeyId, pub(crate) index: u16, pub(crate) count: u16, } impl<'a> PskLabel<'a> { /// Create a new `PskLabel` fn new(id: &'a PreSharedKeyId, index: u16, count: u16) -> Self { Self { id, index, count } } } /// This contains the `psk-secret` calculated from the PSKs contained in a /// Commit or a PreSharedKey proposal. 
pub struct PskSecret { secret: Secret, } impl PskSecret { /// Create a new `PskSecret` from PSK IDs and PSKs pub fn new( ciphersuite: &'static Ciphersuite, psk_ids: &[PreSharedKeyId], psks: &[Secret], ) -> Result<Self, PskSecretError> { if psk_ids.len() != psks.len() { return Err(PskSecretError::DifferentLength); } if psks.len() > u16::MAX as usize { return Err(PskSecretError::TooManyKeys); } let mut secret = vec![]; let mls_version = ProtocolVersion::default(); for (index, psk) in psks.iter().enumerate() { let zero_secret = Secret::zero(ciphersuite, mls_version); let psk_input = zero_secret.hkdf_extract(psk); let psk_label = PskLabel::new(&psk_ids[index], index as u16, psks.len() as u16) .tls_serialize_detached() .map_err(|_| PskSecretError::EncodingError)?; // FIXME: remove unwrap let psk_secret = psk_input .kdf_expand_label("derived psk", &psk_label, ciphersuite.hash_length()) .unwrap(); secret.extend_from_slice(psk_secret.as_slice()); } Ok(Self { secret: Secret::from_slice(&secret, mls_version, ciphersuite), }) } /// Return the inner secret pub fn secret(&self) -> &Secret { &self.secret } #[cfg(any(feature = "test-utils", test))] pub(crate) fn random(ciphersuite: &'static Ciphersuite) -> Self { Self { secret: Secret::random(ciphersuite, None /* MLS version */), } } #[cfg(any(feature = "test-utils", test))] pub(crate) fn as_slice(&self) -> &[u8] { self.secret.as_slice() } #[cfg(any(feature = "test-utils", test))] pub(crate) fn clone(&self) -> Self { Self { secret: self.secret.clone(), } } #[cfg(any(feature = "test-utils", test))] pub(crate) fn from_slice(b: &[u8]) -> Self { Self { secret: b.into() } } }
25.865031
97
0.579696
1ae83868ec1322200e021d79aadae9ad9966d9ca
40,457
//! The session, can open and manage substreams #[cfg(not(target_arch = "wasm32"))] use std::time::Instant; use std::{ collections::{hash_map::Entry, BTreeMap, HashMap, HashSet, VecDeque}, io, pin::Pin, task::{Context, Poll}, time::Duration, }; #[cfg(target_arch = "wasm32")] use timer::Instant; use futures::{ channel::mpsc::{channel, unbounded, Receiver, Sender, UnboundedReceiver, UnboundedSender}, Sink, Stream, }; use log::{debug, log_enabled, trace}; use nohash_hasher::IntMap; use tokio::io::{AsyncRead, AsyncWrite}; use tokio_util::codec::Framed; use crate::{ config::Config, control::{Command, Control}, error::Error, frame::{Flag, Flags, Frame, FrameCodec, GoAwayCode, Type}, stream::{StreamEvent, StreamHandle, StreamState}, StreamId, }; use timer::{interval, Interval}; const BUF_SHRINK_THRESHOLD: usize = u8::MAX as usize; const TIMEOUT: Duration = Duration::from_secs(30); /// wasm doesn't support time get, must use browser timer instead /// But we can simulate it with `futures-timer`. /// So, I implemented a global time dependent on `futures-timer`, /// Because in the browser environment, it is always single-threaded, so feel free to be unsafe #[cfg(target_arch = "wasm32")] static mut TIME: Instant = Instant::from_f64(0.0); /// The session pub struct Session<T> { // Framed low level raw stream framed_stream: Framed<T, FrameCodec>, // Got EOF from low level raw stream eof: bool, // remoteGoAway indicates the remote side does // not want further connections. Must be first for alignment. remote_go_away: bool, // localGoAway indicates that we should stop // accepting further connections. Must be first for alignment. local_go_away: bool, // nextStreamID is the next stream we should // send. This depends if we are a client/server. 
next_stream_id: StreamId, ty: SessionType, // config holds our configuration config: Config, // pings is used to track inflight pings pings: BTreeMap<u32, Instant>, ping_id: u32, // streams maps a stream id to a sender of stream, streams: IntMap<StreamId, Sender<Frame>>, // The StreamHandle not yet been polled pending_streams: VecDeque<StreamHandle>, // The buffer which will send to underlying network write_pending_frames: VecDeque<Frame>, // The buffer which will distribute to sub streams read_pending_frames: VecDeque<Frame>, // Why can unbound channel be used here? // // The only reason for the unbound channel being rejected is // that there is a potential memory explosion problem. // We just need to prove that there is no potential infinite // write problem here to use it safely. // // As a network library, it has two influencers, remote behavior and local behavior, // we discuss separately: // // remote: // This unbound channel cannot be used by the remote end, only for local transmission // // local: // Since each stream has a limit such as `send window`, when the upper limit is reached, // it will return to pending and can no longer send data to the channel // // The only problem is that if the stream is opened infinitely, the upper limit of the total // buffer will increase linearly. 
This behavior can be controlled by the user // For receive events from sub streams (for clone to new stream) event_sender: UnboundedSender<StreamEvent>, // For receive events from sub streams event_receiver: UnboundedReceiver<StreamEvent>, /// use to async open stream/close session control_sender: Sender<Command>, control_receiver: Receiver<Command>, keepalive: Option<Interval>, } /// Session type, client or server #[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)] pub enum SessionType { /// The session is a client Client, /// The session is a server (typical low level stream is an accepted TcpStream) Server, } impl SessionType { /// If this is a client type (inbound connection) pub fn is_client(self) -> bool { self == SessionType::Client } /// If this is a server type (outbound connection) pub fn is_server(self) -> bool { self == SessionType::Server } } impl<T> Session<T> where T: AsyncRead + AsyncWrite + Unpin, { /// Create a new session from a low level stream pub fn new(raw_stream: T, config: Config, ty: SessionType) -> Session<T> { let next_stream_id = match ty { SessionType::Client => 1, SessionType::Server => 2, }; let (event_sender, event_receiver) = unbounded(); let (control_sender, control_receiver) = channel(32); let framed_stream = Framed::new( raw_stream, FrameCodec::default().max_frame_size(config.max_stream_window_size), ); let keepalive = if config.enable_keepalive { Some(interval(config.keepalive_interval)) } else { None }; Session { framed_stream, eof: false, remote_go_away: false, local_go_away: false, next_stream_id, ty, config, pings: BTreeMap::default(), ping_id: 0, streams: HashMap::default(), pending_streams: VecDeque::default(), write_pending_frames: VecDeque::default(), read_pending_frames: VecDeque::default(), event_sender, event_receiver, control_sender, control_receiver, keepalive, } } /// Create a server session (typical raw_stream is an accepted TcpStream) pub fn new_server(raw_stream: T, config: Config) -> Session<T> { 
Self::new(raw_stream, config, SessionType::Server) } /// Create a client session pub fn new_client(raw_stream: T, config: Config) -> Session<T> { Self::new(raw_stream, config, SessionType::Client) } /// shutdown is used to close the session and all streams. /// Attempts to send a GoAway before closing the connection. pub fn shutdown(&mut self, cx: &mut Context) -> Result<(), io::Error> { if self.is_dead() { return Ok(()); } // Ignore frames remaining in pending queue self.write_pending_frames.clear(); self.send_go_away(cx)?; Ok(()) } // Send all pending frames to remote streams fn flush(&mut self, cx: &mut Context) -> Result<(), io::Error> { if !self.read_pending_frames.is_empty() || !self.write_pending_frames.is_empty() { self.send_all(cx)?; self.distribute_to_substream(cx)?; } Ok(()) } fn is_dead(&self) -> bool { self.remote_go_away && self.local_go_away || self.eof } fn send_ping(&mut self, cx: &mut Context, ping_id: Option<u32>) -> Result<u32, io::Error> { let (flag, ping_id) = match ping_id { Some(ping_id) => (Flag::Ack, ping_id), None => { self.ping_id = self.ping_id.overflowing_add(1).0; (Flag::Syn, self.ping_id) } }; let frame = Frame::new_ping(Flags::from(flag), ping_id); self.send_frame(cx, frame).map(|_| ping_id) } /// GoAway can be used to prevent accepting further /// connections. It does not close the underlying conn. 
pub fn send_go_away(&mut self, cx: &mut Context) -> Result<(), io::Error> { self.send_go_away_with_code(cx, GoAwayCode::Normal) } fn send_go_away_with_code( &mut self, cx: &mut Context, code: GoAwayCode, ) -> Result<(), io::Error> { // clear all pending write and then send go away to close session self.write_pending_frames.clear(); let frame = Frame::new_go_away(code); self.send_frame(cx, frame)?; self.local_go_away = true; let mut new_timer = interval(self.config.connection_write_timeout); // force registration of new timer to driver let _ignore = Pin::new(&mut new_timer).as_mut().poll_next(cx); // Reuse the keepalive timer to set a time out. If remote peer does not respond // within the time out, consider this session as remote gone away. self.keepalive = Some(new_timer); Ok(()) } /// Open a new stream to remote session pub fn open_stream(&mut self) -> Result<StreamHandle, Error> { if self.is_dead() { Err(Error::SessionShutdown) } else if self.remote_go_away { Err(Error::RemoteGoAway) } else { let stream = self.create_stream(None)?; Ok(stream) } } /// Return a control to async open stream/close session pub fn control(&self) -> Control { Control::new(self.control_sender.clone()) } fn keep_alive(&mut self, cx: &mut Context, ping_at: Instant) -> Result<(), io::Error> { // If the remote peer does not follow the protocol, doesn't ack ping message, // there may be a memory leak, yamux does not clearly define how this should be handled. // According to the authoritative [spec](https://tools.ietf.org/html/rfc6455#section-5.5.2) // of websocket, the keep alive message **must** respond. If it is not responding, // it is a protocol exception and should be disconnected. 
if self .pings .iter() .any(|(_id, time)| time.elapsed() > TIMEOUT) { return Err(io::ErrorKind::TimedOut.into()); } let ping_id = self.send_ping(cx, None)?; debug!("[{:?}] sent keep_alive ping (id={:?})", self.ty, ping_id); self.pings.insert(ping_id, ping_at); Ok(()) } fn create_stream(&mut self, stream_id: Option<StreamId>) -> Result<StreamHandle, Error> { let (stream_id, state) = match stream_id { Some(stream_id) => (stream_id, StreamState::SynReceived), None => { let next_id = self.next_stream_id; self.next_stream_id = self .next_stream_id .checked_add(2) .ok_or(Error::StreamsExhausted)?; (next_id, StreamState::Init) } }; let (frame_sender, frame_receiver) = channel(8); match self.streams.entry(stream_id) { Entry::Occupied(_) => return Err(Error::DuplicateStream), Entry::Vacant(entry) => entry.insert(frame_sender), }; let mut stream = StreamHandle::new( stream_id, self.event_sender.clone(), frame_receiver, state, self.config.max_stream_window_size, self.config.max_stream_window_size, ); if let Err(err) = stream.send_window_update() { debug!("[{:?}] stream.send_window_update error={:?}", self.ty, err); } Ok(stream) } /// Sink `start_send` Ready -> data send to buffer /// Sink `start_send` NotReady -> buffer full need poll complete #[inline] fn send_all(&mut self, cx: &mut Context) -> Result<bool, io::Error> { while let Some(frame) = self.write_pending_frames.pop_front() { if self.is_dead() { break; } let mut sink = Pin::new(&mut self.framed_stream); match sink.as_mut().poll_ready(cx)? { Poll::Ready(()) => { sink.as_mut().start_send(frame)?; } Poll::Pending => { debug!("[{:?}] framed_stream NotReady, frame: {:?}", self.ty, frame); self.write_pending_frames.push_front(frame); if self.poll_complete(cx)? 
{ return Ok(true); } } } } self.poll_complete(cx)?; Ok(false) } /// https://docs.rs/tokio/0.1.19/tokio/prelude/trait.Sink.html /// Must use poll complete to ensure data send to lower-level /// /// Sink `poll_complete` Ready -> no buffer remain, flush all /// Sink `poll_complete` NotReady -> there is more work left to do, may wake up next poll fn poll_complete(&mut self, cx: &mut Context) -> Result<bool, io::Error> { match Pin::new(&mut self.framed_stream).poll_flush(cx) { Poll::Pending => Ok(true), Poll::Ready(res) => res.map(|_| false), } } fn send_frame(&mut self, cx: &mut Context, frame: Frame) -> Result<(), io::Error> { self.write_pending_frames.push_back(frame); if self.send_all(cx)? { debug!("[{:?}] Session::send_frame() finished", self.ty); } Ok(()) } fn handle_frame(&mut self, cx: &mut Context, frame: Frame) -> Result<(), io::Error> { match frame.ty() { Type::Data | Type::WindowUpdate => { self.handle_stream_message(cx, frame)?; } Type::Ping => { self.handle_ping(cx, &frame)?; } Type::GoAway => { self.handle_go_away(cx, &frame)?; } } Ok(()) } /// Try send buffer to all sub streams fn distribute_to_substream(&mut self, cx: &mut Context) -> Result<(), io::Error> { let mut block_substream = HashSet::new(); let new = if self.read_pending_frames.len() > BUF_SHRINK_THRESHOLD { VecDeque::with_capacity(BUF_SHRINK_THRESHOLD) } else { VecDeque::new() }; let buf = ::std::mem::replace(&mut self.read_pending_frames, new); for frame in buf { let stream_id = frame.stream_id(); // Guarantee the order in which messages are sent if block_substream.contains(&stream_id) { trace!("substream({}) blocked", stream_id); self.read_pending_frames.push_back(frame); continue; } if frame.flags().contains(Flag::Syn) { if self.local_go_away { let flags = Flags::from(Flag::Rst); let frame = Frame::new_window_update(flags, stream_id, 0); self.send_frame(cx, frame)?; debug!( "substream({}) local go away send Reset to remote, session.ty={:?}", stream_id, self.ty ); // TODO: should report 
error? return Ok(()); } if self.streams.len() < self.config.max_stream_count && self.pending_streams.len() < self.config.accept_backlog { debug!( "substream({}) accepted, session.ty={:?}", stream_id, self.ty ); let stream = match self.create_stream(Some(stream_id)) { Ok(stream) => stream, Err(_) => { self.send_go_away_with_code(cx, GoAwayCode::ProtocolError)?; return Ok(()); } }; self.pending_streams.push_back(stream); } else { // close the stream immediately debug!("substream({}) closed, session.ty={:?}", stream_id, self.ty); let mut flags = Flags::from(Flag::Ack); flags.add(Flag::Rst); let frame = Frame::new_window_update(flags, stream_id, 0); self.write_pending_frames.push_back(frame); } } let disconnected = { if let Some(frame_sender) = self.streams.get_mut(&stream_id) { match frame_sender.poll_ready(cx) { Poll::Ready(Ok(())) => match frame_sender.try_send(frame) { Ok(_) => false, Err(err) => { if err.is_full() { trace!("substream({}) try_send but full", stream_id); self.read_pending_frames.push_back(err.into_inner()); block_substream.insert(stream_id); false } else { debug!("substream({}) try_send but failed: {}", stream_id, err); true } } }, Poll::Pending => { trace!("substream({}) poll_ready but pending", stream_id); self.read_pending_frames.push_back(frame); block_substream.insert(stream_id); false } Poll::Ready(Err(err)) => { debug!("substream({}) poll_ready but failed: {}", stream_id, err); true } } } else { // TODO: stream already closed ? 
debug!( "substream({}) should exist but not, may drop by self", stream_id ); false } }; if disconnected { debug!("substream({}) removed, session.ty={:?}", stream_id, self.ty); self.streams.remove(&stream_id); } } Ok(()) } // Send message to stream (Data/WindowUpdate) fn handle_stream_message(&mut self, cx: &mut Context, frame: Frame) -> Result<(), io::Error> { self.read_pending_frames.push_back(frame); self.distribute_to_substream(cx)?; Ok(()) } fn handle_ping(&mut self, cx: &mut Context, frame: &Frame) -> Result<(), io::Error> { let flags = frame.flags(); if flags.contains(Flag::Syn) { // Send ping back self.send_ping(cx, Some(frame.length()))?; } else if flags.contains(Flag::Ack) { self.pings.remove(&frame.length()); // If the remote peer does not follow the protocol, // there may be a memory leak, so here need to discard all ping ids below the ack. self.pings = self.pings.split_off(&frame.length()); } else { // TODO: unexpected case, send a GoAwayCode::ProtocolError ? } Ok(()) } fn handle_go_away(&mut self, cx: &mut Context, frame: &Frame) -> Result<(), io::Error> { let mut close = || -> Result<(), io::Error> { self.remote_go_away = true; self.write_pending_frames.clear(); if !self.local_go_away { self.send_go_away(cx)?; } Ok(()) }; match GoAwayCode::from(frame.length()) { GoAwayCode::Normal => close(), GoAwayCode::ProtocolError => { // TODO: report error close() } GoAwayCode::InternalError => { // TODO: report error close() } } } // Receive frames from low level stream fn recv_frames(&mut self, cx: &mut Context) -> Poll<Option<Result<(), io::Error>>> { debug!("[{:?}] poll from framed_stream", self.ty); match Pin::new(&mut self.framed_stream).as_mut().poll_next(cx) { Poll::Ready(Some(Ok(frame))) => { self.handle_frame(cx, frame)?; Poll::Ready(Some(Ok(()))) } Poll::Ready(None) => { self.eof = true; Poll::Ready(None) } Poll::Pending => { debug!("[{:?}] poll framed_stream NotReady", self.ty); Poll::Pending } Poll::Ready(Some(Err(err))) => { debug!("[{:?}] Session 
recv_frames error: {:?}", self.ty, err); Poll::Ready(Some(Err(err))) } } } fn handle_event(&mut self, cx: &mut Context, event: StreamEvent) -> Result<(), io::Error> { match event { StreamEvent::Frame(frame) => { self.send_frame(cx, frame)?; } StreamEvent::Closed(stream_id) => { self.streams.remove(&stream_id); if self.streams.capacity() - self.streams.len() > BUF_SHRINK_THRESHOLD { self.streams.shrink_to_fit(); } } StreamEvent::GoAway => self.send_go_away_with_code(cx, GoAwayCode::ProtocolError)?, } Ok(()) } // Receive events from sub streams fn recv_events(&mut self, cx: &mut Context) -> Poll<Option<Result<(), io::Error>>> { match Pin::new(&mut self.event_receiver).as_mut().poll_next(cx) { Poll::Ready(Some(event)) => { self.handle_event(cx, event)?; Poll::Ready(Some(Ok(()))) } Poll::Ready(None) => { // Since session hold one event sender, // the channel can not be disconnected. unreachable!() } Poll::Pending => Poll::Pending, } } fn control_poll(&mut self, cx: &mut Context) -> Poll<Option<Result<(), io::Error>>> { match Pin::new(&mut self.control_receiver).as_mut().poll_next(cx) { Poll::Ready(Some(event)) => { match event { Command::OpenStream(tx) => { let _ignore = tx.send(self.open_stream()); } Command::Shutdown(tx) => { self.shutdown(cx)?; let _ignore = tx.send(()); } } Poll::Ready(Some(Ok(()))) } Poll::Ready(None) => { // Since session hold one event sender, // the channel can not be disconnected. 
unreachable!() } Poll::Pending => Poll::Pending, } } } impl<T> Stream for Session<T> where T: AsyncRead + AsyncWrite + Unpin, { type Item = Result<StreamHandle, io::Error>; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> { if self.is_dead() { debug!("yamux::Session finished because is_dead"); return Poll::Ready(None); } if log_enabled!(log::Level::Trace) && !(self.write_pending_frames.is_empty() && self.read_pending_frames.is_empty()) { trace!( "yamux::Session write_pending_frames: {}, read_pending_frames: {}", self.write_pending_frames.len(), self.read_pending_frames.len() ); } if let Some(ref mut interval) = self.keepalive { match Pin::new(interval).as_mut().poll_next(cx) { Poll::Ready(Some(_)) => { if self.local_go_away { // The remote peer has not responded to our sent go away code. // Assume that remote peer has gone away and this session should be closed. self.remote_go_away = true; } else { self.keep_alive(cx, Instant::now())?; } } Poll::Ready(None) => { debug!("yamux::Session poll keepalive interval finished"); } Poll::Pending => (), } } let mut need_wake = false; for _ in 0..16 { if self.is_dead() { debug!("yamux::Session finished because is_dead, end"); return Poll::Ready(None); } // Reset initial value need_wake = false; self.flush(cx)?; self.poll_complete(cx)?; // Open stream as soon as possible if let Some(stream) = self.pending_streams.pop_front() { debug!("yamux::Session [{:?}] A stream is ready", self.ty); return Poll::Ready(Some(Ok(stream))); } let mut is_pending = self.control_poll(cx)?.is_pending(); is_pending &= self.recv_frames(cx)?.is_pending(); is_pending &= self.recv_events(cx)?.is_pending(); if is_pending { break; } else { need_wake = true; } } if need_wake { // To ensure we do not starve other tasks waiting on the executor, // we yield here, but immediately wake ourselves up to continue. 
cx.waker().wake_by_ref() } Poll::Pending } } mod timer { #[cfg(feature = "generic-timer")] pub use generic_time::{interval, Interval}; #[cfg(feature = "tokio-timer")] pub use inter::{interval, Interval}; #[cfg(feature = "tokio-timer")] mod inter { use futures::Stream; use std::{ pin::Pin, task::{Context, Poll}, time::Duration, }; use tokio::time::{interval_at, Instant, Interval as Inner}; pub struct Interval(Inner); impl Interval { fn new(period: Duration) -> Self { Self(interval_at(Instant::now() + period, period)) } } impl Stream for Interval { type Item = (); fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<()>> { match self.0.poll_tick(cx) { Poll::Ready(_) => Poll::Ready(Some(())), Poll::Pending => Poll::Pending, } } fn size_hint(&self) -> (usize, Option<usize>) { (std::usize::MAX, None) } } pub fn interval(period: Duration) -> Interval { Interval::new(period) } } #[cfg(target_arch = "wasm32")] pub use wasm_mock::Instant; #[cfg(feature = "generic-timer")] mod generic_time { use futures::{Future, Stream}; use futures_timer::Delay; use std::{ pin::Pin, task::{Context, Poll}, time::Duration, }; pub struct Interval { delay: Delay, period: Duration, } impl Interval { fn new(period: Duration) -> Self { Self { delay: Delay::new(period), period, } } } impl Stream for Interval { type Item = (); fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<()>> { match Pin::new(&mut self.delay).poll(cx) { Poll::Ready(_) => { let dur = self.period; self.delay.reset(dur); #[cfg(target_arch = "wasm32")] unsafe { super::super::TIME += dur; } Poll::Ready(Some(())) } Poll::Pending => Poll::Pending, } } } pub fn interval(period: Duration) -> Interval { assert!(period > Duration::new(0, 0), "`period` must be non-zero."); Interval::new(period) } } #[cfg(target_arch = "wasm32")] #[allow(dead_code)] mod wasm_mock { use std::cmp::{Eq, Ord, Ordering, PartialEq, PartialOrd}; use std::ops::{Add, AddAssign, Sub}; use std::time::Duration; 
#[derive(Debug, Copy, Clone)] pub struct Instant { /// mock inner: f64, } impl PartialEq for Instant { fn eq(&self, other: &Instant) -> bool { // Note that this will most likely only compare equal if we clone an `Instant`, // but that's ok. self.inner == other.inner } } impl Eq for Instant {} impl PartialOrd for Instant { fn partial_cmp(&self, other: &Instant) -> Option<Ordering> { self.inner.partial_cmp(&other.inner) } } impl Ord for Instant { fn cmp(&self, other: &Self) -> Ordering { self.inner.partial_cmp(&other.inner).unwrap() } } impl Instant { pub const fn from_f64(val: f64) -> Self { Instant { inner: val } } pub fn now() -> Instant { unsafe { super::super::TIME } } pub fn duration_since(&self, earlier: Instant) -> Duration { *self - earlier } pub fn elapsed(&self) -> Duration { Instant::now() - *self } } impl Add<Duration> for Instant { type Output = Instant; fn add(self, other: Duration) -> Instant { let new_val = self.inner + other.as_millis() as f64; Instant { inner: new_val as f64, } } } impl Sub<Duration> for Instant { type Output = Instant; fn sub(self, other: Duration) -> Instant { let new_val = self.inner - other.as_millis() as f64; Instant { inner: new_val as f64, } } } impl Sub<Instant> for Instant { type Output = Duration; fn sub(self, other: Instant) -> Duration { let ms = self.inner - other.inner; assert!(ms >= 0.0); Duration::from_millis(ms as u64) } } impl AddAssign<Duration> for Instant { fn add_assign(&mut self, rhs: Duration) { *self = *self + rhs; } } } } #[cfg(test)] mod test { use super::Session; use crate::{ config::Config, frame::{Flag, Flags, Frame, FrameCodec, GoAwayCode, Type}, }; use futures::{ channel::mpsc::{channel, Receiver, Sender}, stream::FusedStream, SinkExt, Stream, StreamExt, }; use std::{ io, pin::Pin, task::{Context, Poll}, time::Duration, }; use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, ReadBuf}; use tokio_util::codec::Framed; struct MockSocket { sender: Sender<Vec<u8>>, receiver: Receiver<Vec<u8>>, read_buffer: 
Vec<u8>, } impl MockSocket { fn new() -> (Self, Self) { let (tx, rx) = channel(25); let (tx_1, rx_1) = channel(25); ( MockSocket { sender: tx, receiver: rx_1, read_buffer: Default::default(), }, MockSocket { sender: tx_1, receiver: rx, read_buffer: Default::default(), }, ) } } impl AsyncRead for MockSocket { fn poll_read( mut self: Pin<&mut Self>, cx: &mut Context, buf: &mut ReadBuf<'_>, ) -> Poll<io::Result<()>> { loop { if self.receiver.is_terminated() { break; } match Pin::new(&mut self.receiver).poll_next(cx) { Poll::Ready(Some(data)) => self.read_buffer.extend(data), Poll::Ready(None) => { return Poll::Ready(Err(io::ErrorKind::BrokenPipe.into())); } Poll::Pending => break, } } let n = ::std::cmp::min(buf.remaining(), self.read_buffer.len()); if n == 0 { Poll::Pending } else { buf.put_slice(&self.read_buffer[..n]); self.read_buffer.drain(..n); Poll::Ready(Ok(())) } } } impl AsyncWrite for MockSocket { fn poll_write( mut self: Pin<&mut Self>, cx: &mut Context, buf: &[u8], ) -> Poll<io::Result<usize>> { match self.sender.poll_ready(cx) { Poll::Ready(Ok(())) => match self.sender.try_send(buf.to_vec()) { Ok(_) => Poll::Ready(Ok(buf.len())), Err(e) => { if e.is_full() { Poll::Pending } else { Poll::Ready(Err(io::ErrorKind::BrokenPipe.into())) } } }, Poll::Pending => Poll::Pending, Poll::Ready(Err(_)) => Poll::Ready(Err(io::ErrorKind::BrokenPipe.into())), } } fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context) -> Poll<io::Result<()>> { Poll::Ready(Ok(())) } fn poll_shutdown(mut self: Pin<&mut Self>, _cx: &mut Context) -> Poll<io::Result<()>> { self.receiver.close(); self.sender.close_channel(); Poll::Ready(Ok(())) } } #[test] fn test_open_exist_stream() { let rt = tokio::runtime::Runtime::new().unwrap(); rt.block_on(async { let (remote, local) = MockSocket::new(); let config = Config { enable_keepalive: false, ..Default::default() }; let mut session = Session::new_server(local, config); tokio::spawn(async move { while let Some(Ok(mut stream)) = 
session.next().await { tokio::spawn(async move { let mut buf = [0; 100]; let _ignore = stream.read(&mut buf).await; }); } }); let mut client = Framed::new( remote, FrameCodec::default().max_frame_size(config.max_stream_window_size), ); let next_stream_id = 3; // open stream let frame = Frame::new_window_update(Flags::from(Flag::Syn), next_stream_id, 0); client.send(frame).await.unwrap(); // stream window respond assert_eq!( Frame::new_window_update(Flags::from(Flag::Ack), next_stream_id, 0), client.next().await.unwrap().unwrap() ); // open stream with duplicate stream id let frame = Frame::new_window_update(Flags::from(Flag::Syn), next_stream_id, 0); client.send(frame).await.unwrap(); // get go away with protocol error let go_away = client.next().await.unwrap().unwrap(); assert_eq!(go_away.ty(), Type::GoAway); assert_eq!( GoAwayCode::from(go_away.length()), GoAwayCode::ProtocolError ) }) } // issue: https://github.com/nervosnetwork/tentacle/issues/259 // The reason for the problem is that when the session is closed, // all stream states are not set to `RemoteClosed` // // This test can simulate a stuck state. If it is not set, // the test will remain stuck and cannot be finished. 
#[test] fn test_close_session_on_stream_opened() { let rt = tokio::runtime::Runtime::new().unwrap(); rt.block_on(async { let (remote, local) = MockSocket::new(); let config = Config::default(); let mut session = Session::new_server(local, config); tokio::spawn(async move { while let Some(Ok(mut stream)) = session.next().await { tokio::spawn(async move { let mut buf = [0; 100]; let _ignore = stream.read(&mut buf).await; }); } }); let mut client = Session::new_client(remote, config); let mut control = client.control(); let mut stream = client.open_stream().unwrap(); tokio::spawn(async move { loop { match client.next().await { Some(Ok(_)) => (), Some(Err(_)) => { break; } None => { break; } } } }); tokio::spawn(async move { control.close().await; }); let mut buf = [0; 100]; let _ignore = stream.read(&mut buf).await; }) } #[test] fn test_open_too_many_stream() { let rt = tokio::runtime::Runtime::new().unwrap(); rt.block_on(async { let (remote, local) = MockSocket::new(); let config = Config { enable_keepalive: false, max_stream_count: 1, ..Default::default() }; let mut session = Session::new_server(local, config); tokio::spawn(async move { while let Some(Ok(mut stream)) = session.next().await { tokio::spawn(async move { let mut buf = [0; 100]; let _ignore = stream.read(&mut buf).await; }); } }); let mut client = Framed::new( remote, FrameCodec::default().max_frame_size(config.max_stream_window_size), ); let next_stream_id = 3; // open stream let frame = Frame::new_window_update(Flags::from(Flag::Syn), next_stream_id, 0); client.send(frame).await.unwrap(); // stream window respond assert_eq!( Frame::new_window_update(Flags::from(Flag::Ack), next_stream_id, 0), client.next().await.unwrap().unwrap() ); let frame = Frame::new_window_update(Flags::from(Flag::Syn), next_stream_id + 2, 0); client.send(frame).await.unwrap(); // get reset msg let reset_msg = client.next().await.unwrap().unwrap(); assert_eq!(reset_msg.ty(), Type::WindowUpdate); 
assert!(!reset_msg.flags().contains(Flag::Syn)); assert!(reset_msg.flags().contains(Flag::Ack)); assert!(reset_msg.flags().contains(Flag::Rst)); assert_eq!(reset_msg.stream_id(), 5) }); } #[test] fn test_remote_does_not_respond_go_away() { let rt = tokio::runtime::Runtime::new().unwrap(); rt.block_on(async { let (_remote, local) = MockSocket::new(); let config = Config { enable_keepalive: false, connection_write_timeout: Duration::from_secs(1), ..Default::default() }; let mut session = Session::new_server(local, config); let mut control = session.control(); tokio::spawn(async move { let _ignore = control.close().await; }); // The purpose of this test is to ensure that if the remote does not respond to the // go away message, it must be able to actively disconnect the session instead of hanging. // So, if the test fails to exit, it means there has a problem while let Some(Ok(mut stream)) = session.next().await { tokio::spawn(async move { let mut buf = [0; 100]; let _ignore = stream.read(&mut buf).await; }); } }); } }
34.343803
102
0.502756
2694125be6e67e8043d94e91ee1627e8681b376a
5,676
//! zkSync core private API server. //! //! This file contains endpoint expected to be used by //! other components of zkSync stack **only**. This API must not be //! available from outside of the cluster. //! //! All the incoming data is assumed to be correct and not double-checked //! for correctness. use crate::{eth_watch::EthWatchRequest, mempool::MempoolRequest}; use actix_web::{web, App, HttpResponse, HttpServer}; use futures::{ channel::{mpsc, oneshot}, sink::SinkExt, }; use std::thread; use zksync_config::ApiServerOptions; use zksync_types::{tx::TxEthSignature, Address, SignedZkSyncTx, H256}; use zksync_utils::panic_notify::ThreadPanicNotify; #[derive(Debug, Clone)] struct AppState { mempool_tx_sender: mpsc::Sender<MempoolRequest>, eth_watch_req_sender: mpsc::Sender<EthWatchRequest>, } /// Adds a new transaction into the mempool. /// Returns a JSON representation of `Result<(), TxAddError>`. /// Expects transaction to be checked on the API side. #[actix_web::post("/new_tx")] async fn new_tx( data: web::Data<AppState>, web::Json(tx): web::Json<SignedZkSyncTx>, ) -> actix_web::Result<HttpResponse> { let (sender, receiver) = oneshot::channel(); let item = MempoolRequest::NewTx(Box::new(tx), sender); let mut mempool_sender = data.mempool_tx_sender.clone(); mempool_sender .send(item) .await .map_err(|_err| HttpResponse::InternalServerError().finish())?; let response = receiver .await .map_err(|_err| HttpResponse::InternalServerError().finish())?; Ok(HttpResponse::Ok().json(response)) } /// Adds a new transactions batch into the mempool. /// Returns a JSON representation of `Result<(), TxAddError>`. /// Expects transaction to be checked on the API side. 
#[actix_web::post("/new_txs_batch")] async fn new_txs_batch( data: web::Data<AppState>, web::Json((txs, eth_signature)): web::Json<(Vec<SignedZkSyncTx>, Option<TxEthSignature>)>, ) -> actix_web::Result<HttpResponse> { let (sender, receiver) = oneshot::channel(); let item = MempoolRequest::NewTxsBatch(txs, eth_signature, sender); let mut mempool_sender = data.mempool_tx_sender.clone(); mempool_sender .send(item) .await .map_err(|_err| HttpResponse::InternalServerError().finish())?; let response = receiver .await .map_err(|_err| HttpResponse::InternalServerError().finish())?; Ok(HttpResponse::Ok().json(response)) } /// Obtains information about unconfirmed deposits known for a certain address. #[actix_web::get("/unconfirmed_deposits/{address}")] async fn unconfirmed_deposits( data: web::Data<AppState>, web::Path(address): web::Path<Address>, ) -> actix_web::Result<HttpResponse> { let (sender, receiver) = oneshot::channel(); let item = EthWatchRequest::GetUnconfirmedDeposits { address, resp: sender, }; let mut eth_watch_sender = data.eth_watch_req_sender.clone(); eth_watch_sender .send(item) .await .map_err(|_err| HttpResponse::InternalServerError().finish())?; let response = receiver .await .map_err(|_err| HttpResponse::InternalServerError().finish())?; Ok(HttpResponse::Ok().json(response)) } /// Obtains information about unconfirmed deposits known for a certain address. 
#[actix_web::get("/unconfirmed_op/{tx_hash}")] async fn unconfirmed_op( data: web::Data<AppState>, web::Path(eth_hash): web::Path<H256>, ) -> actix_web::Result<HttpResponse> { let (sender, receiver) = oneshot::channel(); let item = EthWatchRequest::GetUnconfirmedOpByHash { eth_hash: eth_hash.as_ref().to_vec(), resp: sender, }; let mut eth_watch_sender = data.eth_watch_req_sender.clone(); eth_watch_sender .send(item) .await .map_err(|_err| HttpResponse::InternalServerError().finish())?; let response = receiver .await .map_err(|_err| HttpResponse::InternalServerError().finish())?; Ok(HttpResponse::Ok().json(response)) } #[allow(clippy::too_many_arguments)] pub fn start_private_core_api( panic_notify: mpsc::Sender<bool>, mempool_tx_sender: mpsc::Sender<MempoolRequest>, eth_watch_req_sender: mpsc::Sender<EthWatchRequest>, api_server_options: ApiServerOptions, ) { thread::Builder::new() .name("core-private-api".to_string()) .spawn(move || { let _panic_sentinel = ThreadPanicNotify(panic_notify.clone()); let mut actix_runtime = actix_rt::System::new("core-private-api-server"); actix_runtime.block_on(async move { // Start HTTP server. HttpServer::new(move || { let app_state = AppState { mempool_tx_sender: mempool_tx_sender.clone(), eth_watch_req_sender: eth_watch_req_sender.clone(), }; // By calling `register_data` instead of `data` we're avoiding double // `Arc` wrapping of the object. App::new() .wrap(actix_web::middleware::Logger::default()) .app_data(web::Data::new(app_state)) .service(new_tx) .service(new_txs_batch) .service(unconfirmed_op) .service(unconfirmed_deposits) }) .bind(&api_server_options.core_server_address) .expect("failed to bind") .run() .await }) }) .expect("failed to start prover server"); }
35.475
94
0.63883
4a803a36f2f201f3e6556de0753be6a0e5c603e8
5,995
#[cfg(not(feature = "std"))] use alloc::{collections::BTreeMap, rc::Rc, string::String, vec::Vec}; #[cfg(feature = "std")] use std::{ collections::{BTreeMap, HashMap}, hash::{BuildHasher, Hash}, rc::Rc, }; use crate::{ decoding::{Decoder, Error, Object}, encoding::AsString, state_tracker::StructureError, }; ///Basic trait for bencode based value deserialization. pub trait FromBencode { /// Maximum allowed depth of nested structures before the decoding should be aborted. const EXPECTED_RECURSION_DEPTH: usize = 2048; /// Deserialize an object from its byte representation. fn from_bencode(bytes: &[u8]) -> Result<Self, Error> where Self: Sized, { let mut decoder = Decoder::new(bytes).with_max_depth(Self::EXPECTED_RECURSION_DEPTH); let object = decoder.next_object()?; object.map_or( Err(Error::from(StructureError::UnexpectedEof)), Self::decode_bencode_object, ) } /// Deserialize an object from its intermediate bencode representation. fn decode_bencode_object(object: Object) -> Result<Self, Error> where Self: Sized; } macro_rules! impl_from_bencode_for_integer { ($($type:ty)*) => {$( impl FromBencode for $type { const EXPECTED_RECURSION_DEPTH: usize = 0; fn decode_bencode_object(object: Object) -> Result<Self, Error> where Self: Sized, { let content = object.try_into_integer()?; let number = content.parse::<$type>()?; Ok(number) } } )*} } impl_from_bencode_for_integer!(u8 u16 u32 u64 u128 usize i8 i16 i32 i64 i128 isize); impl<ContentT: FromBencode> FromBencode for Vec<ContentT> { const EXPECTED_RECURSION_DEPTH: usize = ContentT::EXPECTED_RECURSION_DEPTH + 1; fn decode_bencode_object(object: Object) -> Result<Self, Error> where Self: Sized, { let mut list = object.try_into_list()?; let mut results = Vec::new(); while let Some(object) = list.next_object()? 
{ let item = ContentT::decode_bencode_object(object)?; results.push(item); } Ok(results) } } impl FromBencode for String { const EXPECTED_RECURSION_DEPTH: usize = 0; fn decode_bencode_object(object: Object) -> Result<Self, Error> where Self: Sized, { let content = object.try_into_bytes()?; let content = String::from_utf8(content.to_vec())?; Ok(content) } } impl<K, V> FromBencode for BTreeMap<K, V> where K: FromBencode + Ord, V: FromBencode, { const EXPECTED_RECURSION_DEPTH: usize = V::EXPECTED_RECURSION_DEPTH + 1; fn decode_bencode_object(object: Object) -> Result<Self, Error> where Self: Sized, { let mut dict = object.try_into_dictionary()?; let mut result = BTreeMap::default(); while let Some((key, value)) = dict.next_pair()? { let key = K::decode_bencode_object(Object::Bytes(key))?; let value = V::decode_bencode_object(value)?; result.insert(key, value); } Ok(result) } } #[cfg(feature = "std")] impl<K, V, H> FromBencode for HashMap<K, V, H> where K: FromBencode + Hash + Eq, V: FromBencode, H: BuildHasher + Default, { const EXPECTED_RECURSION_DEPTH: usize = V::EXPECTED_RECURSION_DEPTH + 1; fn decode_bencode_object(object: Object) -> Result<Self, Error> where Self: Sized, { let mut dict = object.try_into_dictionary()?; let mut result = HashMap::default(); while let Some((key, value)) = dict.next_pair()? 
{ let key = K::decode_bencode_object(Object::Bytes(key))?; let value = V::decode_bencode_object(value)?; result.insert(key, value); } Ok(result) } } impl<T: FromBencode> FromBencode for Rc<T> { const EXPECTED_RECURSION_DEPTH: usize = T::EXPECTED_RECURSION_DEPTH; fn decode_bencode_object(object: Object) -> Result<Self, Error> where Self: Sized, { T::decode_bencode_object(object).map(Rc::new) } } impl FromBencode for AsString<Vec<u8>> { const EXPECTED_RECURSION_DEPTH: usize = 0; fn decode_bencode_object(object: Object) -> Result<Self, Error> where Self: Sized, { object.try_into_bytes().map(Vec::from).map(AsString) } } #[cfg(test)] mod test { #[cfg(not(feature = "std"))] use alloc::{format, vec::Vec}; use crate::encoding::AsString; use super::*; #[test] fn from_bencode_to_string_should_work_with_valid_input() { let expected_message = "hello"; let serialized_message = format!("{}:{}", expected_message.len(), expected_message).into_bytes(); let decoded_message = String::from_bencode(&serialized_message).unwrap(); assert_eq!(expected_message, decoded_message); } #[test] fn from_bencode_to_as_string_should_work_with_valid_input() { let expected_message = "hello"; let serialized_message = format!("{}:{}", expected_message.len(), expected_message).into_bytes(); let decoded_vector = AsString::from_bencode(&serialized_message).unwrap(); assert_eq!(expected_message.as_bytes(), &decoded_vector.0[..]); } #[test] #[should_panic(expected = "Num")] fn from_bencode_to_as_string_should_fail_for_integer() { AsString::<Vec<u8>>::from_bencode(&b"i1e"[..]).unwrap(); } #[test] #[should_panic(expected = "NestingTooDeep")] fn from_bencode_to_as_string_should_fail_for_list() { AsString::<Vec<u8>>::from_bencode(&b"l1:ae"[..]).unwrap(); } #[test] #[should_panic(expected = "NestingTooDeep")] fn from_bencode_to_as_string_should_fail_for_dictionary() { AsString::<Vec<u8>>::from_bencode(&b"d1:a1:ae"[..]).unwrap(); } }
27.626728
93
0.622686
bb0d460b3463a5ed4b92ecfb2179c0c35fc2f544
8,981
// Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. //! Functions for printing array values, as strings, for debugging //! purposes. See the `pretty` crate for additional functions for //! record batch pretty printing. use crate::array; use crate::array::Array; use crate::datatypes::{ ArrowNativeType, ArrowPrimitiveType, DataType, Int16Type, Int32Type, Int64Type, Int8Type, TimeUnit, UInt16Type, UInt32Type, UInt64Type, UInt8Type, }; use array::DictionaryArray; use crate::error::{ArrowError, Result}; macro_rules! make_string { ($array_type:ty, $column: ident, $row: ident) => {{ let array = $column.as_any().downcast_ref::<$array_type>().unwrap(); let s = if array.is_null($row) { "".to_string() } else { array.value($row).to_string() }; Ok(s) }}; } macro_rules! make_string_date { ($array_type:ty, $column: ident, $row: ident) => {{ let array = $column.as_any().downcast_ref::<$array_type>().unwrap(); let s = if array.is_null($row) { "".to_string() } else { array .value_as_date($row) .map(|d| d.to_string()) .unwrap_or_else(|| "ERROR CONVERTING DATE".to_string()) }; Ok(s) }}; } macro_rules! 
make_string_time { ($array_type:ty, $column: ident, $row: ident) => {{ let array = $column.as_any().downcast_ref::<$array_type>().unwrap(); let s = if array.is_null($row) { "".to_string() } else { array .value_as_time($row) .map(|d| d.to_string()) .unwrap_or_else(|| "ERROR CONVERTING DATE".to_string()) }; Ok(s) }}; } macro_rules! make_string_datetime { ($array_type:ty, $column: ident, $row: ident) => {{ let array = $column.as_any().downcast_ref::<$array_type>().unwrap(); let s = if array.is_null($row) { "".to_string() } else { array .value_as_datetime($row) .map(|d| d.to_string()) .unwrap_or_else(|| "ERROR CONVERTING DATE".to_string()) }; Ok(s) }}; } // It's not possible to do array.value($row).to_string() for &[u8], let's format it as hex macro_rules! make_string_hex { ($array_type:ty, $column: ident, $row: ident) => {{ let array = $column.as_any().downcast_ref::<$array_type>().unwrap(); let s = if array.is_null($row) { "".to_string() } else { let mut tmp = "".to_string(); for character in array.value($row) { tmp += &format!("{:02x}", character); } tmp }; Ok(s) }}; } macro_rules! make_string_from_list { ($column: ident, $row: ident) => {{ let list = $column .as_any() .downcast_ref::<array::ListArray>() .ok_or(ArrowError::InvalidArgumentError(format!( "Repl error: could not convert list column to list array." )))? .value($row); let string_values = (0..list.len()) .map(|i| array_value_to_string(&list.clone(), i)) .collect::<Result<Vec<String>>>()?; Ok(format!("[{}]", string_values.join(", "))) }}; } /// Get the value at the given row in an array as a String. /// /// Note this function is quite inefficient and is unlikely to be /// suitable for converting large arrays or record batches. 
pub fn array_value_to_string(column: &array::ArrayRef, row: usize) -> Result<String> { match column.data_type() { DataType::Utf8 => make_string!(array::StringArray, column, row), DataType::LargeUtf8 => make_string!(array::LargeStringArray, column, row), DataType::Binary => make_string_hex!(array::BinaryArray, column, row), DataType::LargeBinary => make_string_hex!(array::LargeBinaryArray, column, row), DataType::Boolean => make_string!(array::BooleanArray, column, row), DataType::Int8 => make_string!(array::Int8Array, column, row), DataType::Int16 => make_string!(array::Int16Array, column, row), DataType::Int32 => make_string!(array::Int32Array, column, row), DataType::Int64 => make_string!(array::Int64Array, column, row), DataType::UInt8 => make_string!(array::UInt8Array, column, row), DataType::UInt16 => make_string!(array::UInt16Array, column, row), DataType::UInt32 => make_string!(array::UInt32Array, column, row), DataType::UInt64 => make_string!(array::UInt64Array, column, row), DataType::Float16 => make_string!(array::Float32Array, column, row), DataType::Float32 => make_string!(array::Float32Array, column, row), DataType::Float64 => make_string!(array::Float64Array, column, row), DataType::Timestamp(unit, _) if *unit == TimeUnit::Second => { make_string_datetime!(array::TimestampSecondArray, column, row) } DataType::Timestamp(unit, _) if *unit == TimeUnit::Millisecond => { make_string_datetime!(array::TimestampMillisecondArray, column, row) } DataType::Timestamp(unit, _) if *unit == TimeUnit::Microsecond => { make_string_datetime!(array::TimestampMicrosecondArray, column, row) } DataType::Timestamp(unit, _) if *unit == TimeUnit::Nanosecond => { make_string_datetime!(array::TimestampNanosecondArray, column, row) } DataType::Date32 => make_string_date!(array::Date32Array, column, row), DataType::Date64 => make_string_date!(array::Date64Array, column, row), DataType::Time32(unit) if *unit == TimeUnit::Second => { make_string_time!(array::Time32SecondArray, 
column, row) } DataType::Time32(unit) if *unit == TimeUnit::Millisecond => { make_string_time!(array::Time32MillisecondArray, column, row) } DataType::Time64(unit) if *unit == TimeUnit::Microsecond => { make_string_time!(array::Time64MicrosecondArray, column, row) } DataType::Time64(unit) if *unit == TimeUnit::Nanosecond => { make_string_time!(array::Time64NanosecondArray, column, row) } DataType::List(_) => make_string_from_list!(column, row), DataType::Dictionary(index_type, _value_type) => match **index_type { DataType::Int8 => dict_array_value_to_string::<Int8Type>(column, row), DataType::Int16 => dict_array_value_to_string::<Int16Type>(column, row), DataType::Int32 => dict_array_value_to_string::<Int32Type>(column, row), DataType::Int64 => dict_array_value_to_string::<Int64Type>(column, row), DataType::UInt8 => dict_array_value_to_string::<UInt8Type>(column, row), DataType::UInt16 => dict_array_value_to_string::<UInt16Type>(column, row), DataType::UInt32 => dict_array_value_to_string::<UInt32Type>(column, row), DataType::UInt64 => dict_array_value_to_string::<UInt64Type>(column, row), _ => Err(ArrowError::InvalidArgumentError(format!( "Pretty printing not supported for {:?} due to index type", column.data_type() ))), }, _ => Err(ArrowError::InvalidArgumentError(format!( "Pretty printing not implemented for {:?} type", column.data_type() ))), } } /// Converts the value of the dictionary array at `row` to a String fn dict_array_value_to_string<K: ArrowPrimitiveType>( colum: &array::ArrayRef, row: usize, ) -> Result<String> { let dict_array = colum.as_any().downcast_ref::<DictionaryArray<K>>().unwrap(); let keys_array = dict_array.keys_array(); if keys_array.is_null(row) { return Ok(String::from("")); } let dict_index = keys_array.value(row).to_usize().ok_or_else(|| { ArrowError::InvalidArgumentError(format!( "Can not convert value {:?} at index {:?} to usize for string conversion.", keys_array.value(row), row )) })?; array_value_to_string(&dict_array.values(), 
dict_index) }
39.390351
90
0.612849
bf3397408f40595db5e27e8975742eb62f63e5e0
4,887
#![allow(unused_imports)] use super::*; use wasm_bindgen::prelude::*; #[wasm_bindgen] extern "C" { # [ wasm_bindgen ( extends = EventTarget , extends = :: js_sys :: Object , js_name = TextTrackList , typescript_type = "TextTrackList" ) ] #[derive(Debug, Clone, PartialEq, Eq)] #[doc = "The `TextTrackList` class."] #[doc = ""] #[doc = "[MDN Documentation](https://developer.mozilla.org/en-US/docs/Web/API/TextTrackList)"] #[doc = ""] #[doc = "*This API requires the following crate features to be activated: `TextTrackList`*"] pub type TextTrackList; # [ wasm_bindgen ( structural , method , getter , js_class = "TextTrackList" , js_name = length ) ] #[doc = "Getter for the `length` field of this object."] #[doc = ""] #[doc = "[MDN Documentation](https://developer.mozilla.org/en-US/docs/Web/API/TextTrackList/length)"] #[doc = ""] #[doc = "*This API requires the following crate features to be activated: `TextTrackList`*"] pub fn length(this: &TextTrackList) -> u32; # [ wasm_bindgen ( structural , method , getter , js_class = "TextTrackList" , js_name = onchange ) ] #[doc = "Getter for the `onchange` field of this object."] #[doc = ""] #[doc = "[MDN Documentation](https://developer.mozilla.org/en-US/docs/Web/API/TextTrackList/onchange)"] #[doc = ""] #[doc = "*This API requires the following crate features to be activated: `TextTrackList`*"] pub fn onchange(this: &TextTrackList) -> Option<::js_sys::Function>; # [ wasm_bindgen ( structural , method , setter , js_class = "TextTrackList" , js_name = onchange ) ] #[doc = "Setter for the `onchange` field of this object."] #[doc = ""] #[doc = "[MDN Documentation](https://developer.mozilla.org/en-US/docs/Web/API/TextTrackList/onchange)"] #[doc = ""] #[doc = "*This API requires the following crate features to be activated: `TextTrackList`*"] pub fn set_onchange(this: &TextTrackList, value: Option<&::js_sys::Function>); # [ wasm_bindgen ( structural , method , getter , js_class = "TextTrackList" , js_name = onaddtrack ) ] #[doc = 
"Getter for the `onaddtrack` field of this object."] #[doc = ""] #[doc = "[MDN Documentation](https://developer.mozilla.org/en-US/docs/Web/API/TextTrackList/onaddtrack)"] #[doc = ""] #[doc = "*This API requires the following crate features to be activated: `TextTrackList`*"] pub fn onaddtrack(this: &TextTrackList) -> Option<::js_sys::Function>; # [ wasm_bindgen ( structural , method , setter , js_class = "TextTrackList" , js_name = onaddtrack ) ] #[doc = "Setter for the `onaddtrack` field of this object."] #[doc = ""] #[doc = "[MDN Documentation](https://developer.mozilla.org/en-US/docs/Web/API/TextTrackList/onaddtrack)"] #[doc = ""] #[doc = "*This API requires the following crate features to be activated: `TextTrackList`*"] pub fn set_onaddtrack(this: &TextTrackList, value: Option<&::js_sys::Function>); # [ wasm_bindgen ( structural , method , getter , js_class = "TextTrackList" , js_name = onremovetrack ) ] #[doc = "Getter for the `onremovetrack` field of this object."] #[doc = ""] #[doc = "[MDN Documentation](https://developer.mozilla.org/en-US/docs/Web/API/TextTrackList/onremovetrack)"] #[doc = ""] #[doc = "*This API requires the following crate features to be activated: `TextTrackList`*"] pub fn onremovetrack(this: &TextTrackList) -> Option<::js_sys::Function>; # [ wasm_bindgen ( structural , method , setter , js_class = "TextTrackList" , js_name = onremovetrack ) ] #[doc = "Setter for the `onremovetrack` field of this object."] #[doc = ""] #[doc = "[MDN Documentation](https://developer.mozilla.org/en-US/docs/Web/API/TextTrackList/onremovetrack)"] #[doc = ""] #[doc = "*This API requires the following crate features to be activated: `TextTrackList`*"] pub fn set_onremovetrack(this: &TextTrackList, value: Option<&::js_sys::Function>); #[cfg(feature = "TextTrack")] # [ wasm_bindgen ( method , structural , js_class = "TextTrackList" , js_name = getTrackById ) ] #[doc = "The `getTrackById()` method."] #[doc = ""] #[doc = "[MDN 
Documentation](https://developer.mozilla.org/en-US/docs/Web/API/TextTrackList/getTrackById)"] #[doc = ""] #[doc = "*This API requires the following crate features to be activated: `TextTrack`, `TextTrackList`*"] pub fn get_track_by_id(this: &TextTrackList, id: &str) -> Option<TextTrack>; #[cfg(feature = "TextTrack")] #[wasm_bindgen(method, structural, js_class = "TextTrackList", indexing_getter)] #[doc = "Indexing getter."] #[doc = ""] #[doc = ""] #[doc = ""] #[doc = "*This API requires the following crate features to be activated: `TextTrack`, `TextTrackList`*"] pub fn get(this: &TextTrackList, index: u32) -> Option<TextTrack>; }
61.0875
142
0.654594
6140e647e101d8ef3995944e51b64adfd96ffc57
10,667
//! Datafusion integration for Delta Table //! //! Example: //! //! ```rust //! use std::sync::Arc; //! use datafusion::execution::context::ExecutionContext; //! //! async { //! let mut ctx = ExecutionContext::new(); //! let table = deltalake::open_table("./tests/data/simple_table") //! .await //! .unwrap(); //! ctx.register_table("demo", Arc::new(table)).unwrap(); //! //! let batches = ctx //! .sql("SELECT * FROM demo").unwrap() //! .collect() //! .await.unwrap(); //! }; //! ``` use std::any::Any; use std::convert::TryFrom; use std::sync::Arc; use arrow::datatypes::Schema as ArrowSchema; use datafusion::datasource::datasource::{ColumnStatistics, Statistics}; use datafusion::datasource::TableProvider; use datafusion::logical_plan::{combine_filters, Expr}; use datafusion::physical_plan::parquet::{ParquetExec, ParquetPartition, RowGroupPredicateBuilder}; use datafusion::physical_plan::ExecutionPlan; use datafusion::scalar::ScalarValue; use crate::delta; use crate::schema; impl TableProvider for delta::DeltaTable { fn schema(&self) -> Arc<ArrowSchema> { Arc::new( <ArrowSchema as TryFrom<&schema::Schema>>::try_from( delta::DeltaTable::schema(&self).unwrap(), ) .unwrap(), ) } fn scan( &self, projection: &Option<Vec<usize>>, batch_size: usize, filters: &[Expr], limit: Option<usize>, ) -> datafusion::error::Result<Arc<dyn ExecutionPlan>> { let schema = <ArrowSchema as TryFrom<&schema::Schema>>::try_from( delta::DeltaTable::schema(&self).unwrap(), )?; let filenames = self.get_file_uris(); let partitions = filenames .into_iter() .zip(self.get_active_add_actions()) .map(|(fname, action)| { let statistics = if let Ok(Some(statistics)) = action.get_stats() { Statistics { num_rows: Some(statistics.num_records as usize), total_byte_size: Some(action.size as usize), column_statistics: Some( self.schema() .unwrap() .get_fields() .iter() .map(|field| ColumnStatistics { null_count: statistics .null_count .get(field.get_name()) .and_then(|f| f.as_value().map(|v| v as usize)), 
max_value: statistics .max_values .get(field.get_name()) .and_then(|f| to_scalar_value(f.as_value()?)), min_value: statistics .min_values .get(field.get_name()) .and_then(|f| to_scalar_value(f.as_value()?)), distinct_count: None, // TODO: distinct }) .collect(), ), } } else { Statistics::default() }; Ok(ParquetPartition::new(vec![fname], statistics)) }) .collect::<datafusion::error::Result<_>>()?; let predicate_builder = combine_filters(filters).and_then(|predicate_expr| { RowGroupPredicateBuilder::try_new(&predicate_expr, schema.clone()).ok() }); Ok(Arc::new(ParquetExec::new( partitions, schema, projection.clone(), predicate_builder, batch_size, limit, ))) } fn as_any(&self) -> &dyn Any { self } fn statistics(&self) -> Statistics { self.get_active_add_actions() .iter() .fold( Some(Statistics { num_rows: Some(0), total_byte_size: Some(0), column_statistics: Some(vec![ ColumnStatistics { null_count: Some(0), max_value: None, min_value: None, distinct_count: None }; self.schema().unwrap().get_fields().len() ]), }), |acc, action| { let acc = acc?; let new_stats = action.get_stats().unwrap_or(None)?; Some(Statistics { num_rows: acc .num_rows .map(|rows| rows + new_stats.num_records as usize), total_byte_size: acc .total_byte_size .map(|total_size| total_size + action.size as usize), column_statistics: acc.column_statistics.map(|col_stats| { self.schema() .unwrap() .get_fields() .iter() .zip(col_stats) .map(|(field, stats)| ColumnStatistics { null_count: new_stats .null_count .get(field.get_name()) .and_then(|x| { let null_count_acc = stats.null_count?; let null_count = x.as_value()? 
as usize; Some(null_count_acc + null_count) }), max_value: new_stats.max_values.get(field.get_name()).and_then( |x| { let old_stats = stats.clone(); let max_value = to_scalar_value(x.as_value()?); match (max_value, old_stats.max_value) { (Some(max_value), Some(old_max_value)) => { if left_larger_than_right( old_max_value.clone(), max_value.clone(), ) { Some(old_max_value) } else { Some(max_value) } } (Some(max_value), None) => Some(max_value), (None, old) => old, } }, ), min_value: new_stats.min_values.get(field.get_name()).and_then( |x| { let old_stats = stats.clone(); let min_value = to_scalar_value(x.as_value()?); match (min_value, old_stats.min_value) { (Some(min_value), Some(old_min_value)) => { if left_larger_than_right( min_value.clone(), old_min_value.clone(), ) { Some(old_min_value) } else { Some(min_value) } } (Some(min_value), None) => Some(min_value), (None, old) => old, } }, ), distinct_count: None, // TODO: distinct }) .collect() }), }) }, ) .unwrap_or_default() } } fn to_scalar_value(stat_val: &serde_json::Value) -> Option<datafusion::scalar::ScalarValue> { if stat_val.is_number() { if let Some(val) = stat_val.as_i64() { Some(ScalarValue::from(val)) } else if let Some(val) = stat_val.as_u64() { Some(ScalarValue::from(val)) } else { stat_val.as_f64().map(ScalarValue::from) } } else { None } } fn left_larger_than_right( left: datafusion::scalar::ScalarValue, right: datafusion::scalar::ScalarValue, ) -> bool { match left { ScalarValue::Float64(Some(v)) => { let f_right = f64::try_from(right).unwrap(); v > f_right } ScalarValue::Float32(Some(v)) => { let f_right = f32::try_from(right).unwrap(); v > f_right } ScalarValue::Int8(Some(v)) => { let i_right = i8::try_from(right).unwrap(); v > i_right } ScalarValue::Int16(Some(v)) => { let i_right = i16::try_from(right).unwrap(); v > i_right } ScalarValue::Int32(Some(v)) => { let i_right = i32::try_from(right).unwrap(); v > i_right } ScalarValue::Int64(Some(v)) => { let i_right = i64::try_from(right).unwrap(); 
v > i_right } _ => unimplemented!( "Scalar value comparison unimplemented for {:?} and {:?}", left, right ), } }
40.71374
99
0.380051
29c2e1ca1e33f72c70db7081b82192f35a797248
5,445
#![doc = "generated by AutoRust"] #![allow(non_camel_case_types)] #![allow(unused_imports)] use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct WorkbookTemplate { #[serde(flatten)] pub workbook_template_resource: WorkbookTemplateResource, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<WorkbookTemplateProperties>, } impl WorkbookTemplate { pub fn new(workbook_template_resource: WorkbookTemplateResource) -> Self { Self { workbook_template_resource, properties: None, } } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)] pub struct WorkbookTemplateError { #[serde(default, skip_serializing_if = "Option::is_none")] pub error: Option<WorkbookTemplateErrorBody>, } impl WorkbookTemplateError { pub fn new() -> Self { Self::default() } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)] pub struct WorkbookTemplateErrorBody { #[serde(default, skip_serializing_if = "Option::is_none")] pub code: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub message: Option<String>, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub details: Vec<WorkbookTemplateErrorFieldContract>, } impl WorkbookTemplateErrorBody { pub fn new() -> Self { Self::default() } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)] pub struct WorkbookTemplateErrorFieldContract { #[serde(default, skip_serializing_if = "Option::is_none")] pub code: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub message: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub target: Option<String>, } impl WorkbookTemplateErrorFieldContract { pub fn new() -> Self { Self::default() } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)] pub struct WorkbookTemplateGallery { #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option<String>, #[serde(default, 
skip_serializing_if = "Option::is_none")] pub category: Option<String>, #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")] pub type_: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub order: Option<i32>, #[serde(rename = "resourceType", default, skip_serializing_if = "Option::is_none")] pub resource_type: Option<String>, } impl WorkbookTemplateGallery { pub fn new() -> Self { Self::default() } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)] pub struct WorkbookTemplateLocalizedGallery { #[serde(rename = "templateData", default, skip_serializing_if = "Option::is_none")] pub template_data: Option<serde_json::Value>, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub galleries: Vec<WorkbookTemplateGallery>, } impl WorkbookTemplateLocalizedGallery { pub fn new() -> Self { Self::default() } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct WorkbookTemplateProperties { #[serde(default, skip_serializing_if = "Option::is_none")] pub priority: Option<i32>, #[serde(default, skip_serializing_if = "Option::is_none")] pub author: Option<String>, #[serde(rename = "templateData")] pub template_data: serde_json::Value, pub galleries: Vec<WorkbookTemplateGallery>, #[serde(default, skip_serializing_if = "Option::is_none")] pub localized: Option<serde_json::Value>, } impl WorkbookTemplateProperties { pub fn new(template_data: serde_json::Value, galleries: Vec<WorkbookTemplateGallery>) -> Self { Self { priority: None, author: None, template_data, galleries, localized: None, } } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct WorkbookTemplateResource { #[serde(default, skip_serializing_if = "Option::is_none")] pub id: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option<String>, #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")] pub type_: Option<String>, pub location: String, #[serde(default, 
skip_serializing_if = "Option::is_none")] pub tags: Option<serde_json::Value>, } impl WorkbookTemplateResource { pub fn new(location: String) -> Self { Self { id: None, name: None, type_: None, location, tags: None, } } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)] pub struct WorkbookTemplateUpdateParameters { #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option<serde_json::Value>, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<WorkbookTemplateProperties>, } impl WorkbookTemplateUpdateParameters { pub fn new() -> Self { Self::default() } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)] pub struct WorkbookTemplatesListResult { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<WorkbookTemplate>, } impl WorkbookTemplatesListResult { pub fn new() -> Self { Self::default() } }
34.903846
99
0.68191
b9bb57fef03f1a364d4501e412eb68d0e595d8b7
15
pub mod route;
7.5
14
0.733333
4a4270dc94f3ec5033b205298dcaf8ae93e64c7f
661
// Copyright 2021 Datafuse Labs. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // mod default_database; pub mod default_database_factory;
36.722222
76
0.741301
9b9c2a5826f47e739387c8df4999b0015a84c74b
31,731
#[doc = "Reader of register CC3_CTRL"] pub type R = crate::R<u32, super::CC3_CTRL>; #[doc = "Writer for register CC3_CTRL"] pub type W = crate::W<u32, super::CC3_CTRL>; #[doc = "Register CC3_CTRL `reset()`'s with value 0"] impl crate::ResetValue for super::CC3_CTRL { type Type = u32; #[inline(always)] fn reset_value() -> Self::Type { 0 } } #[doc = "CC Channel Mode\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] #[repr(u8)] pub enum MODE_A { #[doc = "0: Compare/Capture channel turned off"] OFF = 0, #[doc = "1: Input capture"] INPUTCAPTURE = 1, #[doc = "2: Output compare"] OUTPUTCOMPARE = 2, #[doc = "3: Pulse-Width Modulation"] PWM = 3, } impl From<MODE_A> for u8 { #[inline(always)] fn from(variant: MODE_A) -> Self { variant as _ } } #[doc = "Reader of field `MODE`"] pub type MODE_R = crate::R<u8, MODE_A>; impl MODE_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> MODE_A { match self.bits { 0 => MODE_A::OFF, 1 => MODE_A::INPUTCAPTURE, 2 => MODE_A::OUTPUTCOMPARE, 3 => MODE_A::PWM, _ => unreachable!(), } } #[doc = "Checks if the value of the field is `OFF`"] #[inline(always)] pub fn is_off(&self) -> bool { *self == MODE_A::OFF } #[doc = "Checks if the value of the field is `INPUTCAPTURE`"] #[inline(always)] pub fn is_inputcapture(&self) -> bool { *self == MODE_A::INPUTCAPTURE } #[doc = "Checks if the value of the field is `OUTPUTCOMPARE`"] #[inline(always)] pub fn is_outputcompare(&self) -> bool { *self == MODE_A::OUTPUTCOMPARE } #[doc = "Checks if the value of the field is `PWM`"] #[inline(always)] pub fn is_pwm(&self) -> bool { *self == MODE_A::PWM } } #[doc = "Write proxy for field `MODE`"] pub struct MODE_W<'a> { w: &'a mut W, } impl<'a> MODE_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: MODE_A) -> &'a mut W { { self.bits(variant.into()) } } #[doc = "Compare/Capture channel turned off"] #[inline(always)] pub fn off(self) -> &'a mut W { 
self.variant(MODE_A::OFF) } #[doc = "Input capture"] #[inline(always)] pub fn inputcapture(self) -> &'a mut W { self.variant(MODE_A::INPUTCAPTURE) } #[doc = "Output compare"] #[inline(always)] pub fn outputcompare(self) -> &'a mut W { self.variant(MODE_A::OUTPUTCOMPARE) } #[doc = "Pulse-Width Modulation"] #[inline(always)] pub fn pwm(self) -> &'a mut W { self.variant(MODE_A::PWM) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !0x03) | ((value as u32) & 0x03); self.w } } #[doc = "Reader of field `OUTINV`"] pub type OUTINV_R = crate::R<bool, bool>; #[doc = "Write proxy for field `OUTINV`"] pub struct OUTINV_W<'a> { w: &'a mut W, } impl<'a> OUTINV_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 2)) | (((value as u32) & 0x01) << 2); self.w } } #[doc = "Reader of field `COIST`"] pub type COIST_R = crate::R<bool, bool>; #[doc = "Write proxy for field `COIST`"] pub struct COIST_W<'a> { w: &'a mut W, } impl<'a> COIST_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 4)) | (((value as u32) & 0x01) << 4); self.w } } #[doc = "Compare Match Output Action\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] #[repr(u8)] pub enum CMOA_A { #[doc = "0: No action on compare match"] NONE = 0, #[doc = "1: Toggle output on compare match"] TOGGLE = 1, #[doc = "2: Clear output on 
compare match"] CLEAR = 2, #[doc = "3: Set output on compare match"] SET = 3, } impl From<CMOA_A> for u8 { #[inline(always)] fn from(variant: CMOA_A) -> Self { variant as _ } } #[doc = "Reader of field `CMOA`"] pub type CMOA_R = crate::R<u8, CMOA_A>; impl CMOA_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> CMOA_A { match self.bits { 0 => CMOA_A::NONE, 1 => CMOA_A::TOGGLE, 2 => CMOA_A::CLEAR, 3 => CMOA_A::SET, _ => unreachable!(), } } #[doc = "Checks if the value of the field is `NONE`"] #[inline(always)] pub fn is_none(&self) -> bool { *self == CMOA_A::NONE } #[doc = "Checks if the value of the field is `TOGGLE`"] #[inline(always)] pub fn is_toggle(&self) -> bool { *self == CMOA_A::TOGGLE } #[doc = "Checks if the value of the field is `CLEAR`"] #[inline(always)] pub fn is_clear(&self) -> bool { *self == CMOA_A::CLEAR } #[doc = "Checks if the value of the field is `SET`"] #[inline(always)] pub fn is_set(&self) -> bool { *self == CMOA_A::SET } } #[doc = "Write proxy for field `CMOA`"] pub struct CMOA_W<'a> { w: &'a mut W, } impl<'a> CMOA_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: CMOA_A) -> &'a mut W { { self.bits(variant.into()) } } #[doc = "No action on compare match"] #[inline(always)] pub fn none(self) -> &'a mut W { self.variant(CMOA_A::NONE) } #[doc = "Toggle output on compare match"] #[inline(always)] pub fn toggle(self) -> &'a mut W { self.variant(CMOA_A::TOGGLE) } #[doc = "Clear output on compare match"] #[inline(always)] pub fn clear(self) -> &'a mut W { self.variant(CMOA_A::CLEAR) } #[doc = "Set output on compare match"] #[inline(always)] pub fn set(self) -> &'a mut W { self.variant(CMOA_A::SET) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0x03 << 8)) | (((value as u32) & 0x03) << 8); self.w } } #[doc = "Counter Overflow Output Action\n\nValue on reset: 0"] 
#[derive(Clone, Copy, Debug, PartialEq)] #[repr(u8)] pub enum COFOA_A { #[doc = "0: No action on counter overflow"] NONE = 0, #[doc = "1: Toggle output on counter overflow"] TOGGLE = 1, #[doc = "2: Clear output on counter overflow"] CLEAR = 2, #[doc = "3: Set output on counter overflow"] SET = 3, } impl From<COFOA_A> for u8 { #[inline(always)] fn from(variant: COFOA_A) -> Self { variant as _ } } #[doc = "Reader of field `COFOA`"] pub type COFOA_R = crate::R<u8, COFOA_A>; impl COFOA_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> COFOA_A { match self.bits { 0 => COFOA_A::NONE, 1 => COFOA_A::TOGGLE, 2 => COFOA_A::CLEAR, 3 => COFOA_A::SET, _ => unreachable!(), } } #[doc = "Checks if the value of the field is `NONE`"] #[inline(always)] pub fn is_none(&self) -> bool { *self == COFOA_A::NONE } #[doc = "Checks if the value of the field is `TOGGLE`"] #[inline(always)] pub fn is_toggle(&self) -> bool { *self == COFOA_A::TOGGLE } #[doc = "Checks if the value of the field is `CLEAR`"] #[inline(always)] pub fn is_clear(&self) -> bool { *self == COFOA_A::CLEAR } #[doc = "Checks if the value of the field is `SET`"] #[inline(always)] pub fn is_set(&self) -> bool { *self == COFOA_A::SET } } #[doc = "Write proxy for field `COFOA`"] pub struct COFOA_W<'a> { w: &'a mut W, } impl<'a> COFOA_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: COFOA_A) -> &'a mut W { { self.bits(variant.into()) } } #[doc = "No action on counter overflow"] #[inline(always)] pub fn none(self) -> &'a mut W { self.variant(COFOA_A::NONE) } #[doc = "Toggle output on counter overflow"] #[inline(always)] pub fn toggle(self) -> &'a mut W { self.variant(COFOA_A::TOGGLE) } #[doc = "Clear output on counter overflow"] #[inline(always)] pub fn clear(self) -> &'a mut W { self.variant(COFOA_A::CLEAR) } #[doc = "Set output on counter overflow"] #[inline(always)] pub fn set(self) -> &'a mut W { self.variant(COFOA_A::SET) } #[doc = 
r"Writes raw bits to the field"] #[inline(always)] pub fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0x03 << 10)) | (((value as u32) & 0x03) << 10); self.w } } #[doc = "Counter Underflow Output Action\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] #[repr(u8)] pub enum CUFOA_A { #[doc = "0: No action on counter underflow"] NONE = 0, #[doc = "1: Toggle output on counter underflow"] TOGGLE = 1, #[doc = "2: Clear output on counter underflow"] CLEAR = 2, #[doc = "3: Set output on counter underflow"] SET = 3, } impl From<CUFOA_A> for u8 { #[inline(always)] fn from(variant: CUFOA_A) -> Self { variant as _ } } #[doc = "Reader of field `CUFOA`"] pub type CUFOA_R = crate::R<u8, CUFOA_A>; impl CUFOA_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> CUFOA_A { match self.bits { 0 => CUFOA_A::NONE, 1 => CUFOA_A::TOGGLE, 2 => CUFOA_A::CLEAR, 3 => CUFOA_A::SET, _ => unreachable!(), } } #[doc = "Checks if the value of the field is `NONE`"] #[inline(always)] pub fn is_none(&self) -> bool { *self == CUFOA_A::NONE } #[doc = "Checks if the value of the field is `TOGGLE`"] #[inline(always)] pub fn is_toggle(&self) -> bool { *self == CUFOA_A::TOGGLE } #[doc = "Checks if the value of the field is `CLEAR`"] #[inline(always)] pub fn is_clear(&self) -> bool { *self == CUFOA_A::CLEAR } #[doc = "Checks if the value of the field is `SET`"] #[inline(always)] pub fn is_set(&self) -> bool { *self == CUFOA_A::SET } } #[doc = "Write proxy for field `CUFOA`"] pub struct CUFOA_W<'a> { w: &'a mut W, } impl<'a> CUFOA_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: CUFOA_A) -> &'a mut W { { self.bits(variant.into()) } } #[doc = "No action on counter underflow"] #[inline(always)] pub fn none(self) -> &'a mut W { self.variant(CUFOA_A::NONE) } #[doc = "Toggle output on counter underflow"] #[inline(always)] pub fn toggle(self) -> &'a mut W { self.variant(CUFOA_A::TOGGLE) } #[doc 
= "Clear output on counter underflow"] #[inline(always)] pub fn clear(self) -> &'a mut W { self.variant(CUFOA_A::CLEAR) } #[doc = "Set output on counter underflow"] #[inline(always)] pub fn set(self) -> &'a mut W { self.variant(CUFOA_A::SET) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0x03 << 12)) | (((value as u32) & 0x03) << 12); self.w } } #[doc = "Compare/Capture Channel PRS Input Channel Selection\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] #[repr(u8)] pub enum PRSSEL_A { #[doc = "0: PRS Channel 0 selected as input"] PRSCH0 = 0, #[doc = "1: PRS Channel 1 selected as input"] PRSCH1 = 1, #[doc = "2: PRS Channel 2 selected as input"] PRSCH2 = 2, #[doc = "3: PRS Channel 3 selected as input"] PRSCH3 = 3, #[doc = "4: PRS Channel 4 selected as input"] PRSCH4 = 4, #[doc = "5: PRS Channel 5 selected as input"] PRSCH5 = 5, #[doc = "6: PRS Channel 6 selected as input"] PRSCH6 = 6, #[doc = "7: PRS Channel 7 selected as input"] PRSCH7 = 7, #[doc = "8: PRS Channel 8 selected as input"] PRSCH8 = 8, #[doc = "9: PRS Channel 9 selected as input"] PRSCH9 = 9, #[doc = "10: PRS Channel 10 selected as input"] PRSCH10 = 10, #[doc = "11: PRS Channel 11 selected as input"] PRSCH11 = 11, #[doc = "12: PRS Channel 12 selected as input"] PRSCH12 = 12, #[doc = "13: PRS Channel 13 selected as input"] PRSCH13 = 13, #[doc = "14: PRS Channel 14 selected as input"] PRSCH14 = 14, #[doc = "15: PRS Channel 15 selected as input"] PRSCH15 = 15, } impl From<PRSSEL_A> for u8 { #[inline(always)] fn from(variant: PRSSEL_A) -> Self { variant as _ } } #[doc = "Reader of field `PRSSEL`"] pub type PRSSEL_R = crate::R<u8, PRSSEL_A>; impl PRSSEL_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> PRSSEL_A { match self.bits { 0 => PRSSEL_A::PRSCH0, 1 => PRSSEL_A::PRSCH1, 2 => PRSSEL_A::PRSCH2, 3 => PRSSEL_A::PRSCH3, 4 => PRSSEL_A::PRSCH4, 5 => PRSSEL_A::PRSCH5, 6 
=> PRSSEL_A::PRSCH6, 7 => PRSSEL_A::PRSCH7, 8 => PRSSEL_A::PRSCH8, 9 => PRSSEL_A::PRSCH9, 10 => PRSSEL_A::PRSCH10, 11 => PRSSEL_A::PRSCH11, 12 => PRSSEL_A::PRSCH12, 13 => PRSSEL_A::PRSCH13, 14 => PRSSEL_A::PRSCH14, 15 => PRSSEL_A::PRSCH15, _ => unreachable!(), } } #[doc = "Checks if the value of the field is `PRSCH0`"] #[inline(always)] pub fn is_prsch0(&self) -> bool { *self == PRSSEL_A::PRSCH0 } #[doc = "Checks if the value of the field is `PRSCH1`"] #[inline(always)] pub fn is_prsch1(&self) -> bool { *self == PRSSEL_A::PRSCH1 } #[doc = "Checks if the value of the field is `PRSCH2`"] #[inline(always)] pub fn is_prsch2(&self) -> bool { *self == PRSSEL_A::PRSCH2 } #[doc = "Checks if the value of the field is `PRSCH3`"] #[inline(always)] pub fn is_prsch3(&self) -> bool { *self == PRSSEL_A::PRSCH3 } #[doc = "Checks if the value of the field is `PRSCH4`"] #[inline(always)] pub fn is_prsch4(&self) -> bool { *self == PRSSEL_A::PRSCH4 } #[doc = "Checks if the value of the field is `PRSCH5`"] #[inline(always)] pub fn is_prsch5(&self) -> bool { *self == PRSSEL_A::PRSCH5 } #[doc = "Checks if the value of the field is `PRSCH6`"] #[inline(always)] pub fn is_prsch6(&self) -> bool { *self == PRSSEL_A::PRSCH6 } #[doc = "Checks if the value of the field is `PRSCH7`"] #[inline(always)] pub fn is_prsch7(&self) -> bool { *self == PRSSEL_A::PRSCH7 } #[doc = "Checks if the value of the field is `PRSCH8`"] #[inline(always)] pub fn is_prsch8(&self) -> bool { *self == PRSSEL_A::PRSCH8 } #[doc = "Checks if the value of the field is `PRSCH9`"] #[inline(always)] pub fn is_prsch9(&self) -> bool { *self == PRSSEL_A::PRSCH9 } #[doc = "Checks if the value of the field is `PRSCH10`"] #[inline(always)] pub fn is_prsch10(&self) -> bool { *self == PRSSEL_A::PRSCH10 } #[doc = "Checks if the value of the field is `PRSCH11`"] #[inline(always)] pub fn is_prsch11(&self) -> bool { *self == PRSSEL_A::PRSCH11 } #[doc = "Checks if the value of the field is `PRSCH12`"] #[inline(always)] pub fn 
is_prsch12(&self) -> bool { *self == PRSSEL_A::PRSCH12 } #[doc = "Checks if the value of the field is `PRSCH13`"] #[inline(always)] pub fn is_prsch13(&self) -> bool { *self == PRSSEL_A::PRSCH13 } #[doc = "Checks if the value of the field is `PRSCH14`"] #[inline(always)] pub fn is_prsch14(&self) -> bool { *self == PRSSEL_A::PRSCH14 } #[doc = "Checks if the value of the field is `PRSCH15`"] #[inline(always)] pub fn is_prsch15(&self) -> bool { *self == PRSSEL_A::PRSCH15 } } #[doc = "Write proxy for field `PRSSEL`"] pub struct PRSSEL_W<'a> { w: &'a mut W, } impl<'a> PRSSEL_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: PRSSEL_A) -> &'a mut W { { self.bits(variant.into()) } } #[doc = "PRS Channel 0 selected as input"] #[inline(always)] pub fn prsch0(self) -> &'a mut W { self.variant(PRSSEL_A::PRSCH0) } #[doc = "PRS Channel 1 selected as input"] #[inline(always)] pub fn prsch1(self) -> &'a mut W { self.variant(PRSSEL_A::PRSCH1) } #[doc = "PRS Channel 2 selected as input"] #[inline(always)] pub fn prsch2(self) -> &'a mut W { self.variant(PRSSEL_A::PRSCH2) } #[doc = "PRS Channel 3 selected as input"] #[inline(always)] pub fn prsch3(self) -> &'a mut W { self.variant(PRSSEL_A::PRSCH3) } #[doc = "PRS Channel 4 selected as input"] #[inline(always)] pub fn prsch4(self) -> &'a mut W { self.variant(PRSSEL_A::PRSCH4) } #[doc = "PRS Channel 5 selected as input"] #[inline(always)] pub fn prsch5(self) -> &'a mut W { self.variant(PRSSEL_A::PRSCH5) } #[doc = "PRS Channel 6 selected as input"] #[inline(always)] pub fn prsch6(self) -> &'a mut W { self.variant(PRSSEL_A::PRSCH6) } #[doc = "PRS Channel 7 selected as input"] #[inline(always)] pub fn prsch7(self) -> &'a mut W { self.variant(PRSSEL_A::PRSCH7) } #[doc = "PRS Channel 8 selected as input"] #[inline(always)] pub fn prsch8(self) -> &'a mut W { self.variant(PRSSEL_A::PRSCH8) } #[doc = "PRS Channel 9 selected as input"] #[inline(always)] pub fn prsch9(self) -> &'a mut W { 
self.variant(PRSSEL_A::PRSCH9) } #[doc = "PRS Channel 10 selected as input"] #[inline(always)] pub fn prsch10(self) -> &'a mut W { self.variant(PRSSEL_A::PRSCH10) } #[doc = "PRS Channel 11 selected as input"] #[inline(always)] pub fn prsch11(self) -> &'a mut W { self.variant(PRSSEL_A::PRSCH11) } #[doc = "PRS Channel 12 selected as input"] #[inline(always)] pub fn prsch12(self) -> &'a mut W { self.variant(PRSSEL_A::PRSCH12) } #[doc = "PRS Channel 13 selected as input"] #[inline(always)] pub fn prsch13(self) -> &'a mut W { self.variant(PRSSEL_A::PRSCH13) } #[doc = "PRS Channel 14 selected as input"] #[inline(always)] pub fn prsch14(self) -> &'a mut W { self.variant(PRSSEL_A::PRSCH14) } #[doc = "PRS Channel 15 selected as input"] #[inline(always)] pub fn prsch15(self) -> &'a mut W { self.variant(PRSSEL_A::PRSCH15) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0x0f << 16)) | (((value as u32) & 0x0f) << 16); self.w } } #[doc = "Input Capture Edge Select\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] #[repr(u8)] pub enum ICEDGE_A { #[doc = "0: Rising edges detected"] RISING = 0, #[doc = "1: Falling edges detected"] FALLING = 1, #[doc = "2: Both edges detected"] BOTH = 2, #[doc = "3: No edge detection, signal is left as it is"] NONE = 3, } impl From<ICEDGE_A> for u8 { #[inline(always)] fn from(variant: ICEDGE_A) -> Self { variant as _ } } #[doc = "Reader of field `ICEDGE`"] pub type ICEDGE_R = crate::R<u8, ICEDGE_A>; impl ICEDGE_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> ICEDGE_A { match self.bits { 0 => ICEDGE_A::RISING, 1 => ICEDGE_A::FALLING, 2 => ICEDGE_A::BOTH, 3 => ICEDGE_A::NONE, _ => unreachable!(), } } #[doc = "Checks if the value of the field is `RISING`"] #[inline(always)] pub fn is_rising(&self) -> bool { *self == ICEDGE_A::RISING } #[doc = "Checks if the value of the field is `FALLING`"] #[inline(always)] pub 
fn is_falling(&self) -> bool { *self == ICEDGE_A::FALLING } #[doc = "Checks if the value of the field is `BOTH`"] #[inline(always)] pub fn is_both(&self) -> bool { *self == ICEDGE_A::BOTH } #[doc = "Checks if the value of the field is `NONE`"] #[inline(always)] pub fn is_none(&self) -> bool { *self == ICEDGE_A::NONE } } #[doc = "Write proxy for field `ICEDGE`"] pub struct ICEDGE_W<'a> { w: &'a mut W, } impl<'a> ICEDGE_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: ICEDGE_A) -> &'a mut W { { self.bits(variant.into()) } } #[doc = "Rising edges detected"] #[inline(always)] pub fn rising(self) -> &'a mut W { self.variant(ICEDGE_A::RISING) } #[doc = "Falling edges detected"] #[inline(always)] pub fn falling(self) -> &'a mut W { self.variant(ICEDGE_A::FALLING) } #[doc = "Both edges detected"] #[inline(always)] pub fn both(self) -> &'a mut W { self.variant(ICEDGE_A::BOTH) } #[doc = "No edge detection, signal is left as it is"] #[inline(always)] pub fn none(self) -> &'a mut W { self.variant(ICEDGE_A::NONE) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0x03 << 24)) | (((value as u32) & 0x03) << 24); self.w } } #[doc = "Input Capture Event Control\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] #[repr(u8)] pub enum ICEVCTRL_A { #[doc = "0: PRS output pulse and interrupt flag set on every capture"] EVERYEDGE = 0, #[doc = "1: PRS output pulse and interrupt flag set on every second capture"] EVERYSECONDEDGE = 1, #[doc = "2: PRS output pulse and interrupt flag set on rising edge only (if ICEDGE = BOTH)"] RISING = 2, #[doc = "3: PRS output pulse and interrupt flag set on falling edge only (if ICEDGE = BOTH)"] FALLING = 3, } impl From<ICEVCTRL_A> for u8 { #[inline(always)] fn from(variant: ICEVCTRL_A) -> Self { variant as _ } } #[doc = "Reader of field `ICEVCTRL`"] pub type ICEVCTRL_R = crate::R<u8, ICEVCTRL_A>; impl 
ICEVCTRL_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> ICEVCTRL_A { match self.bits { 0 => ICEVCTRL_A::EVERYEDGE, 1 => ICEVCTRL_A::EVERYSECONDEDGE, 2 => ICEVCTRL_A::RISING, 3 => ICEVCTRL_A::FALLING, _ => unreachable!(), } } #[doc = "Checks if the value of the field is `EVERYEDGE`"] #[inline(always)] pub fn is_everyedge(&self) -> bool { *self == ICEVCTRL_A::EVERYEDGE } #[doc = "Checks if the value of the field is `EVERYSECONDEDGE`"] #[inline(always)] pub fn is_everysecondedge(&self) -> bool { *self == ICEVCTRL_A::EVERYSECONDEDGE } #[doc = "Checks if the value of the field is `RISING`"] #[inline(always)] pub fn is_rising(&self) -> bool { *self == ICEVCTRL_A::RISING } #[doc = "Checks if the value of the field is `FALLING`"] #[inline(always)] pub fn is_falling(&self) -> bool { *self == ICEVCTRL_A::FALLING } } #[doc = "Write proxy for field `ICEVCTRL`"] pub struct ICEVCTRL_W<'a> { w: &'a mut W, } impl<'a> ICEVCTRL_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: ICEVCTRL_A) -> &'a mut W { { self.bits(variant.into()) } } #[doc = "PRS output pulse and interrupt flag set on every capture"] #[inline(always)] pub fn everyedge(self) -> &'a mut W { self.variant(ICEVCTRL_A::EVERYEDGE) } #[doc = "PRS output pulse and interrupt flag set on every second capture"] #[inline(always)] pub fn everysecondedge(self) -> &'a mut W { self.variant(ICEVCTRL_A::EVERYSECONDEDGE) } #[doc = "PRS output pulse and interrupt flag set on rising edge only (if ICEDGE = BOTH)"] #[inline(always)] pub fn rising(self) -> &'a mut W { self.variant(ICEVCTRL_A::RISING) } #[doc = "PRS output pulse and interrupt flag set on falling edge only (if ICEDGE = BOTH)"] #[inline(always)] pub fn falling(self) -> &'a mut W { self.variant(ICEVCTRL_A::FALLING) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0x03 << 26)) | (((value as u32) & 0x03) 
<< 26); self.w } } #[doc = "Reader of field `PRSCONF`"] pub type PRSCONF_R = crate::R<bool, bool>; #[doc = "Write proxy for field `PRSCONF`"] pub struct PRSCONF_W<'a> { w: &'a mut W, } impl<'a> PRSCONF_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 28)) | (((value as u32) & 0x01) << 28); self.w } } #[doc = "Reader of field `INSEL`"] pub type INSEL_R = crate::R<bool, bool>; #[doc = "Write proxy for field `INSEL`"] pub struct INSEL_W<'a> { w: &'a mut W, } impl<'a> INSEL_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 29)) | (((value as u32) & 0x01) << 29); self.w } } #[doc = "Reader of field `FILT`"] pub type FILT_R = crate::R<bool, bool>; #[doc = "Write proxy for field `FILT`"] pub struct FILT_W<'a> { w: &'a mut W, } impl<'a> FILT_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 30)) | (((value as u32) & 0x01) << 30); self.w } } impl R { #[doc = "Bits 0:1 - CC Channel Mode"] #[inline(always)] pub fn mode(&self) -> MODE_R { MODE_R::new((self.bits & 0x03) as u8) } #[doc = "Bit 2 - Output Invert"] #[inline(always)] pub fn outinv(&self) -> OUTINV_R { 
OUTINV_R::new(((self.bits >> 2) & 0x01) != 0) } #[doc = "Bit 4 - Compare Output Initial State"] #[inline(always)] pub fn coist(&self) -> COIST_R { COIST_R::new(((self.bits >> 4) & 0x01) != 0) } #[doc = "Bits 8:9 - Compare Match Output Action"] #[inline(always)] pub fn cmoa(&self) -> CMOA_R { CMOA_R::new(((self.bits >> 8) & 0x03) as u8) } #[doc = "Bits 10:11 - Counter Overflow Output Action"] #[inline(always)] pub fn cofoa(&self) -> COFOA_R { COFOA_R::new(((self.bits >> 10) & 0x03) as u8) } #[doc = "Bits 12:13 - Counter Underflow Output Action"] #[inline(always)] pub fn cufoa(&self) -> CUFOA_R { CUFOA_R::new(((self.bits >> 12) & 0x03) as u8) } #[doc = "Bits 16:19 - Compare/Capture Channel PRS Input Channel Selection"] #[inline(always)] pub fn prssel(&self) -> PRSSEL_R { PRSSEL_R::new(((self.bits >> 16) & 0x0f) as u8) } #[doc = "Bits 24:25 - Input Capture Edge Select"] #[inline(always)] pub fn icedge(&self) -> ICEDGE_R { ICEDGE_R::new(((self.bits >> 24) & 0x03) as u8) } #[doc = "Bits 26:27 - Input Capture Event Control"] #[inline(always)] pub fn icevctrl(&self) -> ICEVCTRL_R { ICEVCTRL_R::new(((self.bits >> 26) & 0x03) as u8) } #[doc = "Bit 28 - PRS Configuration"] #[inline(always)] pub fn prsconf(&self) -> PRSCONF_R { PRSCONF_R::new(((self.bits >> 28) & 0x01) != 0) } #[doc = "Bit 29 - Input Selection"] #[inline(always)] pub fn insel(&self) -> INSEL_R { INSEL_R::new(((self.bits >> 29) & 0x01) != 0) } #[doc = "Bit 30 - Digital Filter"] #[inline(always)] pub fn filt(&self) -> FILT_R { FILT_R::new(((self.bits >> 30) & 0x01) != 0) } } impl W { #[doc = "Bits 0:1 - CC Channel Mode"] #[inline(always)] pub fn mode(&mut self) -> MODE_W { MODE_W { w: self } } #[doc = "Bit 2 - Output Invert"] #[inline(always)] pub fn outinv(&mut self) -> OUTINV_W { OUTINV_W { w: self } } #[doc = "Bit 4 - Compare Output Initial State"] #[inline(always)] pub fn coist(&mut self) -> COIST_W { COIST_W { w: self } } #[doc = "Bits 8:9 - Compare Match Output Action"] #[inline(always)] pub fn cmoa(&mut 
self) -> CMOA_W { CMOA_W { w: self } } #[doc = "Bits 10:11 - Counter Overflow Output Action"] #[inline(always)] pub fn cofoa(&mut self) -> COFOA_W { COFOA_W { w: self } } #[doc = "Bits 12:13 - Counter Underflow Output Action"] #[inline(always)] pub fn cufoa(&mut self) -> CUFOA_W { CUFOA_W { w: self } } #[doc = "Bits 16:19 - Compare/Capture Channel PRS Input Channel Selection"] #[inline(always)] pub fn prssel(&mut self) -> PRSSEL_W { PRSSEL_W { w: self } } #[doc = "Bits 24:25 - Input Capture Edge Select"] #[inline(always)] pub fn icedge(&mut self) -> ICEDGE_W { ICEDGE_W { w: self } } #[doc = "Bits 26:27 - Input Capture Event Control"] #[inline(always)] pub fn icevctrl(&mut self) -> ICEVCTRL_W { ICEVCTRL_W { w: self } } #[doc = "Bit 28 - PRS Configuration"] #[inline(always)] pub fn prsconf(&mut self) -> PRSCONF_W { PRSCONF_W { w: self } } #[doc = "Bit 29 - Input Selection"] #[inline(always)] pub fn insel(&mut self) -> INSEL_W { INSEL_W { w: self } } #[doc = "Bit 30 - Digital Filter"] #[inline(always)] pub fn filt(&mut self) -> FILT_W { FILT_W { w: self } } }
29.822368
97
0.547603
16cb1316270ec14d367e59712b46c9e87ea4c36c
3,791
use quote::quote; use proc_macro2::TokenStream; use crate::SmartStreamFn; pub fn generate_map_smartstream(func: &SmartStreamFn, has_params: bool) -> TokenStream { let user_code = &func.func; let user_fn = &func.name; let params_parsing = if has_params { quote!( use std::convert::TryInto; let params = match smartstream_input.params.try_into(){ Ok(params) => params, Err(err) => return SmartStreamInternalError::ParsingExtraParams as i32, }; ) } else { quote!() }; let function_call = if has_params { quote!( super:: #user_fn(&record, &params) ) } else { quote!( super:: #user_fn(&record) ) }; quote! { #user_code mod __system { #[no_mangle] #[allow(clippy::missing_safety_doc)] pub unsafe fn map(ptr: *mut u8, len: usize) -> i32 { use fluvio_smartstream::dataplane::smartstream::{ SmartStreamInput, SmartStreamInternalError, SmartStreamRuntimeError, SmartStreamType, SmartStreamOutput, }; use fluvio_smartstream::dataplane::core::{Encoder, Decoder}; use fluvio_smartstream::dataplane::record::{Record, RecordData}; // DECODING extern "C" { fn copy_records(putr: i32, len: i32); } let input_data = Vec::from_raw_parts(ptr, len, len); let mut smartstream_input = SmartStreamInput::default(); if let Err(_err) = Decoder::decode(&mut smartstream_input, &mut std::io::Cursor::new(input_data), 0) { return SmartStreamInternalError::DecodingBaseInput as i32; } let records_input = smartstream_input.record_data; let mut records: Vec<Record> = vec![]; if let Err(_err) = Decoder::decode(&mut records, &mut std::io::Cursor::new(records_input), 0) { return SmartStreamInternalError::DecodingRecords as i32; }; #params_parsing // PROCESSING let mut output = SmartStreamOutput { successes: Vec::with_capacity(records.len()), error: None, }; for mut record in records.into_iter() { let result = #function_call; match result { Ok((maybe_key, value)) => { record.key = maybe_key; record.value = value; output.successes.push(record); } Err(err) => { let error = SmartStreamRuntimeError::new( &record, 
smartstream_input.base_offset, SmartStreamType::Map, err, ); output.error = Some(error); break; } } } // ENCODING let mut out = vec![]; if let Err(_) = Encoder::encode(&mut output, &mut out, 0) { return SmartStreamInternalError::EncodingOutput as i32; } let out_len = out.len(); let ptr = out.as_mut_ptr(); std::mem::forget(out); copy_records(ptr as i32, out_len as i32); output.successes.len() as i32 } } } }
35.101852
118
0.466631
500c9aa760afd4c8a34a6780c5ee662d4fb0662f
1,695
// Copyright (c) The Diem Core Contributors // SPDX-License-Identifier: Apache-2.0 use diem_metrics::{register_histogram_vec, HistogramVec}; use once_cell::sync::Lazy; use warp::log::{custom, Info, Log}; static HISTOGRAM: Lazy<HistogramVec> = Lazy::new(|| { register_histogram_vec!( "diem_api_requests", "API requests latency grouped by method, operation_id and status", &["method", "operation_id", "status"] ) .unwrap() }); static RESPONSE_STATUS: Lazy<HistogramVec> = Lazy::new(|| { register_histogram_vec!( "diem_api_response_status", "API requests latency grouped by status code only", &["status"] ) .unwrap() }); // Record metrics by method, operation_id and status. // The operation_id is the id for the request handler. // Should use same `operationId` defined in `openapi.yaml` whenever possible. pub fn metrics(operation_id: &'static str) -> Log<impl Fn(Info) + Copy> { let func = move |info: Info| { HISTOGRAM .with_label_values(&[ info.method().to_string().as_str(), operation_id, info.status().as_u16().to_string().as_str(), ]) .observe(info.elapsed().as_secs_f64()); }; custom(func) } // Record metrics by response status. // This is for understanding the overview of responses in case server // is overloaded by unknown reason. pub fn status_metrics() -> Log<impl Fn(Info) + Copy> { let func = move |info: Info| { RESPONSE_STATUS .with_label_values(&[info.status().as_u16().to_string().as_str()]) .observe(info.elapsed().as_secs_f64()); }; custom(func) }
31.388889
78
0.633038
2283b9b7e602b83f0bb2d3cd8b647de4c4144340
7,641
use crate::error::{GeozeroError, Result};
use crate::{GeomProcessor, GeozeroGeometry};
use gdal::vector::Geometry;
use gdal_sys::{self, OGRwkbGeometryType};

impl GeozeroGeometry for Geometry {
    fn process_geom<P: GeomProcessor>(&self, processor: &mut P) -> Result<()> {
        process_geom(self, processor)
    }
}

/// Process GDAL/OGR geometry.
///
/// Walks the geometry and emits begin/end + coordinate events on `processor`.
pub fn process_geom<P: GeomProcessor>(geo: &Geometry, processor: &mut P) -> Result<()> {
    // The top-level geometry is emitted with collection index 0.
    process_geom_n(geo, 0, processor)
}

/// Dispatch on the (2D-normalized) geometry type and emit the corresponding
/// processor events. `idx` is the position of `geo` within its parent
/// collection. Returns `GeometryFormat` for unsupported types or for
/// multi-geometries whose members don't match the declared member type.
fn process_geom_n<P: GeomProcessor>(geo: &Geometry, idx: usize, processor: &mut P) -> Result<()> {
    match type2d(geo.geometry_type()) {
        OGRwkbGeometryType::wkbPoint => {
            processor.point_begin(idx)?;
            process_point(geo, 0, processor)?;
            processor.point_end(idx)?;
        }
        OGRwkbGeometryType::wkbMultiPoint => {
            let n_pts = geo.geometry_count();
            processor.multipoint_begin(n_pts, idx)?;
            for i in 0..n_pts {
                // NOTE(review): `get_unowned_geometry` presumably returns a
                // non-owning handle whose validity is tied to `geo` — confirm
                // against the pinned `gdal` crate version.
                let pt = unsafe { geo.get_unowned_geometry(i) };
                // Reject members that are not points.
                if type2d(pt.geometry_type()) != OGRwkbGeometryType::wkbPoint {
                    return Err(GeozeroError::GeometryFormat);
                }
                process_point(&pt, i, processor)?;
            }
            processor.multipoint_end(idx)?;
        }
        OGRwkbGeometryType::wkbLineString => {
            // Standalone linestring: emitted tagged.
            process_linestring(geo, true, idx, processor)?;
        }
        OGRwkbGeometryType::wkbMultiLineString => {
            let n_lines = geo.geometry_count();
            processor.multilinestring_begin(n_lines, idx)?;
            for i in 0..n_lines {
                let line = unsafe { geo.get_unowned_geometry(i) };
                if type2d(line.geometry_type()) != OGRwkbGeometryType::wkbLineString {
                    return Err(GeozeroError::GeometryFormat);
                }
                // Members of a multi-geometry are untagged (tagged = false).
                process_linestring(&line, false, i, processor)?;
            }
            processor.multilinestring_end(idx)?;
        }
        OGRwkbGeometryType::wkbPolygon => {
            process_polygon(geo, true, idx, processor)?;
        }
        OGRwkbGeometryType::wkbMultiPolygon => {
            let n_polys = geo.geometry_count();
            processor.multipolygon_begin(n_polys, idx)?;
            for i in 0..n_polys {
                let poly = unsafe { geo.get_unowned_geometry(i) };
                if type2d(poly.geometry_type()) != OGRwkbGeometryType::wkbPolygon {
                    return Err(GeozeroError::GeometryFormat);
                }
                process_polygon(&poly, false, i, processor)?;
            }
            processor.multipolygon_end(idx)?;
        }
        OGRwkbGeometryType::wkbGeometryCollection => {
            let n_geoms = geo.geometry_count();
            processor.geometrycollection_begin(n_geoms, idx)?;
            for i in 0..n_geoms {
                let g = unsafe { geo.get_unowned_geometry(i) };
                // Collections may nest arbitrary geometry types: recurse.
                process_geom_n(&g, i, processor)?;
            }
            processor.geometrycollection_end(idx)?;
        }
        // Anything else (curves, surfaces, ...) is unsupported.
        _ => return Err(GeozeroError::GeometryFormat),
    }
    Ok(())
}

/// Map 2.5D (`*25D`) OGR geometry type codes to their 2D base type so the
/// dispatch above only needs the base variants. Other codes pass through
/// unchanged (and are rejected by the caller's catch-all arm).
fn type2d(wkb_type: OGRwkbGeometryType::Type) -> OGRwkbGeometryType::Type {
    match wkb_type {
        OGRwkbGeometryType::wkbPoint | OGRwkbGeometryType::wkbPoint25D => {
            OGRwkbGeometryType::wkbPoint
        }
        OGRwkbGeometryType::wkbMultiPoint | OGRwkbGeometryType::wkbMultiPoint25D => {
            OGRwkbGeometryType::wkbMultiPoint
        }
        OGRwkbGeometryType::wkbLineString | OGRwkbGeometryType::wkbLineString25D => {
            OGRwkbGeometryType::wkbLineString
        }
        OGRwkbGeometryType::wkbMultiLineString | OGRwkbGeometryType::wkbMultiLineString25D => {
            OGRwkbGeometryType::wkbMultiLineString
        }
        OGRwkbGeometryType::wkbPolygon | OGRwkbGeometryType::wkbPolygon25D => {
            OGRwkbGeometryType::wkbPolygon
        }
        OGRwkbGeometryType::wkbMultiPolygon | OGRwkbGeometryType::wkbMultiPolygon25D => {
            OGRwkbGeometryType::wkbMultiPolygon
        }
        other => other,
    }
}

/// Emit a single coordinate. Whether z is forwarded depends on the
/// processor's requested output dimensions, not on the input geometry.
fn process_point<P: GeomProcessor>(geo: &Geometry, idx: usize, processor: &mut P) -> Result<()> {
    let multi = processor.dimensions().z;
    // get_point always yields (x, y, z); z is dropped for 2D output.
    let (x, y, z) = geo.get_point(0);
    if multi {
        processor.coordinate(x, y, Some(z), None, None, None, idx)?;
    } else {
        processor.xy(x, y, idx)?;
    }
    Ok(())
}

/// Emit a linestring (or polygon ring). `tagged` distinguishes a standalone
/// LineString from an untagged member of a multi-geometry or a ring.
fn process_linestring<P: GeomProcessor>(
    geo: &Geometry,
    tagged: bool,
    idx: usize,
    processor: &mut P,
) -> Result<()> {
    // Point count via the raw C API.
    // SAFETY (presumed): `geo.c_geometry()` yields a live OGR handle valid
    // for this call — confirm against the gdal crate's contract.
    let length = unsafe { gdal_sys::OGR_G_GetPointCount(geo.c_geometry()) } as usize;
    processor.linestring_begin(tagged, length, idx)?;
    let multi = processor.dimensions().z;
    for i in 0..length {
        let (x, y, z) = geo.get_point(i as i32);
        if multi {
            processor.coordinate(x, y, Some(z), None, None, None, i)?;
        } else {
            processor.xy(x, y, i)?;
        }
    }
    processor.linestring_end(tagged, idx)
}

/// Emit a polygon: every sub-geometry is treated as a ring and processed as
/// an untagged linestring; non-linestring sub-geometries are rejected.
fn process_polygon<P: GeomProcessor>(
    geo: &Geometry,
    tagged: bool,
    idx: usize,
    processor: &mut P,
) -> Result<()> {
    let ring_count = geo.geometry_count();
    processor.polygon_begin(tagged, ring_count, idx)?;
    for i in 0..ring_count {
        let ring = unsafe { geo.get_unowned_geometry(i) };
        if type2d(ring.geometry_type()) != OGRwkbGeometryType::wkbLineString {
            return Err(GeozeroError::GeometryFormat);
        }
        process_linestring(&ring, false, i, processor)?;
    }
    processor.polygon_end(tagged, idx)
}

#[cfg(test)]
#[cfg(feature = "with-wkt")]
mod test {
    use super::*;
    use crate::wkt::WktWriter;
    use crate::{CoordDimensions, ToWkt};

    #[test]
    fn point() {
        let wkt = "POINT(1 1)";
        let geo = Geometry::from_wkt(wkt).unwrap();

        // Round-trip through the event stream into a WKT writer.
        let mut wkt_data: Vec<u8> = Vec::new();
        assert!(process_geom(&geo, &mut WktWriter::new(&mut wkt_data)).is_ok());
        assert_eq!(std::str::from_utf8(&wkt_data).unwrap(), wkt);
    }

    #[test]
    fn multipoint() {
        let wkt = "MULTIPOINT(1 1,2 2)";
        let geo = Geometry::from_wkt(wkt).unwrap();
        assert_eq!(geo.to_wkt().unwrap(), wkt);
    }

    #[test]
    fn line() {
        let wkt = "LINESTRING(1 1,2 2)";
        let geo = Geometry::from_wkt(wkt).unwrap();
        assert_eq!(geo.to_wkt().unwrap(), wkt);
    }

    #[test]
    fn line_3d() {
        let wkt = "LINESTRING(1 1 10,2 2 20)";
        let geo = Geometry::from_wkt(wkt).unwrap();
        // z is only emitted when explicitly requested via dimensions.
        assert_eq!(
            geo.to_wkt_ndim(CoordDimensions {
                z: true,
                m: false,
                t: false,
                tm: false
            })
            .unwrap(),
            wkt
        );
    }

    #[test]
    fn multiline() {
        let wkt = "MULTILINESTRING((1 1,2 2),(3 3,4 4))";
        let geo = Geometry::from_wkt(wkt).unwrap();
        assert_eq!(geo.to_wkt().unwrap(), wkt);
    }

    #[test]
    fn polygon() {
        let wkt = "POLYGON((0 0,0 3,3 3,3 0,0 0),(0.2 0.2,0.2 2,2 2,2 0.2,0.2 0.2))";
        let geo = Geometry::from_wkt(wkt).unwrap();
        assert_eq!(geo.to_wkt().unwrap(), wkt);
    }

    #[test]
    fn multipolygon() {
        let wkt = "MULTIPOLYGON(((0 0,0 1,1 1,1 0,0 0)))";
        let geo = Geometry::from_wkt(wkt).unwrap();
        assert_eq!(geo.to_wkt().unwrap(), wkt);
    }

    #[test]
    fn geometry_collection() {
        let wkt = "GEOMETRYCOLLECTION(POINT(1 1),LINESTRING(1 1,2 2))";
        let geo = Geometry::from_wkt(wkt).unwrap();
        assert_eq!(geo.to_wkt().unwrap(), wkt);
    }
}
33.366812
98
0.57126
3a810836e6d31d12d02fe35c9f5086a1f76d68f7
1,527
#[doc = r"Value read from the register"] pub struct R { bits: u32, } #[doc = r"Value to write to the register"] pub struct W { bits: u32, } impl super::FMPRE4 { #[doc = r"Modifies the contents of the register"] #[inline(always)] pub fn modify<F>(&self, f: F) where for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W, { let bits = self.register.get(); self.register.set(f(&R { bits }, &mut W { bits }).bits); } #[doc = r"Reads the contents of the register"] #[inline(always)] pub fn read(&self) -> R { R { bits: self.register.get(), } } #[doc = r"Writes to the register"] #[inline(always)] pub fn write<F>(&self, f: F) where F: FnOnce(&mut W) -> &mut W, { self.register.set( f(&mut W { bits: Self::reset_value(), }) .bits, ); } #[doc = r"Reset value of the register"] #[inline(always)] pub const fn reset_value() -> u32 { 0 } #[doc = r"Writes the reset value to the register"] #[inline(always)] pub fn reset(&self) { self.register.set(Self::reset_value()) } } impl R { #[doc = r"Value of the register as raw bits"] #[inline(always)] pub fn bits(&self) -> u32 { self.bits } } impl W { #[doc = r"Writes raw bits to the register"] #[inline(always)] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.bits = bits; self } }
23.492308
64
0.507531
0aa1426a36abf38b349b98044699601e7295b2e3
2,165
//! The point of this crate is to be able to have enough different "kinds" of
//! documentation generated so we can test each different features.

// NOTE(review): this crate is a rustdoc test fixture — the doc comments,
// code fences and attributes below are themselves the data under test, so
// review annotations here are plain `//` comments, which rustdoc ignores
// and therefore cannot perturb the generated documentation.

#![crate_name = "test_docs"]
#![feature(doc_keyword)]
#![feature(doc_cfg)]

use std::convert::AsRef;
use std::fmt;

// Exercises the three doc-test fence kinds: plain, compile_fail, ignore.
/// Basic function with some code examples:
///
/// ```
/// println!("nothing fancy");
/// ```
///
/// A failing to compile one:
///
/// ```compile_fail
/// println!("where did my argument {} go? :'(");
/// ```
///
/// An ignored one:
///
/// ```ignore (it's a test)
/// Let's say I'm just some text will ya?
/// ```
///
/// An inlined `code`!
pub fn foo() {}

/// Just a normal struct.
pub struct Foo;

impl Foo {
    #[must_use]
    pub fn must_use(&self) -> bool {
        true
    }
}

impl AsRef<str> for Foo {
    fn as_ref(&self) -> &str {
        "hello"
    }
}

/// Just a normal enum.
#[doc(alias = "ThisIsAnAlias")]
pub enum WhoLetTheDogOut {
    /// Woof!
    Woof,
    /// Meoooooooow...
    Meow,
}

/// Who doesn't love to wrap a `format!` call?
pub fn some_more_function<T: fmt::Debug>(t: &T) -> String {
    format!("{:?}", t)
}

// NOTE(review): the methods below are deliberately declared out of numeric
// order and only partially documented — presumably to exercise rustdoc's
// item ordering/rendering; do not "tidy" them.
/// Woohoo! A trait!
pub trait AnotherOne {
    /// Some func 3.
    fn func3();

    /// Some func 1.
    fn func1();

    fn another();
    fn why_not();

    /// Some func 2.
    fn func2();

    fn hello();
}

/// ```compile_fail
/// whatever
/// ```
///
/// Check for "i" signs in lists!
///
/// 1. elem 1
/// 2. test 1
///    ```compile_fail
///    fn foo() {}
///    ```
/// 3. elem 3
/// 4. ```ignore (it's a test)
///    fn foo() {}
///    ```
/// 5. elem 5
///
/// Final one:
///
/// ```ignore (still a test)
/// let x = 12;
/// ```
pub fn check_list_code_block() {}

/// a thing with a label
#[deprecated(since = "1.0.0", note = "text why this deprecated")]
#[doc(cfg(unix))]
pub fn replaced_function() {}

/// Some doc with `code`!
pub enum AnEnum {
    WithVariants { and: usize, sub: usize, variants: usize },
}

#[doc(keyword = "CookieMonster")]
/// Some keyword.
pub mod keyword {}

/// Just some type alias.
pub type SomeType = u32;

// Bulk-content module: constants are generated at build time into OUT_DIR.
pub mod huge_amount_of_consts {
    include!(concat!(env!("OUT_DIR"), "/huge_amount_of_consts.rs"));
}
17.601626
77
0.561201
0901450f71f7fc8a8a87097ed2d596f997d507ce
724
use crate::common::*; #[derive(StructOpt, Debug)] #[structopt( name = "checker", about = "a crates.io crate name availability checker" )] pub struct Opt { /// Crate name(s) #[structopt(short = "c", long = "check", help = "Check crate name availability")] names: Vec<String>, /// Output file, defaults to stdout #[structopt(short = "o", long = "output", help = "Output file")] output: Option<PathBuf>, } impl Opt { pub fn run(&self) -> Result<()> { let mut table = Table::new(); for name in &self.names { table.add_row(check(name)?)?; } if let Some(output) = &self.output { write(&output, table.to_string())?; return Ok(()); } table.print(); Ok(()) } }
20.685714
83
0.584254
d7e23081c18eff9a5f157b00be7b8a0b9bce4927
37,758
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. pub fn serialize_structure_crate_input_add_layer_version_permission_input( object: &mut aws_smithy_json::serialize::JsonObjectWriter, input: &crate::input::AddLayerVersionPermissionInput, ) -> Result<(), aws_smithy_http::operation::SerializationError> { if let Some(var_1) = &input.action { object.key("Action").string(var_1); } if let Some(var_2) = &input.organization_id { object.key("OrganizationId").string(var_2); } if let Some(var_3) = &input.principal { object.key("Principal").string(var_3); } if let Some(var_4) = &input.statement_id { object.key("StatementId").string(var_4); } Ok(()) } pub fn serialize_structure_crate_input_add_permission_input( object: &mut aws_smithy_json::serialize::JsonObjectWriter, input: &crate::input::AddPermissionInput, ) -> Result<(), aws_smithy_http::operation::SerializationError> { if let Some(var_5) = &input.action { object.key("Action").string(var_5); } if let Some(var_6) = &input.event_source_token { object.key("EventSourceToken").string(var_6); } if let Some(var_7) = &input.principal { object.key("Principal").string(var_7); } if let Some(var_8) = &input.revision_id { object.key("RevisionId").string(var_8); } if let Some(var_9) = &input.source_account { object.key("SourceAccount").string(var_9); } if let Some(var_10) = &input.source_arn { object.key("SourceArn").string(var_10); } if let Some(var_11) = &input.statement_id { object.key("StatementId").string(var_11); } Ok(()) } pub fn serialize_structure_crate_input_create_alias_input( object: &mut aws_smithy_json::serialize::JsonObjectWriter, input: &crate::input::CreateAliasInput, ) -> Result<(), aws_smithy_http::operation::SerializationError> { if let Some(var_12) = &input.description { object.key("Description").string(var_12); } if let Some(var_13) = &input.function_version { object.key("FunctionVersion").string(var_13); } if let Some(var_14) = &input.name { object.key("Name").string(var_14); } if 
let Some(var_15) = &input.routing_config { let mut object_16 = object.key("RoutingConfig").start_object(); crate::json_ser::serialize_structure_crate_model_alias_routing_configuration( &mut object_16, var_15, )?; object_16.finish(); } Ok(()) } pub fn serialize_structure_crate_input_create_code_signing_config_input( object: &mut aws_smithy_json::serialize::JsonObjectWriter, input: &crate::input::CreateCodeSigningConfigInput, ) -> Result<(), aws_smithy_http::operation::SerializationError> { if let Some(var_17) = &input.allowed_publishers { let mut object_18 = object.key("AllowedPublishers").start_object(); crate::json_ser::serialize_structure_crate_model_allowed_publishers( &mut object_18, var_17, )?; object_18.finish(); } if let Some(var_19) = &input.code_signing_policies { let mut object_20 = object.key("CodeSigningPolicies").start_object(); crate::json_ser::serialize_structure_crate_model_code_signing_policies( &mut object_20, var_19, )?; object_20.finish(); } if let Some(var_21) = &input.description { object.key("Description").string(var_21); } Ok(()) } pub fn serialize_structure_crate_input_create_event_source_mapping_input( object: &mut aws_smithy_json::serialize::JsonObjectWriter, input: &crate::input::CreateEventSourceMappingInput, ) -> Result<(), aws_smithy_http::operation::SerializationError> { if let Some(var_22) = &input.batch_size { object.key("BatchSize").number( #[allow(clippy::useless_conversion)] aws_smithy_types::Number::NegInt((*var_22).into()), ); } if let Some(var_23) = &input.bisect_batch_on_function_error { object.key("BisectBatchOnFunctionError").boolean(*var_23); } if let Some(var_24) = &input.destination_config { let mut object_25 = object.key("DestinationConfig").start_object(); crate::json_ser::serialize_structure_crate_model_destination_config( &mut object_25, var_24, )?; object_25.finish(); } if let Some(var_26) = &input.enabled { object.key("Enabled").boolean(*var_26); } if let Some(var_27) = &input.event_source_arn { 
object.key("EventSourceArn").string(var_27); } if let Some(var_28) = &input.function_name { object.key("FunctionName").string(var_28); } if let Some(var_29) = &input.function_response_types { let mut array_30 = object.key("FunctionResponseTypes").start_array(); for item_31 in var_29 { { array_30.value().string(item_31.as_str()); } } array_30.finish(); } if let Some(var_32) = &input.maximum_batching_window_in_seconds { object.key("MaximumBatchingWindowInSeconds").number( #[allow(clippy::useless_conversion)] aws_smithy_types::Number::NegInt((*var_32).into()), ); } if let Some(var_33) = &input.maximum_record_age_in_seconds { object.key("MaximumRecordAgeInSeconds").number( #[allow(clippy::useless_conversion)] aws_smithy_types::Number::NegInt((*var_33).into()), ); } if let Some(var_34) = &input.maximum_retry_attempts { object.key("MaximumRetryAttempts").number( #[allow(clippy::useless_conversion)] aws_smithy_types::Number::NegInt((*var_34).into()), ); } if let Some(var_35) = &input.parallelization_factor { object.key("ParallelizationFactor").number( #[allow(clippy::useless_conversion)] aws_smithy_types::Number::NegInt((*var_35).into()), ); } if let Some(var_36) = &input.queues { let mut array_37 = object.key("Queues").start_array(); for item_38 in var_36 { { array_37.value().string(item_38); } } array_37.finish(); } if let Some(var_39) = &input.self_managed_event_source { let mut object_40 = object.key("SelfManagedEventSource").start_object(); crate::json_ser::serialize_structure_crate_model_self_managed_event_source( &mut object_40, var_39, )?; object_40.finish(); } if let Some(var_41) = &input.source_access_configurations { let mut array_42 = object.key("SourceAccessConfigurations").start_array(); for item_43 in var_41 { { let mut object_44 = array_42.value().start_object(); crate::json_ser::serialize_structure_crate_model_source_access_configuration( &mut object_44, item_43, )?; object_44.finish(); } } array_42.finish(); } if let Some(var_45) = 
&input.starting_position { object.key("StartingPosition").string(var_45.as_str()); } if let Some(var_46) = &input.starting_position_timestamp { object .key("StartingPositionTimestamp") .instant(var_46, aws_smithy_types::instant::Format::EpochSeconds); } if let Some(var_47) = &input.topics { let mut array_48 = object.key("Topics").start_array(); for item_49 in var_47 { { array_48.value().string(item_49); } } array_48.finish(); } if let Some(var_50) = &input.tumbling_window_in_seconds { object.key("TumblingWindowInSeconds").number( #[allow(clippy::useless_conversion)] aws_smithy_types::Number::NegInt((*var_50).into()), ); } Ok(()) } pub fn serialize_structure_crate_input_create_function_input( object: &mut aws_smithy_json::serialize::JsonObjectWriter, input: &crate::input::CreateFunctionInput, ) -> Result<(), aws_smithy_http::operation::SerializationError> { if let Some(var_51) = &input.architectures { let mut array_52 = object.key("Architectures").start_array(); for item_53 in var_51 { { array_52.value().string(item_53.as_str()); } } array_52.finish(); } if let Some(var_54) = &input.code { let mut object_55 = object.key("Code").start_object(); crate::json_ser::serialize_structure_crate_model_function_code(&mut object_55, var_54)?; object_55.finish(); } if let Some(var_56) = &input.code_signing_config_arn { object.key("CodeSigningConfigArn").string(var_56); } if let Some(var_57) = &input.dead_letter_config { let mut object_58 = object.key("DeadLetterConfig").start_object(); crate::json_ser::serialize_structure_crate_model_dead_letter_config( &mut object_58, var_57, )?; object_58.finish(); } if let Some(var_59) = &input.description { object.key("Description").string(var_59); } if let Some(var_60) = &input.environment { let mut object_61 = object.key("Environment").start_object(); crate::json_ser::serialize_structure_crate_model_environment(&mut object_61, var_60)?; object_61.finish(); } if let Some(var_62) = &input.file_system_configs { let mut array_63 = 
object.key("FileSystemConfigs").start_array(); for item_64 in var_62 { { let mut object_65 = array_63.value().start_object(); crate::json_ser::serialize_structure_crate_model_file_system_config( &mut object_65, item_64, )?; object_65.finish(); } } array_63.finish(); } if let Some(var_66) = &input.function_name { object.key("FunctionName").string(var_66); } if let Some(var_67) = &input.handler { object.key("Handler").string(var_67); } if let Some(var_68) = &input.image_config { let mut object_69 = object.key("ImageConfig").start_object(); crate::json_ser::serialize_structure_crate_model_image_config(&mut object_69, var_68)?; object_69.finish(); } if let Some(var_70) = &input.kms_key_arn { object.key("KMSKeyArn").string(var_70); } if let Some(var_71) = &input.layers { let mut array_72 = object.key("Layers").start_array(); for item_73 in var_71 { { array_72.value().string(item_73); } } array_72.finish(); } if let Some(var_74) = &input.memory_size { object.key("MemorySize").number( #[allow(clippy::useless_conversion)] aws_smithy_types::Number::NegInt((*var_74).into()), ); } if let Some(var_75) = &input.package_type { object.key("PackageType").string(var_75.as_str()); } if input.publish { object.key("Publish").boolean(input.publish); } if let Some(var_76) = &input.role { object.key("Role").string(var_76); } if let Some(var_77) = &input.runtime { object.key("Runtime").string(var_77.as_str()); } if let Some(var_78) = &input.tags { let mut object_79 = object.key("Tags").start_object(); for (key_80, value_81) in var_78 { { object_79.key(key_80).string(value_81); } } object_79.finish(); } if let Some(var_82) = &input.timeout { object.key("Timeout").number( #[allow(clippy::useless_conversion)] aws_smithy_types::Number::NegInt((*var_82).into()), ); } if let Some(var_83) = &input.tracing_config { let mut object_84 = object.key("TracingConfig").start_object(); crate::json_ser::serialize_structure_crate_model_tracing_config(&mut object_84, var_83)?; object_84.finish(); } if let 
Some(var_85) = &input.vpc_config { let mut object_86 = object.key("VpcConfig").start_object(); crate::json_ser::serialize_structure_crate_model_vpc_config(&mut object_86, var_85)?; object_86.finish(); } Ok(()) } pub fn serialize_structure_crate_input_publish_layer_version_input( object: &mut aws_smithy_json::serialize::JsonObjectWriter, input: &crate::input::PublishLayerVersionInput, ) -> Result<(), aws_smithy_http::operation::SerializationError> { if let Some(var_87) = &input.compatible_architectures { let mut array_88 = object.key("CompatibleArchitectures").start_array(); for item_89 in var_87 { { array_88.value().string(item_89.as_str()); } } array_88.finish(); } if let Some(var_90) = &input.compatible_runtimes { let mut array_91 = object.key("CompatibleRuntimes").start_array(); for item_92 in var_90 { { array_91.value().string(item_92.as_str()); } } array_91.finish(); } if let Some(var_93) = &input.content { let mut object_94 = object.key("Content").start_object(); crate::json_ser::serialize_structure_crate_model_layer_version_content_input( &mut object_94, var_93, )?; object_94.finish(); } if let Some(var_95) = &input.description { object.key("Description").string(var_95); } if let Some(var_96) = &input.license_info { object.key("LicenseInfo").string(var_96); } Ok(()) } pub fn serialize_structure_crate_input_publish_version_input( object: &mut aws_smithy_json::serialize::JsonObjectWriter, input: &crate::input::PublishVersionInput, ) -> Result<(), aws_smithy_http::operation::SerializationError> { if let Some(var_97) = &input.code_sha256 { object.key("CodeSha256").string(var_97); } if let Some(var_98) = &input.description { object.key("Description").string(var_98); } if let Some(var_99) = &input.revision_id { object.key("RevisionId").string(var_99); } Ok(()) } pub fn serialize_structure_crate_input_put_function_code_signing_config_input( object: &mut aws_smithy_json::serialize::JsonObjectWriter, input: &crate::input::PutFunctionCodeSigningConfigInput, ) -> 
Result<(), aws_smithy_http::operation::SerializationError> { if let Some(var_100) = &input.code_signing_config_arn { object.key("CodeSigningConfigArn").string(var_100); } Ok(()) } pub fn serialize_structure_crate_input_put_function_concurrency_input( object: &mut aws_smithy_json::serialize::JsonObjectWriter, input: &crate::input::PutFunctionConcurrencyInput, ) -> Result<(), aws_smithy_http::operation::SerializationError> { if let Some(var_101) = &input.reserved_concurrent_executions { object.key("ReservedConcurrentExecutions").number( #[allow(clippy::useless_conversion)] aws_smithy_types::Number::NegInt((*var_101).into()), ); } Ok(()) } pub fn serialize_structure_crate_input_put_function_event_invoke_config_input( object: &mut aws_smithy_json::serialize::JsonObjectWriter, input: &crate::input::PutFunctionEventInvokeConfigInput, ) -> Result<(), aws_smithy_http::operation::SerializationError> { if let Some(var_102) = &input.destination_config { let mut object_103 = object.key("DestinationConfig").start_object(); crate::json_ser::serialize_structure_crate_model_destination_config( &mut object_103, var_102, )?; object_103.finish(); } if let Some(var_104) = &input.maximum_event_age_in_seconds { object.key("MaximumEventAgeInSeconds").number( #[allow(clippy::useless_conversion)] aws_smithy_types::Number::NegInt((*var_104).into()), ); } if let Some(var_105) = &input.maximum_retry_attempts { object.key("MaximumRetryAttempts").number( #[allow(clippy::useless_conversion)] aws_smithy_types::Number::NegInt((*var_105).into()), ); } Ok(()) } pub fn serialize_structure_crate_input_put_provisioned_concurrency_config_input( object: &mut aws_smithy_json::serialize::JsonObjectWriter, input: &crate::input::PutProvisionedConcurrencyConfigInput, ) -> Result<(), aws_smithy_http::operation::SerializationError> { if let Some(var_106) = &input.provisioned_concurrent_executions { object.key("ProvisionedConcurrentExecutions").number( #[allow(clippy::useless_conversion)] 
aws_smithy_types::Number::NegInt((*var_106).into()), ); } Ok(()) } pub fn serialize_structure_crate_input_tag_resource_input( object: &mut aws_smithy_json::serialize::JsonObjectWriter, input: &crate::input::TagResourceInput, ) -> Result<(), aws_smithy_http::operation::SerializationError> { if let Some(var_107) = &input.tags { let mut object_108 = object.key("Tags").start_object(); for (key_109, value_110) in var_107 { { object_108.key(key_109).string(value_110); } } object_108.finish(); } Ok(()) } pub fn serialize_structure_crate_input_update_alias_input( object: &mut aws_smithy_json::serialize::JsonObjectWriter, input: &crate::input::UpdateAliasInput, ) -> Result<(), aws_smithy_http::operation::SerializationError> { if let Some(var_111) = &input.description { object.key("Description").string(var_111); } if let Some(var_112) = &input.function_version { object.key("FunctionVersion").string(var_112); } if let Some(var_113) = &input.revision_id { object.key("RevisionId").string(var_113); } if let Some(var_114) = &input.routing_config { let mut object_115 = object.key("RoutingConfig").start_object(); crate::json_ser::serialize_structure_crate_model_alias_routing_configuration( &mut object_115, var_114, )?; object_115.finish(); } Ok(()) } pub fn serialize_structure_crate_input_update_code_signing_config_input( object: &mut aws_smithy_json::serialize::JsonObjectWriter, input: &crate::input::UpdateCodeSigningConfigInput, ) -> Result<(), aws_smithy_http::operation::SerializationError> { if let Some(var_116) = &input.allowed_publishers { let mut object_117 = object.key("AllowedPublishers").start_object(); crate::json_ser::serialize_structure_crate_model_allowed_publishers( &mut object_117, var_116, )?; object_117.finish(); } if let Some(var_118) = &input.code_signing_policies { let mut object_119 = object.key("CodeSigningPolicies").start_object(); crate::json_ser::serialize_structure_crate_model_code_signing_policies( &mut object_119, var_118, )?; object_119.finish(); } if 
let Some(var_120) = &input.description { object.key("Description").string(var_120); } Ok(()) } pub fn serialize_structure_crate_input_update_event_source_mapping_input( object: &mut aws_smithy_json::serialize::JsonObjectWriter, input: &crate::input::UpdateEventSourceMappingInput, ) -> Result<(), aws_smithy_http::operation::SerializationError> { if let Some(var_121) = &input.batch_size { object.key("BatchSize").number( #[allow(clippy::useless_conversion)] aws_smithy_types::Number::NegInt((*var_121).into()), ); } if let Some(var_122) = &input.bisect_batch_on_function_error { object.key("BisectBatchOnFunctionError").boolean(*var_122); } if let Some(var_123) = &input.destination_config { let mut object_124 = object.key("DestinationConfig").start_object(); crate::json_ser::serialize_structure_crate_model_destination_config( &mut object_124, var_123, )?; object_124.finish(); } if let Some(var_125) = &input.enabled { object.key("Enabled").boolean(*var_125); } if let Some(var_126) = &input.function_name { object.key("FunctionName").string(var_126); } if let Some(var_127) = &input.function_response_types { let mut array_128 = object.key("FunctionResponseTypes").start_array(); for item_129 in var_127 { { array_128.value().string(item_129.as_str()); } } array_128.finish(); } if let Some(var_130) = &input.maximum_batching_window_in_seconds { object.key("MaximumBatchingWindowInSeconds").number( #[allow(clippy::useless_conversion)] aws_smithy_types::Number::NegInt((*var_130).into()), ); } if let Some(var_131) = &input.maximum_record_age_in_seconds { object.key("MaximumRecordAgeInSeconds").number( #[allow(clippy::useless_conversion)] aws_smithy_types::Number::NegInt((*var_131).into()), ); } if let Some(var_132) = &input.maximum_retry_attempts { object.key("MaximumRetryAttempts").number( #[allow(clippy::useless_conversion)] aws_smithy_types::Number::NegInt((*var_132).into()), ); } if let Some(var_133) = &input.parallelization_factor { object.key("ParallelizationFactor").number( 
#[allow(clippy::useless_conversion)] aws_smithy_types::Number::NegInt((*var_133).into()), ); } if let Some(var_134) = &input.source_access_configurations { let mut array_135 = object.key("SourceAccessConfigurations").start_array(); for item_136 in var_134 { { let mut object_137 = array_135.value().start_object(); crate::json_ser::serialize_structure_crate_model_source_access_configuration( &mut object_137, item_136, )?; object_137.finish(); } } array_135.finish(); } if let Some(var_138) = &input.tumbling_window_in_seconds { object.key("TumblingWindowInSeconds").number( #[allow(clippy::useless_conversion)] aws_smithy_types::Number::NegInt((*var_138).into()), ); } Ok(()) } pub fn serialize_structure_crate_input_update_function_code_input( object: &mut aws_smithy_json::serialize::JsonObjectWriter, input: &crate::input::UpdateFunctionCodeInput, ) -> Result<(), aws_smithy_http::operation::SerializationError> { if let Some(var_139) = &input.architectures { let mut array_140 = object.key("Architectures").start_array(); for item_141 in var_139 { { array_140.value().string(item_141.as_str()); } } array_140.finish(); } if input.dry_run { object.key("DryRun").boolean(input.dry_run); } if let Some(var_142) = &input.image_uri { object.key("ImageUri").string(var_142); } if input.publish { object.key("Publish").boolean(input.publish); } if let Some(var_143) = &input.revision_id { object.key("RevisionId").string(var_143); } if let Some(var_144) = &input.s3_bucket { object.key("S3Bucket").string(var_144); } if let Some(var_145) = &input.s3_key { object.key("S3Key").string(var_145); } if let Some(var_146) = &input.s3_object_version { object.key("S3ObjectVersion").string(var_146); } if let Some(var_147) = &input.zip_file { object .key("ZipFile") .string_unchecked(&aws_smithy_types::base64::encode(var_147)); } Ok(()) } pub fn serialize_structure_crate_input_update_function_configuration_input( object: &mut aws_smithy_json::serialize::JsonObjectWriter, input: 
&crate::input::UpdateFunctionConfigurationInput, ) -> Result<(), aws_smithy_http::operation::SerializationError> { if let Some(var_148) = &input.dead_letter_config { let mut object_149 = object.key("DeadLetterConfig").start_object(); crate::json_ser::serialize_structure_crate_model_dead_letter_config( &mut object_149, var_148, )?; object_149.finish(); } if let Some(var_150) = &input.description { object.key("Description").string(var_150); } if let Some(var_151) = &input.environment { let mut object_152 = object.key("Environment").start_object(); crate::json_ser::serialize_structure_crate_model_environment(&mut object_152, var_151)?; object_152.finish(); } if let Some(var_153) = &input.file_system_configs { let mut array_154 = object.key("FileSystemConfigs").start_array(); for item_155 in var_153 { { let mut object_156 = array_154.value().start_object(); crate::json_ser::serialize_structure_crate_model_file_system_config( &mut object_156, item_155, )?; object_156.finish(); } } array_154.finish(); } if let Some(var_157) = &input.handler { object.key("Handler").string(var_157); } if let Some(var_158) = &input.image_config { let mut object_159 = object.key("ImageConfig").start_object(); crate::json_ser::serialize_structure_crate_model_image_config(&mut object_159, var_158)?; object_159.finish(); } if let Some(var_160) = &input.kms_key_arn { object.key("KMSKeyArn").string(var_160); } if let Some(var_161) = &input.layers { let mut array_162 = object.key("Layers").start_array(); for item_163 in var_161 { { array_162.value().string(item_163); } } array_162.finish(); } if let Some(var_164) = &input.memory_size { object.key("MemorySize").number( #[allow(clippy::useless_conversion)] aws_smithy_types::Number::NegInt((*var_164).into()), ); } if let Some(var_165) = &input.revision_id { object.key("RevisionId").string(var_165); } if let Some(var_166) = &input.role { object.key("Role").string(var_166); } if let Some(var_167) = &input.runtime { 
object.key("Runtime").string(var_167.as_str()); } if let Some(var_168) = &input.timeout { object.key("Timeout").number( #[allow(clippy::useless_conversion)] aws_smithy_types::Number::NegInt((*var_168).into()), ); } if let Some(var_169) = &input.tracing_config { let mut object_170 = object.key("TracingConfig").start_object(); crate::json_ser::serialize_structure_crate_model_tracing_config(&mut object_170, var_169)?; object_170.finish(); } if let Some(var_171) = &input.vpc_config { let mut object_172 = object.key("VpcConfig").start_object(); crate::json_ser::serialize_structure_crate_model_vpc_config(&mut object_172, var_171)?; object_172.finish(); } Ok(()) } pub fn serialize_structure_crate_input_update_function_event_invoke_config_input( object: &mut aws_smithy_json::serialize::JsonObjectWriter, input: &crate::input::UpdateFunctionEventInvokeConfigInput, ) -> Result<(), aws_smithy_http::operation::SerializationError> { if let Some(var_173) = &input.destination_config { let mut object_174 = object.key("DestinationConfig").start_object(); crate::json_ser::serialize_structure_crate_model_destination_config( &mut object_174, var_173, )?; object_174.finish(); } if let Some(var_175) = &input.maximum_event_age_in_seconds { object.key("MaximumEventAgeInSeconds").number( #[allow(clippy::useless_conversion)] aws_smithy_types::Number::NegInt((*var_175).into()), ); } if let Some(var_176) = &input.maximum_retry_attempts { object.key("MaximumRetryAttempts").number( #[allow(clippy::useless_conversion)] aws_smithy_types::Number::NegInt((*var_176).into()), ); } Ok(()) } pub fn serialize_structure_crate_model_alias_routing_configuration( object: &mut aws_smithy_json::serialize::JsonObjectWriter, input: &crate::model::AliasRoutingConfiguration, ) -> Result<(), aws_smithy_http::operation::SerializationError> { if let Some(var_177) = &input.additional_version_weights { let mut object_178 = object.key("AdditionalVersionWeights").start_object(); for (key_179, value_180) in var_177 { { 
object_178.key(key_179).number( #[allow(clippy::useless_conversion)] aws_smithy_types::Number::Float((*value_180).into()), ); } } object_178.finish(); } Ok(()) } pub fn serialize_structure_crate_model_allowed_publishers( object: &mut aws_smithy_json::serialize::JsonObjectWriter, input: &crate::model::AllowedPublishers, ) -> Result<(), aws_smithy_http::operation::SerializationError> { if let Some(var_181) = &input.signing_profile_version_arns { let mut array_182 = object.key("SigningProfileVersionArns").start_array(); for item_183 in var_181 { { array_182.value().string(item_183); } } array_182.finish(); } Ok(()) } pub fn serialize_structure_crate_model_code_signing_policies( object: &mut aws_smithy_json::serialize::JsonObjectWriter, input: &crate::model::CodeSigningPolicies, ) -> Result<(), aws_smithy_http::operation::SerializationError> { if let Some(var_184) = &input.untrusted_artifact_on_deployment { object .key("UntrustedArtifactOnDeployment") .string(var_184.as_str()); } Ok(()) } pub fn serialize_structure_crate_model_destination_config( object: &mut aws_smithy_json::serialize::JsonObjectWriter, input: &crate::model::DestinationConfig, ) -> Result<(), aws_smithy_http::operation::SerializationError> { if let Some(var_185) = &input.on_success { let mut object_186 = object.key("OnSuccess").start_object(); crate::json_ser::serialize_structure_crate_model_on_success(&mut object_186, var_185)?; object_186.finish(); } if let Some(var_187) = &input.on_failure { let mut object_188 = object.key("OnFailure").start_object(); crate::json_ser::serialize_structure_crate_model_on_failure(&mut object_188, var_187)?; object_188.finish(); } Ok(()) } pub fn serialize_structure_crate_model_self_managed_event_source( object: &mut aws_smithy_json::serialize::JsonObjectWriter, input: &crate::model::SelfManagedEventSource, ) -> Result<(), aws_smithy_http::operation::SerializationError> { if let Some(var_189) = &input.endpoints { let mut object_190 = 
object.key("Endpoints").start_object(); for (key_191, value_192) in var_189 { { let mut array_193 = object_190.key(key_191.as_str()).start_array(); for item_194 in value_192 { { array_193.value().string(item_194); } } array_193.finish(); } } object_190.finish(); } Ok(()) } pub fn serialize_structure_crate_model_source_access_configuration( object: &mut aws_smithy_json::serialize::JsonObjectWriter, input: &crate::model::SourceAccessConfiguration, ) -> Result<(), aws_smithy_http::operation::SerializationError> { if let Some(var_195) = &input.r#type { object.key("Type").string(var_195.as_str()); } if let Some(var_196) = &input.uri { object.key("URI").string(var_196); } Ok(()) } pub fn serialize_structure_crate_model_function_code( object: &mut aws_smithy_json::serialize::JsonObjectWriter, input: &crate::model::FunctionCode, ) -> Result<(), aws_smithy_http::operation::SerializationError> { if let Some(var_197) = &input.zip_file { object .key("ZipFile") .string_unchecked(&aws_smithy_types::base64::encode(var_197)); } if let Some(var_198) = &input.s3_bucket { object.key("S3Bucket").string(var_198); } if let Some(var_199) = &input.s3_key { object.key("S3Key").string(var_199); } if let Some(var_200) = &input.s3_object_version { object.key("S3ObjectVersion").string(var_200); } if let Some(var_201) = &input.image_uri { object.key("ImageUri").string(var_201); } Ok(()) } pub fn serialize_structure_crate_model_dead_letter_config( object: &mut aws_smithy_json::serialize::JsonObjectWriter, input: &crate::model::DeadLetterConfig, ) -> Result<(), aws_smithy_http::operation::SerializationError> { if let Some(var_202) = &input.target_arn { object.key("TargetArn").string(var_202); } Ok(()) } pub fn serialize_structure_crate_model_environment( object: &mut aws_smithy_json::serialize::JsonObjectWriter, input: &crate::model::Environment, ) -> Result<(), aws_smithy_http::operation::SerializationError> { if let Some(var_203) = &input.variables { let mut object_204 = 
object.key("Variables").start_object(); for (key_205, value_206) in var_203 { { object_204.key(key_205).string(value_206); } } object_204.finish(); } Ok(()) } pub fn serialize_structure_crate_model_file_system_config( object: &mut aws_smithy_json::serialize::JsonObjectWriter, input: &crate::model::FileSystemConfig, ) -> Result<(), aws_smithy_http::operation::SerializationError> { if let Some(var_207) = &input.arn { object.key("Arn").string(var_207); } if let Some(var_208) = &input.local_mount_path { object.key("LocalMountPath").string(var_208); } Ok(()) } pub fn serialize_structure_crate_model_image_config( object: &mut aws_smithy_json::serialize::JsonObjectWriter, input: &crate::model::ImageConfig, ) -> Result<(), aws_smithy_http::operation::SerializationError> { if let Some(var_209) = &input.entry_point { let mut array_210 = object.key("EntryPoint").start_array(); for item_211 in var_209 { { array_210.value().string(item_211); } } array_210.finish(); } if let Some(var_212) = &input.command { let mut array_213 = object.key("Command").start_array(); for item_214 in var_212 { { array_213.value().string(item_214); } } array_213.finish(); } if let Some(var_215) = &input.working_directory { object.key("WorkingDirectory").string(var_215); } Ok(()) } pub fn serialize_structure_crate_model_tracing_config( object: &mut aws_smithy_json::serialize::JsonObjectWriter, input: &crate::model::TracingConfig, ) -> Result<(), aws_smithy_http::operation::SerializationError> { if let Some(var_216) = &input.mode { object.key("Mode").string(var_216.as_str()); } Ok(()) } pub fn serialize_structure_crate_model_vpc_config( object: &mut aws_smithy_json::serialize::JsonObjectWriter, input: &crate::model::VpcConfig, ) -> Result<(), aws_smithy_http::operation::SerializationError> { if let Some(var_217) = &input.subnet_ids { let mut array_218 = object.key("SubnetIds").start_array(); for item_219 in var_217 { { array_218.value().string(item_219); } } array_218.finish(); } if let Some(var_220) = 
&input.security_group_ids { let mut array_221 = object.key("SecurityGroupIds").start_array(); for item_222 in var_220 { { array_221.value().string(item_222); } } array_221.finish(); } Ok(()) } pub fn serialize_structure_crate_model_layer_version_content_input( object: &mut aws_smithy_json::serialize::JsonObjectWriter, input: &crate::model::LayerVersionContentInput, ) -> Result<(), aws_smithy_http::operation::SerializationError> { if let Some(var_223) = &input.s3_bucket { object.key("S3Bucket").string(var_223); } if let Some(var_224) = &input.s3_key { object.key("S3Key").string(var_224); } if let Some(var_225) = &input.s3_object_version { object.key("S3ObjectVersion").string(var_225); } if let Some(var_226) = &input.zip_file { object .key("ZipFile") .string_unchecked(&aws_smithy_types::base64::encode(var_226)); } Ok(()) } pub fn serialize_structure_crate_model_on_success( object: &mut aws_smithy_json::serialize::JsonObjectWriter, input: &crate::model::OnSuccess, ) -> Result<(), aws_smithy_http::operation::SerializationError> { if let Some(var_227) = &input.destination { object.key("Destination").string(var_227); } Ok(()) } pub fn serialize_structure_crate_model_on_failure( object: &mut aws_smithy_json::serialize::JsonObjectWriter, input: &crate::model::OnFailure, ) -> Result<(), aws_smithy_http::operation::SerializationError> { if let Some(var_228) = &input.destination { object.key("Destination").string(var_228); } Ok(()) }
36.729572
99
0.633243
ab67fa5936b388bbc4b4ef52309103df9cb066f7
44,208
#[doc = "Register `HDCR` reader"] pub struct R(crate::R<HDCR_SPEC>); impl core::ops::Deref for R { type Target = crate::R<HDCR_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl From<crate::R<HDCR_SPEC>> for R { #[inline(always)] fn from(reader: crate::R<HDCR_SPEC>) -> Self { R(reader) } } #[doc = "Register `HDCR` writer"] pub struct W(crate::W<HDCR_SPEC>); impl core::ops::Deref for W { type Target = crate::W<HDCR_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl core::ops::DerefMut for W { #[inline(always)] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl From<crate::W<HDCR_SPEC>> for W { #[inline(always)] fn from(writer: crate::W<HDCR_SPEC>) -> Self { W(writer) } } #[doc = "Wake-Up on Pin Event Positive Edge Enable\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum WKPEP_A { #[doc = "0: Wake-up event disabled"] VALUE1 = 0, #[doc = "1: Wake-up event enabled"] VALUE2 = 1, } impl From<WKPEP_A> for bool { #[inline(always)] fn from(variant: WKPEP_A) -> Self { variant as u8 != 0 } } #[doc = "Field `WKPEP` reader - Wake-Up on Pin Event Positive Edge Enable"] pub struct WKPEP_R(crate::FieldReader<bool, WKPEP_A>); impl WKPEP_R { pub(crate) fn new(bits: bool) -> Self { WKPEP_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> WKPEP_A { match self.bits { false => WKPEP_A::VALUE1, true => WKPEP_A::VALUE2, } } #[doc = "Checks if the value of the field is `VALUE1`"] #[inline(always)] pub fn is_value1(&self) -> bool { **self == WKPEP_A::VALUE1 } #[doc = "Checks if the value of the field is `VALUE2`"] #[inline(always)] pub fn is_value2(&self) -> bool { **self == WKPEP_A::VALUE2 } } impl core::ops::Deref for WKPEP_R { type Target = crate::FieldReader<bool, WKPEP_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `WKPEP` writer - Wake-Up on Pin Event Positive Edge Enable"] pub struct 
WKPEP_W<'a> { w: &'a mut W, } impl<'a> WKPEP_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: WKPEP_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Wake-up event disabled"] #[inline(always)] pub fn value1(self) -> &'a mut W { self.variant(WKPEP_A::VALUE1) } #[doc = "Wake-up event enabled"] #[inline(always)] pub fn value2(self) -> &'a mut W { self.variant(WKPEP_A::VALUE2) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !0x01) | (value as u32 & 0x01); self.w } } #[doc = "Wake-up on Pin Event Negative Edge Enable\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum WKPEN_A { #[doc = "0: Wake-up event disabled"] VALUE1 = 0, #[doc = "1: Wake-up event enabled"] VALUE2 = 1, } impl From<WKPEN_A> for bool { #[inline(always)] fn from(variant: WKPEN_A) -> Self { variant as u8 != 0 } } #[doc = "Field `WKPEN` reader - Wake-up on Pin Event Negative Edge Enable"] pub struct WKPEN_R(crate::FieldReader<bool, WKPEN_A>); impl WKPEN_R { pub(crate) fn new(bits: bool) -> Self { WKPEN_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> WKPEN_A { match self.bits { false => WKPEN_A::VALUE1, true => WKPEN_A::VALUE2, } } #[doc = "Checks if the value of the field is `VALUE1`"] #[inline(always)] pub fn is_value1(&self) -> bool { **self == WKPEN_A::VALUE1 } #[doc = "Checks if the value of the field is `VALUE2`"] #[inline(always)] pub fn is_value2(&self) -> bool { **self == WKPEN_A::VALUE2 } } impl core::ops::Deref for WKPEN_R { type Target = crate::FieldReader<bool, WKPEN_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `WKPEN` 
writer - Wake-up on Pin Event Negative Edge Enable"] pub struct WKPEN_W<'a> { w: &'a mut W, } impl<'a> WKPEN_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: WKPEN_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Wake-up event disabled"] #[inline(always)] pub fn value1(self) -> &'a mut W { self.variant(WKPEN_A::VALUE1) } #[doc = "Wake-up event enabled"] #[inline(always)] pub fn value2(self) -> &'a mut W { self.variant(WKPEN_A::VALUE2) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 1)) | ((value as u32 & 0x01) << 1); self.w } } #[doc = "Wake-up on RTC Event Enable\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum RTCE_A { #[doc = "0: Wake-up event disabled"] VALUE1 = 0, #[doc = "1: Wake-up event enabled"] VALUE2 = 1, } impl From<RTCE_A> for bool { #[inline(always)] fn from(variant: RTCE_A) -> Self { variant as u8 != 0 } } #[doc = "Field `RTCE` reader - Wake-up on RTC Event Enable"] pub struct RTCE_R(crate::FieldReader<bool, RTCE_A>); impl RTCE_R { pub(crate) fn new(bits: bool) -> Self { RTCE_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> RTCE_A { match self.bits { false => RTCE_A::VALUE1, true => RTCE_A::VALUE2, } } #[doc = "Checks if the value of the field is `VALUE1`"] #[inline(always)] pub fn is_value1(&self) -> bool { **self == RTCE_A::VALUE1 } #[doc = "Checks if the value of the field is `VALUE2`"] #[inline(always)] pub fn is_value2(&self) -> bool { **self == RTCE_A::VALUE2 } } impl core::ops::Deref for RTCE_R { type Target = crate::FieldReader<bool, RTCE_A>; #[inline(always)] fn deref(&self) -> &Self::Target { 
&self.0 } } #[doc = "Field `RTCE` writer - Wake-up on RTC Event Enable"] pub struct RTCE_W<'a> { w: &'a mut W, } impl<'a> RTCE_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: RTCE_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Wake-up event disabled"] #[inline(always)] pub fn value1(self) -> &'a mut W { self.variant(RTCE_A::VALUE1) } #[doc = "Wake-up event enabled"] #[inline(always)] pub fn value2(self) -> &'a mut W { self.variant(RTCE_A::VALUE2) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 2)) | ((value as u32 & 0x01) << 2); self.w } } #[doc = "ULP WDG Alarm Enable\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum ULPWDGEN_A { #[doc = "0: Wake-up event disabled"] VALUE1 = 0, #[doc = "1: Wake-up event enabled"] VALUE2 = 1, } impl From<ULPWDGEN_A> for bool { #[inline(always)] fn from(variant: ULPWDGEN_A) -> Self { variant as u8 != 0 } } #[doc = "Field `ULPWDGEN` reader - ULP WDG Alarm Enable"] pub struct ULPWDGEN_R(crate::FieldReader<bool, ULPWDGEN_A>); impl ULPWDGEN_R { pub(crate) fn new(bits: bool) -> Self { ULPWDGEN_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> ULPWDGEN_A { match self.bits { false => ULPWDGEN_A::VALUE1, true => ULPWDGEN_A::VALUE2, } } #[doc = "Checks if the value of the field is `VALUE1`"] #[inline(always)] pub fn is_value1(&self) -> bool { **self == ULPWDGEN_A::VALUE1 } #[doc = "Checks if the value of the field is `VALUE2`"] #[inline(always)] pub fn is_value2(&self) -> bool { **self == ULPWDGEN_A::VALUE2 } } impl core::ops::Deref for ULPWDGEN_R { type Target = crate::FieldReader<bool, 
ULPWDGEN_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `ULPWDGEN` writer - ULP WDG Alarm Enable"] pub struct ULPWDGEN_W<'a> { w: &'a mut W, } impl<'a> ULPWDGEN_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: ULPWDGEN_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Wake-up event disabled"] #[inline(always)] pub fn value1(self) -> &'a mut W { self.variant(ULPWDGEN_A::VALUE1) } #[doc = "Wake-up event enabled"] #[inline(always)] pub fn value2(self) -> &'a mut W { self.variant(ULPWDGEN_A::VALUE2) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 3)) | ((value as u32 & 0x01) << 3); self.w } } #[doc = "Hibernate Request Value Set\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum HIB_A { #[doc = "0: External hibernate request inactive"] VALUE1 = 0, #[doc = "1: External hibernate request active"] VALUE2 = 1, } impl From<HIB_A> for bool { #[inline(always)] fn from(variant: HIB_A) -> Self { variant as u8 != 0 } } #[doc = "Field `HIB` reader - Hibernate Request Value Set"] pub struct HIB_R(crate::FieldReader<bool, HIB_A>); impl HIB_R { pub(crate) fn new(bits: bool) -> Self { HIB_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> HIB_A { match self.bits { false => HIB_A::VALUE1, true => HIB_A::VALUE2, } } #[doc = "Checks if the value of the field is `VALUE1`"] #[inline(always)] pub fn is_value1(&self) -> bool { **self == HIB_A::VALUE1 } #[doc = "Checks if the value of the field is `VALUE2`"] #[inline(always)] pub fn is_value2(&self) -> bool { **self == HIB_A::VALUE2 } } impl core::ops::Deref for 
HIB_R { type Target = crate::FieldReader<bool, HIB_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `HIB` writer - Hibernate Request Value Set"] pub struct HIB_W<'a> { w: &'a mut W, } impl<'a> HIB_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: HIB_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "External hibernate request inactive"] #[inline(always)] pub fn value1(self) -> &'a mut W { self.variant(HIB_A::VALUE1) } #[doc = "External hibernate request active"] #[inline(always)] pub fn value2(self) -> &'a mut W { self.variant(HIB_A::VALUE2) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 4)) | ((value as u32 & 0x01) << 4); self.w } } #[doc = "fRTC Clock Selection\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum RCS_A { #[doc = "0: fOSI selected"] VALUE1 = 0, #[doc = "1: fULP selected"] VALUE2 = 1, } impl From<RCS_A> for bool { #[inline(always)] fn from(variant: RCS_A) -> Self { variant as u8 != 0 } } #[doc = "Field `RCS` reader - fRTC Clock Selection"] pub struct RCS_R(crate::FieldReader<bool, RCS_A>); impl RCS_R { pub(crate) fn new(bits: bool) -> Self { RCS_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> RCS_A { match self.bits { false => RCS_A::VALUE1, true => RCS_A::VALUE2, } } #[doc = "Checks if the value of the field is `VALUE1`"] #[inline(always)] pub fn is_value1(&self) -> bool { **self == RCS_A::VALUE1 } #[doc = "Checks if the value of the field is `VALUE2`"] #[inline(always)] pub fn is_value2(&self) -> bool { **self == RCS_A::VALUE2 } } impl core::ops::Deref for RCS_R { type 
Target = crate::FieldReader<bool, RCS_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `RCS` writer - fRTC Clock Selection"] pub struct RCS_W<'a> { w: &'a mut W, } impl<'a> RCS_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: RCS_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "fOSI selected"] #[inline(always)] pub fn value1(self) -> &'a mut W { self.variant(RCS_A::VALUE1) } #[doc = "fULP selected"] #[inline(always)] pub fn value2(self) -> &'a mut W { self.variant(RCS_A::VALUE2) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 6)) | ((value as u32 & 0x01) << 6); self.w } } #[doc = "fSTDBY Clock Selection\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum STDBYSEL_A { #[doc = "0: fOSI selected"] VALUE1 = 0, #[doc = "1: fULP selected"] VALUE2 = 1, } impl From<STDBYSEL_A> for bool { #[inline(always)] fn from(variant: STDBYSEL_A) -> Self { variant as u8 != 0 } } #[doc = "Field `STDBYSEL` reader - fSTDBY Clock Selection"] pub struct STDBYSEL_R(crate::FieldReader<bool, STDBYSEL_A>); impl STDBYSEL_R { pub(crate) fn new(bits: bool) -> Self { STDBYSEL_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> STDBYSEL_A { match self.bits { false => STDBYSEL_A::VALUE1, true => STDBYSEL_A::VALUE2, } } #[doc = "Checks if the value of the field is `VALUE1`"] #[inline(always)] pub fn is_value1(&self) -> bool { **self == STDBYSEL_A::VALUE1 } #[doc = "Checks if the value of the field is `VALUE2`"] #[inline(always)] pub fn is_value2(&self) -> bool { **self == STDBYSEL_A::VALUE2 } } impl core::ops::Deref for 
STDBYSEL_R { type Target = crate::FieldReader<bool, STDBYSEL_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `STDBYSEL` writer - fSTDBY Clock Selection"] pub struct STDBYSEL_W<'a> { w: &'a mut W, } impl<'a> STDBYSEL_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: STDBYSEL_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "fOSI selected"] #[inline(always)] pub fn value1(self) -> &'a mut W { self.variant(STDBYSEL_A::VALUE1) } #[doc = "fULP selected"] #[inline(always)] pub fn value2(self) -> &'a mut W { self.variant(STDBYSEL_A::VALUE2) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 7)) | ((value as u32 & 0x01) << 7); self.w } } #[doc = "Wake-Up from Hibernate Trigger Input Selection\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum WKUPSEL_A { #[doc = "0: HIB_IO_1 pin selected"] VALUE1 = 0, #[doc = "1: HIB_IO_0 pin selected"] VALUE2 = 1, } impl From<WKUPSEL_A> for bool { #[inline(always)] fn from(variant: WKUPSEL_A) -> Self { variant as u8 != 0 } } #[doc = "Field `WKUPSEL` reader - Wake-Up from Hibernate Trigger Input Selection"] pub struct WKUPSEL_R(crate::FieldReader<bool, WKUPSEL_A>); impl WKUPSEL_R { pub(crate) fn new(bits: bool) -> Self { WKUPSEL_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> WKUPSEL_A { match self.bits { false => WKUPSEL_A::VALUE1, true => WKUPSEL_A::VALUE2, } } #[doc = "Checks if the value of the field is `VALUE1`"] #[inline(always)] pub fn is_value1(&self) -> bool { **self == WKUPSEL_A::VALUE1 } #[doc = "Checks if the value of the field is `VALUE2`"] 
#[inline(always)] pub fn is_value2(&self) -> bool { **self == WKUPSEL_A::VALUE2 } } impl core::ops::Deref for WKUPSEL_R { type Target = crate::FieldReader<bool, WKUPSEL_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `WKUPSEL` writer - Wake-Up from Hibernate Trigger Input Selection"] pub struct WKUPSEL_W<'a> { w: &'a mut W, } impl<'a> WKUPSEL_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: WKUPSEL_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "HIB_IO_1 pin selected"] #[inline(always)] pub fn value1(self) -> &'a mut W { self.variant(WKUPSEL_A::VALUE1) } #[doc = "HIB_IO_0 pin selected"] #[inline(always)] pub fn value2(self) -> &'a mut W { self.variant(WKUPSEL_A::VALUE2) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 8)) | ((value as u32 & 0x01) << 8); self.w } } #[doc = "General Purpose Input 0 Selection\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum GPI0SEL_A { #[doc = "0: HIB_IO_1 pin selected"] VALUE1 = 0, #[doc = "1: HIB_IO_0 pin selected"] VALUE2 = 1, } impl From<GPI0SEL_A> for bool { #[inline(always)] fn from(variant: GPI0SEL_A) -> Self { variant as u8 != 0 } } #[doc = "Field `GPI0SEL` reader - General Purpose Input 0 Selection"] pub struct GPI0SEL_R(crate::FieldReader<bool, GPI0SEL_A>); impl GPI0SEL_R { pub(crate) fn new(bits: bool) -> Self { GPI0SEL_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> GPI0SEL_A { match self.bits { false => GPI0SEL_A::VALUE1, true => GPI0SEL_A::VALUE2, } } #[doc = "Checks if the value of the field is `VALUE1`"] #[inline(always)] pub fn 
is_value1(&self) -> bool { **self == GPI0SEL_A::VALUE1 } #[doc = "Checks if the value of the field is `VALUE2`"] #[inline(always)] pub fn is_value2(&self) -> bool { **self == GPI0SEL_A::VALUE2 } } impl core::ops::Deref for GPI0SEL_R { type Target = crate::FieldReader<bool, GPI0SEL_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `GPI0SEL` writer - General Purpose Input 0 Selection"] pub struct GPI0SEL_W<'a> { w: &'a mut W, } impl<'a> GPI0SEL_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: GPI0SEL_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "HIB_IO_1 pin selected"] #[inline(always)] pub fn value1(self) -> &'a mut W { self.variant(GPI0SEL_A::VALUE1) } #[doc = "HIB_IO_0 pin selected"] #[inline(always)] pub fn value2(self) -> &'a mut W { self.variant(GPI0SEL_A::VALUE2) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 10)) | ((value as u32 & 0x01) << 10); self.w } } #[doc = "HIBIO0 Polarity Set\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum HIBIO0POL_A { #[doc = "0: Direct value"] VALUE1 = 0, #[doc = "1: Inverted value"] VALUE2 = 1, } impl From<HIBIO0POL_A> for bool { #[inline(always)] fn from(variant: HIBIO0POL_A) -> Self { variant as u8 != 0 } } #[doc = "Field `HIBIO0POL` reader - HIBIO0 Polarity Set"] pub struct HIBIO0POL_R(crate::FieldReader<bool, HIBIO0POL_A>); impl HIBIO0POL_R { pub(crate) fn new(bits: bool) -> Self { HIBIO0POL_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> HIBIO0POL_A { match self.bits { false => HIBIO0POL_A::VALUE1, true => HIBIO0POL_A::VALUE2, } } #[doc = 
"Checks if the value of the field is `VALUE1`"] #[inline(always)] pub fn is_value1(&self) -> bool { **self == HIBIO0POL_A::VALUE1 } #[doc = "Checks if the value of the field is `VALUE2`"] #[inline(always)] pub fn is_value2(&self) -> bool { **self == HIBIO0POL_A::VALUE2 } } impl core::ops::Deref for HIBIO0POL_R { type Target = crate::FieldReader<bool, HIBIO0POL_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `HIBIO0POL` writer - HIBIO0 Polarity Set"] pub struct HIBIO0POL_W<'a> { w: &'a mut W, } impl<'a> HIBIO0POL_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: HIBIO0POL_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Direct value"] #[inline(always)] pub fn value1(self) -> &'a mut W { self.variant(HIBIO0POL_A::VALUE1) } #[doc = "Inverted value"] #[inline(always)] pub fn value2(self) -> &'a mut W { self.variant(HIBIO0POL_A::VALUE2) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 12)) | ((value as u32 & 0x01) << 12); self.w } } #[doc = "HIBIO1 Polarity Set\n\nValue on reset: 1"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum HIBIO1POL_A { #[doc = "0: Direct value"] VALUE1 = 0, #[doc = "1: Inverted value"] VALUE2 = 1, } impl From<HIBIO1POL_A> for bool { #[inline(always)] fn from(variant: HIBIO1POL_A) -> Self { variant as u8 != 0 } } #[doc = "Field `HIBIO1POL` reader - HIBIO1 Polarity Set"] pub struct HIBIO1POL_R(crate::FieldReader<bool, HIBIO1POL_A>); impl HIBIO1POL_R { pub(crate) fn new(bits: bool) -> Self { HIBIO1POL_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> HIBIO1POL_A { match self.bits { false => 
HIBIO1POL_A::VALUE1, true => HIBIO1POL_A::VALUE2, } } #[doc = "Checks if the value of the field is `VALUE1`"] #[inline(always)] pub fn is_value1(&self) -> bool { **self == HIBIO1POL_A::VALUE1 } #[doc = "Checks if the value of the field is `VALUE2`"] #[inline(always)] pub fn is_value2(&self) -> bool { **self == HIBIO1POL_A::VALUE2 } } impl core::ops::Deref for HIBIO1POL_R { type Target = crate::FieldReader<bool, HIBIO1POL_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `HIBIO1POL` writer - HIBIO1 Polarity Set"] pub struct HIBIO1POL_W<'a> { w: &'a mut W, } impl<'a> HIBIO1POL_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: HIBIO1POL_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Direct value"] #[inline(always)] pub fn value1(self) -> &'a mut W { self.variant(HIBIO1POL_A::VALUE1) } #[doc = "Inverted value"] #[inline(always)] pub fn value2(self) -> &'a mut W { self.variant(HIBIO1POL_A::VALUE2) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 13)) | ((value as u32 & 0x01) << 13); self.w } } #[doc = "HIB_IO_0 Pin I/O Control (default HIBOUT)\n\nValue on reset: 12"] #[derive(Clone, Copy, Debug, PartialEq)] #[repr(u8)] pub enum HIBIO0SEL_A { #[doc = "0: Direct input, No input pull device connected"] VALUE1 = 0, #[doc = "1: Direct input, Input pull-down device connected"] VALUE2 = 1, #[doc = "2: Direct input, Input pull-up device connected"] VALUE3 = 2, #[doc = "8: Push-pull HIB Control output"] VALUE4 = 8, #[doc = "9: Push-pull WDT service output"] VALUE5 = 9, #[doc = "10: Push-pull GPIO output"] VALUE6 = 10, #[doc = "12: Open-drain HIB Control output"] VALUE7 = 12, #[doc = "13: 
Open-drain WDT service output"] VALUE8 = 13, #[doc = "14: Open-drain GPIO output"] VALUE9 = 14, #[doc = "15: Analog input"] VALUE10 = 15, } impl From<HIBIO0SEL_A> for u8 { #[inline(always)] fn from(variant: HIBIO0SEL_A) -> Self { variant as _ } } #[doc = "Field `HIBIO0SEL` reader - HIB_IO_0 Pin I/O Control (default HIBOUT)"] pub struct HIBIO0SEL_R(crate::FieldReader<u8, HIBIO0SEL_A>); impl HIBIO0SEL_R { pub(crate) fn new(bits: u8) -> Self { HIBIO0SEL_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> Option<HIBIO0SEL_A> { match self.bits { 0 => Some(HIBIO0SEL_A::VALUE1), 1 => Some(HIBIO0SEL_A::VALUE2), 2 => Some(HIBIO0SEL_A::VALUE3), 8 => Some(HIBIO0SEL_A::VALUE4), 9 => Some(HIBIO0SEL_A::VALUE5), 10 => Some(HIBIO0SEL_A::VALUE6), 12 => Some(HIBIO0SEL_A::VALUE7), 13 => Some(HIBIO0SEL_A::VALUE8), 14 => Some(HIBIO0SEL_A::VALUE9), 15 => Some(HIBIO0SEL_A::VALUE10), _ => None, } } #[doc = "Checks if the value of the field is `VALUE1`"] #[inline(always)] pub fn is_value1(&self) -> bool { **self == HIBIO0SEL_A::VALUE1 } #[doc = "Checks if the value of the field is `VALUE2`"] #[inline(always)] pub fn is_value2(&self) -> bool { **self == HIBIO0SEL_A::VALUE2 } #[doc = "Checks if the value of the field is `VALUE3`"] #[inline(always)] pub fn is_value3(&self) -> bool { **self == HIBIO0SEL_A::VALUE3 } #[doc = "Checks if the value of the field is `VALUE4`"] #[inline(always)] pub fn is_value4(&self) -> bool { **self == HIBIO0SEL_A::VALUE4 } #[doc = "Checks if the value of the field is `VALUE5`"] #[inline(always)] pub fn is_value5(&self) -> bool { **self == HIBIO0SEL_A::VALUE5 } #[doc = "Checks if the value of the field is `VALUE6`"] #[inline(always)] pub fn is_value6(&self) -> bool { **self == HIBIO0SEL_A::VALUE6 } #[doc = "Checks if the value of the field is `VALUE7`"] #[inline(always)] pub fn is_value7(&self) -> bool { **self == HIBIO0SEL_A::VALUE7 } #[doc = "Checks if the value of the field is `VALUE8`"] 
#[inline(always)] pub fn is_value8(&self) -> bool { **self == HIBIO0SEL_A::VALUE8 } #[doc = "Checks if the value of the field is `VALUE9`"] #[inline(always)] pub fn is_value9(&self) -> bool { **self == HIBIO0SEL_A::VALUE9 } #[doc = "Checks if the value of the field is `VALUE10`"] #[inline(always)] pub fn is_value10(&self) -> bool { **self == HIBIO0SEL_A::VALUE10 } } impl core::ops::Deref for HIBIO0SEL_R { type Target = crate::FieldReader<u8, HIBIO0SEL_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `HIBIO0SEL` writer - HIB_IO_0 Pin I/O Control (default HIBOUT)"] pub struct HIBIO0SEL_W<'a> { w: &'a mut W, } impl<'a> HIBIO0SEL_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: HIBIO0SEL_A) -> &'a mut W { unsafe { self.bits(variant.into()) } } #[doc = "Direct input, No input pull device connected"] #[inline(always)] pub fn value1(self) -> &'a mut W { self.variant(HIBIO0SEL_A::VALUE1) } #[doc = "Direct input, Input pull-down device connected"] #[inline(always)] pub fn value2(self) -> &'a mut W { self.variant(HIBIO0SEL_A::VALUE2) } #[doc = "Direct input, Input pull-up device connected"] #[inline(always)] pub fn value3(self) -> &'a mut W { self.variant(HIBIO0SEL_A::VALUE3) } #[doc = "Push-pull HIB Control output"] #[inline(always)] pub fn value4(self) -> &'a mut W { self.variant(HIBIO0SEL_A::VALUE4) } #[doc = "Push-pull WDT service output"] #[inline(always)] pub fn value5(self) -> &'a mut W { self.variant(HIBIO0SEL_A::VALUE5) } #[doc = "Push-pull GPIO output"] #[inline(always)] pub fn value6(self) -> &'a mut W { self.variant(HIBIO0SEL_A::VALUE6) } #[doc = "Open-drain HIB Control output"] #[inline(always)] pub fn value7(self) -> &'a mut W { self.variant(HIBIO0SEL_A::VALUE7) } #[doc = "Open-drain WDT service output"] #[inline(always)] pub fn value8(self) -> &'a mut W { self.variant(HIBIO0SEL_A::VALUE8) } #[doc = "Open-drain GPIO output"] #[inline(always)] pub fn value9(self) -> &'a mut W { 
self.variant(HIBIO0SEL_A::VALUE9) } #[doc = "Analog input"] #[inline(always)] pub fn value10(self) -> &'a mut W { self.variant(HIBIO0SEL_A::VALUE10) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0x0f << 16)) | ((value as u32 & 0x0f) << 16); self.w } } #[doc = "HIB_IO_1 Pin I/O Control (Default WKUP)\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] #[repr(u8)] pub enum HIBIO1SEL_A { #[doc = "0: Direct input, No input pull device connected"] VALUE1 = 0, #[doc = "1: Direct input, Input pull-down device connected"] VALUE2 = 1, #[doc = "2: Direct input, Input pull-up device connected"] VALUE3 = 2, #[doc = "8: Push-pull HIB Control output"] VALUE4 = 8, #[doc = "9: Push-pull WDT service output"] VALUE5 = 9, #[doc = "10: Push-pull GPIO output"] VALUE6 = 10, #[doc = "12: Open-drain HIB Control output"] VALUE7 = 12, #[doc = "13: Open-drain WDT service output"] VALUE8 = 13, #[doc = "14: Open-drain GPIO output"] VALUE9 = 14, #[doc = "15: Analog input"] VALUE10 = 15, } impl From<HIBIO1SEL_A> for u8 { #[inline(always)] fn from(variant: HIBIO1SEL_A) -> Self { variant as _ } } #[doc = "Field `HIBIO1SEL` reader - HIB_IO_1 Pin I/O Control (Default WKUP)"] pub struct HIBIO1SEL_R(crate::FieldReader<u8, HIBIO1SEL_A>); impl HIBIO1SEL_R { pub(crate) fn new(bits: u8) -> Self { HIBIO1SEL_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> Option<HIBIO1SEL_A> { match self.bits { 0 => Some(HIBIO1SEL_A::VALUE1), 1 => Some(HIBIO1SEL_A::VALUE2), 2 => Some(HIBIO1SEL_A::VALUE3), 8 => Some(HIBIO1SEL_A::VALUE4), 9 => Some(HIBIO1SEL_A::VALUE5), 10 => Some(HIBIO1SEL_A::VALUE6), 12 => Some(HIBIO1SEL_A::VALUE7), 13 => Some(HIBIO1SEL_A::VALUE8), 14 => Some(HIBIO1SEL_A::VALUE9), 15 => Some(HIBIO1SEL_A::VALUE10), _ => None, } } #[doc = "Checks if the value of the field is `VALUE1`"] #[inline(always)] pub fn is_value1(&self) -> 
bool { **self == HIBIO1SEL_A::VALUE1 } #[doc = "Checks if the value of the field is `VALUE2`"] #[inline(always)] pub fn is_value2(&self) -> bool { **self == HIBIO1SEL_A::VALUE2 } #[doc = "Checks if the value of the field is `VALUE3`"] #[inline(always)] pub fn is_value3(&self) -> bool { **self == HIBIO1SEL_A::VALUE3 } #[doc = "Checks if the value of the field is `VALUE4`"] #[inline(always)] pub fn is_value4(&self) -> bool { **self == HIBIO1SEL_A::VALUE4 } #[doc = "Checks if the value of the field is `VALUE5`"] #[inline(always)] pub fn is_value5(&self) -> bool { **self == HIBIO1SEL_A::VALUE5 } #[doc = "Checks if the value of the field is `VALUE6`"] #[inline(always)] pub fn is_value6(&self) -> bool { **self == HIBIO1SEL_A::VALUE6 } #[doc = "Checks if the value of the field is `VALUE7`"] #[inline(always)] pub fn is_value7(&self) -> bool { **self == HIBIO1SEL_A::VALUE7 } #[doc = "Checks if the value of the field is `VALUE8`"] #[inline(always)] pub fn is_value8(&self) -> bool { **self == HIBIO1SEL_A::VALUE8 } #[doc = "Checks if the value of the field is `VALUE9`"] #[inline(always)] pub fn is_value9(&self) -> bool { **self == HIBIO1SEL_A::VALUE9 } #[doc = "Checks if the value of the field is `VALUE10`"] #[inline(always)] pub fn is_value10(&self) -> bool { **self == HIBIO1SEL_A::VALUE10 } } impl core::ops::Deref for HIBIO1SEL_R { type Target = crate::FieldReader<u8, HIBIO1SEL_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `HIBIO1SEL` writer - HIB_IO_1 Pin I/O Control (Default WKUP)"] pub struct HIBIO1SEL_W<'a> { w: &'a mut W, } impl<'a> HIBIO1SEL_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: HIBIO1SEL_A) -> &'a mut W { unsafe { self.bits(variant.into()) } } #[doc = "Direct input, No input pull device connected"] #[inline(always)] pub fn value1(self) -> &'a mut W { self.variant(HIBIO1SEL_A::VALUE1) } #[doc = "Direct input, Input pull-down device connected"] #[inline(always)] pub fn 
value2(self) -> &'a mut W { self.variant(HIBIO1SEL_A::VALUE2) } #[doc = "Direct input, Input pull-up device connected"] #[inline(always)] pub fn value3(self) -> &'a mut W { self.variant(HIBIO1SEL_A::VALUE3) } #[doc = "Push-pull HIB Control output"] #[inline(always)] pub fn value4(self) -> &'a mut W { self.variant(HIBIO1SEL_A::VALUE4) } #[doc = "Push-pull WDT service output"] #[inline(always)] pub fn value5(self) -> &'a mut W { self.variant(HIBIO1SEL_A::VALUE5) } #[doc = "Push-pull GPIO output"] #[inline(always)] pub fn value6(self) -> &'a mut W { self.variant(HIBIO1SEL_A::VALUE6) } #[doc = "Open-drain HIB Control output"] #[inline(always)] pub fn value7(self) -> &'a mut W { self.variant(HIBIO1SEL_A::VALUE7) } #[doc = "Open-drain WDT service output"] #[inline(always)] pub fn value8(self) -> &'a mut W { self.variant(HIBIO1SEL_A::VALUE8) } #[doc = "Open-drain GPIO output"] #[inline(always)] pub fn value9(self) -> &'a mut W { self.variant(HIBIO1SEL_A::VALUE9) } #[doc = "Analog input"] #[inline(always)] pub fn value10(self) -> &'a mut W { self.variant(HIBIO1SEL_A::VALUE10) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0x0f << 20)) | ((value as u32 & 0x0f) << 20); self.w } } impl R { #[doc = "Bit 0 - Wake-Up on Pin Event Positive Edge Enable"] #[inline(always)] pub fn wkpep(&self) -> WKPEP_R { WKPEP_R::new((self.bits & 0x01) != 0) } #[doc = "Bit 1 - Wake-up on Pin Event Negative Edge Enable"] #[inline(always)] pub fn wkpen(&self) -> WKPEN_R { WKPEN_R::new(((self.bits >> 1) & 0x01) != 0) } #[doc = "Bit 2 - Wake-up on RTC Event Enable"] #[inline(always)] pub fn rtce(&self) -> RTCE_R { RTCE_R::new(((self.bits >> 2) & 0x01) != 0) } #[doc = "Bit 3 - ULP WDG Alarm Enable"] #[inline(always)] pub fn ulpwdgen(&self) -> ULPWDGEN_R { ULPWDGEN_R::new(((self.bits >> 3) & 0x01) != 0) } #[doc = "Bit 4 - Hibernate Request Value Set"] #[inline(always)] pub fn hib(&self) -> HIB_R { 
HIB_R::new(((self.bits >> 4) & 0x01) != 0) } #[doc = "Bit 6 - fRTC Clock Selection"] #[inline(always)] pub fn rcs(&self) -> RCS_R { RCS_R::new(((self.bits >> 6) & 0x01) != 0) } #[doc = "Bit 7 - fSTDBY Clock Selection"] #[inline(always)] pub fn stdbysel(&self) -> STDBYSEL_R { STDBYSEL_R::new(((self.bits >> 7) & 0x01) != 0) } #[doc = "Bit 8 - Wake-Up from Hibernate Trigger Input Selection"] #[inline(always)] pub fn wkupsel(&self) -> WKUPSEL_R { WKUPSEL_R::new(((self.bits >> 8) & 0x01) != 0) } #[doc = "Bit 10 - General Purpose Input 0 Selection"] #[inline(always)] pub fn gpi0sel(&self) -> GPI0SEL_R { GPI0SEL_R::new(((self.bits >> 10) & 0x01) != 0) } #[doc = "Bit 12 - HIBIO0 Polarity Set"] #[inline(always)] pub fn hibio0pol(&self) -> HIBIO0POL_R { HIBIO0POL_R::new(((self.bits >> 12) & 0x01) != 0) } #[doc = "Bit 13 - HIBIO1 Polarity Set"] #[inline(always)] pub fn hibio1pol(&self) -> HIBIO1POL_R { HIBIO1POL_R::new(((self.bits >> 13) & 0x01) != 0) } #[doc = "Bits 16:19 - HIB_IO_0 Pin I/O Control (default HIBOUT)"] #[inline(always)] pub fn hibio0sel(&self) -> HIBIO0SEL_R { HIBIO0SEL_R::new(((self.bits >> 16) & 0x0f) as u8) } #[doc = "Bits 20:23 - HIB_IO_1 Pin I/O Control (Default WKUP)"] #[inline(always)] pub fn hibio1sel(&self) -> HIBIO1SEL_R { HIBIO1SEL_R::new(((self.bits >> 20) & 0x0f) as u8) } } impl W { #[doc = "Bit 0 - Wake-Up on Pin Event Positive Edge Enable"] #[inline(always)] pub fn wkpep(&mut self) -> WKPEP_W { WKPEP_W { w: self } } #[doc = "Bit 1 - Wake-up on Pin Event Negative Edge Enable"] #[inline(always)] pub fn wkpen(&mut self) -> WKPEN_W { WKPEN_W { w: self } } #[doc = "Bit 2 - Wake-up on RTC Event Enable"] #[inline(always)] pub fn rtce(&mut self) -> RTCE_W { RTCE_W { w: self } } #[doc = "Bit 3 - ULP WDG Alarm Enable"] #[inline(always)] pub fn ulpwdgen(&mut self) -> ULPWDGEN_W { ULPWDGEN_W { w: self } } #[doc = "Bit 4 - Hibernate Request Value Set"] #[inline(always)] pub fn hib(&mut self) -> HIB_W { HIB_W { w: self } } #[doc = "Bit 6 - fRTC Clock 
Selection"] #[inline(always)] pub fn rcs(&mut self) -> RCS_W { RCS_W { w: self } } #[doc = "Bit 7 - fSTDBY Clock Selection"] #[inline(always)] pub fn stdbysel(&mut self) -> STDBYSEL_W { STDBYSEL_W { w: self } } #[doc = "Bit 8 - Wake-Up from Hibernate Trigger Input Selection"] #[inline(always)] pub fn wkupsel(&mut self) -> WKUPSEL_W { WKUPSEL_W { w: self } } #[doc = "Bit 10 - General Purpose Input 0 Selection"] #[inline(always)] pub fn gpi0sel(&mut self) -> GPI0SEL_W { GPI0SEL_W { w: self } } #[doc = "Bit 12 - HIBIO0 Polarity Set"] #[inline(always)] pub fn hibio0pol(&mut self) -> HIBIO0POL_W { HIBIO0POL_W { w: self } } #[doc = "Bit 13 - HIBIO1 Polarity Set"] #[inline(always)] pub fn hibio1pol(&mut self) -> HIBIO1POL_W { HIBIO1POL_W { w: self } } #[doc = "Bits 16:19 - HIB_IO_0 Pin I/O Control (default HIBOUT)"] #[inline(always)] pub fn hibio0sel(&mut self) -> HIBIO0SEL_W { HIBIO0SEL_W { w: self } } #[doc = "Bits 20:23 - HIB_IO_1 Pin I/O Control (Default WKUP)"] #[inline(always)] pub fn hibio1sel(&mut self) -> HIBIO1SEL_W { HIBIO1SEL_W { w: self } } #[doc = "Writes raw bits to the register."] #[inline(always)] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.0.bits(bits); self } } #[doc = "Hibernate Domain Control Register\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). 
See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [hdcr](index.html) module"] pub struct HDCR_SPEC; impl crate::RegisterSpec for HDCR_SPEC { type Ux = u32; } #[doc = "`read()` method returns [hdcr::R](R) reader structure"] impl crate::Readable for HDCR_SPEC { type Reader = R; } #[doc = "`write(|w| ..)` method takes [hdcr::W](W) writer structure"] impl crate::Writable for HDCR_SPEC { type Writer = W; } #[doc = "`reset()` method sets HDCR to value 0x000c_2000"] impl crate::Resettable for HDCR_SPEC { #[inline(always)] fn reset_value() -> Self::Ux { 0x000c_2000 } }
30.114441
418
0.564694
ccaa4460c5b8a181efa6802582ed3665c64ee76f
7,244
//! Debug utils for WebAssembly using Cranelift. #![allow(clippy::cast_ptr_alignment)] use alloc::string::String; use alloc::vec::Vec; use cranelift_codegen::isa::TargetFrontendConfig; use faerie::{Artifact, Decl}; use failure::Error; #[cfg(not(feature = "std"))] use hashbrown::{hash_map, HashMap, HashSet}; use more_asserts::assert_gt; #[cfg(feature = "std")] use std::collections::{hash_map, HashMap, HashSet}; use target_lexicon::{BinaryFormat, Triple}; use wasmtime_environ::{ModuleAddressMap, ModuleVmctxInfo, ValueLabelsRanges}; pub use crate::read_debuginfo::{read_debuginfo, DebugInfoData, WasmFileInfo}; pub use crate::transform::transform_dwarf; pub use crate::write_debuginfo::{emit_dwarf, ResolvedSymbol, SymbolResolver}; mod gc; mod read_debuginfo; mod transform; mod write_debuginfo; extern crate alloc; struct FunctionRelocResolver {} impl SymbolResolver for FunctionRelocResolver { fn resolve_symbol(&self, symbol: usize, addend: i64) -> ResolvedSymbol { let name = format!("_wasm_function_{}", symbol); ResolvedSymbol::Reloc { name, addend } } } pub fn emit_debugsections( obj: &mut Artifact, vmctx_info: &ModuleVmctxInfo, target_config: &TargetFrontendConfig, debuginfo_data: &DebugInfoData, at: &ModuleAddressMap, ranges: &ValueLabelsRanges, ) -> Result<(), Error> { let resolver = FunctionRelocResolver {}; let dwarf = transform_dwarf(target_config, debuginfo_data, at, vmctx_info, ranges)?; emit_dwarf(obj, dwarf, &resolver)?; Ok(()) } struct ImageRelocResolver<'a> { func_offsets: &'a Vec<u64>, } impl<'a> SymbolResolver for ImageRelocResolver<'a> { fn resolve_symbol(&self, symbol: usize, addend: i64) -> ResolvedSymbol { let func_start = self.func_offsets[symbol]; ResolvedSymbol::PhysicalAddress(func_start + addend as u64) } } pub fn emit_debugsections_image( triple: Triple, target_config: &TargetFrontendConfig, debuginfo_data: &DebugInfoData, vmctx_info: &ModuleVmctxInfo, at: &ModuleAddressMap, ranges: &ValueLabelsRanges, funcs: &[(*const u8, usize)], ) -> 
Result<Vec<u8>, Error> { let func_offsets = &funcs .iter() .map(|(ptr, _)| *ptr as u64) .collect::<Vec<u64>>(); let mut obj = Artifact::new(triple, String::from("module")); let resolver = ImageRelocResolver { func_offsets }; let dwarf = transform_dwarf(target_config, debuginfo_data, at, vmctx_info, ranges)?; // Assuming all functions in the same code block, looking min/max of its range. assert_gt!(funcs.len(), 0); let mut segment_body: (usize, usize) = (!0, 0); for (body_ptr, body_len) in funcs { segment_body.0 = ::core::cmp::min(segment_body.0, *body_ptr as usize); segment_body.1 = ::core::cmp::max(segment_body.1, *body_ptr as usize + body_len); } let segment_body = (segment_body.0 as *const u8, segment_body.1 - segment_body.0); let body = unsafe { ::core::slice::from_raw_parts(segment_body.0, segment_body.1) }; obj.declare_with("all", Decl::function(), body.to_vec())?; emit_dwarf(&mut obj, dwarf, &resolver)?; // LLDB is too "magical" about mach-o, generating elf let mut bytes = obj.emit_as(BinaryFormat::Elf)?; // elf is still missing details... 
convert_faerie_elf_to_loadable_file(&mut bytes, segment_body.0); // let mut file = ::std::fs::File::create(::std::path::Path::new("test.o")).expect("file"); // ::std::io::Write::write(&mut file, &bytes).expect("write"); Ok(bytes) } fn convert_faerie_elf_to_loadable_file(bytes: &mut Vec<u8>, code_ptr: *const u8) { use std::ffi::CStr; use std::os::raw::c_char; assert!( bytes[0x4] == 2 && bytes[0x5] == 1, "bits and endianess in .ELF" ); let e_phoff = unsafe { *(bytes.as_ptr().offset(0x20) as *const u64) }; let e_phnum = unsafe { *(bytes.as_ptr().offset(0x38) as *const u16) }; assert!( e_phoff == 0 && e_phnum == 0, "program header table is empty" ); let e_phentsize = unsafe { *(bytes.as_ptr().offset(0x36) as *const u16) }; assert_eq!(e_phentsize, 0x38, "size of ph"); let e_shentsize = unsafe { *(bytes.as_ptr().offset(0x3A) as *const u16) }; assert_eq!(e_shentsize, 0x40, "size of sh"); let e_shoff = unsafe { *(bytes.as_ptr().offset(0x28) as *const u64) }; let e_shnum = unsafe { *(bytes.as_ptr().offset(0x3C) as *const u16) }; let mut shstrtab_off = 0; let mut segment = None; for i in 0..e_shnum { let off = e_shoff as isize + i as isize * e_shentsize as isize; let sh_type = unsafe { *(bytes.as_ptr().offset(off + 0x4) as *const u32) }; if sh_type == /* SHT_SYMTAB */ 3 { shstrtab_off = unsafe { *(bytes.as_ptr().offset(off + 0x18) as *const u64) }; } if sh_type != /* SHT_PROGBITS */ 1 { continue; } // It is a SHT_PROGBITS, but we need to check sh_name to ensure it is our function let sh_name = unsafe { let sh_name_off = *(bytes.as_ptr().offset(off) as *const u32); CStr::from_ptr( bytes .as_ptr() .offset((shstrtab_off + sh_name_off as u64) as isize) as *const c_char, ) .to_str() .expect("name") }; if sh_name != ".text.all" { continue; } assert!(segment.is_none()); // Functions was added at emit_debugsections_image as .text.all. // Patch vaddr, and save file location and its size. 
unsafe { *(bytes.as_ptr().offset(off + 0x10) as *mut u64) = code_ptr as u64; }; let sh_offset = unsafe { *(bytes.as_ptr().offset(off + 0x18) as *const u64) }; let sh_size = unsafe { *(bytes.as_ptr().offset(off + 0x20) as *const u64) }; segment = Some((sh_offset, code_ptr, sh_size)); // Fix name too: cut it to just ".text" unsafe { let sh_name_off = *(bytes.as_ptr().offset(off) as *const u32); bytes[(shstrtab_off + sh_name_off as u64) as usize + ".text".len()] = 0; } } // LLDB wants segment with virtual address set, placing them at the end of ELF. let ph_off = bytes.len(); if let Some((sh_offset, v_offset, sh_size)) = segment { let segment = vec![0; 0x38]; unsafe { *(segment.as_ptr() as *mut u32) = /* PT_LOAD */ 0x1; *(segment.as_ptr().offset(0x8) as *mut u64) = sh_offset; *(segment.as_ptr().offset(0x10) as *mut u64) = v_offset as u64; *(segment.as_ptr().offset(0x18) as *mut u64) = v_offset as u64; *(segment.as_ptr().offset(0x20) as *mut u64) = sh_size; *(segment.as_ptr().offset(0x28) as *mut u64) = sh_size; } bytes.extend_from_slice(&segment); } else { unreachable!(); } // It is somewhat loadable ELF file at this moment. // Update e_flags, e_phoff and e_phnum. unsafe { *(bytes.as_ptr().offset(0x10) as *mut u16) = /* ET_DYN */ 3; *(bytes.as_ptr().offset(0x20) as *mut u64) = ph_off as u64; *(bytes.as_ptr().offset(0x38) as *mut u16) = 1 as u16; } }
37.533679
95
0.621618
7ac9b72883c8043199c1582c84b397865300de5f
1,410
// Copyright (c) The Diem Core Contributors // SPDX-License-Identifier: Apache-2.0 //! The purpose of this crate is to offer a single source of truth for the definitions of shared //! constants within the codebase. This is useful because many different components within //! Aptos often require access to global constant definitions (e.g., Safety Rules, //! Key Manager, and Secure Storage). To avoid duplicating these definitions across crates //! (and better allow these constants to be updated in a single location), we define them here. #![forbid(unsafe_code)] /// Definitions of global cryptographic keys (e.g., as held in secure storage) pub const APTOS_ROOT_KEY: &str = "aptos_root"; pub const CONSENSUS_KEY: &str = "consensus"; pub const EXECUTION_KEY: &str = "execution"; pub const FULLNODE_NETWORK_KEY: &str = "fullnode_network"; pub const TREASURY_COMPLIANCE_KEY: &str = "treasury_compliance"; pub const OPERATOR_ACCOUNT: &str = "operator_account"; pub const OPERATOR_KEY: &str = "operator"; pub const OWNER_ACCOUNT: &str = "owner_account"; pub const OWNER_KEY: &str = "owner"; pub const VALIDATOR_NETWORK_KEY: &str = "validator_network"; /// Definitions of global data items (e.g., as held in secure storage) pub const SAFETY_DATA: &str = "safety_data"; pub const WAYPOINT: &str = "waypoint"; pub const GENESIS_WAYPOINT: &str = "genesis-waypoint"; pub const MOVE_MODULES: &str = "move-modules";
50.357143
96
0.756738
620b2f225459fa5b32a16273de1bdd2539c67212
12,087
use std::io::BufRead; use bitvec::prelude::*; const DATA_FILE: &str = "16.txt"; pub fn data<P: AsRef<std::path::Path>>(data_dir: P) -> impl Iterator<Item = String> { let data_file = data_dir.as_ref().join(DATA_FILE); let data = std::io::BufReader::new(std::fs::File::open(&data_file).unwrap()); data.lines().map(|s_res| s_res.unwrap()) } pub fn parse<I: Iterator<Item = String>>(mut data: I) -> Vec<u8> { let data = data.next().unwrap(); data.as_bytes() .chunks(2) .map(|b| std::str::from_utf8(b).unwrap()) .map(|s| u8::from_str_radix(s, 16).unwrap()) .collect() } #[derive(Debug, Clone)] struct Packet { version: u8, symbol: Symbol, } impl Packet { fn parse_group(bits_in: &BitSlice<Msb0, u8>) -> (Vec<Packet>, usize) { let mut out: Vec<Packet> = Vec::with_capacity(2); let mut offset = 0; while offset < bits_in.len() && bits_in[offset..].any() { let (packet, size) = Packet::parse(&bits_in[offset..]); out.push(packet); //println!("Parsed packet at offset {}, moving to next packet at {}", offset, offset + size); offset += size; } (out, offset) } fn parse_group_n(bits_in: &BitSlice<Msb0, u8>, max_packets: usize) -> (Vec<Packet>, usize) { let mut out: Vec<Packet> = Vec::with_capacity(max_packets); let mut offset = 0; while offset < bits_in.len() && out.len() < max_packets { let (packet, size) = Packet::parse(&bits_in[offset..]); out.push(packet); offset += size; } (out, offset) } fn parse(bits_in: &BitSlice<Msb0, u8>) -> (Packet, usize) { //println!("Parsing packet from bits {:?}", bits_in); let mut count = 0; let version = bits_in[count..count + 3].load_be::<u8>(); count += 3; //println!("Found packet version {}", version); let (symbol, size) = Symbol::parse(&bits_in[count..]); count += size; (Packet { version, symbol }, count) } fn eval(&self) -> u64 { self.symbol.eval() } } #[derive(Debug, Clone)] enum Symbol { Literal(u64), OperatorSum(Vec<Packet>), OperatorProduct(Vec<Packet>), OperatorMin(Vec<Packet>), OperatorMax(Vec<Packet>), OperatorGt(Vec<Packet>), 
OperatorLt(Vec<Packet>), OperatorEq(Vec<Packet>), } impl Symbol { fn parse(bits_in: &BitSlice<Msb0, u8>) -> (Self, usize) { let mut count = 0; let (type_id_bits, bits_in) = bits_in.split_at(3); count += 3; let type_id = type_id_bits.load_be::<u8>(); //println!("\tFound packet type {:#x}", type_id); match type_id { 0x4 => { let (sym, len) = Symbol::parse_literal(bits_in); (sym, count + len) } op => { let (sym, len) = Symbol::parse_op(bits_in, op); (sym, count + len) } } } fn parse_literal(bits_in: &BitSlice<Msb0, u8>) -> (Self, usize) { //println!("\tparse_literal({:?})", bits_in); let (literal, count) = bits_in .chunks_exact(5) //.inspect(|b| println!("Parsing literal chunk {:b}", b)) .map(|b| b.load_be::<u8>()) //.inspect(|b| println!("Chunk became byte {:05b}", b)) .enumerate() .scan((true, 0u64), |(do_next, acc), (count, x)| { if *do_next { *do_next = (x & 0b010000) > 0; *acc = (*acc << 4) | (x & 0b01111) as u64; Some((*acc, (count + 1) * 5)) } else { None } }) //.inspect(|l| println!("Literal chunk became {:04b} [{} bits long]", l.0, l.1)) .last() .unwrap(); //println!("\tFound literal {:016b}, consuming {} bits", literal, count); (Symbol::Literal(literal), count) } fn parse_op(bits_in: &BitSlice<Msb0, u8>, op: u8) -> (Self, usize) { let mut count = 0; let length_type_id = bits_in[0]; count += 1; let (out, count) = if length_type_id { let packet_count_bits = &bits_in[count..count + 11]; count += 11; let packet_count = packet_count_bits.load_be::<usize>(); //println!("\tPacket count: {} packets", packet_count); let (out, more_count) = Packet::parse_group_n(&bits_in[count..], packet_count); count += more_count; //println!("\tFound op {} with {} children, consuming {} bits", op, out.len(), count); (out, count) } else { let payload_length_bits = &bits_in[count..count + 15]; //println!("\tPayload length bits: {:b}", payload_length_bits); count += 15; let payload_length = payload_length_bits.load_be::<usize>(); //println!("\tPayload length: {} bits", payload_length); 
let (out, _more_count) = Packet::parse_group(&bits_in[count..count + payload_length]); // TODO: this is probably off by one right now count += payload_length; // TODO: assert relationship between more_count and payload_length //println!("\tFound op {} with {} children, consuming {} bits", op, out.len(), count); (out, count) }; match op { 0 => (Symbol::OperatorSum(out), count), 1 => (Symbol::OperatorProduct(out), count), 2 => (Symbol::OperatorMin(out), count), 3 => (Symbol::OperatorMax(out), count), 5 => (Symbol::OperatorGt(out), count), 6 => (Symbol::OperatorLt(out), count), 7 => (Symbol::OperatorEq(out), count), _ => unreachable!(), } } fn children(&self) -> Vec<Packet> { match self { Symbol::Literal(_) => Vec::new(), Symbol::OperatorSum(children) => children.clone(), Symbol::OperatorProduct(children) => children.clone(), Symbol::OperatorMin(children) => children.clone(), Symbol::OperatorMax(children) => children.clone(), Symbol::OperatorGt(children) => children.clone(), Symbol::OperatorLt(children) => children.clone(), Symbol::OperatorEq(children) => children.clone(), } } fn eval(&self) -> u64 { let val = match self { Symbol::Literal(u) => *u as u64, Symbol::OperatorSum(children) => children.iter().map(|p| p.eval()).sum(), Symbol::OperatorProduct(children) => children .iter() .map(|p| p.eval()) .reduce(|accum, item| item * accum) .unwrap(), Symbol::OperatorMin(children) => children.iter().map(|p| p.eval()).min().unwrap(), Symbol::OperatorMax(children) => children.iter().map(|p| p.eval()).max().unwrap(), Symbol::OperatorGt(children) => (children[0].eval() > children[1].eval()) as u64, Symbol::OperatorLt(children) => (children[0].eval() < children[1].eval()) as u64, Symbol::OperatorEq(children) => (children[0].eval() == children[1].eval()) as u64, }; //println!("eval({:?}) => {}", self, val); val } } fn version_sum_packet(packet: &Packet) -> u32 { let mut sum = packet.version as u32; for child in packet.symbol.children() { sum += version_sum_packet(&child) as u32; 
} sum } pub fn star1(data: &[u8]) -> u32 { //println!("Scoring {:x?}", data); let bits = BitSlice::<Msb0, u8>::from_slice(data).unwrap(); let (packets, _) = Packet::parse_group(bits); let mut ver_sum: u32 = 0; for packet in packets { //println!("Packet: {:?}", packet); ver_sum += version_sum_packet(&packet); } ver_sum } pub fn star2(data: &[u8]) -> u64 { //println!("Scoring {:x?}", data); let bits = BitSlice::<Msb0, u8>::from_slice(data).unwrap(); let (packets, _) = Packet::parse_group(bits); if let Some(packet) = packets.into_iter().next() { packet.eval() } else { unreachable!(); } } #[cfg(test)] mod tests { use super::*; const SAMPLE_DATA_1: ([&'static str; 1], u32) = (["D2FE28"], 6); const SAMPLE_DATA_2: ([&'static str; 1], u32) = (["38006F45291200"], 9); const SAMPLE_DATA_3: ([&'static str; 1], u32) = (["EE00D40C823060"], 14); const SAMPLE_DATA_4: ([&'static str; 1], u32) = (["8A004A801A8002F478"], 16); const SAMPLE_DATA_5: ([&'static str; 1], u32) = (["620080001611562C8802118E34"], 12); const SAMPLE_DATA_6: ([&'static str; 1], u32) = (["C0015000016115A2E0802F182340"], 23); const SAMPLE_DATA_7: ([&'static str; 1], u32) = (["A0016C880162017C3686B18A3D4780"], 31); #[test] fn test_star1() { //println!("\n==== Test 1 ===="); let data = parse(SAMPLE_DATA_1.0.iter().map(|r| r.to_string())); assert_eq!(star1(&data), SAMPLE_DATA_1.1); //println!("\n==== Test 2 ===="); let data = parse(SAMPLE_DATA_2.0.iter().map(|r| r.to_string())); assert_eq!(star1(&data), SAMPLE_DATA_2.1); //println!("\n==== Test 3 ===="); let data = parse(SAMPLE_DATA_3.0.iter().map(|r| r.to_string())); assert_eq!(star1(&data), SAMPLE_DATA_3.1); //println!("\n==== Test 4 ===="); let data = parse(SAMPLE_DATA_4.0.iter().map(|r| r.to_string())); assert_eq!(star1(&data), SAMPLE_DATA_4.1); //println!("\n==== Test 5 ===="); let data = parse(SAMPLE_DATA_5.0.iter().map(|r| r.to_string())); assert_eq!(star1(&data), SAMPLE_DATA_5.1); //println!("\n==== Test 6 ===="); let data = 
parse(SAMPLE_DATA_6.0.iter().map(|r| r.to_string())); assert_eq!(star1(&data), SAMPLE_DATA_6.1); //println!("\n==== Test 7 ===="); let data = parse(SAMPLE_DATA_7.0.iter().map(|r| r.to_string())); assert_eq!(star1(&data), SAMPLE_DATA_7.1); } const SAMPLE_DATA_A: ([&'static str; 1], u64) = (["C200B40A82"], 3); const SAMPLE_DATA_B: ([&'static str; 1], u64) = (["04005AC33890"], 54); const SAMPLE_DATA_C: ([&'static str; 1], u64) = (["880086C3E88112"], 7); const SAMPLE_DATA_D: ([&'static str; 1], u64) = (["CE00C43D881120"], 9); const SAMPLE_DATA_E: ([&'static str; 1], u64) = (["D8005AC2A8F0"], 1); const SAMPLE_DATA_F: ([&'static str; 1], u64) = (["F600BC2D8F"], 0); const SAMPLE_DATA_G: ([&'static str; 1], u64) = (["9C005AC2F8F0"], 0); const SAMPLE_DATA_H: ([&'static str; 1], u64) = (["9C0141080250320F1802104A08"], 1); #[test] fn test_star2() { //println!("\n==== Test A ===="); let data = parse(SAMPLE_DATA_A.0.iter().map(|r| r.to_string())); assert_eq!(star2(&data), SAMPLE_DATA_A.1); //println!("\n==== Test B ===="); let data = parse(SAMPLE_DATA_B.0.iter().map(|r| r.to_string())); assert_eq!(star2(&data), SAMPLE_DATA_B.1); //println!("\n==== Test C ===="); let data = parse(SAMPLE_DATA_C.0.iter().map(|r| r.to_string())); assert_eq!(star2(&data), SAMPLE_DATA_C.1); //println!("\n==== Test D ===="); let data = parse(SAMPLE_DATA_D.0.iter().map(|r| r.to_string())); assert_eq!(star2(&data), SAMPLE_DATA_D.1); //println!("\n==== Test E ===="); let data = parse(SAMPLE_DATA_E.0.iter().map(|r| r.to_string())); assert_eq!(star2(&data), SAMPLE_DATA_E.1); //println!("\n==== Test F ===="); let data = parse(SAMPLE_DATA_F.0.iter().map(|r| r.to_string())); assert_eq!(star2(&data), SAMPLE_DATA_F.1); //println!("\n==== Test G ===="); let data = parse(SAMPLE_DATA_G.0.iter().map(|r| r.to_string())); assert_eq!(star2(&data), SAMPLE_DATA_G.1); //println!("\n==== Test H ===="); let data = parse(SAMPLE_DATA_H.0.iter().map(|r| r.to_string())); assert_eq!(star2(&data), SAMPLE_DATA_H.1); } }
36.406627
105
0.535617
67413e51cc70f2ac8cc9366735c16d4e147611a4
2,835
use std::os::raw::{c_char, c_void}; use wayland_sys::common::*; const NULLPTR: *const c_void = 0 as *const c_void; static mut types_null: [*const wl_interface; 7] = [ NULLPTR as *const wl_interface, NULLPTR as *const wl_interface, NULLPTR as *const wl_interface, NULLPTR as *const wl_interface, NULLPTR as *const wl_interface, NULLPTR as *const wl_interface, NULLPTR as *const wl_interface, ]; static mut wp_presentation_requests_feedback_types: [*const wl_interface; 2] = [ unsafe { &wl_surface_interface as *const wl_interface }, unsafe { &wp_presentation_feedback_interface as *const wl_interface }, ]; pub static mut wp_presentation_requests: [wl_message; 2] = [ wl_message { name: b"destroy\0" as *const u8 as *const c_char, signature: b"\0" as *const u8 as *const c_char, types: unsafe { &types_null as *const _ }, }, wl_message { name: b"feedback\0" as *const u8 as *const c_char, signature: b"on\0" as *const u8 as *const c_char, types: unsafe { &wp_presentation_requests_feedback_types as *const _ }, }, ]; pub static mut wp_presentation_events: [wl_message; 1] = [wl_message { name: b"clock_id\0" as *const u8 as *const c_char, signature: b"u\0" as *const u8 as *const c_char, types: unsafe { &types_null as *const _ }, }]; pub static mut wp_presentation_interface: wl_interface = wl_interface { name: b"wp_presentation\0" as *const u8 as *const c_char, version: 1, request_count: 2, requests: unsafe { &wp_presentation_requests as *const _ }, event_count: 1, events: unsafe { &wp_presentation_events as *const _ }, }; static mut wp_presentation_feedback_events_sync_output_types: [*const wl_interface; 1] = [unsafe { &wl_output_interface as *const wl_interface }]; pub static mut wp_presentation_feedback_events: [wl_message; 3] = [ wl_message { name: b"sync_output\0" as *const u8 as *const c_char, signature: b"o\0" as *const u8 as *const c_char, types: unsafe { &wp_presentation_feedback_events_sync_output_types as *const _ }, }, wl_message { name: b"presented\0" as *const u8 as 
*const c_char, signature: b"uuuuuuu\0" as *const u8 as *const c_char, types: unsafe { &types_null as *const _ }, }, wl_message { name: b"discarded\0" as *const u8 as *const c_char, signature: b"\0" as *const u8 as *const c_char, types: unsafe { &types_null as *const _ }, }, ]; pub static mut wp_presentation_feedback_interface: wl_interface = wl_interface { name: b"wp_presentation_feedback\0" as *const u8 as *const c_char, version: 1, request_count: 0, requests: NULLPTR as *const wl_message, event_count: 3, events: unsafe { &wp_presentation_feedback_events as *const _ }, };
41.086957
89
0.681129
724a312ea79e0a7427df2856805485c8cc146802
6,173
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use core::cell::*; use core::default::Default; use std::mem::drop; #[test] fn smoketest_cell() { let x = Cell::new(10); assert!(x == Cell::new(10)); assert!(x.get() == 10); x.set(20); assert!(x == Cell::new(20)); assert!(x.get() == 20); let y = Cell::new((30, 40)); assert!(y == Cell::new((30, 40))); assert!(y.get() == (30, 40)); } #[test] fn cell_has_sensible_show() { let x = Cell::new("foo bar"); assert!(format!("{:?}", x).contains(x.get())); x.set("baz qux"); assert!(format!("{:?}", x).contains(x.get())); } #[test] fn ref_and_refmut_have_sensible_show() { let refcell = RefCell::new("foo"); let refcell_refmut = refcell.borrow_mut(); assert!(format!("{:?}", refcell_refmut).contains("foo")); drop(refcell_refmut); let refcell_ref = refcell.borrow(); assert!(format!("{:?}", refcell_ref).contains("foo")); drop(refcell_ref); } #[test] fn double_imm_borrow() { let x = RefCell::new(0); let _b1 = x.borrow(); x.borrow(); } #[test] fn no_mut_then_imm_borrow() { let x = RefCell::new(0); let _b1 = x.borrow_mut(); assert!(x.try_borrow().is_err()); } #[test] fn no_imm_then_borrow_mut() { let x = RefCell::new(0); let _b1 = x.borrow(); assert!(x.try_borrow_mut().is_err()); } #[test] fn no_double_borrow_mut() { let x = RefCell::new(0); assert!(x.try_borrow().is_ok()); let _b1 = x.borrow_mut(); assert!(x.try_borrow().is_err()); } #[test] fn imm_release_borrow_mut() { let x = RefCell::new(0); { let _b1 = x.borrow(); } x.borrow_mut(); } #[test] fn mut_release_borrow_mut() { let x = RefCell::new(0); { let _b1 = x.borrow_mut(); } 
x.borrow(); } #[test] fn double_borrow_single_release_no_borrow_mut() { let x = RefCell::new(0); let _b1 = x.borrow(); { let _b2 = x.borrow(); } assert!(x.try_borrow().is_ok()); assert!(x.try_borrow_mut().is_err()); } #[test] #[should_panic] fn discard_doesnt_unborrow() { let x = RefCell::new(0); let _b = x.borrow(); let _ = _b; let _b = x.borrow_mut(); } #[test] fn ref_clone_updates_flag() { let x = RefCell::new(0); { let b1 = x.borrow(); assert!(x.try_borrow().is_ok()); assert!(x.try_borrow_mut().is_err()); { let _b2 = Ref::clone(&b1); assert!(x.try_borrow().is_ok()); assert!(x.try_borrow_mut().is_err()); } assert!(x.try_borrow().is_ok()); assert!(x.try_borrow_mut().is_err()); } assert!(x.try_borrow().is_ok()); assert!(x.try_borrow_mut().is_ok()); } #[test] fn ref_map_does_not_update_flag() { let x = RefCell::new(Some(5)); { let b1: Ref<Option<u32>> = x.borrow(); assert!(x.try_borrow().is_ok()); assert!(x.try_borrow_mut().is_err()); { let b2: Ref<u32> = Ref::map(b1, |o| o.as_ref().unwrap()); assert_eq!(*b2, 5); assert!(x.try_borrow().is_ok()); assert!(x.try_borrow_mut().is_err()); } assert!(x.try_borrow().is_ok()); assert!(x.try_borrow_mut().is_ok()); } assert!(x.try_borrow().is_ok()); assert!(x.try_borrow_mut().is_ok()); } #[test] fn ref_map_accessor() { struct X(RefCell<(u32, char)>); impl X { fn accessor(&self) -> Ref<u32> { Ref::map(self.0.borrow(), |tuple| &tuple.0) } } let x = X(RefCell::new((7, 'z'))); let d: Ref<u32> = x.accessor(); assert_eq!(*d, 7); } #[test] fn ref_mut_map_accessor() { struct X(RefCell<(u32, char)>); impl X { fn accessor(&self) -> RefMut<u32> { RefMut::map(self.0.borrow_mut(), |tuple| &mut tuple.0) } } let x = X(RefCell::new((7, 'z'))); { let mut d: RefMut<u32> = x.accessor(); assert_eq!(*d, 7); *d += 1; } assert_eq!(*x.0.borrow(), (8, 'z')); } #[test] fn as_ptr() { let c1: Cell<usize> = Cell::new(0); c1.set(1); assert_eq!(1, unsafe { *c1.as_ptr() }); let c2: Cell<usize> = Cell::new(0); unsafe { *c2.as_ptr() = 1; } assert_eq!(1, 
c2.get()); let r1: RefCell<usize> = RefCell::new(0); *r1.borrow_mut() = 1; assert_eq!(1, unsafe { *r1.as_ptr() }); let r2: RefCell<usize> = RefCell::new(0); unsafe { *r2.as_ptr() = 1; } assert_eq!(1, *r2.borrow()); } #[test] fn cell_default() { let cell: Cell<u32> = Default::default(); assert_eq!(0, cell.get()); } #[test] fn refcell_default() { let cell: RefCell<u64> = Default::default(); assert_eq!(0, *cell.borrow()); } #[test] fn unsafe_cell_unsized() { let cell: &UnsafeCell<[i32]> = &UnsafeCell::new([1, 2, 3]); { let val: &mut [i32] = unsafe { &mut *cell.get() }; val[0] = 4; val[2] = 5; } let comp: &mut [i32] = &mut [4, 2, 5]; assert_eq!(unsafe { &mut *cell.get() }, comp); } #[test] fn refcell_unsized() { let cell: &RefCell<[i32]> = &RefCell::new([1, 2, 3]); { let b = &mut *cell.borrow_mut(); b[0] = 4; b[2] = 5; } let comp: &mut [i32] = &mut [4, 2, 5]; assert_eq!(&*cell.borrow(), comp); } #[test] fn refcell_ref_coercion() { let cell: RefCell<[i32; 3]> = RefCell::new([1, 2, 3]); { let mut cellref: RefMut<[i32; 3]> = cell.borrow_mut(); cellref[0] = 4; let mut coerced: RefMut<[i32]> = cellref; coerced[2] = 5; } { let comp: &mut [i32] = &mut [4, 2, 5]; let cellref: Ref<[i32; 3]> = cell.borrow(); assert_eq!(&*cellref, comp); let coerced: Ref<[i32]> = cellref; assert_eq!(&*coerced, comp); } }
23.833977
69
0.54463
672903bd29041073aa0cf5711801f2925120115d
35,911
use nom::number::streaming as num; use nom::{error::ParseError, IResult}; use std::mem::MaybeUninit; #[derive(Debug, PartialEq)] #[non_exhaustive] pub enum DecodeError<'a> { Nom(nom::error::Error<&'a [u8]>), Malformed, } pub type DResult<'a, T> = IResult<&'a [u8], T, DecodeError<'a>>; pub trait Decode: Sized { fn decode(buf: &[u8]) -> DResult<'_, Self>; fn decode_many<const LEN: usize>(mut buf: &[u8]) -> DResult<'_, [Self; LEN]> { unsafe { let mut ret: [MaybeUninit<Self>; LEN] = MaybeUninit::uninit().assume_init(); for (i, elem) in ret.iter_mut().enumerate() { let (rem, val) = match Self::decode(buf) { Ok(res) => res, Err(e) => { return match e { nom::Err::Incomplete(needed) => { if i == LEN - 1 { Err(nom::Err::Incomplete(needed)) } else { Err(nom::Err::Incomplete(nom::Needed::Unknown)) } } _ => Err(e), }; } }; buf = rem; *elem = MaybeUninit::new(val); } Ok((buf, transmute(ret))) } } } pub trait DecodeBe: Sized { fn decode_be(buf: &[u8]) -> DResult<'_, Self>; fn decode_many_be<const LEN: usize>(mut buf: &[u8]) -> DResult<'_, [Self; LEN]> { unsafe { let mut ret: [MaybeUninit<Self>; LEN] = MaybeUninit::uninit().assume_init(); for (i, elem) in ret.iter_mut().enumerate() { let (rem, val) = match Self::decode_be(buf) { Ok(res) => res, Err(e) => { return match e { nom::Err::Incomplete(needed) => { if i == LEN - 1 { Err(nom::Err::Incomplete(needed)) } else { Err(nom::Err::Incomplete(nom::Needed::Unknown)) } } _ => Err(e), }; } }; buf = rem; *elem = MaybeUninit::new(val); } Ok((buf, transmute(ret))) } } } pub trait DecodeLe: Sized { fn decode_le(buf: &[u8]) -> DResult<'_, Self>; fn decode_many_le<const LEN: usize>(mut buf: &[u8]) -> DResult<'_, [Self; LEN]> { unsafe { let mut ret: [MaybeUninit<Self>; LEN] = MaybeUninit::uninit().assume_init(); for (i, elem) in ret.iter_mut().enumerate() { let (rem, val) = match Self::decode_le(buf) { Ok(res) => res, Err(e) => { return match e { nom::Err::Incomplete(needed) => { if i == LEN - 1 { Err(nom::Err::Incomplete(needed)) } else { 
Err(nom::Err::Incomplete(nom::Needed::Unknown)) } } _ => Err(e), }; } }; buf = rem; *elem = MaybeUninit::new(val); } Ok((buf, transmute(ret))) } } } pub fn decode<D: Decode>(buf: &[u8]) -> DResult<'_, D> { D::decode(buf) } pub fn decode_be<D: DecodeBe>(buf: &[u8]) -> DResult<'_, D> { D::decode_be(buf) } pub fn decode_le<D: DecodeLe>(buf: &[u8]) -> DResult<'_, D> { D::decode_le(buf) } impl<'a> ParseError<&'a [u8]> for DecodeError<'a> { fn from_error_kind(input: &'a [u8], kind: nom::error::ErrorKind) -> Self { Self::Nom(nom::error::Error::from_error_kind(input, kind)) } fn append(_: &'a [u8], _: nom::error::ErrorKind, other: Self) -> Self { other } } impl<'a> From<nom::error::Error<&'a [u8]>> for DecodeError<'a> { fn from(e: nom::error::Error<&'a [u8]>) -> Self { Self::Nom(e) } } impl<D: Decode, const LEN: usize> Decode for [D; LEN] { fn decode(buf: &[u8]) -> DResult<'_, Self> { D::decode_many(buf) } } impl<D: DecodeBe, const LEN: usize> DecodeBe for [D; LEN] { fn decode_be(buf: &[u8]) -> DResult<'_, Self> { D::decode_many_be(buf) } } impl<D: DecodeLe, const LEN: usize> DecodeLe for [D; LEN] { fn decode_le(buf: &[u8]) -> DResult<'_, Self> { D::decode_many_le(buf) } } /// Decodes a type, T, by directly filling the memory it occupies with /// the bytes contained in the in the byte slice, up to the size of the /// resulting type. /// /// # Safety /// Great care must be taken to ensure this function is safe to use on /// any given type, T. In general, it is unsound to decode any arbitrary /// type with this function, but the only necessary condition is that the /// first `std::mem::size_of::<T>()` bytes (agnostic of alignment) are /// guaranteed to constitute a valid instance of type T. 
Although it is /// possible for this function to be sound in more exotic scenarios, most /// uses of this function should be for built in types, such as integers /// and floating point types (not references!), and arrays, /// `repr(transparent)` types, and `repr(C)` types, consisting entirely /// of built in types; in short, types with well defined layout and which /// have no invalid representations. pub unsafe fn cast<T>(buf: &[u8]) -> DResult<'_, T> { let mut ret: MaybeUninit<T> = MaybeUninit::uninit(); if std::mem::size_of::<T>() != 0 { if buf.len() < std::mem::size_of::<T>() { return Err(nom::Err::Incomplete(nom::Needed::Size( std::num::NonZeroUsize::new_unchecked(std::mem::size_of::<T>() - buf.len()), ))); } let mut buf: &[u8] = &buf[..std::mem::size_of::<T>()]; if std::io::copy( &mut buf, &mut (std::slice::from_raw_parts_mut( std::ptr::addr_of_mut!(ret) as *mut u8, std::mem::size_of::<T>(), )), ) .is_err() { return Err(nom::Err::Incomplete(nom::Needed::Size( std::num::NonZeroUsize::new_unchecked(std::mem::size_of::<T>()), ))); }; } Ok((&buf[std::mem::size_of::<T>()..], transmute(ret))) } impl Decode for u8 { fn decode(buf: &[u8]) -> DResult<'_, Self> { num::u8(buf) } fn decode_many<const LEN: usize>(buf: &[u8]) -> DResult<'_, [Self; LEN]> { unsafe { cast(buf) } } } impl Decode for i8 { fn decode(buf: &[u8]) -> DResult<'_, Self> { num::i8(buf) } fn decode_many<const LEN: usize>(buf: &[u8]) -> DResult<'_, [Self; LEN]> { unsafe { cast(buf) } } } macro_rules! 
make_decode { ($t:ty, $be_func:ident, $le_func:ident) => { impl DecodeBe for $t { fn decode_be(buf: &[u8]) -> DResult<'_, Self> { num::$be_func(buf) } fn decode_many_be<const LEN: usize>(buf: &[u8]) -> DResult<'_, [Self; LEN]> { unsafe { match cast::<[Self; LEN]>(buf) { Ok((rem, mut ret)) => { if !cfg!(target_endian = "big") { for elem in ret.iter_mut() { *elem = Self::from_be_bytes(elem.to_ne_bytes()); } } Ok((rem, ret)) } Err(e) => Err(e), } } } } impl DecodeLe for $t { fn decode_le(buf: &[u8]) -> DResult<'_, Self> { num::$le_func(buf) } fn decode_many_le<const LEN: usize>(buf: &[u8]) -> DResult<'_, [Self; LEN]> { unsafe { match cast::<[Self; LEN]>(buf) { Ok((rem, mut ret)) => { if !cfg!(target_endian = "little") { for elem in ret.iter_mut() { *elem = Self::from_le_bytes(elem.to_ne_bytes()); } } Ok((rem, ret)) } Err(e) => Err(e), } } } } }; } unsafe fn transmute<T, U>(x: T) -> U { std::ptr::read(std::mem::transmute::<_, *const U>(std::ptr::addr_of!(x))) } make_decode!(u16, be_u16, le_u16); make_decode!(u32, be_u32, le_u32); make_decode!(u64, be_u64, le_u64); make_decode!(u128, be_u128, le_u128); make_decode!(i16, be_i16, le_i16); make_decode!(i32, be_i32, le_i32); make_decode!(i64, be_i64, le_i64); make_decode!(i128, be_i128, le_i128); make_decode!(f32, be_f32, le_f32); make_decode!(f64, be_f64, le_f64); #[cfg(test)] mod test { use super::*; macro_rules! 
incomplete { () => { nom::Err::Incomplete(nom::Needed::Unknown) }; ($size:expr) => { nom::Err::Incomplete(nom::Needed::Size( std::num::NonZeroUsize::new($size).unwrap(), )) }; } #[test] fn u8_decode() { let buf = &[1, 2, 3, 4][..]; assert_eq!(u8::decode(buf), Ok((&[2, 3, 4][..], 1))); assert_eq!(u8::decode(&buf[1..]), Ok((&[3, 4][..], 2))); assert_eq!(u8::decode(&buf[2..]), Ok((&[4][..], 3))); assert_eq!(u8::decode(&buf[3..]), Ok((&[][..], 4))); assert_eq!(u8::decode(&buf[4..]), Err(incomplete!(1))); } #[test] fn u8_array_decode() { let buf = &[1, 2, 3, 4][..]; assert_eq!(<[u8; 2]>::decode(buf), Ok((&[3, 4][..], [1, 2]))); assert_eq!(<[u8; 2]>::decode(&buf[1..]), Ok((&[4][..], [2, 3]))); assert_eq!(<[u8; 2]>::decode(&buf[2..]), Ok((&[][..], [3, 4]))); assert_eq!(<[u8; 2]>::decode(&buf[3..]), Err(incomplete!(1))); assert_eq!(<[u8; 2]>::decode(&buf[4..]), Err(incomplete!(2))); } #[test] fn u16_decode_be() { let buf = &[1, 2, 3, 4][..]; assert_eq!(u16::decode_be(buf), Ok((&[3, 4][..], 0x0102))); assert_eq!(u16::decode_be(&buf[1..]), Ok((&[4][..], 0x0203))); assert_eq!(u16::decode_be(&buf[2..]), Ok((&[][..], 0x0304))); assert_eq!(u16::decode_be(&buf[3..]), Err(incomplete!(1))); assert_eq!(u16::decode_be(&buf[4..]), Err(incomplete!(2))); } #[test] fn u16_array_decode_be() { let buf = &[1, 2, 3, 4, 5, 6, 7, 8][..]; assert_eq!( <[u16; 2]>::decode_be(buf), Ok((&[5, 6, 7, 8][..], [0x0102, 0x0304])) ); assert_eq!( <[u16; 2]>::decode_be(&buf[1..]), Ok((&[6, 7, 8][..], [0x0203, 0x0405])) ); assert_eq!( <[u16; 2]>::decode_be(&buf[2..]), Ok((&[7, 8][..], [0x0304, 0x0506])) ); assert_eq!( <[u16; 2]>::decode_be(&buf[3..]), Ok((&[8][..], [0x0405, 0x0607])) ); assert_eq!( <[u16; 2]>::decode_be(&buf[4..]), Ok((&[][..], [0x0506, 0x0708])) ); assert_eq!(<[u16; 2]>::decode_be(&buf[5..]), Err(incomplete!(1))); assert_eq!(<[u16; 2]>::decode_be(&buf[6..]), Err(incomplete!(2))); assert_eq!(<[u16; 2]>::decode_be(&buf[7..]), Err(incomplete!(3))); assert_eq!(<[u16; 2]>::decode_be(&buf[8..]), 
Err(incomplete!(4))); } #[test] fn u16_decode_le() { let buf = &[1, 2, 3, 4][..]; assert_eq!(u16::decode_le(buf), Ok((&[3, 4][..], 0x0201))); assert_eq!(u16::decode_le(&buf[1..]), Ok((&[4][..], 0x0302))); assert_eq!(u16::decode_le(&buf[2..]), Ok((&[][..], 0x0403))); assert_eq!(u16::decode_le(&buf[3..]), Err(incomplete!(1))); assert_eq!(u16::decode_le(&buf[4..]), Err(incomplete!(2))); } #[test] fn u16_array_decode_le() { let buf = &[1, 2, 3, 4, 5, 6, 7, 8][..]; assert_eq!( <[u16; 2]>::decode_le(buf), Ok((&[5, 6, 7, 8][..], [0x0201, 0x0403])) ); assert_eq!( <[u16; 2]>::decode_le(&buf[1..]), Ok((&[6, 7, 8][..], [0x0302, 0x0504])) ); assert_eq!( <[u16; 2]>::decode_le(&buf[2..]), Ok((&[7, 8][..], [0x0403, 0x0605])) ); assert_eq!( <[u16; 2]>::decode_le(&buf[3..]), Ok((&[8][..], [0x0504, 0x0706])) ); assert_eq!( <[u16; 2]>::decode_le(&buf[4..]), Ok((&[][..], [0x0605, 0x0807])) ); assert_eq!(<[u16; 2]>::decode_le(&buf[5..]), Err(incomplete!(1))); assert_eq!(<[u16; 2]>::decode_le(&buf[6..]), Err(incomplete!(2))); assert_eq!(<[u16; 2]>::decode_le(&buf[7..]), Err(incomplete!(3))); assert_eq!(<[u16; 2]>::decode_le(&buf[8..]), Err(incomplete!(4))); } #[test] fn u32_decode_be() { let buf = &[1, 2, 3, 4, 5, 6][..]; assert_eq!(u32::decode_be(buf), Ok((&[5, 6][..], 0x01020304))); assert_eq!(u32::decode_be(&buf[1..]), Ok((&[6][..], 0x02030405))); assert_eq!(u32::decode_be(&buf[2..]), Ok((&[][..], 0x03040506))); assert_eq!(u32::decode_be(&buf[3..]), Err(incomplete!(1))); assert_eq!(u32::decode_be(&buf[4..]), Err(incomplete!(2))); assert_eq!(u32::decode_be(&buf[5..]), Err(incomplete!(3))); assert_eq!(u32::decode_be(&buf[6..]), Err(incomplete!(4))); } #[test] fn u32_array_decode_be() { let buf = &[1, 2, 3, 4, 5, 6, 7, 8, 9, 10][..]; assert_eq!( <[u32; 2]>::decode_be(buf), Ok((&[9, 10][..], [0x01020304, 0x05060708])) ); assert_eq!( <[u32; 2]>::decode_be(&buf[1..]), Ok((&[10][..], [0x02030405, 0x06070809])) ); assert_eq!( <[u32; 2]>::decode_be(&buf[2..]), Ok((&[][..], [0x03040506, 
0x0708090A])) ); assert_eq!(<[u32; 2]>::decode_be(&buf[3..]), Err(incomplete!(1))); assert_eq!(<[u32; 2]>::decode_be(&buf[4..]), Err(incomplete!(2))); assert_eq!(<[u32; 2]>::decode_be(&buf[5..]), Err(incomplete!(3))); assert_eq!(<[u32; 2]>::decode_be(&buf[6..]), Err(incomplete!(4))); assert_eq!(<[u32; 2]>::decode_be(&buf[7..]), Err(incomplete!(5))); assert_eq!(<[u32; 2]>::decode_be(&buf[8..]), Err(incomplete!(6))); assert_eq!(<[u32; 2]>::decode_be(&buf[9..]), Err(incomplete!(7))); assert_eq!(<[u32; 2]>::decode_be(&buf[10..]), Err(incomplete!(8))); } #[test] fn u32_decode_le() { let buf = &[1, 2, 3, 4, 5, 6][..]; assert_eq!(u32::decode_le(buf), Ok((&[5, 6][..], 0x04030201))); assert_eq!(u32::decode_le(&buf[1..]), Ok((&[6][..], 0x05040302))); assert_eq!(u32::decode_le(&buf[2..]), Ok((&[][..], 0x06050403))); assert_eq!(u32::decode_le(&buf[3..]), Err(incomplete!(1))); assert_eq!(u32::decode_le(&buf[4..]), Err(incomplete!(2))); assert_eq!(u32::decode_le(&buf[5..]), Err(incomplete!(3))); assert_eq!(u32::decode_le(&buf[6..]), Err(incomplete!(4))); } #[test] fn u32_array_decode_le() { let buf = &[1, 2, 3, 4, 5, 6, 7, 8, 9, 10][..]; assert_eq!( <[u32; 2]>::decode_le(buf), Ok((&[9, 10][..], [0x04030201, 0x08070605])) ); assert_eq!( <[u32; 2]>::decode_le(&buf[1..]), Ok((&[10][..], [0x05040302, 0x09080706])) ); assert_eq!( <[u32; 2]>::decode_le(&buf[2..]), Ok((&[][..], [0x06050403, 0x0A090807])) ); assert_eq!(<[u32; 2]>::decode_le(&buf[3..]), Err(incomplete!(1))); assert_eq!(<[u32; 2]>::decode_le(&buf[4..]), Err(incomplete!(2))); assert_eq!(<[u32; 2]>::decode_le(&buf[5..]), Err(incomplete!(3))); assert_eq!(<[u32; 2]>::decode_le(&buf[6..]), Err(incomplete!(4))); assert_eq!(<[u32; 2]>::decode_le(&buf[7..]), Err(incomplete!(5))); assert_eq!(<[u32; 2]>::decode_le(&buf[8..]), Err(incomplete!(6))); assert_eq!(<[u32; 2]>::decode_le(&buf[9..]), Err(incomplete!(7))); assert_eq!(<[u32; 2]>::decode_le(&buf[10..]), Err(incomplete!(8))); } #[test] fn u64_decode_be() { let buf = &[1, 2, 3, 
4, 5, 6, 7, 8, 9, 10][..]; assert_eq!(u64::decode_be(buf), Ok((&[9, 10][..], 0x0102030405060708))); assert_eq!( u64::decode_be(&buf[1..]), Ok((&[10][..], 0x0203040506070809)) ); assert_eq!(u64::decode_be(&buf[2..]), Ok((&[][..], 0x030405060708090A))); assert_eq!(u64::decode_be(&buf[3..]), Err(incomplete!(1))); assert_eq!(u64::decode_be(&buf[4..]), Err(incomplete!(2))); assert_eq!(u64::decode_be(&buf[5..]), Err(incomplete!(3))); assert_eq!(u64::decode_be(&buf[6..]), Err(incomplete!(4))); assert_eq!(u64::decode_be(&buf[7..]), Err(incomplete!(5))); assert_eq!(u64::decode_be(&buf[8..]), Err(incomplete!(6))); assert_eq!(u64::decode_be(&buf[9..]), Err(incomplete!(7))); assert_eq!(u64::decode_be(&buf[10..]), Err(incomplete!(8))); } #[test] fn u64_array_decode_be() { let buf = &[ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, ][..]; assert_eq!( <[u64; 2]>::decode_be(buf), Ok((&[17, 18][..], [0x0102030405060708, 0x090A0B0C0D0E0F10])) ); assert_eq!( <[u64; 2]>::decode_be(&buf[1..]), Ok((&[18][..], [0x0203040506070809, 0x0A0B0C0D0E0F1011])) ); assert_eq!( <[u64; 2]>::decode_be(&buf[2..]), Ok((&[][..], [0x030405060708090A, 0x0B0C0D0E0F101112])) ); assert_eq!(<[u64; 2]>::decode_be(&buf[3..]), Err(incomplete!(1))); assert_eq!(<[u64; 2]>::decode_be(&buf[4..]), Err(incomplete!(2))); assert_eq!(<[u64; 2]>::decode_be(&buf[5..]), Err(incomplete!(3))); assert_eq!(<[u64; 2]>::decode_be(&buf[6..]), Err(incomplete!(4))); assert_eq!(<[u64; 2]>::decode_be(&buf[7..]), Err(incomplete!(5))); assert_eq!(<[u64; 2]>::decode_be(&buf[8..]), Err(incomplete!(6))); assert_eq!(<[u64; 2]>::decode_be(&buf[9..]), Err(incomplete!(7))); assert_eq!(<[u64; 2]>::decode_be(&buf[10..]), Err(incomplete!(8))); assert_eq!(<[u64; 2]>::decode_be(&buf[11..]), Err(incomplete!(9))); assert_eq!(<[u64; 2]>::decode_be(&buf[12..]), Err(incomplete!(10))); assert_eq!(<[u64; 2]>::decode_be(&buf[13..]), Err(incomplete!(11))); assert_eq!(<[u64; 2]>::decode_be(&buf[14..]), Err(incomplete!(12))); assert_eq!(<[u64; 
2]>::decode_be(&buf[15..]), Err(incomplete!(13))); assert_eq!(<[u64; 2]>::decode_be(&buf[16..]), Err(incomplete!(14))); assert_eq!(<[u64; 2]>::decode_be(&buf[17..]), Err(incomplete!(15))); assert_eq!(<[u64; 2]>::decode_be(&buf[18..]), Err(incomplete!(16))); } #[test] fn u64_decode_le() { let buf = &[1, 2, 3, 4, 5, 6, 7, 8, 9, 10][..]; assert_eq!(u64::decode_le(buf), Ok((&[9, 10][..], 0x0807060504030201))); assert_eq!( u64::decode_le(&buf[1..]), Ok((&[10][..], 0x0908070605040302)) ); assert_eq!(u64::decode_le(&buf[2..]), Ok((&[][..], 0x0A09080706050403))); assert_eq!(u64::decode_le(&buf[3..]), Err(incomplete!(1))); assert_eq!(u64::decode_le(&buf[4..]), Err(incomplete!(2))); assert_eq!(u64::decode_le(&buf[5..]), Err(incomplete!(3))); assert_eq!(u64::decode_le(&buf[6..]), Err(incomplete!(4))); assert_eq!(u64::decode_le(&buf[7..]), Err(incomplete!(5))); assert_eq!(u64::decode_le(&buf[8..]), Err(incomplete!(6))); assert_eq!(u64::decode_le(&buf[9..]), Err(incomplete!(7))); assert_eq!(u64::decode_le(&buf[10..]), Err(incomplete!(8))); } #[test] fn u64_array_decode_le() { let buf = &[ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, ][..]; assert_eq!( <[u64; 2]>::decode_le(buf), Ok((&[17, 18][..], [0x0807060504030201, 0x100F0E0D0C0B0A09])) ); assert_eq!( <[u64; 2]>::decode_le(&buf[1..]), Ok((&[18][..], [0x0908070605040302, 0x11100F0E0D0C0B0A])) ); assert_eq!( <[u64; 2]>::decode_le(&buf[2..]), Ok((&[][..], [0x0A09080706050403, 0x1211100F0E0D0C0B])) ); assert_eq!(<[u64; 2]>::decode_le(&buf[3..]), Err(incomplete!(1))); assert_eq!(<[u64; 2]>::decode_le(&buf[4..]), Err(incomplete!(2))); assert_eq!(<[u64; 2]>::decode_le(&buf[5..]), Err(incomplete!(3))); assert_eq!(<[u64; 2]>::decode_le(&buf[6..]), Err(incomplete!(4))); assert_eq!(<[u64; 2]>::decode_le(&buf[7..]), Err(incomplete!(5))); assert_eq!(<[u64; 2]>::decode_le(&buf[8..]), Err(incomplete!(6))); assert_eq!(<[u64; 2]>::decode_le(&buf[9..]), Err(incomplete!(7))); assert_eq!(<[u64; 2]>::decode_le(&buf[10..]), 
Err(incomplete!(8))); assert_eq!(<[u64; 2]>::decode_le(&buf[11..]), Err(incomplete!(9))); assert_eq!(<[u64; 2]>::decode_le(&buf[12..]), Err(incomplete!(10))); assert_eq!(<[u64; 2]>::decode_le(&buf[13..]), Err(incomplete!(11))); assert_eq!(<[u64; 2]>::decode_le(&buf[14..]), Err(incomplete!(12))); assert_eq!(<[u64; 2]>::decode_le(&buf[15..]), Err(incomplete!(13))); assert_eq!(<[u64; 2]>::decode_le(&buf[16..]), Err(incomplete!(14))); assert_eq!(<[u64; 2]>::decode_le(&buf[17..]), Err(incomplete!(15))); assert_eq!(<[u64; 2]>::decode_le(&buf[18..]), Err(incomplete!(16))); } #[test] fn u128_decode_be() { let buf = &[ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, ][..]; assert_eq!( u128::decode_be(buf), Ok((&[17, 18][..], 0x0102030405060708090A0B0C0D0E0F10)) ); assert_eq!( u128::decode_be(&buf[1..]), Ok((&[18][..], 0x02030405060708090A0B0C0D0E0F1011)) ); assert_eq!( u128::decode_be(&buf[2..]), Ok((&[][..], 0x030405060708090A0B0C0D0E0F101112)) ); assert_eq!(u128::decode_be(&buf[3..]), Err(incomplete!(1))); assert_eq!(u128::decode_be(&buf[4..]), Err(incomplete!(2))); assert_eq!(u128::decode_be(&buf[5..]), Err(incomplete!(3))); assert_eq!(u128::decode_be(&buf[6..]), Err(incomplete!(4))); assert_eq!(u128::decode_be(&buf[7..]), Err(incomplete!(5))); assert_eq!(u128::decode_be(&buf[8..]), Err(incomplete!(6))); assert_eq!(u128::decode_be(&buf[9..]), Err(incomplete!(7))); assert_eq!(u128::decode_be(&buf[10..]), Err(incomplete!(8))); assert_eq!(u128::decode_be(&buf[11..]), Err(incomplete!(9))); assert_eq!(u128::decode_be(&buf[12..]), Err(incomplete!(10))); assert_eq!(u128::decode_be(&buf[13..]), Err(incomplete!(11))); assert_eq!(u128::decode_be(&buf[14..]), Err(incomplete!(12))); assert_eq!(u128::decode_be(&buf[15..]), Err(incomplete!(13))); assert_eq!(u128::decode_be(&buf[16..]), Err(incomplete!(14))); assert_eq!(u128::decode_be(&buf[17..]), Err(incomplete!(15))); assert_eq!(u128::decode_be(&buf[18..]), Err(incomplete!(16))); } #[test] fn u128_array_decode_be() { 
let buf = &[ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, ][..]; assert_eq!( <[u128; 2]>::decode_be(buf), Ok(( &[33, 34][..], [ 0x0102030405060708090A0B0C0D0E0F10, 0x1112131415161718191A1B1C1D1E1F20 ] )) ); assert_eq!( <[u128; 2]>::decode_be(&buf[1..]), Ok(( &[34][..], [ 0x02030405060708090A0B0C0D0E0F1011, 0x12131415161718191A1B1C1D1E1F2021 ] )) ); assert_eq!( <[u128; 2]>::decode_be(&buf[2..]), Ok(( &[][..], [ 0x030405060708090A0B0C0D0E0F101112, 0x131415161718191A1B1C1D1E1F202122 ] )) ); assert_eq!(<[u128; 2]>::decode_be(&buf[3..]), Err(incomplete!(1))); assert_eq!(<[u128; 2]>::decode_be(&buf[4..]), Err(incomplete!(2))); assert_eq!(<[u128; 2]>::decode_be(&buf[5..]), Err(incomplete!(3))); assert_eq!(<[u128; 2]>::decode_be(&buf[6..]), Err(incomplete!(4))); assert_eq!(<[u128; 2]>::decode_be(&buf[7..]), Err(incomplete!(5))); assert_eq!(<[u128; 2]>::decode_be(&buf[8..]), Err(incomplete!(6))); assert_eq!(<[u128; 2]>::decode_be(&buf[9..]), Err(incomplete!(7))); assert_eq!(<[u128; 2]>::decode_be(&buf[10..]), Err(incomplete!(8))); assert_eq!(<[u128; 2]>::decode_be(&buf[11..]), Err(incomplete!(9))); assert_eq!(<[u128; 2]>::decode_be(&buf[12..]), Err(incomplete!(10))); assert_eq!(<[u128; 2]>::decode_be(&buf[13..]), Err(incomplete!(11))); assert_eq!(<[u128; 2]>::decode_be(&buf[14..]), Err(incomplete!(12))); assert_eq!(<[u128; 2]>::decode_be(&buf[15..]), Err(incomplete!(13))); assert_eq!(<[u128; 2]>::decode_be(&buf[16..]), Err(incomplete!(14))); assert_eq!(<[u128; 2]>::decode_be(&buf[17..]), Err(incomplete!(15))); assert_eq!(<[u128; 2]>::decode_be(&buf[18..]), Err(incomplete!(16))); assert_eq!(<[u128; 2]>::decode_be(&buf[19..]), Err(incomplete!(17))); assert_eq!(<[u128; 2]>::decode_be(&buf[20..]), Err(incomplete!(18))); assert_eq!(<[u128; 2]>::decode_be(&buf[21..]), Err(incomplete!(19))); assert_eq!(<[u128; 2]>::decode_be(&buf[22..]), Err(incomplete!(20))); assert_eq!(<[u128; 
2]>::decode_be(&buf[23..]), Err(incomplete!(21))); assert_eq!(<[u128; 2]>::decode_be(&buf[24..]), Err(incomplete!(22))); assert_eq!(<[u128; 2]>::decode_be(&buf[25..]), Err(incomplete!(23))); assert_eq!(<[u128; 2]>::decode_be(&buf[26..]), Err(incomplete!(24))); assert_eq!(<[u128; 2]>::decode_be(&buf[27..]), Err(incomplete!(25))); assert_eq!(<[u128; 2]>::decode_be(&buf[28..]), Err(incomplete!(26))); assert_eq!(<[u128; 2]>::decode_be(&buf[29..]), Err(incomplete!(27))); assert_eq!(<[u128; 2]>::decode_be(&buf[30..]), Err(incomplete!(28))); assert_eq!(<[u128; 2]>::decode_be(&buf[31..]), Err(incomplete!(29))); assert_eq!(<[u128; 2]>::decode_be(&buf[32..]), Err(incomplete!(30))); assert_eq!(<[u128; 2]>::decode_be(&buf[33..]), Err(incomplete!(31))); assert_eq!(<[u128; 2]>::decode_be(&buf[34..]), Err(incomplete!(32))); } #[test] fn u128_decode_le() { let buf = &[ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, ][..]; assert_eq!( u128::decode_le(buf), Ok((&[17, 18][..], 0x100F0E0D0C0B0A090807060504030201)) ); assert_eq!( u128::decode_le(&buf[1..]), Ok((&[18][..], 0x11100F0E0D0C0B0A0908070605040302)) ); assert_eq!( u128::decode_le(&buf[2..]), Ok((&[][..], 0x1211100F0E0D0C0B0A09080706050403)) ); assert_eq!(u128::decode_le(&buf[3..]), Err(incomplete!(1))); assert_eq!(u128::decode_le(&buf[4..]), Err(incomplete!(2))); assert_eq!(u128::decode_le(&buf[5..]), Err(incomplete!(3))); assert_eq!(u128::decode_le(&buf[6..]), Err(incomplete!(4))); assert_eq!(u128::decode_le(&buf[7..]), Err(incomplete!(5))); assert_eq!(u128::decode_le(&buf[8..]), Err(incomplete!(6))); assert_eq!(u128::decode_le(&buf[9..]), Err(incomplete!(7))); assert_eq!(u128::decode_le(&buf[10..]), Err(incomplete!(8))); assert_eq!(u128::decode_le(&buf[11..]), Err(incomplete!(9))); assert_eq!(u128::decode_le(&buf[12..]), Err(incomplete!(10))); assert_eq!(u128::decode_le(&buf[13..]), Err(incomplete!(11))); assert_eq!(u128::decode_le(&buf[14..]), Err(incomplete!(12))); assert_eq!(u128::decode_le(&buf[15..]), 
Err(incomplete!(13))); assert_eq!(u128::decode_le(&buf[16..]), Err(incomplete!(14))); assert_eq!(u128::decode_le(&buf[17..]), Err(incomplete!(15))); assert_eq!(u128::decode_le(&buf[18..]), Err(incomplete!(16))); } #[test] fn u128_array_decode_le() { let buf = &[ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, ][..]; assert_eq!( <[u128; 2]>::decode_le(buf), Ok(( &[33, 34][..], [ 0x100F0E0D0C0B0A090807060504030201, 0x201F1E1D1C1B1A191817161514131211 ] )) ); assert_eq!( <[u128; 2]>::decode_le(&buf[1..]), Ok(( &[34][..], [ 0x11100F0E0D0C0B0A0908070605040302, 0x21201F1E1D1C1B1A1918171615141312 ] )) ); assert_eq!( <[u128; 2]>::decode_le(&buf[2..]), Ok(( &[][..], [ 0x1211100F0E0D0C0B0A09080706050403, 0x2221201F1E1D1C1B1A19181716151413 ] )) ); assert_eq!(<[u128; 2]>::decode_le(&buf[3..]), Err(incomplete!(1))); assert_eq!(<[u128; 2]>::decode_le(&buf[4..]), Err(incomplete!(2))); assert_eq!(<[u128; 2]>::decode_le(&buf[5..]), Err(incomplete!(3))); assert_eq!(<[u128; 2]>::decode_le(&buf[6..]), Err(incomplete!(4))); assert_eq!(<[u128; 2]>::decode_le(&buf[7..]), Err(incomplete!(5))); assert_eq!(<[u128; 2]>::decode_le(&buf[8..]), Err(incomplete!(6))); assert_eq!(<[u128; 2]>::decode_le(&buf[9..]), Err(incomplete!(7))); assert_eq!(<[u128; 2]>::decode_le(&buf[10..]), Err(incomplete!(8))); assert_eq!(<[u128; 2]>::decode_le(&buf[11..]), Err(incomplete!(9))); assert_eq!(<[u128; 2]>::decode_le(&buf[12..]), Err(incomplete!(10))); assert_eq!(<[u128; 2]>::decode_le(&buf[13..]), Err(incomplete!(11))); assert_eq!(<[u128; 2]>::decode_le(&buf[14..]), Err(incomplete!(12))); assert_eq!(<[u128; 2]>::decode_le(&buf[15..]), Err(incomplete!(13))); assert_eq!(<[u128; 2]>::decode_le(&buf[16..]), Err(incomplete!(14))); assert_eq!(<[u128; 2]>::decode_le(&buf[17..]), Err(incomplete!(15))); assert_eq!(<[u128; 2]>::decode_le(&buf[18..]), Err(incomplete!(16))); assert_eq!(<[u128; 2]>::decode_le(&buf[19..]), Err(incomplete!(17))); 
assert_eq!(<[u128; 2]>::decode_le(&buf[20..]), Err(incomplete!(18))); assert_eq!(<[u128; 2]>::decode_le(&buf[21..]), Err(incomplete!(19))); assert_eq!(<[u128; 2]>::decode_le(&buf[22..]), Err(incomplete!(20))); assert_eq!(<[u128; 2]>::decode_le(&buf[23..]), Err(incomplete!(21))); assert_eq!(<[u128; 2]>::decode_le(&buf[24..]), Err(incomplete!(22))); assert_eq!(<[u128; 2]>::decode_le(&buf[25..]), Err(incomplete!(23))); assert_eq!(<[u128; 2]>::decode_le(&buf[26..]), Err(incomplete!(24))); assert_eq!(<[u128; 2]>::decode_le(&buf[27..]), Err(incomplete!(25))); assert_eq!(<[u128; 2]>::decode_le(&buf[28..]), Err(incomplete!(26))); assert_eq!(<[u128; 2]>::decode_le(&buf[29..]), Err(incomplete!(27))); assert_eq!(<[u128; 2]>::decode_le(&buf[30..]), Err(incomplete!(28))); assert_eq!(<[u128; 2]>::decode_le(&buf[31..]), Err(incomplete!(29))); assert_eq!(<[u128; 2]>::decode_le(&buf[32..]), Err(incomplete!(30))); assert_eq!(<[u128; 2]>::decode_le(&buf[33..]), Err(incomplete!(31))); assert_eq!(<[u128; 2]>::decode_le(&buf[34..]), Err(incomplete!(32))); } #[test] fn i8_decode() { let buf = &[1, 2, 0xFF, 0x80][..]; assert_eq!(i8::decode(buf), Ok((&[2, 0xFF, 0x80][..], 1))); assert_eq!(i8::decode(&buf[1..]), Ok((&[0xFF, 0x80][..], 2))); assert_eq!(i8::decode(&buf[2..]), Ok((&[0x80][..], -1))); assert_eq!(i8::decode(&buf[3..]), Ok((&[][..], -128))); assert_eq!(i8::decode(&buf[4..]), Err(incomplete!(1))); } #[test] fn i8_array_decode() { let buf = &[1, 2, 0xFF, 0x80][..]; assert_eq!(<[i8; 2]>::decode(buf), Ok((&[0xFF, 0x80][..], [1, 2]))); assert_eq!(<[i8; 2]>::decode(&buf[1..]), Ok((&[0x80][..], [2, -1]))); assert_eq!(<[i8; 2]>::decode(&buf[2..]), Ok((&[][..], [-1, -128]))); assert_eq!(<[i8; 2]>::decode(&buf[3..]), Err(incomplete!(1))); assert_eq!(<[i8; 2]>::decode(&buf[4..]), Err(incomplete!(2))); } #[test] fn i16_decode_be() { let buf = &[1, 2, 0xFF, 0][..]; assert_eq!(i16::decode_be(buf), Ok((&[0xFF, 0][..], 0x0102))); assert_eq!(i16::decode_be(&buf[1..]), Ok((&[0][..], 0x02FF))); 
assert_eq!(i16::decode_be(&buf[2..]), Ok((&[][..], -256))); assert_eq!(i16::decode_be(&buf[3..]), Err(incomplete!(1))); assert_eq!(i16::decode_be(&buf[4..]), Err(incomplete!(2))); } #[test] fn i16_array_decode_be() { let buf = &[1, 2, 0xFF, 0, 0x80, 1][..]; assert_eq!( <[i16; 2]>::decode_be(buf), Ok((&[0x80, 1][..], [0x0102, -256])) ); assert_eq!( <[i16; 2]>::decode_be(&buf[1..]), Ok((&[1][..], [0x02FF, 0x80])) ); assert_eq!( <[i16; 2]>::decode_be(&buf[2..]), Ok((&[][..], [-256, -32767])) ); assert_eq!(<[i16; 2]>::decode_be(&buf[3..]), Err(incomplete!(1))); assert_eq!(<[i16; 2]>::decode_be(&buf[4..]), Err(incomplete!(2))); assert_eq!(<[i16; 2]>::decode_be(&buf[5..]), Err(incomplete!(3))); assert_eq!(<[i16; 2]>::decode_be(&buf[6..]), Err(incomplete!(4))); } #[test] fn i16_decode_le() { let buf = &[1, 2, 0, 0xFF][..]; assert_eq!(i16::decode_le(buf), Ok((&[0, 0xFF][..], 0x0201))); assert_eq!(i16::decode_le(&buf[1..]), Ok((&[0xFF][..], 2))); assert_eq!(i16::decode_le(&buf[2..]), Ok((&[][..], -256))); assert_eq!(i16::decode_le(&buf[3..]), Err(incomplete!(1))); assert_eq!(i16::decode_le(&buf[4..]), Err(incomplete!(2))); } #[test] fn i16_array_decode_le() { let buf = &[1, 2, 0, 0xFF, 1, 0x80][..]; assert_eq!( <[i16; 2]>::decode_le(buf), Ok((&[1, 0x80][..], [0x0201, -256])) ); assert_eq!( <[i16; 2]>::decode_le(&buf[1..]), Ok((&[0x80][..], [2, 0x1FF])) ); assert_eq!( <[i16; 2]>::decode_le(&buf[2..]), Ok((&[][..], [-256, -32767])) ); assert_eq!(<[i16; 2]>::decode_le(&buf[3..]), Err(incomplete!(1))); assert_eq!(<[i16; 2]>::decode_le(&buf[4..]), Err(incomplete!(2))); assert_eq!(<[i16; 2]>::decode_le(&buf[5..]), Err(incomplete!(3))); assert_eq!(<[i16; 2]>::decode_le(&buf[6..]), Err(incomplete!(4))); } }
41.611819
98
0.474562
fc1ee6939f787e551b30ccba26d493004a9d59d2
376
/// Positive difference of two 32-bit floating-point numbers. #[no_mangle] #[inline] pub extern "C" fn fdimf(l: f32, r: f32) -> f32 { if l <= r { return 0.0; } l - r } /// Positive difference of two 64-bit floating-point numbers. #[no_mangle] #[inline] pub extern "C" fn fdim(l: f64, r: f64) -> f64 { if l <= r { return 0.0; } l - r }
18.8
61
0.555851
bb67eda1bf67412897680b23cd2c858558d57919
14,648
use super::gen_website::*; use super::pedia::*; use crate::rsz::*; use anyhow::*; use std::collections::BTreeMap; use std::fs::{create_dir, write}; use std::path::*; use typed_html::{dom::*, elements::*, html, text}; pub fn gen_quest_list(quests: &[Quest], root: &Path) -> Result<()> { let mut quests_ordered: BTreeMap<_, BTreeMap<_, Vec<&Quest>>> = BTreeMap::new(); for quest in quests { quests_ordered .entry(quest.param.enemy_level) .or_default() .entry(quest.param.quest_level) .or_default() .push(quest); } let doc: DOMTree<String> = html!( <html> <head> <title>{text!("Quests - MHRice")}</title> { head_common() } </head> <body> { navbar() } <main> <div class="container"> <h1 class="title">"Quests"</h1> { quests_ordered.into_iter().map(|(enemy_level, quests)|{ html!(<section> <h2 class="title">{text!("{:?}", enemy_level)}</h2> <ul class="mh-list-quest">{ quests.into_iter().map(|(quest_level, quests)|{ html!( <li class="mh-list-quest"> <h3 class="title">{text!("{:?}", quest_level)}</h3> <ul>{ quests.into_iter().map(|quest|{ let link = format!("/quest/{:06}.html", quest.param.quest_no); let name = quest.name.map_or( html!(<span>{text!("Quest {:06}", quest.param.quest_no)}</span>), gen_multi_lang ); let img = format!("/resources/questtype_{}.png", quest.param.quest_type.icon_index()); html!{<li> <a href={link} class="mh-icon-text"> <img src={img} class="mh-quest-icon"/> {name} </a> </li>} }) }</ul> </li> ) }) }</ul></section>) }) } </div> </main> </body> </html> ); let quests_path = root.join("quest.html"); write(&quests_path, doc.to_string())?; Ok(()) } pub fn gen_quest_monster_data( enemy_param: Option<&SharedEnemyParam>, em_type: EmTypes, index: usize, pedia: &Pedia, pedia_ex: &PediaEx<'_>, ) -> impl IntoIterator<Item = Box<td<String>>> { let enemy_param = if let Some(enemy_param) = enemy_param.as_ref() { enemy_param } else { return vec![html!(<td colspan=11>"[NO DATA]"</td>)]; }; let size = if let (Some(scale_tbl_i), Some(base_scale)) = ( 
enemy_param.scale_tbl.get(index), enemy_param.scale.get(index), ) { if let (Some(size), Some(size_dist)) = ( pedia_ex.sizes.get(&em_type), pedia_ex.size_dists.get(scale_tbl_i), ) { let mut small_chance = 0; let mut large_chance = 0; for sample in *size_dist { let scale = sample.scale * (*base_scale as f32) / 100.0; if scale <= size.small_boarder { small_chance += sample.rate; } if scale >= size.king_boarder { large_chance += sample.rate; } } let small = (small_chance != 0).then(|| { html!(<span class="tag"> <img src="/resources/small_crown.png" /> {text!("{}%", small_chance)} </span>) }); let large = (large_chance != 0).then(|| { html!(<span class="tag"> <img src="/resources/king_crown.png" /> {text!("{}%", large_chance)} </span>) }); html!(<span>{small}{large}</span>) } else { html!(<span>"-"</span>) } } else { html!(<span>"-"</span>) }; let hp = enemy_param.vital_tbl.get(index).map_or_else( || "-".to_owned(), |v| { pedia .difficulty_rate .vital_rate_table_list .get(usize::from(*v)) .map_or_else(|| format!("~ {}", v), |r| format!("x{}", r.vital_rate)) }, ); let attack = enemy_param.attack_tbl.get(index).map_or_else( || "-".to_owned(), |v| { pedia .difficulty_rate .attack_rate_table_list .get(usize::from(*v)) .map_or_else(|| format!("~ {}", v), |r| format!("x{}", r.attack_rate)) }, ); let parts = enemy_param.parts_tbl.get(index).map_or_else( || "-".to_owned(), |v| { pedia .difficulty_rate .parts_rate_table_list .get(usize::from(*v)) .map_or_else( || format!("~ {}", v), |r| format!("x{}", r.parts_vital_rate), ) }, ); let defense; let element_a; let element_b; let stun; let exhaust; let ride; if let Some(v) = enemy_param.other_tbl.get(index) { if let Some(r) = pedia .difficulty_rate .other_rate_table_list .get(usize::from(*v)) { defense = format!("x{}", r.defense_rate); element_a = format!("x{}", r.damage_element_rate_a); element_b = format!("x{}", r.damage_element_rate_b); stun = format!("x{}", r.stun_rate); exhaust = format!("x{}", r.tired_rate); ride = 
format!("x{}", r.marionette_rate); } else { let placeholder = format!("~ {}", v); defense = placeholder.clone(); element_a = placeholder.clone(); element_b = placeholder.clone(); stun = placeholder.clone(); exhaust = placeholder.clone(); ride = placeholder; } } else { defense = "-".to_owned(); element_a = "-".to_owned(); element_b = "-".to_owned(); stun = "-".to_owned(); exhaust = "-".to_owned(); ride = "-".to_owned(); }; let stamina = enemy_param .stamina_tbl .get(index) .map_or_else(|| "-".to_owned(), |v| format!("{}", v)); vec![ html!(<td>{size}</td>), html!(<td>{text!("{}", hp)}</td>), html!(<td>{text!("{}", attack)}</td>), html!(<td>{text!("{}", parts)}</td>), html!(<td>{text!("{}", defense)}</td>), html!(<td>{text!("{}", element_a)}</td>), html!(<td>{text!("{}", element_b)}</td>), html!(<td>{text!("{}", stun)}</td>), html!(<td>{text!("{}", exhaust)}</td>), html!(<td>{text!("{}", ride)}</td>), html!(<td>{text!("{}", stamina)}</td>), ] } fn gen_multi_factor(data: &MultiData) -> Box<div<String>> { html!(<div><ul class="mh-multi-factor"> <li><span>"2: "</span><span>{text!("x{}", data.two)}</span></li> <li><span>"3: "</span><span>{text!("x{}", data.three)}</span></li> <li><span>"4: "</span><span>{text!("x{}", data.four)}</span></li> </ul></div>) } fn gen_quest_monster_multi_player_data( enemy_param: Option<&SharedEnemyParam>, index: usize, pedia: &Pedia, ) -> impl IntoIterator<Item = Box<td<String>>> { let no_data = || vec![html!(<td colspan=9>"[NO DATA]"</td>)]; let enemy_param = if let Some(enemy_param) = enemy_param.as_ref() { enemy_param } else { return no_data(); }; let multi = if let Some(multi) = enemy_param.boss_multi.get(index) { *multi } else { return no_data(); }; let table = if let Some(table) = pedia .difficulty_rate .multi_rate_table_list .get(usize::from(multi)) { &table.multi_data_list } else { return no_data(); }; table .iter() .map(|d| html!(<td>{gen_multi_factor(d)}</td>)) .collect() } fn gen_monster_tag(quest: &Quest, pedia: &Pedia, em_type: 
EmTypes) -> Box<td<String>> { let id = match em_type { EmTypes::Em(id) => id, EmTypes::Ems(_) => return html!(<td>"Unexpected small monster"</td>), }; let monster = pedia.monsters.iter().find(|m| (m.id | m.sub_id << 8) == id); let monster_name = (|| { let name_name = format!("EnemyIndex{:03}", monster?.enemy_type?); Some(gen_multi_lang(pedia.monster_names.get_entry(&name_name)?)) })() .unwrap_or(html!(<span>{text!("Monster {0:03}_{1:02}", id & 0xFF, id >> 8)}</span>)); let icon_path = format!("/resources/em{0:03}_{1:02}_icon.png", id & 0xFF, id >> 8); let target_tag = if quest.param.has_target(em_type) { html!(<span class="tag is-primary">"Target"</span>) } else { html!(<span />) }; html!(<td> <a href={format!("/monster/{:03}_{1:02}.html", id & 0xFF, id >> 8)}> <img class="mh-quest-list-monster-icon" src=icon_path /> <span class="mh-quest-list-monster-name"> {monster_name} </span> </a> {target_tag} </td>) } fn gen_quest(quest: &Quest, pedia: &Pedia, pedia_ex: &PediaEx<'_>, path: &Path) -> Result<()> { let img = format!( "/resources/questtype_{}.png", quest.param.quest_type.icon_index() ); let doc: DOMTree<String> = html!( <html> <head> <title>{text!("Quest {:06}", quest.param.quest_no)}</title> { head_common() } </head> <body> { navbar() } <main> <div class="container"> <div class="content"> <div class="mh-title-icon"> <img src={img} class="mh-quest-icon"/> </div> <h1 class="title"> <span class="tag">{text!("{:?}-{:?}", quest.param.enemy_level, quest.param.quest_level)}</span> { quest.name.map_or( html!(<span>{text!("Quest {:06}", quest.param.quest_no)}</span>), gen_multi_lang ) }</h1> <p><span>"Objective: "</span><span> { quest.target.map_or( html!(<span>"-"</span>), gen_multi_lang ) }</span></p> <section class="section"> <h2 class="title">"Monster stats"</h2> <table> <thead><tr> <th>"Monster"</th> <th>"Size (?)"</th> <th>"HP"</th> <th>"Attack"</th> <th>"Parts"</th> <th>"Defense"</th> <th>"Element A"</th> <th>"Element B"</th> <th>"Stun"</th> <th>"Exhaust"</th> 
<th>"Ride"</th> <th>"Stamina"</th> </tr></thead> <tbody> { quest.param.boss_em_type.iter().copied().enumerate() .filter(|&(_, em_type)|em_type != EmTypes::Em(0)) .map(|(i, em_type)|{ html!(<tr> { gen_monster_tag(quest, pedia, em_type) } { gen_quest_monster_data(quest.enemy_param.as_ref().map(|p|&p.param), em_type, i, pedia, pedia_ex) } </tr>) }) } </tbody> </table> </section> <section class="section"> <h2 class="title">"Multiplayer Factor (Column header might be wrong)"</h2> <table> <thead><tr> <th>"Monster"</th> <th>"HP"</th> <th>"Attack"</th> <th>"Parts"</th> <th>"Other parts"</th> <th>"Multi parts"</th> <th>"Defense"</th> <th>"Element A"</th> <th>"Element B"</th> <th>"Stun"</th> <th>"Exhaust"</th> <th>"Ride"</th> <th>"Monster to monster"</th> </tr></thead> <tbody> { quest.param.boss_em_type.iter().copied().enumerate() .filter(|&(_, em_type)|em_type != EmTypes::Em(0)) .map(|(i, em_type)|{ html!(<tr> { gen_monster_tag(quest, pedia, em_type) } { gen_quest_monster_multi_player_data( quest.enemy_param.as_ref().map(|p|&p.param), i, pedia) } </tr>) }) } </tbody> </table> </section> </div> </div> </main> </body> </html> ); write(&path, doc.to_string())?; Ok(()) } pub fn gen_quests(pedia: &Pedia, pedia_ex: &PediaEx<'_>, root: &Path) -> Result<()> { let quest_path = root.join("quest"); create_dir(&quest_path)?; for quest in &pedia_ex.quests { let path = quest_path.join(format!("{:06}.html", quest.param.quest_no)); gen_quest(quest, pedia, pedia_ex, &path)? } Ok(()) }
35.990172
117
0.41364
4ad2337de3ecef61aede82c4f764ba7e2072927e
88
pub mod chain_spec; pub mod rpc; pub mod service; pub mod silly_rpc; pub mod custom_rpc;
17.6
19
0.784091
e59113fc59c944dc6ea9f3992046705fb5a4c7b6
790
// LNP Node: node running lightning network protocol and generalized lightning // channels. // Written in 2020-2022 by // Dr. Maxim Orlovsky <[email protected]> // // To the extent possible under law, the author(s) have dedicated all // copyright and related and neighboring rights to this software to // the public domain worldwide. This software is distributed without // any warranty. // // You should have received a copy of the MIT License // along with this software. // If not, see <https://opensource.org/licenses/MIT>. use std::any::Any; use lnp::p2p::bolt::ChannelId; use crate::Error; pub trait Driver { fn init(channel_id: ChannelId, config: Box<dyn Any>) -> Result<Self, Error> where Self: Sized; fn store(&mut self) -> Result<(), Error>; }
28.214286
79
0.706329
38caa09f06c4264ce154d65b2366fac90a7ad66d
16,110
//! Provides userspace with access to a serial interface. //! //! Setup //! ----- //! //! You need a device that provides the `hil::uart::UART` trait. //! //! ```rust //! # use kernel::static_init; //! # use capsules::console::Console; //! //! let console = static_init!( //! Console<usart::USART>, //! Console::new(&usart::USART0, //! 115200, //! &mut console::WRITE_BUF, //! &mut console::READ_BUF, //! board_kernel.create_grant(&grant_cap))); //! hil::uart::UART::set_client(&usart::USART0, console); //! ``` //! //! Usage //! ----- //! //! The user must perform three steps in order to write a buffer: //! //! ```c //! // (Optional) Set a callback to be invoked when the buffer has been written //! subscribe(CONSOLE_DRIVER_NUM, 1, my_callback); //! // Share the buffer from userspace with the driver //! allow(CONSOLE_DRIVER_NUM, buffer, buffer_len_in_bytes); //! // Initiate the transaction //! command(CONSOLE_DRIVER_NUM, 1, len_to_write_in_bytes) //! ``` //! //! When the buffer has been written successfully, the buffer is released from //! the driver. Successive writes must call `allow` each time a buffer is to be //! written. use core::{cmp, mem}; use kernel::grant::Grant; use kernel::hil::uart; use kernel::processbuffer::{ReadOnlyProcessBuffer, ReadWriteProcessBuffer}; use kernel::processbuffer::{ReadableProcessBuffer, WriteableProcessBuffer}; use kernel::syscall::{CommandReturn, SyscallDriver}; use kernel::utilities::cells::{OptionalCell, TakeCell}; use kernel::{ErrorCode, ProcessId}; /// Syscall driver number. use crate::driver; pub const DRIVER_NUM: usize = driver::NUM::Console as usize; #[derive(Default)] pub struct App { write_buffer: ReadOnlyProcessBuffer, write_len: usize, write_remaining: usize, // How many bytes didn't fit in the buffer and still need to be printed. 
pending_write: bool, read_buffer: ReadWriteProcessBuffer, read_len: usize, } pub static mut WRITE_BUF: [u8; 64] = [0; 64]; pub static mut READ_BUF: [u8; 64] = [0; 64]; pub struct Console<'a> { uart: &'a dyn uart::UartData<'a>, apps: Grant<App, 3>, tx_in_progress: OptionalCell<ProcessId>, tx_buffer: TakeCell<'static, [u8]>, rx_in_progress: OptionalCell<ProcessId>, rx_buffer: TakeCell<'static, [u8]>, } impl<'a> Console<'a> { pub fn new( uart: &'a dyn uart::UartData<'a>, tx_buffer: &'static mut [u8], rx_buffer: &'static mut [u8], grant: Grant<App, 3>, ) -> Console<'a> { Console { uart: uart, apps: grant, tx_in_progress: OptionalCell::empty(), tx_buffer: TakeCell::new(tx_buffer), rx_in_progress: OptionalCell::empty(), rx_buffer: TakeCell::new(rx_buffer), } } /// Internal helper function for setting up a new send transaction fn send_new(&self, app_id: ProcessId, app: &mut App, len: usize) -> Result<(), ErrorCode> { app.write_len = cmp::min(len, app.write_buffer.len()); app.write_remaining = app.write_len; self.send(app_id, app); Ok(()) } /// Internal helper function for continuing a previously set up transaction. /// Returns `true` if this send is still active, or `false` if it has /// completed. fn send_continue(&self, app_id: ProcessId, app: &mut App) -> bool { if app.write_remaining > 0 { self.send(app_id, app); true } else { false } } /// Internal helper function for sending data for an existing transaction. /// Cannot fail. If can't send now, it will schedule for sending later. fn send(&self, app_id: ProcessId, app: &mut App) { if self.tx_in_progress.is_none() { self.tx_in_progress.set(app_id); self.tx_buffer.take().map(|buffer| { let len = app.write_buffer.enter(|data| data.len()).unwrap_or(0); if app.write_remaining > len { // A slice has changed under us and is now smaller than // what we need to write -- just write what we can. 
app.write_remaining = len; } let transaction_len = app .write_buffer .enter(|data| { for (i, c) in data[data.len() - app.write_remaining..data.len()] .iter() .enumerate() { if buffer.len() <= i { return i; // Short circuit on partial send } buffer[i] = c.get(); } app.write_remaining }) .unwrap_or(0); app.write_remaining -= transaction_len; let _ = self.uart.transmit_buffer(buffer, transaction_len); }); } else { app.pending_write = true; } } /// Internal helper function for starting a receive operation fn receive_new(&self, app_id: ProcessId, app: &mut App, len: usize) -> Result<(), ErrorCode> { if self.rx_buffer.is_none() { // For now, we tolerate only one concurrent receive operation on this console. // Competing apps will have to retry until success. return Err(ErrorCode::BUSY); } let read_len = cmp::min(len, app.read_buffer.len()); if read_len > self.rx_buffer.map_or(0, |buf| buf.len()) { // For simplicity, impose a small maximum receive length // instead of doing incremental reads Err(ErrorCode::INVAL) } else { // Note: We have ensured above that rx_buffer is present app.read_len = read_len; self.rx_buffer.take().map(|buffer| { self.rx_in_progress.set(app_id); let _ = self.uart.receive_buffer(buffer, app.read_len); }); Ok(()) } } } impl SyscallDriver for Console<'_> { /// Setup shared buffers. /// /// ### `allow_num` /// /// - `1`: Writeable buffer for read buffer fn allow_readwrite( &self, appid: ProcessId, allow_num: usize, mut slice: ReadWriteProcessBuffer, ) -> Result<ReadWriteProcessBuffer, (ReadWriteProcessBuffer, ErrorCode)> { let res = match allow_num { 1 => self .apps .enter(appid, |app, _| { mem::swap(&mut app.read_buffer, &mut slice); }) .map_err(ErrorCode::from), _ => Err(ErrorCode::NOSUPPORT), }; if let Err(e) = res { Err((slice, e)) } else { Ok(slice) } } /// Setup shared buffers. 
/// /// ### `allow_num` /// /// - `1`: Readonly buffer for write buffer fn allow_readonly( &self, appid: ProcessId, allow_num: usize, mut slice: ReadOnlyProcessBuffer, ) -> Result<ReadOnlyProcessBuffer, (ReadOnlyProcessBuffer, ErrorCode)> { let res = match allow_num { 1 => self .apps .enter(appid, |app, _| { mem::swap(&mut app.write_buffer, &mut slice); }) .map_err(ErrorCode::from), _ => Err(ErrorCode::NOSUPPORT), }; if let Err(e) = res { Err((slice, e)) } else { Ok(slice) } } // Setup callbacks. // // ### `subscribe_num` // // - `1`: Write buffer completed callback // - `2`: Read buffer completed callback /// Initiate serial transfers /// /// ### `command_num` /// /// - `0`: Driver check. /// - `1`: Transmits a buffer passed via `allow`, up to the length /// passed in `arg1` /// - `2`: Receives into a buffer passed via `allow`, up to the length /// passed in `arg1` /// - `3`: Cancel any in progress receives and return (via callback) /// what has been received so far. fn command(&self, cmd_num: usize, arg1: usize, _: usize, appid: ProcessId) -> CommandReturn { let res = self .apps .enter(appid, |app, _| { match cmd_num { 0 => Ok(()), 1 => { // putstr let len = arg1; self.send_new(appid, app, len) } 2 => { // getnstr let len = arg1; self.receive_new(appid, app, len) } 3 => { // Abort RX let _ = self.uart.receive_abort(); Ok(()) } _ => Err(ErrorCode::NOSUPPORT), } }) .map_err(ErrorCode::from); match res { Ok(Ok(())) => CommandReturn::success(), Ok(Err(e)) => CommandReturn::failure(e), Err(e) => CommandReturn::failure(e), } } fn allocate_grant(&self, processid: ProcessId) -> Result<(), kernel::process::Error> { self.apps.enter(processid, |_, _| {}) } } impl uart::TransmitClient for Console<'_> { fn transmitted_buffer( &self, buffer: &'static mut [u8], _tx_len: usize, _rcode: Result<(), ErrorCode>, ) { // Either print more from the AppSlice or send a callback to the // application. 
self.tx_buffer.replace(buffer); self.tx_in_progress.take().map(|appid| { self.apps.enter(appid, |app, upcalls| { match self.send_continue(appid, app) { true => { // Still more to send. Wait to notify the process. } false => { // Go ahead and signal the application let written = app.write_len; app.write_len = 0; upcalls.schedule_upcall(1, (written, 0, 0)).ok(); } } }) }); // If we are not printing more from the current AppSlice, // see if any other applications have pending messages. if self.tx_in_progress.is_none() { for cntr in self.apps.iter() { let appid = cntr.processid(); let started_tx = cntr.enter(|app, _upcalls| { if app.pending_write { app.pending_write = false; self.send_continue(appid, app) } else { false } }); if started_tx { break; } } } } } impl uart::ReceiveClient for Console<'_> { fn received_buffer( &self, buffer: &'static mut [u8], rx_len: usize, rcode: Result<(), ErrorCode>, error: uart::Error, ) { self.rx_in_progress .take() .map(|appid| { self.apps .enter(appid, |app, upcalls| { // An iterator over the returned buffer yielding only the first `rx_len` // bytes let rx_buffer = buffer.iter().take(rx_len); match error { uart::Error::None | uart::Error::Aborted => { // Receive some bytes, signal error type and return bytes to process buffer let count = app .read_buffer .mut_enter(|data| { let mut c = 0; for (a, b) in data.iter().zip(rx_buffer) { c = c + 1; a.set(*b); } c }) .unwrap_or(-1); // Make sure we report the same number // of bytes that we actually copied into // the app's buffer. This is defensive: // we shouldn't ever receive more bytes // than will fit in the app buffer since // we use the app_buffer's length when // calling `receive()`. However, a buggy // lower layer could return more bytes // than we asked for, and we don't want // to propagate that length error to // userspace. However, we do return an // error code so that userspace knows // something went wrong. // // If count < 0 this means the buffer // disappeared: return NOMEM. 
let (ret, received_length) = if count < 0 { (Err(ErrorCode::NOMEM), 0) } else if rx_len > app.read_buffer.len() { // Return `SIZE` indicating that // some received bytes were dropped. // We report the length that we // actually copied into the buffer, // but also indicate that there was // an issue in the kernel with the // receive. (Err(ErrorCode::SIZE), app.read_buffer.len()) } else { // This is the normal and expected // case. (rcode, rx_len) }; upcalls .schedule_upcall( 2, ( kernel::errorcode::into_statuscode(ret), received_length, 0, ), ) .ok(); } _ => { // Some UART error occurred upcalls .schedule_upcall( 2, ( kernel::errorcode::into_statuscode(Err( ErrorCode::FAIL, )), 0, 0, ), ) .ok(); } } }) .unwrap_or_default(); }) .unwrap_or_default(); // Whatever happens, we want to make sure to replace the rx_buffer for future transactions self.rx_buffer.replace(buffer); } }
37.205543
107
0.444382
e2a1c5fb9be08528ec82ac3908573b45ba79fb82
5,431
use crate::backend::Backend;
use crate::tensor::{Element, Tensor};
use std::fmt;

// From ndarray
/// Default threshold, below this element count, we don't ellipsize
const ARRAY_MANY_ELEMENT_LIMIT: usize = 500;
/// Limit of element count for non-last axes before overflowing with an ellipsis.
const AXIS_LIMIT_STACKED: usize = 6;
/// Limit for next to last axis (printed as column)
/// An odd number because one element uses the same space as the ellipsis.
const AXIS_LIMIT_COL: usize = 11;
/// Limit for last axis (printed as row)
/// An odd number because one element uses approximately the space of the ellipsis.
const AXIS_LIMIT_ROW: usize = 11;
#[cfg(test)]
// Test value to use for size of overflowing 2D arrays
const AXIS_2D_OVERFLOW_LIMIT: usize = 22;
/// The string used as an ellipsis.
const ELLIPSIS: &str = "...";

/// Per-axis collapse limits used while rendering a tensor.
/// Limits of `usize::MAX` mean "never ellipsize".
#[derive(Clone, Debug)]
struct FormatOptions {
    // Limit for all axes other than the last two.
    axis_collapse_limit: usize,
    // Limit for the next-to-last axis (rendered as rows of a 2-D sub-block).
    axis_collapse_limit_next_last: usize,
    // Limit for the last axis (rendered inline, comma-separated).
    axis_collapse_limit_last: usize,
}

impl FormatOptions {
    /// Builds the default options for an array of `nelem` elements.
    ///
    /// Ellipsizing is disabled either when explicitly requested via
    /// `no_limit` (the `Debug` impl passes `f.alternate()`, i.e. `{:#?}`)
    /// or when the array is small (fewer than `ARRAY_MANY_ELEMENT_LIMIT`
    /// elements), in which case everything is printed in full.
    pub(crate) fn default_for_array(nelem: usize, no_limit: bool) -> Self {
        let default = Self {
            axis_collapse_limit: AXIS_LIMIT_STACKED,
            axis_collapse_limit_next_last: AXIS_LIMIT_COL,
            axis_collapse_limit_last: AXIS_LIMIT_ROW,
        };
        default.set_no_limit(no_limit || nelem < ARRAY_MANY_ELEMENT_LIMIT)
    }

    /// Returns `self` with all limits raised to `usize::MAX` when
    /// `no_limit` is true; otherwise returns `self` unchanged.
    fn set_no_limit(mut self, no_limit: bool) -> Self {
        if no_limit {
            self.axis_collapse_limit = usize::MAX;
            self.axis_collapse_limit_next_last = usize::MAX;
            self.axis_collapse_limit_last = usize::MAX;
            self
        } else {
            self
        }
    }

    /// Axis length collapse limit before ellipsizing, where `axis_rindex` is
    /// the index of the axis from the back.
    pub(crate) fn collapse_limit(&self, axis_rindex: usize) -> usize {
        match axis_rindex {
            0 => self.axis_collapse_limit_last,
            1 => self.axis_collapse_limit_next_last,
            _ => self.axis_collapse_limit,
        }
    }
}

/// Formats the contents of a list of items, using an ellipsis to indicate when
/// the `length` of the list is greater than `limit`.
///
/// # Parameters
///
/// * `f`: The formatter.
/// * `length`: The length of the list.
/// * `limit`: The maximum number of items before overflow.
/// * `separator`: Separator to write between items.
/// * `ellipsis`: Ellipsis for indicating overflow.
/// * `fmt_elem`: A function that formats an element in the list, given the
///   formatter and the index of the item in the list.
fn format_with_overflow(
    f: &mut fmt::Formatter<'_>,
    length: usize,
    limit: usize,
    separator: &str,
    ellipsis: &str,
    fmt_elem: &mut dyn FnMut(&mut fmt::Formatter, usize) -> fmt::Result,
) -> fmt::Result {
    if length == 0 {
        // no-op
    } else if length <= limit {
        // Everything fits: print all elements, separator-joined.
        fmt_elem(f, 0)?;
        for i in 1..length {
            f.write_str(separator)?;
            fmt_elem(f, i)?
        }
    } else {
        // Overflow: print the first `edge` elements (indices 0..edge),
        // the ellipsis, then the last `edge` elements. With an odd `limit`
        // the ellipsis occupies the remaining slot.
        let edge = limit / 2;
        fmt_elem(f, 0)?;
        for i in 1..edge {
            f.write_str(separator)?;
            fmt_elem(f, i)?;
        }
        f.write_str(separator)?;
        f.write_str(ellipsis)?;
        for i in length - edge..length {
            f.write_str(separator)?;
            fmt_elem(f, i)?
        }
    }
    Ok(())
}

/// Entry point: renders `tensor` with `format` applied to each element.
/// Starts the recursion at depth 0 with the tensor's full order (rank).
fn format<T, F>(
    tensor: &Tensor<T>,
    f: &mut fmt::Formatter<'_>,
    format: F,
    fmt_opt: &FormatOptions,
) -> fmt::Result
where
    T: Element,
    F: FnMut(&T, &mut fmt::Formatter<'_>) -> fmt::Result + Clone,
{
    format_inner(tensor, f, format, fmt_opt, 0, tensor.order())
}

/// Recursive worker: prints one axis per call, bracketed, recursing into
/// sub-tensors (`tensor.index(index, 0)`) until the 1-D base case.
///
/// `depth` is how many axes have been consumed so far; `full_ndim` is the
/// rank of the outermost tensor, so `full_ndim - depth - 1` is the current
/// axis' index counted from the back (as `collapse_limit` expects).
fn format_inner<T, F>(
    tensor: &Tensor<T>,
    f: &mut fmt::Formatter<'_>,
    mut format: F,
    fmt_opt: &FormatOptions,
    depth: usize,
    full_ndim: usize,
) -> fmt::Result
where
    T: Element,
    F: FnMut(&T, &mut fmt::Formatter<'_>) -> fmt::Result + Clone,
{
    match tensor.shape() {
        &[len] => {
            // 1-D base case: read elements straight out of the backing
            // buffer, mapping logical index -> physical offset via the
            // memory layout. Assumes the buffer is on the native backend —
            // the Debug impl below guarantees this by assigning first.
            let buffer_data = tensor.buffer.as_native().data();
            f.write_str("[")?;
            format_with_overflow(
                f,
                len,
                fmt_opt.collapse_limit(0),
                ", ",
                ELLIPSIS,
                &mut |f, index| format(&buffer_data[tensor.mem_layout.translate_default(index)], f),
            )?;
            f.write_str("]")?;
        }
        shape => {
            // N-D case (N >= 2): sub-blocks are separated by a comma, some
            // blank lines (more for higher-rank tensors), and indentation
            // matching the current nesting depth.
            // NOTE(review): `shape.len() - 2` underflows for a 0-D tensor
            // (empty shape also lands in this arm) — presumably 0-D tensors
            // are unreachable here; confirm.
            let blank_lines = "\n".repeat(shape.len() - 2);
            let indent = " ".repeat(depth + 1);
            let separator = format!(",\n{}{}", blank_lines, indent);
            f.write_str("[")?;
            let limit = fmt_opt.collapse_limit(full_ndim - depth - 1);
            format_with_overflow(f, shape[0], limit, &separator, ELLIPSIS, &mut |f, index| {
                format_inner(
                    &tensor.index(index, 0),
                    f,
                    format.clone(),
                    fmt_opt,
                    depth + 1,
                    full_ndim,
                )
            })?;
            f.write_str("]")?;
        }
    }
    Ok(())
}

impl<T> fmt::Debug for Tensor<T>
where
    T: Element + fmt::Debug,
{
    /// Renders the tensor in an ndarray-like nested-bracket form.
    ///
    /// The alternate flag (`{:#?}`) disables ellipsizing entirely. The
    /// tensor is first materialized on the native backend so the base case
    /// can read its buffer directly.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let fmt_opt = FormatOptions::default_for_array(self.size(), f.alternate());
        format(&self.assign(Backend::Native), f, <_>::fmt, &fmt_opt)
    }
}
30.172222
100
0.573375
efb145dbb9d273f38e3eeac77a9221774ede238f
1,319
use failure::format_err; use failure::Error; use protobuf::stream::CodedInputStream; use protobuf::well_known_types::Any; use serde_protobuf::descriptor::{Descriptors, MessageDescriptor}; use serde_protobuf::value::Message; pub fn make_any(type_url: &str, msg: &Message) -> Result<Any, Error> { let mut ret = Any::new(); let buf = msg.write_to_bytes(); ret.set_type_url( format!( "type.googleapis.com/{}", type_url.to_string().strip_prefix(".").unwrap_or(&type_url) ) .to_string(), ); ret.set_value(buf?); Ok(ret) } pub fn unpack_any<'a>( msg: &'a Any, descriptors: &'a Descriptors, ) -> Result<(&'a MessageDescriptor, Message), Error> { // TODO(melvin): figure out what the deal is with these descriptor paths. All this path munging is annoying. let parts: Vec<&str> = msg.get_type_url().split(".").collect(); let mtype = format!(".{}", parts[parts.len() - 1]); if let Some(desc) = descriptors.message_by_name(&mtype) { let mut stream = CodedInputStream::from_bytes(msg.get_value()); let mut ret = Message::new(desc); ret.merge_from(descriptors, desc, &mut stream)?; return Ok((desc, ret)); } else { return Err(format_err!("could not find descriptor for {}", mtype)); } }
34.710526
112
0.63533
d907942dc5c0a74839a13b18752fbf5f018903c4
1,995
use super::state::{CasesState, CASES};
use crate::welcome::state::{GlobalScene, SceneState, SCENE};
use anyhow::Error;
use rate_ui::shared_object::{DataChanged, SharedObject};
use rate_ui::widget::{Context, NotificationHandler, Widget, WidgetRuntime};
use yew::{html, Html};

/// Concrete runtime type for the dashboard widget — the type callers mount.
pub type Dashboard = WidgetRuntime<DashboardWidget>;

/// Dashboard scene widget: shows the currently selected case layout, or a
/// hint directing the user to the Explorer when no cases are available.
pub struct DashboardWidget {
    // Shared global scene state; written when navigating to the Explorer.
    scene: SharedObject<SceneState>,
    // Shared cases state; read to obtain the layout to render.
    cases: SharedObject<CasesState>,
}

impl Default for DashboardWidget {
    /// Grabs handles to the thread-local shared SCENE and CASES objects.
    fn default() -> Self {
        Self {
            scene: SCENE.with(SharedObject::clone),
            cases: CASES.with(SharedObject::clone),
        }
    }
}

/// UI events handled by this widget.
#[derive(Clone)]
pub enum Msg {
    // User clicked the "Explorer" link.
    ToExplorer,
}

impl Widget for DashboardWidget {
    type Event = Msg;
    type Tag = ();
    type Properties = ();
    type Meta = ();

    /// Subscribes to CASES changes so the view re-renders when cases load.
    fn init(&mut self, ctx: &mut Context<Self>) {
        self.cases.subscribe(ctx);
    }

    /// Handles UI events; `ToExplorer` switches the global scene so the
    /// router shows the Explorer instead of the dashboard.
    fn on_event(&mut self, event: Self::Event, _ctx: &mut Context<Self>) {
        match event {
            Msg::ToExplorer => {
                let mut scene = self.scene.write();
                scene.global_scene = GlobalScene::Explorer;
            }
        }
    }

    /// Renders the selected layout tab if one exists, otherwise a message
    /// with a link that navigates to the Explorer.
    fn view(&self, ctx: &Context<Self>) -> Html {
        let state = self.cases.read();
        if let Some(layout) = state.get_layout_tab() {
            html! {
                <super::LayoutViewer layout=layout.clone() />
            }
        } else {
            html! {
                <div>
                    <p>
                        { "No cases available. Try to use: " }
                        <a class="link click" onclick=ctx.event(Msg::ToExplorer)>{ "Explorer" }</a>
                    </p>
                </div>
            }
        }
    }
}

impl NotificationHandler<DataChanged<CasesState>> for DashboardWidget {
    /// Any change to the shared CASES state triggers a redraw of this view.
    fn handle(
        &mut self,
        _event: DataChanged<CasesState>,
        ctx: &mut Context<Self>,
    ) -> Result<(), Error> {
        ctx.redraw();
        Ok(())
    }
}
25.909091
99
0.540852
38f81684e657b090f8cadf3337537802b7d60737
46,988
// Generated from definition io.k8s.api.extensions.v1beta1.NetworkPolicy /// DEPRECATED 1.9 - This group version of NetworkPolicy is deprecated by networking/v1/NetworkPolicy. NetworkPolicy describes what network traffic is allowed for a set of Pods #[derive(Clone, Debug, Default, PartialEq)] pub struct NetworkPolicy { /// Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata pub metadata: Option<crate::v1_10::apimachinery::pkg::apis::meta::v1::ObjectMeta>, /// Specification of the desired behavior for this NetworkPolicy. pub spec: Option<crate::v1_10::api::extensions::v1beta1::NetworkPolicySpec>, } // Begin extensions/v1beta1/NetworkPolicy // Generated from operation createExtensionsV1beta1NamespacedNetworkPolicy impl NetworkPolicy { /// create a NetworkPolicy /// /// Use the returned [`crate::ResponseBody`]`<`[`CreateNamespacedNetworkPolicyResponse`]`>` constructor, or [`CreateNamespacedNetworkPolicyResponse`] directly, to parse the HTTP response. /// /// # Arguments /// /// * `namespace` /// /// object name and auth scope, such as for teams and projects /// /// * `body` /// /// * `optional` /// /// Optional parameters. Use `Default::default()` to not pass any. 
#[cfg(feature = "api")] pub fn create_namespaced_network_policy( namespace: &str, body: &crate::v1_10::api::extensions::v1beta1::NetworkPolicy, optional: CreateNamespacedNetworkPolicyOptional<'_>, ) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<CreateNamespacedNetworkPolicyResponse>), crate::RequestError> { let CreateNamespacedNetworkPolicyOptional { pretty, } = optional; let __url = format!("/apis/extensions/v1beta1/namespaces/{namespace}/networkpolicies?", namespace = crate::url::percent_encoding::percent_encode(namespace.as_bytes(), crate::url::percent_encoding::PATH_SEGMENT_ENCODE_SET), ); let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url); if let Some(pretty) = pretty { __query_pairs.append_pair("pretty", pretty); } let __url = __query_pairs.finish(); let mut __request = http::Request::post(__url); let __body = serde_json::to_vec(body).map_err(crate::RequestError::Json)?; __request.header(http::header::CONTENT_TYPE, http::header::HeaderValue::from_static("application/json")); match __request.body(__body) { Ok(request) => Ok((request, crate::ResponseBody::new)), Err(err) => Err(crate::RequestError::Http(err)), } } } /// Optional parameters of [`NetworkPolicy::create_namespaced_network_policy`] #[cfg(feature = "api")] #[derive(Clone, Copy, Debug, Default)] pub struct CreateNamespacedNetworkPolicyOptional<'a> { /// If 'true', then the output is pretty printed. 
pub pretty: Option<&'a str>, } /// Use `<CreateNamespacedNetworkPolicyResponse as Response>::try_from_parts` to parse the HTTP response body of [`NetworkPolicy::create_namespaced_network_policy`] #[cfg(feature = "api")] #[derive(Debug)] pub enum CreateNamespacedNetworkPolicyResponse { Ok(crate::v1_10::api::extensions::v1beta1::NetworkPolicy), Created(crate::v1_10::api::extensions::v1beta1::NetworkPolicy), Accepted(crate::v1_10::api::extensions::v1beta1::NetworkPolicy), Other(Result<Option<serde_json::Value>, serde_json::Error>), } #[cfg(feature = "api")] impl crate::Response for CreateNamespacedNetworkPolicyResponse { fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> { match status_code { http::StatusCode::OK => { let result = match serde_json::from_slice(buf) { Ok(value) => value, Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData), Err(err) => return Err(crate::ResponseError::Json(err)), }; Ok((CreateNamespacedNetworkPolicyResponse::Ok(result), buf.len())) }, http::StatusCode::CREATED => { let result = match serde_json::from_slice(buf) { Ok(value) => value, Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData), Err(err) => return Err(crate::ResponseError::Json(err)), }; Ok((CreateNamespacedNetworkPolicyResponse::Created(result), buf.len())) }, http::StatusCode::ACCEPTED => { let result = match serde_json::from_slice(buf) { Ok(value) => value, Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData), Err(err) => return Err(crate::ResponseError::Json(err)), }; Ok((CreateNamespacedNetworkPolicyResponse::Accepted(result), buf.len())) }, _ => { let (result, read) = if buf.is_empty() { (Ok(None), 0) } else { match serde_json::from_slice(buf) { Ok(value) => (Ok(Some(value)), buf.len()), Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData), Err(err) => (Err(err), 0), } }; 
Ok((CreateNamespacedNetworkPolicyResponse::Other(result), read)) }, } } } // Generated from operation deleteExtensionsV1beta1CollectionNamespacedNetworkPolicy impl NetworkPolicy { /// delete collection of NetworkPolicy /// /// Use the returned [`crate::ResponseBody`]`<`[`DeleteCollectionNamespacedNetworkPolicyResponse`]`>` constructor, or [`DeleteCollectionNamespacedNetworkPolicyResponse`] directly, to parse the HTTP response. /// /// # Arguments /// /// * `namespace` /// /// object name and auth scope, such as for teams and projects /// /// * `delete_optional` /// /// Delete options. Use `Default::default()` to not pass any. /// /// * `list_optional` /// /// List options. Use `Default::default()` to not pass any. #[cfg(feature = "api")] pub fn delete_collection_namespaced_network_policy( namespace: &str, delete_optional: crate::v1_10::DeleteOptional<'_>, list_optional: crate::v1_10::ListOptional<'_>, ) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<DeleteCollectionNamespacedNetworkPolicyResponse>), crate::RequestError> { let __url = format!("/apis/extensions/v1beta1/namespaces/{namespace}/networkpolicies?", namespace = crate::url::percent_encoding::percent_encode(namespace.as_bytes(), crate::url::percent_encoding::PATH_SEGMENT_ENCODE_SET), ); let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url); list_optional.__serialize(&mut __query_pairs); let __url = __query_pairs.finish(); let mut __request = http::Request::delete(__url); let __body = serde_json::to_vec(&delete_optional).map_err(crate::RequestError::Json)?; __request.header(http::header::CONTENT_TYPE, http::header::HeaderValue::from_static("application/json")); match __request.body(__body) { Ok(request) => Ok((request, crate::ResponseBody::new)), Err(err) => Err(crate::RequestError::Http(err)), } } } /// Use `<DeleteCollectionNamespacedNetworkPolicyResponse as Response>::try_from_parts` to parse the HTTP response body of 
[`NetworkPolicy::delete_collection_namespaced_network_policy`] #[cfg(feature = "api")] #[derive(Debug)] pub enum DeleteCollectionNamespacedNetworkPolicyResponse { OkStatus(crate::v1_10::apimachinery::pkg::apis::meta::v1::Status), OkValue(crate::v1_10::api::extensions::v1beta1::NetworkPolicyList), Other(Result<Option<serde_json::Value>, serde_json::Error>), } #[cfg(feature = "api")] impl crate::Response for DeleteCollectionNamespacedNetworkPolicyResponse { fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> { match status_code { http::StatusCode::OK => { let result: serde_json::Map<String, serde_json::Value> = match serde_json::from_slice(buf) { Ok(value) => value, Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData), Err(err) => return Err(crate::ResponseError::Json(err)), }; let is_status = match result.get("kind") { Some(serde_json::Value::String(s)) if s == "Status" => true, _ => false, }; if is_status { let result = serde::Deserialize::deserialize(serde_json::Value::Object(result)); let result = result.map_err(crate::ResponseError::Json)?; Ok((DeleteCollectionNamespacedNetworkPolicyResponse::OkStatus(result), buf.len())) } else { let result = serde::Deserialize::deserialize(serde_json::Value::Object(result)); let result = result.map_err(crate::ResponseError::Json)?; Ok((DeleteCollectionNamespacedNetworkPolicyResponse::OkValue(result), buf.len())) } }, _ => { let (result, read) = if buf.is_empty() { (Ok(None), 0) } else { match serde_json::from_slice(buf) { Ok(value) => (Ok(Some(value)), buf.len()), Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData), Err(err) => (Err(err), 0), } }; Ok((DeleteCollectionNamespacedNetworkPolicyResponse::Other(result), read)) }, } } } // Generated from operation deleteExtensionsV1beta1NamespacedNetworkPolicy impl NetworkPolicy { /// delete a NetworkPolicy /// /// Use the returned 
[`crate::ResponseBody`]`<`[`DeleteNamespacedNetworkPolicyResponse`]`>` constructor, or [`DeleteNamespacedNetworkPolicyResponse`] directly, to parse the HTTP response. /// /// # Arguments /// /// * `name` /// /// name of the NetworkPolicy /// /// * `namespace` /// /// object name and auth scope, such as for teams and projects /// /// * `optional` /// /// Optional parameters. Use `Default::default()` to not pass any. #[cfg(feature = "api")] pub fn delete_namespaced_network_policy( name: &str, namespace: &str, optional: crate::v1_10::DeleteOptional<'_>, ) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<DeleteNamespacedNetworkPolicyResponse>), crate::RequestError> { let __url = format!("/apis/extensions/v1beta1/namespaces/{namespace}/networkpolicies/{name}", name = crate::url::percent_encoding::percent_encode(name.as_bytes(), crate::url::percent_encoding::PATH_SEGMENT_ENCODE_SET), namespace = crate::url::percent_encoding::percent_encode(namespace.as_bytes(), crate::url::percent_encoding::PATH_SEGMENT_ENCODE_SET), ); let mut __request = http::Request::delete(__url); let __body = serde_json::to_vec(&optional).map_err(crate::RequestError::Json)?; __request.header(http::header::CONTENT_TYPE, http::header::HeaderValue::from_static("application/json")); match __request.body(__body) { Ok(request) => Ok((request, crate::ResponseBody::new)), Err(err) => Err(crate::RequestError::Http(err)), } } } /// Use `<DeleteNamespacedNetworkPolicyResponse as Response>::try_from_parts` to parse the HTTP response body of [`NetworkPolicy::delete_namespaced_network_policy`] #[cfg(feature = "api")] #[derive(Debug)] pub enum DeleteNamespacedNetworkPolicyResponse { OkStatus(crate::v1_10::apimachinery::pkg::apis::meta::v1::Status), OkValue(crate::v1_10::api::extensions::v1beta1::NetworkPolicy), Other(Result<Option<serde_json::Value>, serde_json::Error>), } #[cfg(feature = "api")] impl crate::Response for DeleteNamespacedNetworkPolicyResponse { fn 
try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> { match status_code { http::StatusCode::OK => { let result: serde_json::Map<String, serde_json::Value> = match serde_json::from_slice(buf) { Ok(value) => value, Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData), Err(err) => return Err(crate::ResponseError::Json(err)), }; let is_status = match result.get("kind") { Some(serde_json::Value::String(s)) if s == "Status" => true, _ => false, }; if is_status { let result = serde::Deserialize::deserialize(serde_json::Value::Object(result)); let result = result.map_err(crate::ResponseError::Json)?; Ok((DeleteNamespacedNetworkPolicyResponse::OkStatus(result), buf.len())) } else { let result = serde::Deserialize::deserialize(serde_json::Value::Object(result)); let result = result.map_err(crate::ResponseError::Json)?; Ok((DeleteNamespacedNetworkPolicyResponse::OkValue(result), buf.len())) } }, _ => { let (result, read) = if buf.is_empty() { (Ok(None), 0) } else { match serde_json::from_slice(buf) { Ok(value) => (Ok(Some(value)), buf.len()), Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData), Err(err) => (Err(err), 0), } }; Ok((DeleteNamespacedNetworkPolicyResponse::Other(result), read)) }, } } } // Generated from operation listExtensionsV1beta1NamespacedNetworkPolicy impl NetworkPolicy { /// list or watch objects of kind NetworkPolicy /// /// This operation only supports listing all items of this type. /// /// Use the returned [`crate::ResponseBody`]`<`[`ListNamespacedNetworkPolicyResponse`]`>` constructor, or [`ListNamespacedNetworkPolicyResponse`] directly, to parse the HTTP response. /// /// # Arguments /// /// * `namespace` /// /// object name and auth scope, such as for teams and projects /// /// * `optional` /// /// Optional parameters. Use `Default::default()` to not pass any. 
#[cfg(feature = "api")] pub fn list_namespaced_network_policy( namespace: &str, optional: crate::v1_10::ListOptional<'_>, ) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<ListNamespacedNetworkPolicyResponse>), crate::RequestError> { let __url = format!("/apis/extensions/v1beta1/namespaces/{namespace}/networkpolicies?", namespace = crate::url::percent_encoding::percent_encode(namespace.as_bytes(), crate::url::percent_encoding::PATH_SEGMENT_ENCODE_SET), ); let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url); optional.__serialize(&mut __query_pairs); let __url = __query_pairs.finish(); let mut __request = http::Request::get(__url); let __body = vec![]; match __request.body(__body) { Ok(request) => Ok((request, crate::ResponseBody::new)), Err(err) => Err(crate::RequestError::Http(err)), } } } /// Use `<ListNamespacedNetworkPolicyResponse as Response>::try_from_parts` to parse the HTTP response body of [`NetworkPolicy::list_namespaced_network_policy`] #[cfg(feature = "api")] #[derive(Debug)] pub enum ListNamespacedNetworkPolicyResponse { Ok(crate::v1_10::api::extensions::v1beta1::NetworkPolicyList), Other(Result<Option<serde_json::Value>, serde_json::Error>), } #[cfg(feature = "api")] impl crate::Response for ListNamespacedNetworkPolicyResponse { fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> { match status_code { http::StatusCode::OK => { let result = match serde_json::from_slice(buf) { Ok(value) => value, Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData), Err(err) => return Err(crate::ResponseError::Json(err)), }; Ok((ListNamespacedNetworkPolicyResponse::Ok(result), buf.len())) }, _ => { let (result, read) = if buf.is_empty() { (Ok(None), 0) } else { match serde_json::from_slice(buf) { Ok(value) => (Ok(Some(value)), buf.len()), Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData), Err(err) => 
(Err(err), 0), } }; Ok((ListNamespacedNetworkPolicyResponse::Other(result), read)) }, } } } // Generated from operation listExtensionsV1beta1NetworkPolicyForAllNamespaces impl NetworkPolicy { /// list or watch objects of kind NetworkPolicy /// /// This operation only supports listing all items of this type. /// /// Use the returned [`crate::ResponseBody`]`<`[`ListNetworkPolicyForAllNamespacesResponse`]`>` constructor, or [`ListNetworkPolicyForAllNamespacesResponse`] directly, to parse the HTTP response. /// /// # Arguments /// /// * `optional` /// /// Optional parameters. Use `Default::default()` to not pass any. #[cfg(feature = "api")] pub fn list_network_policy_for_all_namespaces( optional: crate::v1_10::ListOptional<'_>, ) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<ListNetworkPolicyForAllNamespacesResponse>), crate::RequestError> { let __url = "/apis/extensions/v1beta1/networkpolicies?".to_owned(); let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url); optional.__serialize(&mut __query_pairs); let __url = __query_pairs.finish(); let mut __request = http::Request::get(__url); let __body = vec![]; match __request.body(__body) { Ok(request) => Ok((request, crate::ResponseBody::new)), Err(err) => Err(crate::RequestError::Http(err)), } } } /// Use `<ListNetworkPolicyForAllNamespacesResponse as Response>::try_from_parts` to parse the HTTP response body of [`NetworkPolicy::list_network_policy_for_all_namespaces`] #[cfg(feature = "api")] #[derive(Debug)] pub enum ListNetworkPolicyForAllNamespacesResponse { Ok(crate::v1_10::api::extensions::v1beta1::NetworkPolicyList), Other(Result<Option<serde_json::Value>, serde_json::Error>), } #[cfg(feature = "api")] impl crate::Response for ListNetworkPolicyForAllNamespacesResponse { fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> { match status_code { http::StatusCode::OK => { let result = match 
serde_json::from_slice(buf) { Ok(value) => value, Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData), Err(err) => return Err(crate::ResponseError::Json(err)), }; Ok((ListNetworkPolicyForAllNamespacesResponse::Ok(result), buf.len())) }, _ => { let (result, read) = if buf.is_empty() { (Ok(None), 0) } else { match serde_json::from_slice(buf) { Ok(value) => (Ok(Some(value)), buf.len()), Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData), Err(err) => (Err(err), 0), } }; Ok((ListNetworkPolicyForAllNamespacesResponse::Other(result), read)) }, } } } // Generated from operation patchExtensionsV1beta1NamespacedNetworkPolicy impl NetworkPolicy { /// partially update the specified NetworkPolicy /// /// Use the returned [`crate::ResponseBody`]`<`[`PatchNamespacedNetworkPolicyResponse`]`>` constructor, or [`PatchNamespacedNetworkPolicyResponse`] directly, to parse the HTTP response. /// /// # Arguments /// /// * `name` /// /// name of the NetworkPolicy /// /// * `namespace` /// /// object name and auth scope, such as for teams and projects /// /// * `body` /// /// * `optional` /// /// Optional parameters. Use `Default::default()` to not pass any. 
#[cfg(feature = "api")] pub fn patch_namespaced_network_policy( name: &str, namespace: &str, body: &crate::v1_10::apimachinery::pkg::apis::meta::v1::Patch, optional: crate::v1_10::PatchOptional<'_>, ) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<PatchNamespacedNetworkPolicyResponse>), crate::RequestError> { let __url = format!("/apis/extensions/v1beta1/namespaces/{namespace}/networkpolicies/{name}?", name = crate::url::percent_encoding::percent_encode(name.as_bytes(), crate::url::percent_encoding::PATH_SEGMENT_ENCODE_SET), namespace = crate::url::percent_encoding::percent_encode(namespace.as_bytes(), crate::url::percent_encoding::PATH_SEGMENT_ENCODE_SET), ); let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url); optional.__serialize(&mut __query_pairs); let __url = __query_pairs.finish(); let mut __request = http::Request::patch(__url); let __body = serde_json::to_vec(body).map_err(crate::RequestError::Json)?; __request.header(http::header::CONTENT_TYPE, http::header::HeaderValue::from_static(match body { crate::v1_10::apimachinery::pkg::apis::meta::v1::Patch::Json(_) => "application/json-patch+json", crate::v1_10::apimachinery::pkg::apis::meta::v1::Patch::Merge(_) => "application/merge-patch+json", crate::v1_10::apimachinery::pkg::apis::meta::v1::Patch::StrategicMerge(_) => "application/strategic-merge-patch+json", })); match __request.body(__body) { Ok(request) => Ok((request, crate::ResponseBody::new)), Err(err) => Err(crate::RequestError::Http(err)), } } } /// Use `<PatchNamespacedNetworkPolicyResponse as Response>::try_from_parts` to parse the HTTP response body of [`NetworkPolicy::patch_namespaced_network_policy`] #[cfg(feature = "api")] #[derive(Debug)] pub enum PatchNamespacedNetworkPolicyResponse { Ok(crate::v1_10::api::extensions::v1beta1::NetworkPolicy), Other(Result<Option<serde_json::Value>, serde_json::Error>), } #[cfg(feature = "api")] impl crate::Response for 
PatchNamespacedNetworkPolicyResponse { fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> { match status_code { http::StatusCode::OK => { let result = match serde_json::from_slice(buf) { Ok(value) => value, Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData), Err(err) => return Err(crate::ResponseError::Json(err)), }; Ok((PatchNamespacedNetworkPolicyResponse::Ok(result), buf.len())) }, _ => { let (result, read) = if buf.is_empty() { (Ok(None), 0) } else { match serde_json::from_slice(buf) { Ok(value) => (Ok(Some(value)), buf.len()), Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData), Err(err) => (Err(err), 0), } }; Ok((PatchNamespacedNetworkPolicyResponse::Other(result), read)) }, } } } // Generated from operation readExtensionsV1beta1NamespacedNetworkPolicy impl NetworkPolicy { /// read the specified NetworkPolicy /// /// Use the returned [`crate::ResponseBody`]`<`[`ReadNamespacedNetworkPolicyResponse`]`>` constructor, or [`ReadNamespacedNetworkPolicyResponse`] directly, to parse the HTTP response. /// /// # Arguments /// /// * `name` /// /// name of the NetworkPolicy /// /// * `namespace` /// /// object name and auth scope, such as for teams and projects /// /// * `optional` /// /// Optional parameters. Use `Default::default()` to not pass any. 
#[cfg(feature = "api")] pub fn read_namespaced_network_policy( name: &str, namespace: &str, optional: ReadNamespacedNetworkPolicyOptional<'_>, ) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<ReadNamespacedNetworkPolicyResponse>), crate::RequestError> { let ReadNamespacedNetworkPolicyOptional { exact, export, pretty, } = optional; let __url = format!("/apis/extensions/v1beta1/namespaces/{namespace}/networkpolicies/{name}?", name = crate::url::percent_encoding::percent_encode(name.as_bytes(), crate::url::percent_encoding::PATH_SEGMENT_ENCODE_SET), namespace = crate::url::percent_encoding::percent_encode(namespace.as_bytes(), crate::url::percent_encoding::PATH_SEGMENT_ENCODE_SET), ); let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url); if let Some(exact) = exact { __query_pairs.append_pair("exact", &exact.to_string()); } if let Some(export) = export { __query_pairs.append_pair("export", &export.to_string()); } if let Some(pretty) = pretty { __query_pairs.append_pair("pretty", pretty); } let __url = __query_pairs.finish(); let mut __request = http::Request::get(__url); let __body = vec![]; match __request.body(__body) { Ok(request) => Ok((request, crate::ResponseBody::new)), Err(err) => Err(crate::RequestError::Http(err)), } } } /// Optional parameters of [`NetworkPolicy::read_namespaced_network_policy`] #[cfg(feature = "api")] #[derive(Clone, Copy, Debug, Default)] pub struct ReadNamespacedNetworkPolicyOptional<'a> { /// Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. pub exact: Option<bool>, /// Should this value be exported. Export strips fields that a user can not specify. pub export: Option<bool>, /// If 'true', then the output is pretty printed. 
pub pretty: Option<&'a str>, } /// Use `<ReadNamespacedNetworkPolicyResponse as Response>::try_from_parts` to parse the HTTP response body of [`NetworkPolicy::read_namespaced_network_policy`] #[cfg(feature = "api")] #[derive(Debug)] pub enum ReadNamespacedNetworkPolicyResponse { Ok(crate::v1_10::api::extensions::v1beta1::NetworkPolicy), Other(Result<Option<serde_json::Value>, serde_json::Error>), } #[cfg(feature = "api")] impl crate::Response for ReadNamespacedNetworkPolicyResponse { fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> { match status_code { http::StatusCode::OK => { let result = match serde_json::from_slice(buf) { Ok(value) => value, Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData), Err(err) => return Err(crate::ResponseError::Json(err)), }; Ok((ReadNamespacedNetworkPolicyResponse::Ok(result), buf.len())) }, _ => { let (result, read) = if buf.is_empty() { (Ok(None), 0) } else { match serde_json::from_slice(buf) { Ok(value) => (Ok(Some(value)), buf.len()), Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData), Err(err) => (Err(err), 0), } }; Ok((ReadNamespacedNetworkPolicyResponse::Other(result), read)) }, } } } // Generated from operation replaceExtensionsV1beta1NamespacedNetworkPolicy impl NetworkPolicy { /// replace the specified NetworkPolicy /// /// Use the returned [`crate::ResponseBody`]`<`[`ReplaceNamespacedNetworkPolicyResponse`]`>` constructor, or [`ReplaceNamespacedNetworkPolicyResponse`] directly, to parse the HTTP response. /// /// # Arguments /// /// * `name` /// /// name of the NetworkPolicy /// /// * `namespace` /// /// object name and auth scope, such as for teams and projects /// /// * `body` /// /// * `optional` /// /// Optional parameters. Use `Default::default()` to not pass any. 
#[cfg(feature = "api")] pub fn replace_namespaced_network_policy( name: &str, namespace: &str, body: &crate::v1_10::api::extensions::v1beta1::NetworkPolicy, optional: ReplaceNamespacedNetworkPolicyOptional<'_>, ) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<ReplaceNamespacedNetworkPolicyResponse>), crate::RequestError> { let ReplaceNamespacedNetworkPolicyOptional { pretty, } = optional; let __url = format!("/apis/extensions/v1beta1/namespaces/{namespace}/networkpolicies/{name}?", name = crate::url::percent_encoding::percent_encode(name.as_bytes(), crate::url::percent_encoding::PATH_SEGMENT_ENCODE_SET), namespace = crate::url::percent_encoding::percent_encode(namespace.as_bytes(), crate::url::percent_encoding::PATH_SEGMENT_ENCODE_SET), ); let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url); if let Some(pretty) = pretty { __query_pairs.append_pair("pretty", pretty); } let __url = __query_pairs.finish(); let mut __request = http::Request::put(__url); let __body = serde_json::to_vec(body).map_err(crate::RequestError::Json)?; __request.header(http::header::CONTENT_TYPE, http::header::HeaderValue::from_static("application/json")); match __request.body(__body) { Ok(request) => Ok((request, crate::ResponseBody::new)), Err(err) => Err(crate::RequestError::Http(err)), } } } /// Optional parameters of [`NetworkPolicy::replace_namespaced_network_policy`] #[cfg(feature = "api")] #[derive(Clone, Copy, Debug, Default)] pub struct ReplaceNamespacedNetworkPolicyOptional<'a> { /// If 'true', then the output is pretty printed. 
pub pretty: Option<&'a str>, } /// Use `<ReplaceNamespacedNetworkPolicyResponse as Response>::try_from_parts` to parse the HTTP response body of [`NetworkPolicy::replace_namespaced_network_policy`] #[cfg(feature = "api")] #[derive(Debug)] pub enum ReplaceNamespacedNetworkPolicyResponse { Ok(crate::v1_10::api::extensions::v1beta1::NetworkPolicy), Created(crate::v1_10::api::extensions::v1beta1::NetworkPolicy), Other(Result<Option<serde_json::Value>, serde_json::Error>), } #[cfg(feature = "api")] impl crate::Response for ReplaceNamespacedNetworkPolicyResponse { fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> { match status_code { http::StatusCode::OK => { let result = match serde_json::from_slice(buf) { Ok(value) => value, Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData), Err(err) => return Err(crate::ResponseError::Json(err)), }; Ok((ReplaceNamespacedNetworkPolicyResponse::Ok(result), buf.len())) }, http::StatusCode::CREATED => { let result = match serde_json::from_slice(buf) { Ok(value) => value, Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData), Err(err) => return Err(crate::ResponseError::Json(err)), }; Ok((ReplaceNamespacedNetworkPolicyResponse::Created(result), buf.len())) }, _ => { let (result, read) = if buf.is_empty() { (Ok(None), 0) } else { match serde_json::from_slice(buf) { Ok(value) => (Ok(Some(value)), buf.len()), Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData), Err(err) => (Err(err), 0), } }; Ok((ReplaceNamespacedNetworkPolicyResponse::Other(result), read)) }, } } } // Generated from operation watchExtensionsV1beta1NamespacedNetworkPolicy impl NetworkPolicy { /// list or watch objects of kind NetworkPolicy /// /// This operation only supports watching one item, or a list of items, of this type for changes. 
/// /// Use the returned [`crate::ResponseBody`]`<`[`WatchNamespacedNetworkPolicyResponse`]`>` constructor, or [`WatchNamespacedNetworkPolicyResponse`] directly, to parse the HTTP response. /// /// # Arguments /// /// * `namespace` /// /// object name and auth scope, such as for teams and projects /// /// * `optional` /// /// Optional parameters. Use `Default::default()` to not pass any. #[cfg(feature = "api")] pub fn watch_namespaced_network_policy( namespace: &str, optional: crate::v1_10::WatchOptional<'_>, ) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<WatchNamespacedNetworkPolicyResponse>), crate::RequestError> { let __url = format!("/apis/extensions/v1beta1/namespaces/{namespace}/networkpolicies?", namespace = crate::url::percent_encoding::percent_encode(namespace.as_bytes(), crate::url::percent_encoding::PATH_SEGMENT_ENCODE_SET), ); let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url); optional.__serialize(&mut __query_pairs); let __url = __query_pairs.finish(); let mut __request = http::Request::get(__url); let __body = vec![]; match __request.body(__body) { Ok(request) => Ok((request, crate::ResponseBody::new)), Err(err) => Err(crate::RequestError::Http(err)), } } } /// Use `<WatchNamespacedNetworkPolicyResponse as Response>::try_from_parts` to parse the HTTP response body of [`NetworkPolicy::watch_namespaced_network_policy`] #[cfg(feature = "api")] #[derive(Debug)] pub enum WatchNamespacedNetworkPolicyResponse { Ok(crate::v1_10::apimachinery::pkg::apis::meta::v1::WatchEvent<NetworkPolicy>), Other(Result<Option<serde_json::Value>, serde_json::Error>), } #[cfg(feature = "api")] impl crate::Response for WatchNamespacedNetworkPolicyResponse { fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> { match status_code { http::StatusCode::OK => { let mut deserializer = serde_json::Deserializer::from_slice(buf).into_iter(); let (result, byte_offset) = 
match deserializer.next() { Some(Ok(value)) => (value, deserializer.byte_offset()), Some(Err(ref err)) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData), Some(Err(err)) => return Err(crate::ResponseError::Json(err)), None => return Err(crate::ResponseError::NeedMoreData), }; Ok((WatchNamespacedNetworkPolicyResponse::Ok(result), byte_offset)) }, _ => { let (result, read) = if buf.is_empty() { (Ok(None), 0) } else { match serde_json::from_slice(buf) { Ok(value) => (Ok(Some(value)), buf.len()), Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData), Err(err) => (Err(err), 0), } }; Ok((WatchNamespacedNetworkPolicyResponse::Other(result), read)) }, } } } // Generated from operation watchExtensionsV1beta1NetworkPolicyForAllNamespaces impl NetworkPolicy { /// list or watch objects of kind NetworkPolicy /// /// This operation only supports watching one item, or a list of items, of this type for changes. /// /// Use the returned [`crate::ResponseBody`]`<`[`WatchNetworkPolicyForAllNamespacesResponse`]`>` constructor, or [`WatchNetworkPolicyForAllNamespacesResponse`] directly, to parse the HTTP response. /// /// # Arguments /// /// * `optional` /// /// Optional parameters. Use `Default::default()` to not pass any. 
#[cfg(feature = "api")] pub fn watch_network_policy_for_all_namespaces( optional: crate::v1_10::WatchOptional<'_>, ) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<WatchNetworkPolicyForAllNamespacesResponse>), crate::RequestError> { let __url = "/apis/extensions/v1beta1/networkpolicies?".to_owned(); let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url); optional.__serialize(&mut __query_pairs); let __url = __query_pairs.finish(); let mut __request = http::Request::get(__url); let __body = vec![]; match __request.body(__body) { Ok(request) => Ok((request, crate::ResponseBody::new)), Err(err) => Err(crate::RequestError::Http(err)), } } } /// Use `<WatchNetworkPolicyForAllNamespacesResponse as Response>::try_from_parts` to parse the HTTP response body of [`NetworkPolicy::watch_network_policy_for_all_namespaces`] #[cfg(feature = "api")] #[derive(Debug)] pub enum WatchNetworkPolicyForAllNamespacesResponse { Ok(crate::v1_10::apimachinery::pkg::apis::meta::v1::WatchEvent<NetworkPolicy>), Other(Result<Option<serde_json::Value>, serde_json::Error>), } #[cfg(feature = "api")] impl crate::Response for WatchNetworkPolicyForAllNamespacesResponse { fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> { match status_code { http::StatusCode::OK => { let mut deserializer = serde_json::Deserializer::from_slice(buf).into_iter(); let (result, byte_offset) = match deserializer.next() { Some(Ok(value)) => (value, deserializer.byte_offset()), Some(Err(ref err)) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData), Some(Err(err)) => return Err(crate::ResponseError::Json(err)), None => return Err(crate::ResponseError::NeedMoreData), }; Ok((WatchNetworkPolicyForAllNamespacesResponse::Ok(result), byte_offset)) }, _ => { let (result, read) = if buf.is_empty() { (Ok(None), 0) } else { match serde_json::from_slice(buf) { Ok(value) => (Ok(Some(value)), buf.len()), Err(ref 
err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData), Err(err) => (Err(err), 0), } }; Ok((WatchNetworkPolicyForAllNamespacesResponse::Other(result), read)) }, } } } // End extensions/v1beta1/NetworkPolicy impl crate::Resource for NetworkPolicy { fn api_version() -> &'static str { "extensions/v1beta1" } fn group() -> &'static str { "extensions" } fn kind() -> &'static str { "NetworkPolicy" } fn version() -> &'static str { "v1beta1" } } impl crate::Metadata for NetworkPolicy { type Ty = crate::v1_10::apimachinery::pkg::apis::meta::v1::ObjectMeta; fn metadata(&self) -> Option<&<Self as crate::Metadata>::Ty> { self.metadata.as_ref() } } impl<'de> serde::Deserialize<'de> for NetworkPolicy { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de> { #[allow(non_camel_case_types)] enum Field { Key_api_version, Key_kind, Key_metadata, Key_spec, Other, } impl<'de> serde::Deserialize<'de> for Field { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de> { struct Visitor; impl<'de> serde::de::Visitor<'de> for Visitor { type Value = Field; fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "field identifier") } fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: serde::de::Error { Ok(match v { "apiVersion" => Field::Key_api_version, "kind" => Field::Key_kind, "metadata" => Field::Key_metadata, "spec" => Field::Key_spec, _ => Field::Other, }) } } deserializer.deserialize_identifier(Visitor) } } struct Visitor; impl<'de> serde::de::Visitor<'de> for Visitor { type Value = NetworkPolicy; fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "struct NetworkPolicy") } fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error> where A: serde::de::MapAccess<'de> { let mut value_metadata: Option<crate::v1_10::apimachinery::pkg::apis::meta::v1::ObjectMeta> = None; let mut value_spec: 
Option<crate::v1_10::api::extensions::v1beta1::NetworkPolicySpec> = None; while let Some(key) = serde::de::MapAccess::next_key::<Field>(&mut map)? { match key { Field::Key_api_version => { let value_api_version: String = serde::de::MapAccess::next_value(&mut map)?; if value_api_version != <Self::Value as crate::Resource>::api_version() { return Err(serde::de::Error::invalid_value(serde::de::Unexpected::Str(&value_api_version), &<Self::Value as crate::Resource>::api_version())); } }, Field::Key_kind => { let value_kind: String = serde::de::MapAccess::next_value(&mut map)?; if value_kind != <Self::Value as crate::Resource>::kind() { return Err(serde::de::Error::invalid_value(serde::de::Unexpected::Str(&value_kind), &<Self::Value as crate::Resource>::kind())); } }, Field::Key_metadata => value_metadata = serde::de::MapAccess::next_value(&mut map)?, Field::Key_spec => value_spec = serde::de::MapAccess::next_value(&mut map)?, Field::Other => { let _: serde::de::IgnoredAny = serde::de::MapAccess::next_value(&mut map)?; }, } } Ok(NetworkPolicy { metadata: value_metadata, spec: value_spec, }) } } deserializer.deserialize_struct( "NetworkPolicy", &[ "apiVersion", "kind", "metadata", "spec", ], Visitor, ) } } impl serde::Serialize for NetworkPolicy { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer { let mut state = serializer.serialize_struct( "NetworkPolicy", 2 + self.metadata.as_ref().map_or(0, |_| 1) + self.spec.as_ref().map_or(0, |_| 1), )?; serde::ser::SerializeStruct::serialize_field(&mut state, "apiVersion", <Self as crate::Resource>::api_version())?; serde::ser::SerializeStruct::serialize_field(&mut state, "kind", <Self as crate::Resource>::kind())?; if let Some(value) = &self.metadata { serde::ser::SerializeStruct::serialize_field(&mut state, "metadata", value)?; } if let Some(value) = &self.spec { serde::ser::SerializeStruct::serialize_field(&mut state, "spec", value)?; } serde::ser::SerializeStruct::end(state) } }
44.750476
211
0.582383
674108c2535e471670fe1cba33c66b6e04e2fdb9
3,743
//! Rust crate to help perform deferred execution of code logic. //! //! # Problems that `deferred` crate helps to solve //! Probably at some point in your project you will want to make a function that can have //! partitioned logic and you want to call each of that parts at some strictly defined time //! specified by you. //! ``` //! # #[macro_use] extern crate deferred; //! # use deferred::*; //! # fn main() { //! fn foo(v: i32) -> Deferred<i32> { //! deferred!(v, [ //! |c| state!(c.state() + 1), //! |c| foo2(c.state()).into(), //! |c| state!(c.state() + 2) //! ]) //! } //! //! fn foo2(v: i32) -> Deferred<i32> { //! deferred!(v, [ //! |c| state!(c.state() * 2), //! |c| state!(c.state() * 3) //! ]) //! } //! //! let d = foo(1); //! assert_eq!(d.state(), Some(&1)); //! let d = d.resume().unwrap(); //! assert_eq!(d.state(), Some(&2)); //! let d = d.resume().unwrap(); //! assert_eq!(d.state(), Some(&4)); //! let d = d.resume().unwrap(); //! assert_eq!(d.state(), Some(&12)); //! let d = d.resume().unwrap(); //! assert_eq!(d.state(), Some(&14)); //! assert_eq!(d.can_resume(), false); //! # } //! ``` //! //! You can think of it as staticaly defined `Promise`-like abstraction known in JavaScript or //! other languages with high abstraction of deferred code execution. //! //! # It is not based on threads //! Main reason that this crate was created is that when you work with WASM target, you cannot use //! `Futures` or threads but you still need to run some of your code asynchronously, most likely //! execute heavy/long calculations "in background" and you cannot make browser freeze. //! //! # Need to use undefined state type? Look, there is `Value` wrapper! //! Sometimes you cannot have the same context input and output types, for example: //! ```ignore //! fn foo(v: i32) -> Deferred<String> { //! deferred!(v, [ //! |c| state!(c.state() + 1), //! |c| state!(format!("{}", c.state())) //! ]) //! } //! //! let result: String = foo(42).consume(); //! ``` //! 
Code above gets `i32` as input and expects that at the end we get `String` value and it does //! not compile. You could solve it by making tuple with options of each types used in context //! inputs and outputs, like this: //! ``` //! # #[macro_use] extern crate deferred; //! # use deferred::*; //! # fn main() { //! type State = (Option<i32>, Option<String>); //! //! fn foo(v: i32) -> Deferred<State> { //! deferred!((Some(v), None), [ //! |c| state!((Some(c.state().0.unwrap() + 1), None)), //! |c| state!((None, Some(format!("{}", c.state().0.unwrap())))) //! ]) //! } //! //! let result = foo(41).consume().1.unwrap(); //! assert_eq!(&result, "42"); //! # } //! ``` //! but this looks ugly and gets even worse when you have much much more types to use - we do not //! want that. We can use `Value` type which is basically a boxed wrapper of any value (that means: //! you have to deal with a little runtime overhead because of storing and accessing value on heap). //! //! Here is how to use `Value` as state: //! ``` //! # #[macro_use] extern crate deferred; //! # use deferred::*; //! # fn main() { //! fn foo(v: i32) -> Deferred<Value> { //! deferred!(value!(v), [ //! |c| state!(value!(c.state().consume::<i32>() + 1)), //! |c| state!(value!(format!("{}", c.state().consume::<i32>()))) //! ]) //! } //! //! let result = foo(41).consume().consume::<String>(); //! assert_eq!(&result, "42"); //! # } //! ``` pub mod context; pub mod deferred; pub mod deferred_manager; mod macros; mod tests; pub mod value; pub use crate::context::*; pub use crate::deferred::*; pub use crate::deferred_manager::*; pub use crate::value::*;
33.419643
100
0.583222
11c8d1272fba13002a403a3df25756340af5d04a
22,500
use crate::background::Background; use crate::config::Config; use crate::droplets::Droplets; use crate::quad::Quad; use crate::render_gl::buffer::ArrayBuffer; use crate::render_gl::{ ColorBuffer, Error, FrameBuffer, Program, Shader, Texture, TextureLoadOptions, Viewport, }; use nalgebra as na; use nalgebra::{Matrix4, Orthographic3, Point3, Translation3, Vector2, Vector3, Vector4}; use ncollide2d::na::Isometry2; use ncollide2d::pipeline::{CollisionGroups, CollisionObjectSlabHandle, GeometricQueryType}; use ncollide2d::query::Proximity; use ncollide2d::shape::{Ball, ShapeHandle}; use ncollide2d::world::CollisionWorld; use rand::prelude::*; use std::rc::Rc; use std::time::Duration; const VIEW_DISTANCE: f32 = 10.0; const DROPLETS_PER_SECOND: usize = 50; const DROPLET_SIZE_GRAVITY_THRESHOLD: f32 = 5.0; const PRIVATE_GRAVITY_FORCE_FACTOR_Y: f32 = 0.25; const PRIVATE_GRAVITY_FORCE_FACTOR_X: f32 = 0.0; const DROP_VERT: &str = include_str!("../assets/shaders/drop.vert"); const DROP_FRAG: &str = include_str!("../assets/shaders/drop.frag"); const DROP_WIPE_VERT: &str = include_str!("../assets/shaders/drop_wipe.vert"); const DROP_WIPE_FRAG: &str = include_str!("../assets/shaders/drop_wipe.frag"); const COLORED_QUAD_VERT: &str = include_str!("../assets/shaders/colored_quad.vert"); const COLORED_QUAD_FRAG: &str = include_str!("../assets/shaders/colored_quad.frag"); const QUAD_VERT: &str = include_str!("../assets/shaders/quad.vert"); const FINAL_FRAG: &str = include_str!("../assets/shaders/final.frag"); fn load_shader(gl: &gl::Gl, vert_source: &str, frag_source: &str, debug_name: &str) -> Program { Program::from_shaders( &gl, &[ Shader::from_vert_source_str(&gl, vert_source).unwrap(), Shader::from_frag_source_str(&gl, frag_source).unwrap(), ], ) .map_err(|msg| Error::LinkError { message: msg, name: debug_name.to_string(), }) .unwrap() } pub struct Rain { gl: gl::Gl, droplet_size_range: (f32, f32), updates: Vec<(CollisionObjectSlabHandle, CollisionObjectSlabHandle)>, world: 
CollisionWorld<f32, usize>, collision_group: CollisionGroups, contacts_query: GeometricQueryType<f32>, viewport: Viewport, view_matrix: Matrix4<f32>, projection_matrix: Matrix4<f32>, time_accumulator: f64, pub(crate) droplets_accumulator: usize, pub(crate) droplets: Droplets, black_color_buffer: ColorBuffer, background_texture: Rc<Texture>, background_mask: Texture, background_buffer: Texture, background: Background, drop_quad: Quad, fullscreen_quad: Quad, drop_program: Program, drop_wipe_program: Program, colored_quad_program: Program, final_program: Program, frame_buffer: FrameBuffer, } impl Rain { pub fn new( gl: &gl::Gl, max_droplet_count: usize, droplet_size_range: (f32, f32), window_size: (u32, u32), config: &Config, ) -> Result<Self, failure::Error> { let droplets: Droplets = Droplets::with_capacity(max_droplet_count); let world = CollisionWorld::new(2.0); let collision_group = CollisionGroups::new(); let contacts_query = GeometricQueryType::Proximity(0.0); let viewport = Viewport::for_window(window_size.0 as i32, window_size.1 as i32); viewport.set_used(&gl); let view_matrix: Matrix4<f32> = (Translation3::<f32>::from(Point3::origin().coords) * Translation3::<f32>::from(Vector3::z() * VIEW_DISTANCE)) .inverse() .to_homogeneous(); let projection_matrix: Matrix4<f32> = Orthographic3::new( 0.0, window_size.0 as f32, 0.0, window_size.1 as f32, 0.01, 1000.0, ) .into_inner(); let background_texture = { let fallback_background = config.backgrounds_directory().join("background.jpg"); let path = config .cached_background() .unwrap_or_else(|| fallback_background.clone()); let mut options = TextureLoadOptions::rgb(); options.gen_mipmaps = true; let image = image::open(&path) .or_else(|_| image::open(&fallback_background)) .unwrap(); Texture::from_image(options, &gl, &image).unwrap() }; let texture_rc = Rc::<Texture>::new(background_texture); let drop_quad = Quad::default(&gl); let background = Background::new(&gl, texture_rc.clone(), window_size.0, window_size.1, 
1.0)?; let drop_program = load_shader(&gl, DROP_VERT, DROP_FRAG, "drop"); let drop_wipe_program = load_shader(&gl, DROP_WIPE_VERT, DROP_WIPE_FRAG, "drop_wipe"); let colored_quad_program = load_shader(&gl, COLORED_QUAD_VERT, COLORED_QUAD_FRAG, "colored_quad"); let final_program = load_shader(&gl, QUAD_VERT, FINAL_FRAG, "final"); let background_mask = Texture::new(&gl, window_size.0, window_size.1)?; let background_buffer = Texture::new(&gl, window_size.0, window_size.1)?; let fullscreen_quad = Quad::new_with_size(&gl, 0.0, 0.0, window_size.1 as f32, window_size.0 as f32); let black_color_buffer = ColorBuffer::from_rgba(0.0, 0.0, 0.0, 1.0); let frame_buffer = FrameBuffer::new(&gl); { frame_buffer.bind(); frame_buffer.attach_texture(&background_mask); black_color_buffer.set_used(&gl); black_color_buffer.clear(&gl); frame_buffer.unbind(); } Ok(Rain { gl: gl.clone(), droplet_size_range, updates: Vec::<(CollisionObjectSlabHandle, CollisionObjectSlabHandle)>::new(), viewport, time_accumulator: 0.0, droplets_accumulator: DROPLETS_PER_SECOND, droplets, world, collision_group, contacts_query, view_matrix, projection_matrix, black_color_buffer, background_texture: texture_rc, background_mask, background_buffer, background, drop_quad, fullscreen_quad, drop_program, drop_wipe_program, colored_quad_program, final_program, frame_buffer, }) } pub fn update(&mut self, delta: &Duration) { let mut rng = rand::thread_rng(); self.time_accumulator += delta.as_secs_f64(); if self.time_accumulator > 1.0 { self.time_accumulator -= 1.0; self.droplets_accumulator += DROPLETS_PER_SECOND; } // Updates { Self::gravity_non_linear(&mut self.droplets, &mut self.world, &mut rng, delta); Self::trail( &mut self.droplets, &mut self.world, &mut rng, &self.collision_group, &self.contacts_query, &delta, ); self.updates.clear(); // We get an "allowance" of DROPLETS_PER_SECOND every second. 
// This part of the loop will attempt to spend them at random times, and is more likely to // spend them the more time has past. // TODO: Any better way to spend these more evenly? // TODO: What happens when budget > fps? if self.droplets_accumulator > 0 && rng.gen_bool(self.time_accumulator.max(0.0).min(1.0)) { if let Some((i, d)) = self.droplets.checkout() { d.pos = Vector2::new( rng.gen_range(0.0, self.viewport.w as f32), rng.gen_range(0.0, self.viewport.h as f32), ); d.size = rng.gen_range(self.droplet_size_range.0, self.droplet_size_range.1); let shape_handle = ShapeHandle::new(Ball::new(d.size * 0.5)); let handle = self .world .add( Isometry2::new(d.pos.clone_owned(), na::zero()), shape_handle, self.collision_group, self.contacts_query, i, ) .0; d.collision_handle = handle; self.droplets_accumulator -= 1; } } for ev in self.world.proximity_events().iter().collect::<Vec<_>>() { if ev.new_status == Proximity::Intersecting { if let (Some(obj1), Some(obj2)) = ( self.world.collision_object(ev.collider1), self.world.collision_object(ev.collider2), ) { let sphere1 = obj1.shape().local_bounding_sphere(); let sphere2 = obj2.shape().local_bounding_sphere(); let rad1 = sphere1.radius(); let rad2 = sphere2.radius(); let pair = if rad1 > rad2 { (ev.collider1, ev.collider2) } else if rad1 < rad2 { (ev.collider2, ev.collider1) } else if sphere1.center().y > sphere2.center().y { (ev.collider1, ev.collider2) } else { (ev.collider2, ev.collider1) }; self.updates.push(pair); } } } for (keep_handle, delete_handle) in self.updates.iter() { if let (Some(keep), Some(delete)) = self .world .collision_object_pair_mut(*keep_handle, *delete_handle) { let keep_droplet_index = *keep.data(); let delete_droplet_index = *delete.data(); let delete_droplet_size = self.droplets[delete_droplet_index].size; let keep_droplet = &mut self.droplets[keep_droplet_index]; // TODO: How much does a droplet grow when is absorbs another? 
keep_droplet.size = ((keep_droplet.size * 0.5).powf(3.0) + (delete_droplet_size * 0.5).powf(3.0)) .cbrt() * 2.0; keep.set_shape(ShapeHandle::new(Ball::new(keep_droplet.size * 0.5))); } } for (_, delete_handle) in self.updates.iter() { if let Some(delete) = self.world.collision_object(*delete_handle) { self.droplets.free(*delete.data()); self.world.remove(&[*delete_handle]); } } } } pub fn render(&self, delta: &Duration) { let matrix = &self.projection_matrix * &self.view_matrix; let resolution = Vector2::new(self.viewport.w as f32, self.viewport.h as f32); // Background pass { self.background.prepass( &self.gl, &self.view_matrix, &self.projection_matrix, &resolution, ); self.frame_buffer.bind(); self.frame_buffer.attach_texture(&self.background_buffer); self.background.render( &self.gl, &self.view_matrix, &self.projection_matrix, &resolution, ); self.frame_buffer.unbind(); } // Mask pass { self.frame_buffer.bind(); self.frame_buffer.attach_texture(&self.background_mask); unsafe { self.gl.BlendFuncSeparate( gl::SRC_ALPHA, gl::ONE_MINUS_SRC_ALPHA, gl::ZERO, gl::ONE, ); } { self.colored_quad_program.set_used(); if let Some(loc) = self.colored_quad_program.get_uniform_location("MVP") { self.colored_quad_program .set_uniform_matrix_4fv(loc, &matrix); } if let Some(loc) = self.colored_quad_program.get_uniform_location("Color") { self.colored_quad_program.set_uniform_4f( loc, &Vector4::new(0.0, 0.0, 0.0, 0.25 * delta.as_secs_f32()), ); } self.fullscreen_quad.render(&self.gl); } { self.drop_wipe_program.set_used(); if let Some(loc) = self.drop_wipe_program.get_uniform_location("Resolution") { self.drop_wipe_program.set_uniform_2f(loc, &resolution); } if let Some(loc) = self.drop_wipe_program.get_uniform_location("MVP") { self.drop_wipe_program.set_uniform_matrix_4fv(loc, &matrix); } self.render_droplets(&self.gl, &self.drop_quad, &self.droplets); } unsafe { self.gl.BlendFunc(gl::SRC_ALPHA, gl::ONE_MINUS_SRC_ALPHA); } self.frame_buffer.unbind(); } // Merge pass { 
self.final_program.set_used(); self.black_color_buffer.set_used(&self.gl); self.black_color_buffer.clear(&self.gl); if let Some(loc) = self.final_program.get_uniform_location("MVP") { self.final_program.set_uniform_matrix_4fv(loc, &matrix); } if let Some(loc) = self.final_program.get_uniform_location("Texture0") { self.background_buffer.bind_at(0); self.final_program.set_uniform_1i(loc, 0); } if let Some(loc) = self.final_program.get_uniform_location("Texture1") { self.background_texture.bind_at(1); self.final_program.set_uniform_1i(loc, 1); } if let Some(loc) = self.final_program.get_uniform_location("Mask") { self.background_mask.bind_at(2); self.final_program.set_uniform_1i(loc, 2); } self.fullscreen_quad.render(&self.gl); } { self.drop_program.set_used(); if let Some(loc) = self.drop_program.get_uniform_location("Resolution") { self.drop_program.set_uniform_2f(loc, &resolution); } if let Some(loc) = self.drop_program.get_uniform_location("MVP") { self.drop_program.set_uniform_matrix_4fv(loc, &matrix); } if let Some(loc) = self.drop_program.get_uniform_location("Texture") { self.background_texture.bind_at(0); self.drop_program.set_uniform_1i(loc, 0); } self.render_droplets(&self.gl, &self.drop_quad, &self.droplets); } } fn render_droplets(&self, gl: &gl::Gl, quad: &Quad, droplets: &Droplets) { quad.vao.bind(); let instance_vbo: ArrayBuffer = ArrayBuffer::new(&gl); instance_vbo.bind(); let offsets: Vec<na::Vector3<f32>> = droplets .into_iter() .filter(|d| !d.deleted) .map(|d| na::Vector3::new(d.pos.x, d.pos.y, d.size)) .collect(); instance_vbo.static_draw_data(&offsets); unsafe { gl.EnableVertexAttribArray(3); gl.VertexAttribPointer( 3, 3, // the number of components per generic vertex attribute gl::FLOAT, // data type gl::FALSE, std::mem::size_of::<na::Vector3<f32>>() as gl::types::GLint, std::ptr::null(), ); } instance_vbo.unbind(); unsafe { gl.VertexAttribDivisor(3, 1); } unsafe { gl.DrawElementsInstanced( gl::TRIANGLES, 6, gl::UNSIGNED_BYTE, 
::std::ptr::null(), offsets.len() as i32, ); } quad.vao.unbind(); } fn gravity_non_linear( droplets: &mut Droplets, world: &mut CollisionWorld<f32, usize>, rng: &mut ThreadRng, dt: &Duration, ) { let fps = 1.0 / dt.as_secs_f32(); let gravity_y = PRIVATE_GRAVITY_FORCE_FACTOR_Y * dt.as_secs_f32(); for i in 0..droplets.len() { let mut delete_index: Option<usize> = None; { let droplet = &mut droplets[i]; if droplet.deleted || droplet.size < DROPLET_SIZE_GRAVITY_THRESHOLD { continue; } if droplet.size < DROPLET_SIZE_GRAVITY_THRESHOLD && droplet.seed > 0 { droplet.slowing = true; } let movement_probability = 0.01 * dt.as_secs_f64(); if droplet.seed <= 0 { droplet.seed = (droplet.size * 0.5 * rng.gen_range(0.0, 1.0) * fps).floor() as i32; droplet.skipping = !droplet.skipping; droplet.slowing = true; } droplet.seed -= 1; assert!(droplet.size >= 1.0); if droplet.speed.y > 0.0 { if droplet.slowing { droplet.speed *= 0.9; if droplet.speed.y < gravity_y { droplet.slowing = false; } } else if droplet.skipping { droplet.speed.y = gravity_y; droplet.speed.x = PRIVATE_GRAVITY_FORCE_FACTOR_X; } else { droplet.speed.y += gravity_y * droplet.size; droplet.speed.x += PRIVATE_GRAVITY_FORCE_FACTOR_X * droplet.size; } } else if rng.gen_bool((1.0 - 1.0 / droplet.size as f64) * movement_probability) { droplet.speed.y = gravity_y; droplet.speed.x = PRIVATE_GRAVITY_FORCE_FACTOR_X; } // if this.options.gravityAngleVariance != 0 { // droplet.x_speed += // (rnd.gen() * 2 - 1) * droplet.y_speed * this.options.gravityAngleVariance // } droplet.pos.y -= droplet.speed.y; droplet.pos.x += droplet.speed.x; if droplet.pos.y + droplet.size * 0.5 < 0.0 { delete_index = Some(i); world.remove(&[droplet.collision_handle]); } else if droplet.speed.x != 0.0 || droplet.speed.y != 0.0 { let handle = droplet.collision_handle; let object = world.get_mut(handle).unwrap(); object.set_position(Isometry2::new(droplet.pos.clone_owned(), na::zero())); } } if let Some(delete_index) = delete_index { 
droplets.free(delete_index); } } world.update(); } fn trail( droplets: &mut Droplets, world: &mut CollisionWorld<f32, usize>, rng: &mut ThreadRng, collision_group: &CollisionGroups, contacts_query: &GeometricQueryType<f32>, dt: &Duration, ) { let gravity_y = PRIVATE_GRAVITY_FORCE_FACTOR_Y * dt.as_secs_f32(); for i in 0..droplets.len() { let pos; let size; { let droplet = &mut droplets[i]; if droplet.speed.y <= gravity_y { continue; } if droplet.size >= 6.0 && (droplet.last_trail_y.is_none() || (droplet.last_trail_y.unwrap_or(0.0) - droplet.pos.y) >= rng.gen_range(0.1, 1.0) * 200.0) { droplet.last_trail_y = Some(droplet.pos.y); size = rng.gen_range(0.9, 1.1) * droplet.size * 0.25; pos = Vector2::new( droplet.pos.x + rng.gen_range(-1.0, 1.0), droplet.pos.y + droplet.size * 0.5 + droplet.speed.y + size * 0.5, ); droplet.size = ((droplet.size * 0.5).powf(3.0) - (size * 0.5).powf(3.0)).cbrt() * 2.0; if let Some(droplet_collision) = world.get_mut(droplet.collision_handle) { droplet_collision.set_shape(ShapeHandle::new(Ball::new(droplet.size * 0.5))) } } else { continue; } } if let Some((i, d)) = droplets.checkout() { d.pos = pos; d.size = size; let shape_handle = ShapeHandle::new(Ball::new(d.size * 0.5)); let handle = world .add( Isometry2::new(d.pos.clone_owned(), na::zero()), shape_handle, *collision_group, *contacts_query, i, ) .0; d.collision_handle = handle; } } } }
34.246575
108
0.500844
8a52dfe1cdc6bfc8bf4b8ddd00bfb00d1db97032
2,537
use rodio::*; use std::fs::File; use std::io::BufReader; use std::thread::sleep; use std::time::Duration; use std::collections::HashMap; pub struct AudioContext { tracks: Vec<Sink> } enum ThreadStatus { Playing, Paused, Inactive } /// An unsigned integer between 0 and 100 that specifies volume to play an audio track at. pub type Volume = u8; fn volume_bounds_check(vol: Volume) -> bool { vol <= 100 } impl AudioContext { pub fn new() -> AudioContext { AudioContext { tracks: Vec::new() } } /// Loads an audio track from a designated file, and maps it to a designated string identifier. Returns Err(()) on failure to open the file. pub fn load(&mut self, path: &str, name: &str) -> Result<(), ()> { unimplemented!() } /// Plays a specific audio track on a newly generated thread. Returns Err(()) if sound is not loaded. On success, returns a thread identifier, /// represented by a u64. pub fn play_on_new_thread(&mut self, sound_name: &str) -> Result<u64, ()> { unimplemented!() } /// Plays a specific audio track on an already existing thread. Fails if sound or thread is uninitialized pub fn play_once_thread_is_available(&mut self, sound_name: &str, thread: u64) -> Result<(), ()> { unimplemented!() } /// Unlinks a certain sound track identifier and unloads the sound from memory. May cause unexpected results if a thread is using the track. pub fn unload(&mut self, name: &str) -> Result<(), ()> { unimplemented!() } /// Stops playback on a thread, but does not set it to inactive pub fn pause(&mut self, thread: u64) -> Result<(), ()> { unimplemented!() } /// Resumes audio playback on a thread if it has been paused pub fn resume(&mut self, thread: u64) -> Result<(), ()> { unimplemented!() } /// Set a volume for a specific thread. 
Volume must be between 0 and 100 (inclusive) pub fn set_volume(&mut self, thread: u64, volume: Volume) -> Result<(), ()> { unimplemented!() } /// Forcibly stops thread and marks it inactive pub fn stop_thread(&mut self, thread: u64) -> Result<(), ()> { unimplemented!() } /// Cleans up any unused threads. Returns a new mapping of threads that were still active to their new identifiers. pub fn cleanup(&mut self) -> HashMap<u64, u64> { unimplemented!() } /// Not finished pub fn set_audio_device(&mut self) { unimplemented!() } }
35.236111
146
0.632243
56f3ef3db25d131080954bab94cf10279ed4f222
780
#![feature(const_fn)] type Field1 = i32; type Field2 = f32; type Field3 = i64; union DummyUnion { field1: Field1, field2: Field2, field3: Field3, } const FLOAT1_AS_I32: i32 = 1065353216; const UNION: DummyUnion = DummyUnion { field1: FLOAT1_AS_I32 }; const fn read_field1() -> Field1 { const FIELD1: Field1 = unsafe { UNION.field1 }; FIELD1 } const fn read_field2() -> Field2 { const FIELD2: Field2 = unsafe { UNION.field2 }; FIELD2 } const fn read_field3() -> Field3 { const FIELD3: Field3 = unsafe { UNION.field3 }; //~^ ERROR it is undefined behavior to use this value FIELD3 } fn main() { assert_eq!(read_field1(), FLOAT1_AS_I32); assert_eq!(read_field2(), 1.0); assert_eq!(read_field3(), unsafe { UNION.field3 }); }
21.081081
63
0.660256
11f2ca35207a84f63cb9f0aebf194549c8413045
1,614
use ::{BroadcastMode, Instruction, MaskReg, MergeMode, Mnemonic, OperandSize, Reg, RoundingMode}; use ::RegType::*; use ::instruction_def::*; use ::Operand::*; use ::Reg::*; use ::RegScale::*; fn phminposuw_1() { run_test(&Instruction { mnemonic: Mnemonic::PHMINPOSUW, operand1: Some(Direct(XMM3)), operand2: Some(Direct(XMM4)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 56, 65, 220], OperandSize::Dword) } fn phminposuw_2() { run_test(&Instruction { mnemonic: Mnemonic::PHMINPOSUW, operand1: Some(Direct(XMM2)), operand2: Some(IndirectScaledIndexedDisplaced(EBX, EBX, Two, 572675103, Some(OperandSize::Xmmword), None)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 56, 65, 148, 91, 31, 84, 34, 34], OperandSize::Dword) } fn phminposuw_3() { run_test(&Instruction { mnemonic: Mnemonic::PHMINPOSUW, operand1: Some(Direct(XMM3)), operand2: Some(Direct(XMM1)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 56, 65, 217], OperandSize::Qword) } fn phminposuw_4() { run_test(&Instruction { mnemonic: Mnemonic::PHMINPOSUW, operand1: Some(Direct(XMM0)), operand2: Some(IndirectScaledIndexed(RDX, RDX, Four, Some(OperandSize::Xmmword), None)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[102, 15, 56, 65, 4, 146], OperandSize::Qword) }
67.25
389
0.712515
6915bfcaa0b2b25853b5fff3261063da372b043d
584
use super::state::*; use components::{ backgrounds::dom::render_backgrounds_raw, module::_common::play::prelude::DomRenderable, }; use dominator::{html, Dom}; use std::rc::Rc; use super::game::{dom::render as render_game, state::Game}; impl DomRenderable for Base { fn render(state: Rc<Base>) -> Dom { html!("empty-fragment", { .property("slot", "main") .children(&mut [ render_backgrounds_raw(&state.backgrounds, state.theme_id, None), render_game(Game::new(state.clone())), ]) }) } }
27.809524
92
0.589041
1150346f752faa006071e119a620a6114c8c1d0f
1,241
// Note: the borrowck analysis is currently flow-insensitive. // Therefore, some of these errors are marked as spurious and could be // corrected by a simple change to the analysis. The others are // either genuine or would require more advanced changes. The latter // cases are noted. #![feature(box_syntax)] fn borrow(_v: &isize) {} fn borrow_mut(_v: &mut isize) {} fn cond() -> bool { panic!() } fn for_func<F>(_f: F) where F: FnOnce() -> bool { panic!() } fn produce<T>() -> T { panic!(); } fn inc(v: &mut Box<isize>) { *v = box (**v + 1); } fn pre_freeze_cond() { // In this instance, the freeze is conditional and starts before // the mut borrow. let u = box 0; let mut v: Box<_> = box 3; let mut _w = &u; if cond() { _w = &v; } borrow_mut(&mut *v); //~ ERROR cannot borrow _w.use_ref(); } fn pre_freeze_else() { // In this instance, the freeze and mut borrow are on separate sides // of the if. let u = box 0; let mut v: Box<_> = box 3; let mut _w = &u; if cond() { _w = &v; } else { borrow_mut(&mut *v); } _w.use_ref(); } fn main() {} trait Fake { fn use_mut(&mut self) { } fn use_ref(&self) { } } impl<T> Fake for T { }
23.865385
72
0.585818
cc6ee727ef2b87722b4fc99e960e2434f8be4369
1,853
//! Validation-related functions to work with the validator crate. use crate::errors::ApiError; use actix_web::web::Json; use validator::{Validate, ValidationErrors}; /// Validate a struct and collect and return the errors pub fn validate<T>(params: &Json<T>) -> Result<(), ApiError> where T: Validate, { match params.validate() { Ok(()) => Ok(()), Err(error) => Err(ApiError::ValidationError(collect_errors(error))), } } /// Collect ValidationErrors and return a vector of the messages /// Adds a default_error when none is supplied fn collect_errors(error: ValidationErrors) -> Vec<String> { error .field_errors() .into_iter() .map(|error| { let default_error = format!("{} is required", error.0); error.1[0] .message .as_ref() .unwrap_or(&std::borrow::Cow::Owned(default_error)) .to_string() }) .collect() } #[cfg(test)] mod tests { use super::*; use serde_json::json; #[derive(Debug, Deserialize, Serialize, Validate)] pub struct TestRequest { #[validate(length( min = 3, message = "name is required and must be at least 3 characters" ))] pub name: String, } fn get_test_request() -> TestRequest { let json = json!({"name": "a"}); serde_json::from_value::<TestRequest>(json).unwrap() } #[test] fn it_collects_errors() { let request = get_test_request(); let errors = request.validate().unwrap_err(); let response = collect_errors(errors); assert!(response.len() > 0); } /* #[test] fn it_validates() { let request = get_test_request(); let response = validate(&Json(request)).unwrap_err(); let expected_error = ApiError::ValidationError(vec![ "name is required and must be at least 3 characters".to_string(), ]); assert_eq!(response, expected_error); } */ }
25.040541
72
0.641123
5098dea4253e57326adb2ef7c4373d126e10463d
7,321
use std::sync::atomic::{AtomicUsize, Ordering}; use diesel; use diesel::prelude::LoadDsl; use diesel::prelude::LimitDsl; use diesel::prelude::ExecuteDsl; use diesel::prelude::FilterDsl; use diesel::ExpressionMethods; use rocket::request; use rocket::request::Request; use rocket::request::FromRequest; use rocket::http::Status; use rocket::outcome::IntoOutcome; use time; use serde_json; use super::db::schema::users; use super::db::schema::user_infos; use super::db::schema::posts; use super::error::ModelError; use super::db::conn::Pool; use super::db::conn::init_pool; use super::db::models::NewUser; use super::db::models::NewPost; use super::db::models::User; use super::db::models::UserInfo; use super::db::models::Post; use super::settings::ENV; use super::settings::AUTH_TOKEN_NAME; use rocket::http::{Cookie, Cookies}; use std::collections::hash_map::DefaultHasher; use std::hash::Hash; use std::hash::Hasher; pub struct Model { pool: Pool, counter: AtomicUsize, } #[derive(Hash, Debug)] struct TokenHash<'a> { user_id: i32, expires: i64, extra_data: &'a str, secret: u64, } #[derive(Hash)] struct PasswordHash<'a> { name: &'a str, password: &'a str, secret: u64, } #[derive(Serialize, Deserialize, Debug)] pub struct AuthToken { user_id: i32, expires: i64, hash: u64, } #[derive(Serialize)] pub struct AuthInfo { user_id: i32, expires: i64, } pub type ModelResult<T> = Result<T, ModelError>; pub fn hash<T>(v: &T) -> u64 where T: Hash, { let mut s = DefaultHasher::new(); v.hash(&mut s); s.finish() } impl Model { pub fn new() -> Model { Model { pool: init_pool(&ENV.database_url), counter: AtomicUsize::new(0), } } pub fn inc(&self) { self.counter.fetch_add(1, Ordering::Relaxed); } pub fn get(&self) -> usize { self.counter.load(Ordering::Relaxed) } pub fn user(&self, name: &str) -> ModelResult<UserInfo> { let conn = self.pool.get()?; user_infos::table .filter(user_infos::name.eq(name)) .limit(1) .load::<UserInfo>(&*conn)? 
.pop() .ok_or(ModelError::UserNotFound) } pub fn users(&self) -> ModelResult<Vec<UserInfo>> { let conn = self.pool.get()?; Ok(user_infos::table.load::<UserInfo>(&*conn)?) } pub fn register(&self, name: &str, email: &str, password: &str) -> ModelResult<()> { let conn = self.pool.get()?; let user = users::table .filter(users::name.eq(name)) .limit(1) .load::<User>(&*conn)?; if user.len() > 0 { return Err(ModelError::UserExists); }; let password_hash = hash(&PasswordHash { name: name, password: password, secret: ENV.secret, }) as i64; diesel::insert(&NewUser { name: name, email: email, password_hash: password_hash, }).into(users::table) .execute(&*conn)?; Ok(()) } pub fn login( &self, name: &str, password: &str, extra_data: &str, duration: u32, ) -> ModelResult<AuthToken> { let conn = self.pool.get()?; let user: User = users::table .filter(users::name.eq(name)) .limit(1) .load::<User>(&*conn)? .pop() .ok_or(ModelError::UserNotFound)?; let password_hash = hash(&PasswordHash { name: name, password: password, secret: ENV.secret, }) as i64; if user.password_hash != password_hash { return Err(ModelError::PasswordWrong); }; let expires = time::now_utc().to_timespec().sec + duration as i64; let token_hash = hash(&TokenHash { user_id: user.id, expires: expires, extra_data: extra_data, secret: ENV.secret, }); Ok(AuthToken { user_id: user.id, expires: expires, hash: token_hash, }) } pub fn authorize(token: AuthToken, extra_data: &str) -> ModelResult<AuthInfo> { if time::now_utc().to_timespec().sec > token.expires { return Err(ModelError::AuthTokenExpired); }; let token_hash = hash(&TokenHash { user_id: token.user_id, expires: token.expires, extra_data: extra_data, secret: ENV.secret, }); if token.hash != token_hash { return Err(ModelError::AuthTokenInvalid); } Ok(AuthInfo { user_id: token.user_id, expires: token.expires, }) } pub fn get_post(&self, post_id: i32) -> ModelResult<Post> { let conn = self.pool.get()?; posts::table .filter(posts::id.eq(post_id)) .limit(1) 
.load::<Post>(&*conn)? .pop() .ok_or(ModelError::PostNotFound) } pub fn new_post(&self, auth: &AuthInfo, title: &str, body: &str) -> ModelResult<()> { let conn = self.pool.get()?; let timestamp = time::now_utc().to_timespec().sec; diesel::insert(&NewPost { user_id: auth.user_id, created: timestamp, edited: timestamp, title: title, body: body, }).into(posts::table) .execute(&*conn)?; Ok(()) } pub fn edit_post( &self, auth: &AuthInfo, post_id: i32, title: &str, body: &str, ) -> ModelResult<()> { let conn = self.pool.get()?; let post = self.get_post(post_id)?; if post.user_id != auth.user_id { return Err(ModelError::AccessDenied); }; // TODO: implement roles let timestamp = time::now_utc().to_timespec().sec; diesel::update(posts::table) .set(&Post { edited: timestamp, title: title.to_string(), body: body.to_string(), ..post }) .execute(&*conn)?; Ok(()) } pub fn posts(&self) -> ModelResult<Vec<Post>> { let conn = self.pool.get()?; Ok(posts::table.load::<Post>(&*conn)?) } } impl<'a, 'r> FromRequest<'a, 'r> for AuthInfo { type Error = ModelError; fn from_request(request: &'a Request) -> request::Outcome<Self, Self::Error> { (|| { let cookie = request.cookies().get_private(AUTH_TOKEN_NAME).ok_or( ModelError::AuthTokenNotFound, )?; let token: AuthToken = serde_json::from_str(cookie.value())?; Model::authorize(token, "") })().into_outcome(Status::Unauthorized) } } pub fn set_auth_cookie( model: &Model, cookies: &mut Cookies, name: &str, password: &str, extra_data: &str, duration: u32, ) -> ModelResult<AuthInfo> { let token = model.login(name, password, extra_data, duration)?; cookies.add_private(Cookie::new(AUTH_TOKEN_NAME, serde_json::to_string(&token)?)); Model::authorize(token, extra_data) } pub fn clear_auth_cookie(cookies: &mut Cookies) { cookies.remove_private(Cookie::named(AUTH_TOKEN_NAME)); }
27.942748
89
0.553067
e2259ac849bb9a75aa5fab9ce34666bdf7740185
302
// pub struct VariableTable { // pub variable_count: u16, // pub variables: Vec<Variable>, // } // #[derive(Debug, Copy, Clone)] // pub struct Variable { // pub start_pc: u16, // pub length: u16, // pub name_index: u16, // pub descriptor_index: u16, // pub index: u16, // }
23.230769
36
0.586093
d93165f3f52968171963c36559eaac5f70404fd6
270
//! This module provides important and useful traits to help to format tokens and nodes //! when implementing the [crate::ToFormatElement] trait. pub use crate::formatter_traits::{ FormatOptionalTokenAndNode as _, FormatTokenAndNode as _, IntoFormatResult as _, };
38.571429
87
0.777778
6229988bfda3ca7bd4f7fd9ffcf93e073d8c3e17
54
pub mod calculus; pub mod expression; pub mod maf;
13.5
20
0.722222
e2ceb5846767720ebcd2c3e882fe604c3a04e465
227,137
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive( std::clone::Clone, std::cmp::Eq, std::cmp::Ord, std::cmp::PartialEq, std::cmp::PartialOrd, std::fmt::Debug, std::hash::Hash, )] pub enum ExceptionCause { #[allow(missing_docs)] // documentation missing in model InsufficientS3BucketPolicy, #[allow(missing_docs)] // documentation missing in model S3AccessDenied, /// Unknown contains new variants that have been added since this code was generated. Unknown(String), } impl std::convert::From<&str> for ExceptionCause { fn from(s: &str) -> Self { match s { "InsufficientS3BucketPolicy" => ExceptionCause::InsufficientS3BucketPolicy, "S3AccessDenied" => ExceptionCause::S3AccessDenied, other => ExceptionCause::Unknown(other.to_owned()), } } } impl std::str::FromStr for ExceptionCause { type Err = std::convert::Infallible; fn from_str(s: &str) -> std::result::Result<Self, Self::Err> { Ok(ExceptionCause::from(s)) } } impl ExceptionCause { /// Returns the `&str` value of the enum member. pub fn as_str(&self) -> &str { match self { ExceptionCause::InsufficientS3BucketPolicy => "InsufficientS3BucketPolicy", ExceptionCause::S3AccessDenied => "S3AccessDenied", ExceptionCause::Unknown(s) => s.as_ref(), } } /// Returns all the `&str` values of the enum members. 
pub fn values() -> &'static [&'static str] { &["InsufficientS3BucketPolicy", "S3AccessDenied"] } } impl AsRef<str> for ExceptionCause { fn as_ref(&self) -> &str { self.as_str() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive( std::clone::Clone, std::cmp::Eq, std::cmp::Ord, std::cmp::PartialEq, std::cmp::PartialOrd, std::fmt::Debug, std::hash::Hash, )] pub enum ResourceType { #[allow(missing_docs)] // documentation missing in model Asset, #[allow(missing_docs)] // documentation missing in model DataSet, #[allow(missing_docs)] // documentation missing in model EventAction, #[allow(missing_docs)] // documentation missing in model Job, #[allow(missing_docs)] // documentation missing in model Revision, /// Unknown contains new variants that have been added since this code was generated. Unknown(String), } impl std::convert::From<&str> for ResourceType { fn from(s: &str) -> Self { match s { "ASSET" => ResourceType::Asset, "DATA_SET" => ResourceType::DataSet, "EVENT_ACTION" => ResourceType::EventAction, "JOB" => ResourceType::Job, "REVISION" => ResourceType::Revision, other => ResourceType::Unknown(other.to_owned()), } } } impl std::str::FromStr for ResourceType { type Err = std::convert::Infallible; fn from_str(s: &str) -> std::result::Result<Self, Self::Err> { Ok(ResourceType::from(s)) } } impl ResourceType { /// Returns the `&str` value of the enum member. pub fn as_str(&self) -> &str { match self { ResourceType::Asset => "ASSET", ResourceType::DataSet => "DATA_SET", ResourceType::EventAction => "EVENT_ACTION", ResourceType::Job => "JOB", ResourceType::Revision => "REVISION", ResourceType::Unknown(s) => s.as_ref(), } } /// Returns all the `&str` values of the enum members. 
pub fn values() -> &'static [&'static str] { &["ASSET", "DATA_SET", "EVENT_ACTION", "JOB", "REVISION"] } } impl AsRef<str> for ResourceType { fn as_ref(&self) -> &str { self.as_str() } } /// <p>What occurs to start an action.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct Event { /// <p>What occurs to start the revision publish action.</p> pub revision_published: std::option::Option<crate::model::RevisionPublished>, } impl Event { /// <p>What occurs to start the revision publish action.</p> pub fn revision_published(&self) -> std::option::Option<&crate::model::RevisionPublished> { self.revision_published.as_ref() } } impl std::fmt::Debug for Event { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("Event"); formatter.field("revision_published", &self.revision_published); formatter.finish() } } /// See [`Event`](crate::model::Event) pub mod event { /// A builder for [`Event`](crate::model::Event) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) revision_published: std::option::Option<crate::model::RevisionPublished>, } impl Builder { /// <p>What occurs to start the revision publish action.</p> pub fn revision_published(mut self, input: crate::model::RevisionPublished) -> Self { self.revision_published = Some(input); self } /// <p>What occurs to start the revision publish action.</p> pub fn set_revision_published( mut self, input: std::option::Option<crate::model::RevisionPublished>, ) -> Self { self.revision_published = input; self } /// Consumes the builder and constructs a [`Event`](crate::model::Event) pub fn build(self) -> crate::model::Event { crate::model::Event { revision_published: self.revision_published, } } } } impl Event { /// Creates a new builder-style object to manufacture [`Event`](crate::model::Event) pub fn builder() -> crate::model::event::Builder { 
crate::model::event::Builder::default() } } /// <p>Information about the published revision.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct RevisionPublished { /// <p>The data set ID of the published revision.</p> pub data_set_id: std::option::Option<std::string::String>, } impl RevisionPublished { /// <p>The data set ID of the published revision.</p> pub fn data_set_id(&self) -> std::option::Option<&str> { self.data_set_id.as_deref() } } impl std::fmt::Debug for RevisionPublished { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("RevisionPublished"); formatter.field("data_set_id", &self.data_set_id); formatter.finish() } } /// See [`RevisionPublished`](crate::model::RevisionPublished) pub mod revision_published { /// A builder for [`RevisionPublished`](crate::model::RevisionPublished) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) data_set_id: std::option::Option<std::string::String>, } impl Builder { /// <p>The data set ID of the published revision.</p> pub fn data_set_id(mut self, input: impl Into<std::string::String>) -> Self { self.data_set_id = Some(input.into()); self } /// <p>The data set ID of the published revision.</p> pub fn set_data_set_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.data_set_id = input; self } /// Consumes the builder and constructs a [`RevisionPublished`](crate::model::RevisionPublished) pub fn build(self) -> crate::model::RevisionPublished { crate::model::RevisionPublished { data_set_id: self.data_set_id, } } } } impl RevisionPublished { /// Creates a new builder-style object to manufacture [`RevisionPublished`](crate::model::RevisionPublished) pub fn builder() -> crate::model::revision_published::Builder { crate::model::revision_published::Builder::default() } } /// <p>What occurs after a certain event.</p> 
#[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct Action { /// <p>Details for the export revision to Amazon S3 action.</p> pub export_revision_to_s3: std::option::Option<crate::model::AutoExportRevisionToS3RequestDetails>, } impl Action { /// <p>Details for the export revision to Amazon S3 action.</p> pub fn export_revision_to_s3( &self, ) -> std::option::Option<&crate::model::AutoExportRevisionToS3RequestDetails> { self.export_revision_to_s3.as_ref() } } impl std::fmt::Debug for Action { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("Action"); formatter.field("export_revision_to_s3", &self.export_revision_to_s3); formatter.finish() } } /// See [`Action`](crate::model::Action) pub mod action { /// A builder for [`Action`](crate::model::Action) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) export_revision_to_s3: std::option::Option<crate::model::AutoExportRevisionToS3RequestDetails>, } impl Builder { /// <p>Details for the export revision to Amazon S3 action.</p> pub fn export_revision_to_s3( mut self, input: crate::model::AutoExportRevisionToS3RequestDetails, ) -> Self { self.export_revision_to_s3 = Some(input); self } /// <p>Details for the export revision to Amazon S3 action.</p> pub fn set_export_revision_to_s3( mut self, input: std::option::Option<crate::model::AutoExportRevisionToS3RequestDetails>, ) -> Self { self.export_revision_to_s3 = input; self } /// Consumes the builder and constructs a [`Action`](crate::model::Action) pub fn build(self) -> crate::model::Action { crate::model::Action { export_revision_to_s3: self.export_revision_to_s3, } } } } impl Action { /// Creates a new builder-style object to manufacture [`Action`](crate::model::Action) pub fn builder() -> crate::model::action::Builder { crate::model::action::Builder::default() } } /// <p>Details of the operation to be 
performed by the job.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct AutoExportRevisionToS3RequestDetails { /// <p>Encryption configuration for the auto export job.</p> pub encryption: std::option::Option<crate::model::ExportServerSideEncryption>, /// <p>A revision destination is the Amazon S3 bucket folder destination to where the export will be sent.</p> pub revision_destination: std::option::Option<crate::model::AutoExportRevisionDestinationEntry>, } impl AutoExportRevisionToS3RequestDetails { /// <p>Encryption configuration for the auto export job.</p> pub fn encryption(&self) -> std::option::Option<&crate::model::ExportServerSideEncryption> { self.encryption.as_ref() } /// <p>A revision destination is the Amazon S3 bucket folder destination to where the export will be sent.</p> pub fn revision_destination( &self, ) -> std::option::Option<&crate::model::AutoExportRevisionDestinationEntry> { self.revision_destination.as_ref() } } impl std::fmt::Debug for AutoExportRevisionToS3RequestDetails { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("AutoExportRevisionToS3RequestDetails"); formatter.field("encryption", &self.encryption); formatter.field("revision_destination", &self.revision_destination); formatter.finish() } } /// See [`AutoExportRevisionToS3RequestDetails`](crate::model::AutoExportRevisionToS3RequestDetails) pub mod auto_export_revision_to_s3_request_details { /// A builder for [`AutoExportRevisionToS3RequestDetails`](crate::model::AutoExportRevisionToS3RequestDetails) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) encryption: std::option::Option<crate::model::ExportServerSideEncryption>, pub(crate) revision_destination: std::option::Option<crate::model::AutoExportRevisionDestinationEntry>, } impl Builder { /// <p>Encryption configuration for the auto export job.</p> pub 
fn encryption(mut self, input: crate::model::ExportServerSideEncryption) -> Self { self.encryption = Some(input); self } /// <p>Encryption configuration for the auto export job.</p> pub fn set_encryption( mut self, input: std::option::Option<crate::model::ExportServerSideEncryption>, ) -> Self { self.encryption = input; self } /// <p>A revision destination is the Amazon S3 bucket folder destination to where the export will be sent.</p> pub fn revision_destination( mut self, input: crate::model::AutoExportRevisionDestinationEntry, ) -> Self { self.revision_destination = Some(input); self } /// <p>A revision destination is the Amazon S3 bucket folder destination to where the export will be sent.</p> pub fn set_revision_destination( mut self, input: std::option::Option<crate::model::AutoExportRevisionDestinationEntry>, ) -> Self { self.revision_destination = input; self } /// Consumes the builder and constructs a [`AutoExportRevisionToS3RequestDetails`](crate::model::AutoExportRevisionToS3RequestDetails) pub fn build(self) -> crate::model::AutoExportRevisionToS3RequestDetails { crate::model::AutoExportRevisionToS3RequestDetails { encryption: self.encryption, revision_destination: self.revision_destination, } } } } impl AutoExportRevisionToS3RequestDetails { /// Creates a new builder-style object to manufacture [`AutoExportRevisionToS3RequestDetails`](crate::model::AutoExportRevisionToS3RequestDetails) pub fn builder() -> crate::model::auto_export_revision_to_s3_request_details::Builder { crate::model::auto_export_revision_to_s3_request_details::Builder::default() } } /// <p>A revision destination is the Amazon S3 bucket folder destination to where the export will be sent.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct AutoExportRevisionDestinationEntry { /// <p>The S3 bucket that is the destination for the event action.</p> pub bucket: std::option::Option<std::string::String>, /// <p>A string representing the pattern for generated 
names of the individual assets in the revision. For more information about key patterns, see <a href="https://docs.aws.amazon.com/data-exchange/latest/userguide/jobs.html#revision-export-keypatterns">Key patterns when exporting revisions</a>.</p> pub key_pattern: std::option::Option<std::string::String>, } impl AutoExportRevisionDestinationEntry { /// <p>The S3 bucket that is the destination for the event action.</p> pub fn bucket(&self) -> std::option::Option<&str> { self.bucket.as_deref() } /// <p>A string representing the pattern for generated names of the individual assets in the revision. For more information about key patterns, see <a href="https://docs.aws.amazon.com/data-exchange/latest/userguide/jobs.html#revision-export-keypatterns">Key patterns when exporting revisions</a>.</p> pub fn key_pattern(&self) -> std::option::Option<&str> { self.key_pattern.as_deref() } } impl std::fmt::Debug for AutoExportRevisionDestinationEntry { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("AutoExportRevisionDestinationEntry"); formatter.field("bucket", &self.bucket); formatter.field("key_pattern", &self.key_pattern); formatter.finish() } } /// See [`AutoExportRevisionDestinationEntry`](crate::model::AutoExportRevisionDestinationEntry) pub mod auto_export_revision_destination_entry { /// A builder for [`AutoExportRevisionDestinationEntry`](crate::model::AutoExportRevisionDestinationEntry) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) bucket: std::option::Option<std::string::String>, pub(crate) key_pattern: std::option::Option<std::string::String>, } impl Builder { /// <p>The S3 bucket that is the destination for the event action.</p> pub fn bucket(mut self, input: impl Into<std::string::String>) -> Self { self.bucket = Some(input.into()); self } /// <p>The S3 bucket that is the destination for the event action.</p> pub fn 
set_bucket(mut self, input: std::option::Option<std::string::String>) -> Self { self.bucket = input; self } /// <p>A string representing the pattern for generated names of the individual assets in the revision. For more information about key patterns, see <a href="https://docs.aws.amazon.com/data-exchange/latest/userguide/jobs.html#revision-export-keypatterns">Key patterns when exporting revisions</a>.</p> pub fn key_pattern(mut self, input: impl Into<std::string::String>) -> Self { self.key_pattern = Some(input.into()); self } /// <p>A string representing the pattern for generated names of the individual assets in the revision. For more information about key patterns, see <a href="https://docs.aws.amazon.com/data-exchange/latest/userguide/jobs.html#revision-export-keypatterns">Key patterns when exporting revisions</a>.</p> pub fn set_key_pattern(mut self, input: std::option::Option<std::string::String>) -> Self { self.key_pattern = input; self } /// Consumes the builder and constructs a [`AutoExportRevisionDestinationEntry`](crate::model::AutoExportRevisionDestinationEntry) pub fn build(self) -> crate::model::AutoExportRevisionDestinationEntry { crate::model::AutoExportRevisionDestinationEntry { bucket: self.bucket, key_pattern: self.key_pattern, } } } } impl AutoExportRevisionDestinationEntry { /// Creates a new builder-style object to manufacture [`AutoExportRevisionDestinationEntry`](crate::model::AutoExportRevisionDestinationEntry) pub fn builder() -> crate::model::auto_export_revision_destination_entry::Builder { crate::model::auto_export_revision_destination_entry::Builder::default() } } /// <p>Encryption configuration of the export job. Includes the encryption type in addition to the AWS KMS key. The KMS key is only necessary if you chose the KMS encryption. 
type.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct ExportServerSideEncryption { /// <p>The Amazon Resource Name (ARN) of the AWS KMS key you want to use to encrypt the Amazon S3 objects. This parameter is required if you choose aws:kms as an encryption type.</p> pub kms_key_arn: std::option::Option<std::string::String>, /// <p>The type of server side encryption used for encrypting the objects in Amazon S3.</p> pub r#type: std::option::Option<crate::model::ServerSideEncryptionTypes>, } impl ExportServerSideEncryption { /// <p>The Amazon Resource Name (ARN) of the AWS KMS key you want to use to encrypt the Amazon S3 objects. This parameter is required if you choose aws:kms as an encryption type.</p> pub fn kms_key_arn(&self) -> std::option::Option<&str> { self.kms_key_arn.as_deref() } /// <p>The type of server side encryption used for encrypting the objects in Amazon S3.</p> pub fn r#type(&self) -> std::option::Option<&crate::model::ServerSideEncryptionTypes> { self.r#type.as_ref() } } impl std::fmt::Debug for ExportServerSideEncryption { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("ExportServerSideEncryption"); formatter.field("kms_key_arn", &self.kms_key_arn); formatter.field("r#type", &self.r#type); formatter.finish() } } /// See [`ExportServerSideEncryption`](crate::model::ExportServerSideEncryption) pub mod export_server_side_encryption { /// A builder for [`ExportServerSideEncryption`](crate::model::ExportServerSideEncryption) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) kms_key_arn: std::option::Option<std::string::String>, pub(crate) r#type: std::option::Option<crate::model::ServerSideEncryptionTypes>, } impl Builder { /// <p>The Amazon Resource Name (ARN) of the AWS KMS key you want to use to encrypt the Amazon S3 objects. 
This parameter is required if you choose aws:kms as an encryption type.</p> pub fn kms_key_arn(mut self, input: impl Into<std::string::String>) -> Self { self.kms_key_arn = Some(input.into()); self } /// <p>The Amazon Resource Name (ARN) of the AWS KMS key you want to use to encrypt the Amazon S3 objects. This parameter is required if you choose aws:kms as an encryption type.</p> pub fn set_kms_key_arn(mut self, input: std::option::Option<std::string::String>) -> Self { self.kms_key_arn = input; self } /// <p>The type of server side encryption used for encrypting the objects in Amazon S3.</p> pub fn r#type(mut self, input: crate::model::ServerSideEncryptionTypes) -> Self { self.r#type = Some(input); self } /// <p>The type of server side encryption used for encrypting the objects in Amazon S3.</p> pub fn set_type( mut self, input: std::option::Option<crate::model::ServerSideEncryptionTypes>, ) -> Self { self.r#type = input; self } /// Consumes the builder and constructs a [`ExportServerSideEncryption`](crate::model::ExportServerSideEncryption) pub fn build(self) -> crate::model::ExportServerSideEncryption { crate::model::ExportServerSideEncryption { kms_key_arn: self.kms_key_arn, r#type: self.r#type, } } } } impl ExportServerSideEncryption { /// Creates a new builder-style object to manufacture [`ExportServerSideEncryption`](crate::model::ExportServerSideEncryption) pub fn builder() -> crate::model::export_server_side_encryption::Builder { crate::model::export_server_side_encryption::Builder::default() } } /// <p>The types of encryption supported in export jobs to Amazon S3.</p> #[non_exhaustive] #[derive( std::clone::Clone, std::cmp::Eq, std::cmp::Ord, std::cmp::PartialEq, std::cmp::PartialOrd, std::fmt::Debug, std::hash::Hash, )] pub enum ServerSideEncryptionTypes { #[allow(missing_docs)] // documentation missing in model Aes256, #[allow(missing_docs)] // documentation missing in model AwsKms, /// Unknown contains new variants that have been added since this code 
was generated. Unknown(String), } impl std::convert::From<&str> for ServerSideEncryptionTypes { fn from(s: &str) -> Self { match s { "AES256" => ServerSideEncryptionTypes::Aes256, "aws:kms" => ServerSideEncryptionTypes::AwsKms, other => ServerSideEncryptionTypes::Unknown(other.to_owned()), } } } impl std::str::FromStr for ServerSideEncryptionTypes { type Err = std::convert::Infallible; fn from_str(s: &str) -> std::result::Result<Self, Self::Err> { Ok(ServerSideEncryptionTypes::from(s)) } } impl ServerSideEncryptionTypes { /// Returns the `&str` value of the enum member. pub fn as_str(&self) -> &str { match self { ServerSideEncryptionTypes::Aes256 => "AES256", ServerSideEncryptionTypes::AwsKms => "aws:kms", ServerSideEncryptionTypes::Unknown(s) => s.as_ref(), } } /// Returns all the `&str` values of the enum members. pub fn values() -> &'static [&'static str] { &["AES256", "aws:kms"] } } impl AsRef<str> for ServerSideEncryptionTypes { fn as_ref(&self) -> &str { self.as_str() } } /// <p>Information about the origin of the data set.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct OriginDetails { /// <p>The product ID of the origin of the data set.</p> pub product_id: std::option::Option<std::string::String>, } impl OriginDetails { /// <p>The product ID of the origin of the data set.</p> pub fn product_id(&self) -> std::option::Option<&str> { self.product_id.as_deref() } } impl std::fmt::Debug for OriginDetails { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("OriginDetails"); formatter.field("product_id", &self.product_id); formatter.finish() } } /// See [`OriginDetails`](crate::model::OriginDetails) pub mod origin_details { /// A builder for [`OriginDetails`](crate::model::OriginDetails) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) product_id: 
std::option::Option<std::string::String>, } impl Builder { /// <p>The product ID of the origin of the data set.</p> pub fn product_id(mut self, input: impl Into<std::string::String>) -> Self { self.product_id = Some(input.into()); self } /// <p>The product ID of the origin of the data set.</p> pub fn set_product_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.product_id = input; self } /// Consumes the builder and constructs a [`OriginDetails`](crate::model::OriginDetails) pub fn build(self) -> crate::model::OriginDetails { crate::model::OriginDetails { product_id: self.product_id, } } } } impl OriginDetails { /// Creates a new builder-style object to manufacture [`OriginDetails`](crate::model::OriginDetails) pub fn builder() -> crate::model::origin_details::Builder { crate::model::origin_details::Builder::default() } } /// <p>A property that defines the data set as OWNED by the account (for providers) or ENTITLED to the account (for subscribers). When an owned data set is published in a product, AWS Data Exchange creates a copy of the data set. Subscribers can access that copy of the data set as an entitled data set.</p> #[non_exhaustive] #[derive( std::clone::Clone, std::cmp::Eq, std::cmp::Ord, std::cmp::PartialEq, std::cmp::PartialOrd, std::fmt::Debug, std::hash::Hash, )] pub enum Origin { #[allow(missing_docs)] // documentation missing in model Entitled, #[allow(missing_docs)] // documentation missing in model Owned, /// Unknown contains new variants that have been added since this code was generated. Unknown(String), } impl std::convert::From<&str> for Origin { fn from(s: &str) -> Self { match s { "ENTITLED" => Origin::Entitled, "OWNED" => Origin::Owned, other => Origin::Unknown(other.to_owned()), } } } impl std::str::FromStr for Origin { type Err = std::convert::Infallible; fn from_str(s: &str) -> std::result::Result<Self, Self::Err> { Ok(Origin::from(s)) } } impl Origin { /// Returns the `&str` value of the enum member. 
pub fn as_str(&self) -> &str { match self { Origin::Entitled => "ENTITLED", Origin::Owned => "OWNED", Origin::Unknown(s) => s.as_ref(), } } /// Returns all the `&str` values of the enum members. pub fn values() -> &'static [&'static str] { &["ENTITLED", "OWNED"] } } impl AsRef<str> for Origin { fn as_ref(&self) -> &str { self.as_str() } } /// <p>The type of asset that is added to a data set.</p> #[non_exhaustive] #[derive( std::clone::Clone, std::cmp::Eq, std::cmp::Ord, std::cmp::PartialEq, std::cmp::PartialOrd, std::fmt::Debug, std::hash::Hash, )] pub enum AssetType { #[allow(missing_docs)] // documentation missing in model RedshiftDataShare, #[allow(missing_docs)] // documentation missing in model S3Snapshot, /// Unknown contains new variants that have been added since this code was generated. Unknown(String), } impl std::convert::From<&str> for AssetType { fn from(s: &str) -> Self { match s { "REDSHIFT_DATA_SHARE" => AssetType::RedshiftDataShare, "S3_SNAPSHOT" => AssetType::S3Snapshot, other => AssetType::Unknown(other.to_owned()), } } } impl std::str::FromStr for AssetType { type Err = std::convert::Infallible; fn from_str(s: &str) -> std::result::Result<Self, Self::Err> { Ok(AssetType::from(s)) } } impl AssetType { /// Returns the `&str` value of the enum member. pub fn as_str(&self) -> &str { match self { AssetType::RedshiftDataShare => "REDSHIFT_DATA_SHARE", AssetType::S3Snapshot => "S3_SNAPSHOT", AssetType::Unknown(s) => s.as_ref(), } } /// Returns all the `&str` values of the enum members. 
pub fn values() -> &'static [&'static str] { &["REDSHIFT_DATA_SHARE", "S3_SNAPSHOT"] } } impl AsRef<str> for AssetType { fn as_ref(&self) -> &str { self.as_str() } } /// <p>Information about the asset.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct AssetDetails { /// <p>The S3 object that is the asset.</p> pub s3_snapshot_asset: std::option::Option<crate::model::S3SnapshotAsset>, /// <p>The Amazon Redshift datashare that is the asset.</p> pub redshift_data_share_asset: std::option::Option<crate::model::RedshiftDataShareAsset>, } impl AssetDetails { /// <p>The S3 object that is the asset.</p> pub fn s3_snapshot_asset(&self) -> std::option::Option<&crate::model::S3SnapshotAsset> { self.s3_snapshot_asset.as_ref() } /// <p>The Amazon Redshift datashare that is the asset.</p> pub fn redshift_data_share_asset( &self, ) -> std::option::Option<&crate::model::RedshiftDataShareAsset> { self.redshift_data_share_asset.as_ref() } } impl std::fmt::Debug for AssetDetails { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("AssetDetails"); formatter.field("s3_snapshot_asset", &self.s3_snapshot_asset); formatter.field("redshift_data_share_asset", &self.redshift_data_share_asset); formatter.finish() } } /// See [`AssetDetails`](crate::model::AssetDetails) pub mod asset_details { /// A builder for [`AssetDetails`](crate::model::AssetDetails) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) s3_snapshot_asset: std::option::Option<crate::model::S3SnapshotAsset>, pub(crate) redshift_data_share_asset: std::option::Option<crate::model::RedshiftDataShareAsset>, } impl Builder { /// <p>The S3 object that is the asset.</p> pub fn s3_snapshot_asset(mut self, input: crate::model::S3SnapshotAsset) -> Self { self.s3_snapshot_asset = Some(input); self } /// <p>The S3 object that is the asset.</p> pub fn 
set_s3_snapshot_asset( mut self, input: std::option::Option<crate::model::S3SnapshotAsset>, ) -> Self { self.s3_snapshot_asset = input; self } /// <p>The Amazon Redshift datashare that is the asset.</p> pub fn redshift_data_share_asset( mut self, input: crate::model::RedshiftDataShareAsset, ) -> Self { self.redshift_data_share_asset = Some(input); self } /// <p>The Amazon Redshift datashare that is the asset.</p> pub fn set_redshift_data_share_asset( mut self, input: std::option::Option<crate::model::RedshiftDataShareAsset>, ) -> Self { self.redshift_data_share_asset = input; self } /// Consumes the builder and constructs a [`AssetDetails`](crate::model::AssetDetails) pub fn build(self) -> crate::model::AssetDetails { crate::model::AssetDetails { s3_snapshot_asset: self.s3_snapshot_asset, redshift_data_share_asset: self.redshift_data_share_asset, } } } } impl AssetDetails { /// Creates a new builder-style object to manufacture [`AssetDetails`](crate::model::AssetDetails) pub fn builder() -> crate::model::asset_details::Builder { crate::model::asset_details::Builder::default() } } /// The Amazon Redshift datashare asset. #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct RedshiftDataShareAsset { /// The Amazon Resource Name (ARN) of the datashare asset. pub arn: std::option::Option<std::string::String>, } impl RedshiftDataShareAsset { /// The Amazon Resource Name (ARN) of the datashare asset. 
pub fn arn(&self) -> std::option::Option<&str> { self.arn.as_deref() } } impl std::fmt::Debug for RedshiftDataShareAsset { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("RedshiftDataShareAsset"); formatter.field("arn", &self.arn); formatter.finish() } } /// See [`RedshiftDataShareAsset`](crate::model::RedshiftDataShareAsset) pub mod redshift_data_share_asset { /// A builder for [`RedshiftDataShareAsset`](crate::model::RedshiftDataShareAsset) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) arn: std::option::Option<std::string::String>, } impl Builder { /// The Amazon Resource Name (ARN) of the datashare asset. pub fn arn(mut self, input: impl Into<std::string::String>) -> Self { self.arn = Some(input.into()); self } /// The Amazon Resource Name (ARN) of the datashare asset. pub fn set_arn(mut self, input: std::option::Option<std::string::String>) -> Self { self.arn = input; self } /// Consumes the builder and constructs a [`RedshiftDataShareAsset`](crate::model::RedshiftDataShareAsset) pub fn build(self) -> crate::model::RedshiftDataShareAsset { crate::model::RedshiftDataShareAsset { arn: self.arn } } } } impl RedshiftDataShareAsset { /// Creates a new builder-style object to manufacture [`RedshiftDataShareAsset`](crate::model::RedshiftDataShareAsset) pub fn builder() -> crate::model::redshift_data_share_asset::Builder { crate::model::redshift_data_share_asset::Builder::default() } } /// <p>The S3 object that is the asset.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct S3SnapshotAsset { /// <p>The size of the S3 object that is the object.</p> pub size: f64, } impl S3SnapshotAsset { /// <p>The size of the S3 object that is the object.</p> pub fn size(&self) -> f64 { self.size } } impl std::fmt::Debug for S3SnapshotAsset { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> 
std::fmt::Result { let mut formatter = f.debug_struct("S3SnapshotAsset"); formatter.field("size", &self.size); formatter.finish() } } /// See [`S3SnapshotAsset`](crate::model::S3SnapshotAsset) pub mod s3_snapshot_asset { /// A builder for [`S3SnapshotAsset`](crate::model::S3SnapshotAsset) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) size: std::option::Option<f64>, } impl Builder { /// <p>The size of the S3 object that is the object.</p> pub fn size(mut self, input: f64) -> Self { self.size = Some(input); self } /// <p>The size of the S3 object that is the object.</p> pub fn set_size(mut self, input: std::option::Option<f64>) -> Self { self.size = input; self } /// Consumes the builder and constructs a [`S3SnapshotAsset`](crate::model::S3SnapshotAsset) pub fn build(self) -> crate::model::S3SnapshotAsset { crate::model::S3SnapshotAsset { size: self.size.unwrap_or_default(), } } } } impl S3SnapshotAsset { /// Creates a new builder-style object to manufacture [`S3SnapshotAsset`](crate::model::S3SnapshotAsset) pub fn builder() -> crate::model::s3_snapshot_asset::Builder { crate::model::s3_snapshot_asset::Builder::default() } } /// <p>An asset in AWS Data Exchange is a piece of data. The asset can be a structured data file, an image file, or some other data file that can be stored as an S3 object, or an Amazon Redshift datashare (Preview). 
When you create an import job for your files, you create an asset in AWS Data Exchange for each of those files.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct AssetEntry { /// <p>The ARN for the asset.</p> pub arn: std::option::Option<std::string::String>, /// <p>Information about the asset.</p> pub asset_details: std::option::Option<crate::model::AssetDetails>, /// <p>The type of asset that is added to a data set.</p> pub asset_type: std::option::Option<crate::model::AssetType>, /// <p>The date and time that the asset was created, in ISO 8601 format.</p> pub created_at: std::option::Option<aws_smithy_types::DateTime>, /// <p>The unique identifier for the data set associated with this asset.</p> pub data_set_id: std::option::Option<std::string::String>, /// <p>The unique identifier for the asset.</p> pub id: std::option::Option<std::string::String>, /// <p>The name of the asset. When importing from Amazon S3, the S3 object key is used as the asset name. When exporting to Amazon S3, the asset name is used as default target S3 object key.</p> pub name: std::option::Option<std::string::String>, /// <p>The unique identifier for the revision associated with this asset.</p> pub revision_id: std::option::Option<std::string::String>, /// <p>The asset ID of the owned asset corresponding to the entitled asset being viewed. 
This parameter is returned when an asset owner is viewing the entitled copy of its owned asset.</p> pub source_id: std::option::Option<std::string::String>, /// <p>The date and time that the asset was last updated, in ISO 8601 format.</p> pub updated_at: std::option::Option<aws_smithy_types::DateTime>, } impl AssetEntry { /// <p>The ARN for the asset.</p> pub fn arn(&self) -> std::option::Option<&str> { self.arn.as_deref() } /// <p>Information about the asset.</p> pub fn asset_details(&self) -> std::option::Option<&crate::model::AssetDetails> { self.asset_details.as_ref() } /// <p>The type of asset that is added to a data set.</p> pub fn asset_type(&self) -> std::option::Option<&crate::model::AssetType> { self.asset_type.as_ref() } /// <p>The date and time that the asset was created, in ISO 8601 format.</p> pub fn created_at(&self) -> std::option::Option<&aws_smithy_types::DateTime> { self.created_at.as_ref() } /// <p>The unique identifier for the data set associated with this asset.</p> pub fn data_set_id(&self) -> std::option::Option<&str> { self.data_set_id.as_deref() } /// <p>The unique identifier for the asset.</p> pub fn id(&self) -> std::option::Option<&str> { self.id.as_deref() } /// <p>The name of the asset. When importing from Amazon S3, the S3 object key is used as the asset name. When exporting to Amazon S3, the asset name is used as default target S3 object key.</p> pub fn name(&self) -> std::option::Option<&str> { self.name.as_deref() } /// <p>The unique identifier for the revision associated with this asset.</p> pub fn revision_id(&self) -> std::option::Option<&str> { self.revision_id.as_deref() } /// <p>The asset ID of the owned asset corresponding to the entitled asset being viewed. 
This parameter is returned when an asset owner is viewing the entitled copy of its owned asset.</p> pub fn source_id(&self) -> std::option::Option<&str> { self.source_id.as_deref() } /// <p>The date and time that the asset was last updated, in ISO 8601 format.</p> pub fn updated_at(&self) -> std::option::Option<&aws_smithy_types::DateTime> { self.updated_at.as_ref() } } impl std::fmt::Debug for AssetEntry { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("AssetEntry"); formatter.field("arn", &self.arn); formatter.field("asset_details", &self.asset_details); formatter.field("asset_type", &self.asset_type); formatter.field("created_at", &self.created_at); formatter.field("data_set_id", &self.data_set_id); formatter.field("id", &self.id); formatter.field("name", &self.name); formatter.field("revision_id", &self.revision_id); formatter.field("source_id", &self.source_id); formatter.field("updated_at", &self.updated_at); formatter.finish() } } /// See [`AssetEntry`](crate::model::AssetEntry) pub mod asset_entry { /// A builder for [`AssetEntry`](crate::model::AssetEntry) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) arn: std::option::Option<std::string::String>, pub(crate) asset_details: std::option::Option<crate::model::AssetDetails>, pub(crate) asset_type: std::option::Option<crate::model::AssetType>, pub(crate) created_at: std::option::Option<aws_smithy_types::DateTime>, pub(crate) data_set_id: std::option::Option<std::string::String>, pub(crate) id: std::option::Option<std::string::String>, pub(crate) name: std::option::Option<std::string::String>, pub(crate) revision_id: std::option::Option<std::string::String>, pub(crate) source_id: std::option::Option<std::string::String>, pub(crate) updated_at: std::option::Option<aws_smithy_types::DateTime>, } impl Builder { /// <p>The ARN for the asset.</p> pub fn arn(mut self, 
input: impl Into<std::string::String>) -> Self { self.arn = Some(input.into()); self } /// <p>The ARN for the asset.</p> pub fn set_arn(mut self, input: std::option::Option<std::string::String>) -> Self { self.arn = input; self } /// <p>Information about the asset.</p> pub fn asset_details(mut self, input: crate::model::AssetDetails) -> Self { self.asset_details = Some(input); self } /// <p>Information about the asset.</p> pub fn set_asset_details( mut self, input: std::option::Option<crate::model::AssetDetails>, ) -> Self { self.asset_details = input; self } /// <p>The type of asset that is added to a data set.</p> pub fn asset_type(mut self, input: crate::model::AssetType) -> Self { self.asset_type = Some(input); self } /// <p>The type of asset that is added to a data set.</p> pub fn set_asset_type( mut self, input: std::option::Option<crate::model::AssetType>, ) -> Self { self.asset_type = input; self } /// <p>The date and time that the asset was created, in ISO 8601 format.</p> pub fn created_at(mut self, input: aws_smithy_types::DateTime) -> Self { self.created_at = Some(input); self } /// <p>The date and time that the asset was created, in ISO 8601 format.</p> pub fn set_created_at( mut self, input: std::option::Option<aws_smithy_types::DateTime>, ) -> Self { self.created_at = input; self } /// <p>The unique identifier for the data set associated with this asset.</p> pub fn data_set_id(mut self, input: impl Into<std::string::String>) -> Self { self.data_set_id = Some(input.into()); self } /// <p>The unique identifier for the data set associated with this asset.</p> pub fn set_data_set_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.data_set_id = input; self } /// <p>The unique identifier for the asset.</p> pub fn id(mut self, input: impl Into<std::string::String>) -> Self { self.id = Some(input.into()); self } /// <p>The unique identifier for the asset.</p> pub fn set_id(mut self, input: std::option::Option<std::string::String>) 
-> Self { self.id = input; self } /// <p>The name of the asset. When importing from Amazon S3, the S3 object key is used as the asset name. When exporting to Amazon S3, the asset name is used as default target S3 object key.</p> pub fn name(mut self, input: impl Into<std::string::String>) -> Self { self.name = Some(input.into()); self } /// <p>The name of the asset. When importing from Amazon S3, the S3 object key is used as the asset name. When exporting to Amazon S3, the asset name is used as default target S3 object key.</p> pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.name = input; self } /// <p>The unique identifier for the revision associated with this asset.</p> pub fn revision_id(mut self, input: impl Into<std::string::String>) -> Self { self.revision_id = Some(input.into()); self } /// <p>The unique identifier for the revision associated with this asset.</p> pub fn set_revision_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.revision_id = input; self } /// <p>The asset ID of the owned asset corresponding to the entitled asset being viewed. This parameter is returned when an asset owner is viewing the entitled copy of its owned asset.</p> pub fn source_id(mut self, input: impl Into<std::string::String>) -> Self { self.source_id = Some(input.into()); self } /// <p>The asset ID of the owned asset corresponding to the entitled asset being viewed. 
This parameter is returned when an asset owner is viewing the entitled copy of its owned asset.</p> pub fn set_source_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.source_id = input; self } /// <p>The date and time that the asset was last updated, in ISO 8601 format.</p> pub fn updated_at(mut self, input: aws_smithy_types::DateTime) -> Self { self.updated_at = Some(input); self } /// <p>The date and time that the asset was last updated, in ISO 8601 format.</p> pub fn set_updated_at( mut self, input: std::option::Option<aws_smithy_types::DateTime>, ) -> Self { self.updated_at = input; self } /// Consumes the builder and constructs a [`AssetEntry`](crate::model::AssetEntry) pub fn build(self) -> crate::model::AssetEntry { crate::model::AssetEntry { arn: self.arn, asset_details: self.asset_details, asset_type: self.asset_type, created_at: self.created_at, data_set_id: self.data_set_id, id: self.id, name: self.name, revision_id: self.revision_id, source_id: self.source_id, updated_at: self.updated_at, } } } } impl AssetEntry { /// Creates a new builder-style object to manufacture [`AssetEntry`](crate::model::AssetEntry) pub fn builder() -> crate::model::asset_entry::Builder { crate::model::asset_entry::Builder::default() } } /// AWS Data Exchange Jobs are asynchronous import or export operations used to create or copy assets. A data set owner can both import and export as they see fit. Someone with an entitlement to a data set can only export. Jobs are deleted 90 days after they are created. 
// NOTE(review): machine-generated AWS SDK model types ("since this code was
// generated" per the variant docs); the `/// <p>…</p>` docs come from the
// service model and are kept verbatim.
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct JobEntry {
    /// <p>The ARN for the job.</p>
    pub arn: std::option::Option<std::string::String>,
    /// <p>The date and time that the job was created, in ISO 8601 format.</p>
    pub created_at: std::option::Option<aws_smithy_types::DateTime>,
    /// <p>Details of the operation to be performed by the job, such as export destination details or import source details.</p>
    pub details: std::option::Option<crate::model::ResponseDetails>,
    /// <p>Errors for jobs.</p>
    pub errors: std::option::Option<std::vec::Vec<crate::model::JobError>>,
    /// <p>The unique identifier for the job.</p>
    pub id: std::option::Option<std::string::String>,
    /// <p>The state of the job.</p>
    pub state: std::option::Option<crate::model::State>,
    // `type` is a Rust keyword, hence the raw identifier `r#type`.
    /// <p>The job type.</p>
    pub r#type: std::option::Option<crate::model::Type>,
    /// <p>The date and time that the job was last updated, in ISO 8601 format.</p>
    pub updated_at: std::option::Option<aws_smithy_types::DateTime>,
}
impl JobEntry {
    /// <p>The ARN for the job.</p>
    pub fn arn(&self) -> std::option::Option<&str> {
        self.arn.as_deref()
    }
    /// <p>The date and time that the job was created, in ISO 8601 format.</p>
    pub fn created_at(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
        self.created_at.as_ref()
    }
    /// <p>Details of the operation to be performed by the job, such as export destination details or import source details.</p>
    pub fn details(&self) -> std::option::Option<&crate::model::ResponseDetails> {
        self.details.as_ref()
    }
    /// <p>Errors for jobs.</p>
    pub fn errors(&self) -> std::option::Option<&[crate::model::JobError]> {
        self.errors.as_deref()
    }
    /// <p>The unique identifier for the job.</p>
    pub fn id(&self) -> std::option::Option<&str> {
        self.id.as_deref()
    }
    /// <p>The state of the job.</p>
    pub fn state(&self) -> std::option::Option<&crate::model::State> {
        self.state.as_ref()
    }
    /// <p>The job type.</p>
    pub fn r#type(&self) -> std::option::Option<&crate::model::Type> {
        self.r#type.as_ref()
    }
    /// <p>The date and time that the job was last updated, in ISO 8601 format.</p>
    pub fn updated_at(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
        self.updated_at.as_ref()
    }
}
impl std::fmt::Debug for JobEntry {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("JobEntry");
        formatter.field("arn", &self.arn);
        formatter.field("created_at", &self.created_at);
        formatter.field("details", &self.details);
        formatter.field("errors", &self.errors);
        formatter.field("id", &self.id);
        formatter.field("state", &self.state);
        formatter.field("r#type", &self.r#type);
        formatter.field("updated_at", &self.updated_at);
        formatter.finish()
    }
}
/// See [`JobEntry`](crate::model::JobEntry)
pub mod job_entry {
    /// A builder for [`JobEntry`](crate::model::JobEntry)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) arn: std::option::Option<std::string::String>,
        pub(crate) created_at: std::option::Option<aws_smithy_types::DateTime>,
        pub(crate) details: std::option::Option<crate::model::ResponseDetails>,
        pub(crate) errors: std::option::Option<std::vec::Vec<crate::model::JobError>>,
        pub(crate) id: std::option::Option<std::string::String>,
        pub(crate) state: std::option::Option<crate::model::State>,
        pub(crate) r#type: std::option::Option<crate::model::Type>,
        pub(crate) updated_at: std::option::Option<aws_smithy_types::DateTime>,
    }
    impl Builder {
        /// <p>The ARN for the job.</p>
        pub fn arn(mut self, input: impl Into<std::string::String>) -> Self {
            self.arn = Some(input.into());
            self
        }
        /// <p>The ARN for the job.</p>
        pub fn set_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.arn = input;
            self
        }
        /// <p>The date and time that the job was created, in ISO 8601 format.</p>
        pub fn created_at(mut self, input: aws_smithy_types::DateTime) -> Self {
            self.created_at = Some(input);
            self
        }
        /// <p>The date and time that the job was created, in ISO 8601 format.</p>
        pub fn set_created_at(
            mut self,
            input: std::option::Option<aws_smithy_types::DateTime>,
        ) -> Self {
            self.created_at = input;
            self
        }
        /// <p>Details of the operation to be performed by the job, such as export destination details or import source details.</p>
        pub fn details(mut self, input: crate::model::ResponseDetails) -> Self {
            self.details = Some(input);
            self
        }
        /// <p>Details of the operation to be performed by the job, such as export destination details or import source details.</p>
        pub fn set_details(
            mut self,
            input: std::option::Option<crate::model::ResponseDetails>,
        ) -> Self {
            self.details = input;
            self
        }
        /// Appends an item to `errors`.
        ///
        /// To override the contents of this collection use [`set_errors`](Self::set_errors).
        ///
        /// <p>Errors for jobs.</p>
        pub fn errors(mut self, input: impl Into<crate::model::JobError>) -> Self {
            let mut v = self.errors.unwrap_or_default();
            v.push(input.into());
            self.errors = Some(v);
            self
        }
        /// <p>Errors for jobs.</p>
        pub fn set_errors(
            mut self,
            input: std::option::Option<std::vec::Vec<crate::model::JobError>>,
        ) -> Self {
            self.errors = input;
            self
        }
        /// <p>The unique identifier for the job.</p>
        pub fn id(mut self, input: impl Into<std::string::String>) -> Self {
            self.id = Some(input.into());
            self
        }
        /// <p>The unique identifier for the job.</p>
        pub fn set_id(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.id = input;
            self
        }
        /// <p>The state of the job.</p>
        pub fn state(mut self, input: crate::model::State) -> Self {
            self.state = Some(input);
            self
        }
        /// <p>The state of the job.</p>
        pub fn set_state(mut self, input: std::option::Option<crate::model::State>) -> Self {
            self.state = input;
            self
        }
        /// <p>The job type.</p>
        pub fn r#type(mut self, input: crate::model::Type) -> Self {
            self.r#type = Some(input);
            self
        }
        /// <p>The job type.</p>
        pub fn set_type(mut self, input: std::option::Option<crate::model::Type>) -> Self {
            self.r#type = input;
            self
        }
        /// <p>The date and time that the job was last updated, in ISO 8601 format.</p>
        pub fn updated_at(mut self, input: aws_smithy_types::DateTime) -> Self {
            self.updated_at = Some(input);
            self
        }
        /// <p>The date and time that the job was last updated, in ISO 8601 format.</p>
        pub fn set_updated_at(
            mut self,
            input: std::option::Option<aws_smithy_types::DateTime>,
        ) -> Self {
            self.updated_at = input;
            self
        }
        /// Consumes the builder and constructs a [`JobEntry`](crate::model::JobEntry)
        pub fn build(self) -> crate::model::JobEntry {
            crate::model::JobEntry {
                arn: self.arn,
                created_at: self.created_at,
                details: self.details,
                errors: self.errors,
                id: self.id,
                state: self.state,
                r#type: self.r#type,
                updated_at: self.updated_at,
            }
        }
    }
}
impl JobEntry {
    /// Creates a new builder-style object to manufacture [`JobEntry`](crate::model::JobEntry)
    pub fn builder() -> crate::model::job_entry::Builder {
        crate::model::job_entry::Builder::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum Type {
    #[allow(missing_docs)] // documentation missing in model
    ExportAssetsToS3,
    #[allow(missing_docs)] // documentation missing in model
    ExportAssetToSignedUrl,
    #[allow(missing_docs)] // documentation missing in model
    ExportRevisionsToS3,
    #[allow(missing_docs)] // documentation missing in model
    ImportAssetsFromRedshiftDataShares,
    #[allow(missing_docs)] // documentation missing in model
    ImportAssetsFromS3,
    #[allow(missing_docs)] // documentation missing in model
    ImportAssetFromSignedUrl,
    /// Unknown contains new variants that have been added since this code was generated.
    Unknown(String),
}
// Unrecognized wire values are preserved verbatim in `Unknown`, so
// `Type::from(s).as_str()` round-trips any input string losslessly.
impl std::convert::From<&str> for Type {
    fn from(s: &str) -> Self {
        match s {
            "EXPORT_ASSETS_TO_S3" => Type::ExportAssetsToS3,
            "EXPORT_ASSET_TO_SIGNED_URL" => Type::ExportAssetToSignedUrl,
            "EXPORT_REVISIONS_TO_S3" => Type::ExportRevisionsToS3,
            "IMPORT_ASSETS_FROM_REDSHIFT_DATA_SHARES" => Type::ImportAssetsFromRedshiftDataShares,
            "IMPORT_ASSETS_FROM_S3" => Type::ImportAssetsFromS3,
            "IMPORT_ASSET_FROM_SIGNED_URL" => Type::ImportAssetFromSignedUrl,
            other => Type::Unknown(other.to_owned()),
        }
    }
}
// Parsing never fails (`Err = Infallible`): any string maps to some variant.
impl std::str::FromStr for Type {
    type Err = std::convert::Infallible;
    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(Type::from(s))
    }
}
impl Type {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            Type::ExportAssetsToS3 => "EXPORT_ASSETS_TO_S3",
            Type::ExportAssetToSignedUrl => "EXPORT_ASSET_TO_SIGNED_URL",
            Type::ExportRevisionsToS3 => "EXPORT_REVISIONS_TO_S3",
            Type::ImportAssetsFromRedshiftDataShares => "IMPORT_ASSETS_FROM_REDSHIFT_DATA_SHARES",
            Type::ImportAssetsFromS3 => "IMPORT_ASSETS_FROM_S3",
            Type::ImportAssetFromSignedUrl => "IMPORT_ASSET_FROM_SIGNED_URL",
            Type::Unknown(s) => s.as_ref(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub fn values() -> &'static [&'static str] {
        &[
            "EXPORT_ASSETS_TO_S3",
            "EXPORT_ASSET_TO_SIGNED_URL",
            "EXPORT_REVISIONS_TO_S3",
            "IMPORT_ASSETS_FROM_REDSHIFT_DATA_SHARES",
            "IMPORT_ASSETS_FROM_S3",
            "IMPORT_ASSET_FROM_SIGNED_URL",
        ]
    }
}
impl AsRef<str> for Type {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum State {
    #[allow(missing_docs)] // documentation missing in model
    Cancelled,
    #[allow(missing_docs)] // documentation missing in model
    Completed,
    #[allow(missing_docs)] // documentation missing in model
    Error,
    #[allow(missing_docs)] // documentation missing in model
    InProgress,
    #[allow(missing_docs)] // documentation missing in model
    TimedOut,
    #[allow(missing_docs)] // documentation missing in model
    Waiting,
    /// Unknown contains new variants that have been added since this code was generated.
    Unknown(String),
}
impl std::convert::From<&str> for State {
    fn from(s: &str) -> Self {
        match s {
            "CANCELLED" => State::Cancelled,
            "COMPLETED" => State::Completed,
            "ERROR" => State::Error,
            "IN_PROGRESS" => State::InProgress,
            "TIMED_OUT" => State::TimedOut,
            "WAITING" => State::Waiting,
            other => State::Unknown(other.to_owned()),
        }
    }
}
impl std::str::FromStr for State {
    type Err = std::convert::Infallible;
    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(State::from(s))
    }
}
impl State {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            State::Cancelled => "CANCELLED",
            State::Completed => "COMPLETED",
            State::Error => "ERROR",
            State::InProgress => "IN_PROGRESS",
            State::TimedOut => "TIMED_OUT",
            State::Waiting => "WAITING",
            State::Unknown(s) => s.as_ref(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub fn values() -> &'static [&'static str] {
        &[
            "CANCELLED",
            "COMPLETED",
            "ERROR",
            "IN_PROGRESS",
            "TIMED_OUT",
            "WAITING",
        ]
    }
}
impl AsRef<str> for State {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}
/// An error that occurred with the job request.
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct JobError {
    /// The code for the job error.
    pub code: std::option::Option<crate::model::Code>,
    /// <p>The details about the job error.</p>
    pub details: std::option::Option<crate::model::Details>,
    /// <p>The name of the limit that was reached.</p>
    pub limit_name: std::option::Option<crate::model::JobErrorLimitName>,
    /// The value of the exceeded limit.
    pub limit_value: f64,
    /// The message related to the job error.
    pub message: std::option::Option<std::string::String>,
    /// The unique identifier for the resource related to the error.
    pub resource_id: std::option::Option<std::string::String>,
    /// The type of resource related to the error.
    pub resource_type: std::option::Option<crate::model::JobErrorResourceTypes>,
}
impl JobError {
    /// The code for the job error.
    pub fn code(&self) -> std::option::Option<&crate::model::Code> {
        self.code.as_ref()
    }
    /// <p>The details about the job error.</p>
    pub fn details(&self) -> std::option::Option<&crate::model::Details> {
        self.details.as_ref()
    }
    /// <p>The name of the limit that was reached.</p>
    pub fn limit_name(&self) -> std::option::Option<&crate::model::JobErrorLimitName> {
        self.limit_name.as_ref()
    }
    /// The value of the exceeded limit.
    pub fn limit_value(&self) -> f64 {
        self.limit_value
    }
    /// The message related to the job error.
    pub fn message(&self) -> std::option::Option<&str> {
        self.message.as_deref()
    }
    /// The unique identifier for the resource related to the error.
    pub fn resource_id(&self) -> std::option::Option<&str> {
        self.resource_id.as_deref()
    }
    /// The type of resource related to the error.
    pub fn resource_type(&self) -> std::option::Option<&crate::model::JobErrorResourceTypes> {
        self.resource_type.as_ref()
    }
}
impl std::fmt::Debug for JobError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("JobError");
        formatter.field("code", &self.code);
        formatter.field("details", &self.details);
        formatter.field("limit_name", &self.limit_name);
        formatter.field("limit_value", &self.limit_value);
        formatter.field("message", &self.message);
        formatter.field("resource_id", &self.resource_id);
        formatter.field("resource_type", &self.resource_type);
        formatter.finish()
    }
}
/// See [`JobError`](crate::model::JobError)
pub mod job_error {
    /// A builder for [`JobError`](crate::model::JobError)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) code: std::option::Option<crate::model::Code>,
        pub(crate) details: std::option::Option<crate::model::Details>,
        pub(crate) limit_name: std::option::Option<crate::model::JobErrorLimitName>,
        pub(crate) limit_value: std::option::Option<f64>,
        pub(crate) message: std::option::Option<std::string::String>,
        pub(crate) resource_id: std::option::Option<std::string::String>,
        pub(crate) resource_type: std::option::Option<crate::model::JobErrorResourceTypes>,
    }
    impl Builder {
        /// The code for the job error.
        pub fn code(mut self, input: crate::model::Code) -> Self {
            self.code = Some(input);
            self
        }
        /// The code for the job error.
        pub fn set_code(mut self, input: std::option::Option<crate::model::Code>) -> Self {
            self.code = input;
            self
        }
        /// <p>The details about the job error.</p>
        pub fn details(mut self, input: crate::model::Details) -> Self {
            self.details = Some(input);
            self
        }
        /// <p>The details about the job error.</p>
        pub fn set_details(mut self, input: std::option::Option<crate::model::Details>) -> Self {
            self.details = input;
            self
        }
        /// <p>The name of the limit that was reached.</p>
        pub fn limit_name(mut self, input: crate::model::JobErrorLimitName) -> Self {
            self.limit_name = Some(input);
            self
        }
        /// <p>The name of the limit that was reached.</p>
        pub fn set_limit_name(
            mut self,
            input: std::option::Option<crate::model::JobErrorLimitName>,
        ) -> Self {
            self.limit_name = input;
            self
        }
        /// The value of the exceeded limit.
        pub fn limit_value(mut self, input: f64) -> Self {
            self.limit_value = Some(input);
            self
        }
        /// The value of the exceeded limit.
        pub fn set_limit_value(mut self, input: std::option::Option<f64>) -> Self {
            self.limit_value = input;
            self
        }
        /// The message related to the job error.
        pub fn message(mut self, input: impl Into<std::string::String>) -> Self {
            self.message = Some(input.into());
            self
        }
        /// The message related to the job error.
        pub fn set_message(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.message = input;
            self
        }
        /// The unique identifier for the resource related to the error.
        pub fn resource_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.resource_id = Some(input.into());
            self
        }
        /// The unique identifier for the resource related to the error.
        pub fn set_resource_id(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.resource_id = input;
            self
        }
        /// The type of resource related to the error.
        pub fn resource_type(mut self, input: crate::model::JobErrorResourceTypes) -> Self {
            self.resource_type = Some(input);
            self
        }
        /// The type of resource related to the error.
        pub fn set_resource_type(
            mut self,
            input: std::option::Option<crate::model::JobErrorResourceTypes>,
        ) -> Self {
            self.resource_type = input;
            self
        }
        /// Consumes the builder and constructs a [`JobError`](crate::model::JobError)
        pub fn build(self) -> crate::model::JobError {
            crate::model::JobError {
                code: self.code,
                details: self.details,
                limit_name: self.limit_name,
                // `limit_value` is non-optional on `JobError`; when unset on the
                // builder it defaults to 0.0 via `unwrap_or_default`.
                limit_value: self.limit_value.unwrap_or_default(),
                message: self.message,
                resource_id: self.resource_id,
                resource_type: self.resource_type,
            }
        }
    }
}
impl JobError {
    /// Creates a new builder-style object to manufacture [`JobError`](crate::model::JobError)
    pub fn builder() -> crate::model::job_error::Builder {
        crate::model::job_error::Builder::default()
    }
}
/// The types of resource which the job error can apply to.
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum JobErrorResourceTypes {
    #[allow(missing_docs)] // documentation missing in model
    Asset,
    #[allow(missing_docs)] // documentation missing in model
    DataSet,
    #[allow(missing_docs)] // documentation missing in model
    Revision,
    /// Unknown contains new variants that have been added since this code was generated.
    Unknown(String),
}
impl std::convert::From<&str> for JobErrorResourceTypes {
    fn from(s: &str) -> Self {
        match s {
            "ASSET" => JobErrorResourceTypes::Asset,
            "DATA_SET" => JobErrorResourceTypes::DataSet,
            "REVISION" => JobErrorResourceTypes::Revision,
            other => JobErrorResourceTypes::Unknown(other.to_owned()),
        }
    }
}
impl std::str::FromStr for JobErrorResourceTypes {
    type Err = std::convert::Infallible;
    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(JobErrorResourceTypes::from(s))
    }
}
impl JobErrorResourceTypes {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            JobErrorResourceTypes::Asset => "ASSET",
            JobErrorResourceTypes::DataSet => "DATA_SET",
            JobErrorResourceTypes::Revision => "REVISION",
            JobErrorResourceTypes::Unknown(s) => s.as_ref(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub fn values() -> &'static [&'static str] {
        &["ASSET", "DATA_SET", "REVISION"]
    }
}
impl AsRef<str> for JobErrorResourceTypes {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}
/// The name of the limit that was reached.
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum JobErrorLimitName {
    #[allow(missing_docs)] // documentation missing in model
    AmazonRedshiftDatashareAssetsPerRevision,
    #[allow(missing_docs)] // documentation missing in model
    AssetSizeInGb,
    #[allow(missing_docs)] // documentation missing in model
    AssetsPerRevision,
    /// Unknown contains new variants that have been added since this code was generated.
    Unknown(String),
}
// NOTE(review): unlike the other enums here, the wire values for limit names
// are human-readable phrases containing spaces, not SCREAMING_SNAKE_CASE.
impl std::convert::From<&str> for JobErrorLimitName {
    fn from(s: &str) -> Self {
        match s {
            "Amazon Redshift datashare assets per revision" => {
                JobErrorLimitName::AmazonRedshiftDatashareAssetsPerRevision
            }
            "Asset size in GB" => JobErrorLimitName::AssetSizeInGb,
            "Assets per revision" => JobErrorLimitName::AssetsPerRevision,
            other => JobErrorLimitName::Unknown(other.to_owned()),
        }
    }
}
impl std::str::FromStr for JobErrorLimitName {
    type Err = std::convert::Infallible;
    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(JobErrorLimitName::from(s))
    }
}
impl JobErrorLimitName {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            JobErrorLimitName::AmazonRedshiftDatashareAssetsPerRevision => {
                "Amazon Redshift datashare assets per revision"
            }
            JobErrorLimitName::AssetSizeInGb => "Asset size in GB",
            JobErrorLimitName::AssetsPerRevision => "Assets per revision",
            JobErrorLimitName::Unknown(s) => s.as_ref(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub fn values() -> &'static [&'static str] {
        &[
            "Amazon Redshift datashare assets per revision",
            "Asset size in GB",
            "Assets per revision",
        ]
    }
}
impl AsRef<str> for JobErrorLimitName {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}
/// <p>Information about the job error.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct Details {
    /// <p>Information about the job error.</p>
    pub import_asset_from_signed_url_job_error_details:
        std::option::Option<crate::model::ImportAssetFromSignedUrlJobErrorDetails>,
    /// <p>Information about the job error.</p>
    pub import_assets_from_s3_job_error_details:
        std::option::Option<std::vec::Vec<crate::model::AssetSourceEntry>>,
}
impl Details {
    /// <p>Information about the job error.</p>
    pub fn import_asset_from_signed_url_job_error_details(
        &self,
    ) -> std::option::Option<&crate::model::ImportAssetFromSignedUrlJobErrorDetails> {
        self.import_asset_from_signed_url_job_error_details.as_ref()
    }
    /// <p>Information about the job error.</p>
    pub fn import_assets_from_s3_job_error_details(
        &self,
    ) -> std::option::Option<&[crate::model::AssetSourceEntry]> {
        self.import_assets_from_s3_job_error_details.as_deref()
    }
}
impl std::fmt::Debug for Details {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("Details");
        formatter.field(
            "import_asset_from_signed_url_job_error_details",
            &self.import_asset_from_signed_url_job_error_details,
        );
        formatter.field(
            "import_assets_from_s3_job_error_details",
            &self.import_assets_from_s3_job_error_details,
        );
        formatter.finish()
    }
}
/// See [`Details`](crate::model::Details)
pub mod details {
    /// A builder for [`Details`](crate::model::Details)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) import_asset_from_signed_url_job_error_details:
            std::option::Option<crate::model::ImportAssetFromSignedUrlJobErrorDetails>,
        pub(crate) import_assets_from_s3_job_error_details:
            std::option::Option<std::vec::Vec<crate::model::AssetSourceEntry>>,
    }
    impl Builder {
        /// <p>Information about the job error.</p>
        pub fn import_asset_from_signed_url_job_error_details(
            mut self,
            input: crate::model::ImportAssetFromSignedUrlJobErrorDetails,
        ) -> Self {
            self.import_asset_from_signed_url_job_error_details = Some(input);
            self
        }
        /// <p>Information about the job error.</p>
        pub fn set_import_asset_from_signed_url_job_error_details(
            mut self,
            input: std::option::Option<crate::model::ImportAssetFromSignedUrlJobErrorDetails>,
        ) -> Self {
            self.import_asset_from_signed_url_job_error_details = input;
            self
        }
        /// Appends an item to `import_assets_from_s3_job_error_details`.
        ///
        /// To override the contents of this collection use [`set_import_assets_from_s3_job_error_details`](Self::set_import_assets_from_s3_job_error_details).
        ///
        /// <p>Information about the job error.</p>
        pub fn import_assets_from_s3_job_error_details(
            mut self,
            input: impl Into<crate::model::AssetSourceEntry>,
        ) -> Self {
            let mut v = self
                .import_assets_from_s3_job_error_details
                .unwrap_or_default();
            v.push(input.into());
            self.import_assets_from_s3_job_error_details = Some(v);
            self
        }
        /// <p>Information about the job error.</p>
        pub fn set_import_assets_from_s3_job_error_details(
            mut self,
            input: std::option::Option<std::vec::Vec<crate::model::AssetSourceEntry>>,
        ) -> Self {
            self.import_assets_from_s3_job_error_details = input;
            self
        }
        /// Consumes the builder and constructs a [`Details`](crate::model::Details)
        pub fn build(self) -> crate::model::Details {
            crate::model::Details {
                import_asset_from_signed_url_job_error_details: self
                    .import_asset_from_signed_url_job_error_details,
                import_assets_from_s3_job_error_details: self
                    .import_assets_from_s3_job_error_details,
            }
        }
    }
}
impl Details {
    /// Creates a new builder-style object to manufacture [`Details`](crate::model::Details)
    pub fn builder() -> crate::model::details::Builder {
        crate::model::details::Builder::default()
    }
}
/// <p>The source of the assets.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct AssetSourceEntry {
    /// <p>The S3 bucket that's part of the source of the asset.</p>
    pub bucket: std::option::Option<std::string::String>,
    /// <p>The name of the object in Amazon S3 for the asset.</p>
    pub key: std::option::Option<std::string::String>,
}
impl AssetSourceEntry {
    /// <p>The S3 bucket that's part of the source of the asset.</p>
    pub fn bucket(&self) -> std::option::Option<&str> {
        self.bucket.as_deref()
    }
    /// <p>The name of the object in Amazon S3 for the asset.</p>
    pub fn key(&self) -> std::option::Option<&str> {
        self.key.as_deref()
    }
}
impl std::fmt::Debug for AssetSourceEntry {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("AssetSourceEntry");
        formatter.field("bucket",
&self.bucket); formatter.field("key", &self.key); formatter.finish() } } /// See [`AssetSourceEntry`](crate::model::AssetSourceEntry) pub mod asset_source_entry { /// A builder for [`AssetSourceEntry`](crate::model::AssetSourceEntry) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) bucket: std::option::Option<std::string::String>, pub(crate) key: std::option::Option<std::string::String>, } impl Builder { /// <p>The S3 bucket that's part of the source of the asset.</p> pub fn bucket(mut self, input: impl Into<std::string::String>) -> Self { self.bucket = Some(input.into()); self } /// <p>The S3 bucket that's part of the source of the asset.</p> pub fn set_bucket(mut self, input: std::option::Option<std::string::String>) -> Self { self.bucket = input; self } /// <p>The name of the object in Amazon S3 for the asset.</p> pub fn key(mut self, input: impl Into<std::string::String>) -> Self { self.key = Some(input.into()); self } /// <p>The name of the object in Amazon S3 for the asset.</p> pub fn set_key(mut self, input: std::option::Option<std::string::String>) -> Self { self.key = input; self } /// Consumes the builder and constructs a [`AssetSourceEntry`](crate::model::AssetSourceEntry) pub fn build(self) -> crate::model::AssetSourceEntry { crate::model::AssetSourceEntry { bucket: self.bucket, key: self.key, } } } } impl AssetSourceEntry { /// Creates a new builder-style object to manufacture [`AssetSourceEntry`](crate::model::AssetSourceEntry) pub fn builder() -> crate::model::asset_source_entry::Builder { crate::model::asset_source_entry::Builder::default() } } /// <p>Information about the job error.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct ImportAssetFromSignedUrlJobErrorDetails { /// <p>Information about the job error.</p> pub asset_name: std::option::Option<std::string::String>, } impl ImportAssetFromSignedUrlJobErrorDetails { /// 
<p>Information about the job error.</p> pub fn asset_name(&self) -> std::option::Option<&str> { self.asset_name.as_deref() } } impl std::fmt::Debug for ImportAssetFromSignedUrlJobErrorDetails { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("ImportAssetFromSignedUrlJobErrorDetails"); formatter.field("asset_name", &self.asset_name); formatter.finish() } } /// See [`ImportAssetFromSignedUrlJobErrorDetails`](crate::model::ImportAssetFromSignedUrlJobErrorDetails) pub mod import_asset_from_signed_url_job_error_details { /// A builder for [`ImportAssetFromSignedUrlJobErrorDetails`](crate::model::ImportAssetFromSignedUrlJobErrorDetails) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) asset_name: std::option::Option<std::string::String>, } impl Builder { /// <p>Information about the job error.</p> pub fn asset_name(mut self, input: impl Into<std::string::String>) -> Self { self.asset_name = Some(input.into()); self } /// <p>Information about the job error.</p> pub fn set_asset_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.asset_name = input; self } /// Consumes the builder and constructs a [`ImportAssetFromSignedUrlJobErrorDetails`](crate::model::ImportAssetFromSignedUrlJobErrorDetails) pub fn build(self) -> crate::model::ImportAssetFromSignedUrlJobErrorDetails { crate::model::ImportAssetFromSignedUrlJobErrorDetails { asset_name: self.asset_name, } } } } impl ImportAssetFromSignedUrlJobErrorDetails { /// Creates a new builder-style object to manufacture [`ImportAssetFromSignedUrlJobErrorDetails`](crate::model::ImportAssetFromSignedUrlJobErrorDetails) pub fn builder() -> crate::model::import_asset_from_signed_url_job_error_details::Builder { crate::model::import_asset_from_signed_url_job_error_details::Builder::default() } } #[allow(missing_docs)] // documentation missing in model 
// Error codes reported for AWS Data Exchange jobs.
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum Code {
    #[allow(missing_docs)] // documentation missing in model
    AccessDeniedException,
    #[allow(missing_docs)] // documentation missing in model
    InternalServerException,
    #[allow(missing_docs)] // documentation missing in model
    MalwareDetected,
    #[allow(missing_docs)] // documentation missing in model
    MalwareScanEncryptedFile,
    #[allow(missing_docs)] // documentation missing in model
    ResourceNotFoundException,
    #[allow(missing_docs)] // documentation missing in model
    ServiceQuotaExceededException,
    #[allow(missing_docs)] // documentation missing in model
    ValidationException,
    /// Unknown contains new variants that have been added since this code was generated.
    Unknown(String),
}
impl std::convert::From<&str> for Code {
    fn from(s: &str) -> Self {
        // Unrecognized wire values are preserved verbatim in `Unknown` so that
        // newer service responses do not break older clients.
        match s {
            "ACCESS_DENIED_EXCEPTION" => Code::AccessDeniedException,
            "INTERNAL_SERVER_EXCEPTION" => Code::InternalServerException,
            "MALWARE_DETECTED" => Code::MalwareDetected,
            "MALWARE_SCAN_ENCRYPTED_FILE" => Code::MalwareScanEncryptedFile,
            "RESOURCE_NOT_FOUND_EXCEPTION" => Code::ResourceNotFoundException,
            "SERVICE_QUOTA_EXCEEDED_EXCEPTION" => Code::ServiceQuotaExceededException,
            "VALIDATION_EXCEPTION" => Code::ValidationException,
            other => Code::Unknown(other.to_owned()),
        }
    }
}
impl std::str::FromStr for Code {
    type Err = std::convert::Infallible;

    // Parsing never fails: any unrecognized string becomes `Code::Unknown`.
    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        Ok(Code::from(s))
    }
}
impl Code {
    /// Returns the `&str` value of the enum member.
    pub fn as_str(&self) -> &str {
        match self {
            Code::AccessDeniedException => "ACCESS_DENIED_EXCEPTION",
            Code::InternalServerException => "INTERNAL_SERVER_EXCEPTION",
            Code::MalwareDetected => "MALWARE_DETECTED",
            Code::MalwareScanEncryptedFile => "MALWARE_SCAN_ENCRYPTED_FILE",
            Code::ResourceNotFoundException => "RESOURCE_NOT_FOUND_EXCEPTION",
            Code::ServiceQuotaExceededException => "SERVICE_QUOTA_EXCEEDED_EXCEPTION",
            Code::ValidationException => "VALIDATION_EXCEPTION",
            Code::Unknown(s) => s.as_ref(),
        }
    }
    /// Returns all the `&str` values of the enum members.
    pub fn values() -> &'static [&'static str] {
        &[
            "ACCESS_DENIED_EXCEPTION",
            "INTERNAL_SERVER_EXCEPTION",
            "MALWARE_DETECTED",
            "MALWARE_SCAN_ENCRYPTED_FILE",
            "RESOURCE_NOT_FOUND_EXCEPTION",
            "SERVICE_QUOTA_EXCEEDED_EXCEPTION",
            "VALIDATION_EXCEPTION",
        ]
    }
}
impl AsRef<str> for Code {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}
/// <p>Details for the response.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ResponseDetails {
    /// <p>Details for the export to signed URL response.</p>
    pub export_asset_to_signed_url:
        std::option::Option<crate::model::ExportAssetToSignedUrlResponseDetails>,
    /// <p>Details for the export to Amazon S3 response.</p>
    pub export_assets_to_s3: std::option::Option<crate::model::ExportAssetsToS3ResponseDetails>,
    /// <p>Details for the export revisions to Amazon S3 response.</p>
    pub export_revisions_to_s3:
        std::option::Option<crate::model::ExportRevisionsToS3ResponseDetails>,
    /// <p>Details for the import from signed URL response.</p>
    pub import_asset_from_signed_url:
        std::option::Option<crate::model::ImportAssetFromSignedUrlResponseDetails>,
    /// <p>Details for the import from Amazon S3 response.</p>
    pub import_assets_from_s3: std::option::Option<crate::model::ImportAssetsFromS3ResponseDetails>,
    /// <p>Details from an import from Amazon Redshift datashare response.</p>
    pub import_assets_from_redshift_data_shares:
        std::option::Option<crate::model::ImportAssetsFromRedshiftDataSharesResponseDetails>,
}
impl ResponseDetails {
    /// <p>Details for the export to signed URL response.</p>
    pub fn export_asset_to_signed_url(
        &self,
    ) -> std::option::Option<&crate::model::ExportAssetToSignedUrlResponseDetails> {
        self.export_asset_to_signed_url.as_ref()
    }
    /// <p>Details for the export to Amazon S3 response.</p>
    pub fn export_assets_to_s3(
        &self,
    ) -> std::option::Option<&crate::model::ExportAssetsToS3ResponseDetails> {
        self.export_assets_to_s3.as_ref()
    }
    /// <p>Details for the export revisions to Amazon S3 response.</p>
    pub fn export_revisions_to_s3(
        &self,
    ) -> std::option::Option<&crate::model::ExportRevisionsToS3ResponseDetails> {
        self.export_revisions_to_s3.as_ref()
    }
    /// <p>Details for the import from signed URL response.</p>
    pub fn import_asset_from_signed_url(
        &self,
    ) -> std::option::Option<&crate::model::ImportAssetFromSignedUrlResponseDetails> {
        self.import_asset_from_signed_url.as_ref()
    }
    /// <p>Details for the import from Amazon S3 response.</p>
    pub fn import_assets_from_s3(
        &self,
    ) -> std::option::Option<&crate::model::ImportAssetsFromS3ResponseDetails> {
        self.import_assets_from_s3.as_ref()
    }
    /// <p>Details from an import from Amazon Redshift datashare response.</p>
    pub fn import_assets_from_redshift_data_shares(
        &self,
    ) -> std::option::Option<&crate::model::ImportAssetsFromRedshiftDataSharesResponseDetails> {
        self.import_assets_from_redshift_data_shares.as_ref()
    }
}
impl std::fmt::Debug for ResponseDetails {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // NOTE: the remainder of this fn body is continued below; the local
        // must stay named `formatter` because the continuation refers to it.
        let mut formatter = f.debug_struct("ResponseDetails");
        formatter.field("export_asset_to_signed_url", &self.export_asset_to_signed_url);
        formatter.field("export_assets_to_s3", &self.export_assets_to_s3);
        formatter.field("export_revisions_to_s3", &self.export_revisions_to_s3);
        formatter.field("import_asset_from_signed_url", &self.import_asset_from_signed_url);
formatter.field("import_assets_from_s3", &self.import_assets_from_s3);
        formatter.field(
            "import_assets_from_redshift_data_shares",
            &self.import_assets_from_redshift_data_shares,
        );
        formatter.finish()
    }
}
/// See [`ResponseDetails`](crate::model::ResponseDetails)
pub mod response_details {
    /// A builder for [`ResponseDetails`](crate::model::ResponseDetails)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) export_asset_to_signed_url:
            std::option::Option<crate::model::ExportAssetToSignedUrlResponseDetails>,
        pub(crate) export_assets_to_s3:
            std::option::Option<crate::model::ExportAssetsToS3ResponseDetails>,
        pub(crate) export_revisions_to_s3:
            std::option::Option<crate::model::ExportRevisionsToS3ResponseDetails>,
        pub(crate) import_asset_from_signed_url:
            std::option::Option<crate::model::ImportAssetFromSignedUrlResponseDetails>,
        pub(crate) import_assets_from_s3:
            std::option::Option<crate::model::ImportAssetsFromS3ResponseDetails>,
        pub(crate) import_assets_from_redshift_data_shares:
            std::option::Option<crate::model::ImportAssetsFromRedshiftDataSharesResponseDetails>,
    }
    impl Builder {
        /// <p>Details for the export to signed URL response.</p>
        pub fn export_asset_to_signed_url(
            mut self,
            input: crate::model::ExportAssetToSignedUrlResponseDetails,
        ) -> Self {
            self.export_asset_to_signed_url = Some(input);
            self
        }
        /// <p>Details for the export to signed URL response.</p>
        pub fn set_export_asset_to_signed_url(
            mut self,
            input: std::option::Option<crate::model::ExportAssetToSignedUrlResponseDetails>,
        ) -> Self {
            self.export_asset_to_signed_url = input;
            self
        }
        /// <p>Details for the export to Amazon S3 response.</p>
        pub fn export_assets_to_s3(
            mut self,
            input: crate::model::ExportAssetsToS3ResponseDetails,
        ) -> Self {
            self.export_assets_to_s3 = Some(input);
            self
        }
        /// <p>Details for the export to Amazon S3 response.</p>
        pub fn set_export_assets_to_s3(
            mut self,
            input: std::option::Option<crate::model::ExportAssetsToS3ResponseDetails>,
        ) -> Self {
            self.export_assets_to_s3 = input;
            self
        }
        /// <p>Details for the export revisions to Amazon S3 response.</p>
        pub fn export_revisions_to_s3(
            mut self,
            input: crate::model::ExportRevisionsToS3ResponseDetails,
        ) -> Self {
            self.export_revisions_to_s3 = Some(input);
            self
        }
        /// <p>Details for the export revisions to Amazon S3 response.</p>
        pub fn set_export_revisions_to_s3(
            mut self,
            input: std::option::Option<crate::model::ExportRevisionsToS3ResponseDetails>,
        ) -> Self {
            self.export_revisions_to_s3 = input;
            self
        }
        /// <p>Details for the import from signed URL response.</p>
        pub fn import_asset_from_signed_url(
            mut self,
            input: crate::model::ImportAssetFromSignedUrlResponseDetails,
        ) -> Self {
            self.import_asset_from_signed_url = Some(input);
            self
        }
        /// <p>Details for the import from signed URL response.</p>
        pub fn set_import_asset_from_signed_url(
            mut self,
            input: std::option::Option<crate::model::ImportAssetFromSignedUrlResponseDetails>,
        ) -> Self {
            self.import_asset_from_signed_url = input;
            self
        }
        /// <p>Details for the import from Amazon S3 response.</p>
        pub fn import_assets_from_s3(
            mut self,
            input: crate::model::ImportAssetsFromS3ResponseDetails,
        ) -> Self {
            self.import_assets_from_s3 = Some(input);
            self
        }
        /// <p>Details for the import from Amazon S3 response.</p>
        pub fn set_import_assets_from_s3(
            mut self,
            input: std::option::Option<crate::model::ImportAssetsFromS3ResponseDetails>,
        ) -> Self {
            self.import_assets_from_s3 = input;
            self
        }
        /// <p>Details from an import from Amazon Redshift datashare response.</p>
        pub fn import_assets_from_redshift_data_shares(
            mut self,
            input: crate::model::ImportAssetsFromRedshiftDataSharesResponseDetails,
        ) -> Self {
            self.import_assets_from_redshift_data_shares = Some(input);
            self
        }
        /// <p>Details from an import from Amazon Redshift datashare response.</p>
        pub fn set_import_assets_from_redshift_data_shares(
            mut self,
            input: std::option::Option<
crate::model::ImportAssetsFromRedshiftDataSharesResponseDetails,
            >,
        ) -> Self {
            self.import_assets_from_redshift_data_shares = input;
            self
        }
        /// Consumes the builder and constructs a [`ResponseDetails`](crate::model::ResponseDetails)
        pub fn build(self) -> crate::model::ResponseDetails {
            crate::model::ResponseDetails {
                export_asset_to_signed_url: self.export_asset_to_signed_url,
                export_assets_to_s3: self.export_assets_to_s3,
                export_revisions_to_s3: self.export_revisions_to_s3,
                import_asset_from_signed_url: self.import_asset_from_signed_url,
                import_assets_from_s3: self.import_assets_from_s3,
                import_assets_from_redshift_data_shares: self
                    .import_assets_from_redshift_data_shares,
            }
        }
    }
}
impl ResponseDetails {
    /// Creates a new builder-style object to manufacture [`ResponseDetails`](crate::model::ResponseDetails)
    pub fn builder() -> crate::model::response_details::Builder {
        crate::model::response_details::Builder::default()
    }
}
/// Details from an import from Amazon Redshift datashare response.
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ImportAssetsFromRedshiftDataSharesResponseDetails {
    /// A list of Amazon Redshift datashare asset sources.
    pub asset_sources:
        std::option::Option<std::vec::Vec<crate::model::RedshiftDataShareAssetSourceEntry>>,
    /// The unique identifier for the data set associated with this import job.
    pub data_set_id: std::option::Option<std::string::String>,
    /// The unique identifier for the revision associated with this import job.
    pub revision_id: std::option::Option<std::string::String>,
}
impl ImportAssetsFromRedshiftDataSharesResponseDetails {
    /// A list of Amazon Redshift datashare asset sources.
    pub fn asset_sources(
        &self,
    ) -> std::option::Option<&[crate::model::RedshiftDataShareAssetSourceEntry]> {
        self.asset_sources.as_deref()
    }
    /// The unique identifier for the data set associated with this import job.
    pub fn data_set_id(&self) -> std::option::Option<&str> {
        self.data_set_id.as_deref()
    }
    /// The unique identifier for the revision associated with this import job.
    pub fn revision_id(&self) -> std::option::Option<&str> {
        self.revision_id.as_deref()
    }
}
impl std::fmt::Debug for ImportAssetsFromRedshiftDataSharesResponseDetails {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut dbg = f.debug_struct("ImportAssetsFromRedshiftDataSharesResponseDetails");
        dbg.field("asset_sources", &self.asset_sources);
        dbg.field("data_set_id", &self.data_set_id);
        dbg.field("revision_id", &self.revision_id);
        dbg.finish()
    }
}
/// See [`ImportAssetsFromRedshiftDataSharesResponseDetails`](crate::model::ImportAssetsFromRedshiftDataSharesResponseDetails)
pub mod import_assets_from_redshift_data_shares_response_details {
    /// A builder for [`ImportAssetsFromRedshiftDataSharesResponseDetails`](crate::model::ImportAssetsFromRedshiftDataSharesResponseDetails)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) asset_sources:
            std::option::Option<std::vec::Vec<crate::model::RedshiftDataShareAssetSourceEntry>>,
        pub(crate) data_set_id: std::option::Option<std::string::String>,
        pub(crate) revision_id: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// Appends an item to `asset_sources`.
        ///
        /// To override the contents of this collection use [`set_asset_sources`](Self::set_asset_sources).
        ///
        /// A list of Amazon Redshift datashare asset sources.
        pub fn asset_sources(
            mut self,
            input: impl Into<crate::model::RedshiftDataShareAssetSourceEntry>,
        ) -> Self {
            // Lazily create the backing Vec on first append.
            self.asset_sources
                .get_or_insert_with(Vec::new)
                .push(input.into());
            self
        }
        /// A list of Amazon Redshift datashare asset sources.
        pub fn set_asset_sources(
            mut self,
            input: std::option::Option<
                std::vec::Vec<crate::model::RedshiftDataShareAssetSourceEntry>,
            >,
        ) -> Self {
            self.asset_sources = input;
            self
        }
        /// The unique identifier for the data set associated with this import job.
        pub fn data_set_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.data_set_id = Some(input.into());
            self
        }
        /// The unique identifier for the data set associated with this import job.
        pub fn set_data_set_id(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.data_set_id = input;
            self
        }
        /// The unique identifier for the revision associated with this import job.
        pub fn revision_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.revision_id = Some(input.into());
            self
        }
        /// The unique identifier for the revision associated with this import job.
        pub fn set_revision_id(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.revision_id = input;
            self
        }
        /// Consumes the builder and constructs a [`ImportAssetsFromRedshiftDataSharesResponseDetails`](crate::model::ImportAssetsFromRedshiftDataSharesResponseDetails)
        pub fn build(self) -> crate::model::ImportAssetsFromRedshiftDataSharesResponseDetails {
            crate::model::ImportAssetsFromRedshiftDataSharesResponseDetails {
                asset_sources: self.asset_sources,
                data_set_id: self.data_set_id,
                revision_id: self.revision_id,
            }
        }
    }
}
impl ImportAssetsFromRedshiftDataSharesResponseDetails {
    /// Creates a new builder-style object to manufacture [`ImportAssetsFromRedshiftDataSharesResponseDetails`](crate::model::ImportAssetsFromRedshiftDataSharesResponseDetails)
    pub fn builder(
    ) -> crate::model::import_assets_from_redshift_data_shares_response_details::Builder {
        crate::model::import_assets_from_redshift_data_shares_response_details::Builder::default()
    }
}
/// <p>The source of the Amazon Redshift datashare asset.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct
RedshiftDataShareAssetSourceEntry { /// The Amazon Resource Name (ARN) of the datashare asset. pub data_share_arn: std::option::Option<std::string::String>, } impl RedshiftDataShareAssetSourceEntry { /// The Amazon Resource Name (ARN) of the datashare asset. pub fn data_share_arn(&self) -> std::option::Option<&str> { self.data_share_arn.as_deref() } } impl std::fmt::Debug for RedshiftDataShareAssetSourceEntry { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("RedshiftDataShareAssetSourceEntry"); formatter.field("data_share_arn", &self.data_share_arn); formatter.finish() } } /// See [`RedshiftDataShareAssetSourceEntry`](crate::model::RedshiftDataShareAssetSourceEntry) pub mod redshift_data_share_asset_source_entry { /// A builder for [`RedshiftDataShareAssetSourceEntry`](crate::model::RedshiftDataShareAssetSourceEntry) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) data_share_arn: std::option::Option<std::string::String>, } impl Builder { /// The Amazon Resource Name (ARN) of the datashare asset. pub fn data_share_arn(mut self, input: impl Into<std::string::String>) -> Self { self.data_share_arn = Some(input.into()); self } /// The Amazon Resource Name (ARN) of the datashare asset. 
pub fn set_data_share_arn( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.data_share_arn = input; self } /// Consumes the builder and constructs a [`RedshiftDataShareAssetSourceEntry`](crate::model::RedshiftDataShareAssetSourceEntry) pub fn build(self) -> crate::model::RedshiftDataShareAssetSourceEntry { crate::model::RedshiftDataShareAssetSourceEntry { data_share_arn: self.data_share_arn, } } } } impl RedshiftDataShareAssetSourceEntry { /// Creates a new builder-style object to manufacture [`RedshiftDataShareAssetSourceEntry`](crate::model::RedshiftDataShareAssetSourceEntry) pub fn builder() -> crate::model::redshift_data_share_asset_source_entry::Builder { crate::model::redshift_data_share_asset_source_entry::Builder::default() } } /// <p>Details from an import from Amazon S3 response.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct ImportAssetsFromS3ResponseDetails { /// <p>Is a list of Amazon S3 bucket and object key pairs.</p> pub asset_sources: std::option::Option<std::vec::Vec<crate::model::AssetSourceEntry>>, /// <p>The unique identifier for the data set associated with this import job.</p> pub data_set_id: std::option::Option<std::string::String>, /// <p>The unique identifier for the revision associated with this import response.</p> pub revision_id: std::option::Option<std::string::String>, } impl ImportAssetsFromS3ResponseDetails { /// <p>Is a list of Amazon S3 bucket and object key pairs.</p> pub fn asset_sources(&self) -> std::option::Option<&[crate::model::AssetSourceEntry]> { self.asset_sources.as_deref() } /// <p>The unique identifier for the data set associated with this import job.</p> pub fn data_set_id(&self) -> std::option::Option<&str> { self.data_set_id.as_deref() } /// <p>The unique identifier for the revision associated with this import response.</p> pub fn revision_id(&self) -> std::option::Option<&str> { self.revision_id.as_deref() } } impl std::fmt::Debug for 
ImportAssetsFromS3ResponseDetails { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("ImportAssetsFromS3ResponseDetails"); formatter.field("asset_sources", &self.asset_sources); formatter.field("data_set_id", &self.data_set_id); formatter.field("revision_id", &self.revision_id); formatter.finish() } } /// See [`ImportAssetsFromS3ResponseDetails`](crate::model::ImportAssetsFromS3ResponseDetails) pub mod import_assets_from_s3_response_details { /// A builder for [`ImportAssetsFromS3ResponseDetails`](crate::model::ImportAssetsFromS3ResponseDetails) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) asset_sources: std::option::Option<std::vec::Vec<crate::model::AssetSourceEntry>>, pub(crate) data_set_id: std::option::Option<std::string::String>, pub(crate) revision_id: std::option::Option<std::string::String>, } impl Builder { /// Appends an item to `asset_sources`. /// /// To override the contents of this collection use [`set_asset_sources`](Self::set_asset_sources). 
/// /// <p>Is a list of Amazon S3 bucket and object key pairs.</p> pub fn asset_sources(mut self, input: impl Into<crate::model::AssetSourceEntry>) -> Self { let mut v = self.asset_sources.unwrap_or_default(); v.push(input.into()); self.asset_sources = Some(v); self } /// <p>Is a list of Amazon S3 bucket and object key pairs.</p> pub fn set_asset_sources( mut self, input: std::option::Option<std::vec::Vec<crate::model::AssetSourceEntry>>, ) -> Self { self.asset_sources = input; self } /// <p>The unique identifier for the data set associated with this import job.</p> pub fn data_set_id(mut self, input: impl Into<std::string::String>) -> Self { self.data_set_id = Some(input.into()); self } /// <p>The unique identifier for the data set associated with this import job.</p> pub fn set_data_set_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.data_set_id = input; self } /// <p>The unique identifier for the revision associated with this import response.</p> pub fn revision_id(mut self, input: impl Into<std::string::String>) -> Self { self.revision_id = Some(input.into()); self } /// <p>The unique identifier for the revision associated with this import response.</p> pub fn set_revision_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.revision_id = input; self } /// Consumes the builder and constructs a [`ImportAssetsFromS3ResponseDetails`](crate::model::ImportAssetsFromS3ResponseDetails) pub fn build(self) -> crate::model::ImportAssetsFromS3ResponseDetails { crate::model::ImportAssetsFromS3ResponseDetails { asset_sources: self.asset_sources, data_set_id: self.data_set_id, revision_id: self.revision_id, } } } } impl ImportAssetsFromS3ResponseDetails { /// Creates a new builder-style object to manufacture [`ImportAssetsFromS3ResponseDetails`](crate::model::ImportAssetsFromS3ResponseDetails) pub fn builder() -> crate::model::import_assets_from_s3_response_details::Builder { 
crate::model::import_assets_from_s3_response_details::Builder::default() } } /// <p>The details in the response for an import request, including the signed URL and other information.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct ImportAssetFromSignedUrlResponseDetails { /// <p>The name for the asset associated with this import job.</p> pub asset_name: std::option::Option<std::string::String>, /// <p>The unique identifier for the data set associated with this import job.</p> pub data_set_id: std::option::Option<std::string::String>, /// <p>The Base64-encoded Md5 hash for the asset, used to ensure the integrity of the file at that location.</p> pub md5_hash: std::option::Option<std::string::String>, /// <p>The unique identifier for the revision associated with this import response.</p> pub revision_id: std::option::Option<std::string::String>, /// <p>The signed URL.</p> pub signed_url: std::option::Option<std::string::String>, /// <p>The time and date at which the signed URL expires, in ISO 8601 format.</p> pub signed_url_expires_at: std::option::Option<aws_smithy_types::DateTime>, } impl ImportAssetFromSignedUrlResponseDetails { /// <p>The name for the asset associated with this import job.</p> pub fn asset_name(&self) -> std::option::Option<&str> { self.asset_name.as_deref() } /// <p>The unique identifier for the data set associated with this import job.</p> pub fn data_set_id(&self) -> std::option::Option<&str> { self.data_set_id.as_deref() } /// <p>The Base64-encoded Md5 hash for the asset, used to ensure the integrity of the file at that location.</p> pub fn md5_hash(&self) -> std::option::Option<&str> { self.md5_hash.as_deref() } /// <p>The unique identifier for the revision associated with this import response.</p> pub fn revision_id(&self) -> std::option::Option<&str> { self.revision_id.as_deref() } /// <p>The signed URL.</p> pub fn signed_url(&self) -> std::option::Option<&str> { self.signed_url.as_deref() } /// <p>The 
time and date at which the signed URL expires, in ISO 8601 format.</p> pub fn signed_url_expires_at(&self) -> std::option::Option<&aws_smithy_types::DateTime> { self.signed_url_expires_at.as_ref() } } impl std::fmt::Debug for ImportAssetFromSignedUrlResponseDetails { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("ImportAssetFromSignedUrlResponseDetails"); formatter.field("asset_name", &self.asset_name); formatter.field("data_set_id", &self.data_set_id); formatter.field("md5_hash", &self.md5_hash); formatter.field("revision_id", &self.revision_id); formatter.field("signed_url", &self.signed_url); formatter.field("signed_url_expires_at", &self.signed_url_expires_at); formatter.finish() } } /// See [`ImportAssetFromSignedUrlResponseDetails`](crate::model::ImportAssetFromSignedUrlResponseDetails) pub mod import_asset_from_signed_url_response_details { /// A builder for [`ImportAssetFromSignedUrlResponseDetails`](crate::model::ImportAssetFromSignedUrlResponseDetails) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) asset_name: std::option::Option<std::string::String>, pub(crate) data_set_id: std::option::Option<std::string::String>, pub(crate) md5_hash: std::option::Option<std::string::String>, pub(crate) revision_id: std::option::Option<std::string::String>, pub(crate) signed_url: std::option::Option<std::string::String>, pub(crate) signed_url_expires_at: std::option::Option<aws_smithy_types::DateTime>, } impl Builder { /// <p>The name for the asset associated with this import job.</p> pub fn asset_name(mut self, input: impl Into<std::string::String>) -> Self { self.asset_name = Some(input.into()); self } /// <p>The name for the asset associated with this import job.</p> pub fn set_asset_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.asset_name = input; self } /// <p>The unique identifier 
for the data set associated with this import job.</p> pub fn data_set_id(mut self, input: impl Into<std::string::String>) -> Self { self.data_set_id = Some(input.into()); self } /// <p>The unique identifier for the data set associated with this import job.</p> pub fn set_data_set_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.data_set_id = input; self } /// <p>The Base64-encoded Md5 hash for the asset, used to ensure the integrity of the file at that location.</p> pub fn md5_hash(mut self, input: impl Into<std::string::String>) -> Self { self.md5_hash = Some(input.into()); self } /// <p>The Base64-encoded Md5 hash for the asset, used to ensure the integrity of the file at that location.</p> pub fn set_md5_hash(mut self, input: std::option::Option<std::string::String>) -> Self { self.md5_hash = input; self } /// <p>The unique identifier for the revision associated with this import response.</p> pub fn revision_id(mut self, input: impl Into<std::string::String>) -> Self { self.revision_id = Some(input.into()); self } /// <p>The unique identifier for the revision associated with this import response.</p> pub fn set_revision_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.revision_id = input; self } /// <p>The signed URL.</p> pub fn signed_url(mut self, input: impl Into<std::string::String>) -> Self { self.signed_url = Some(input.into()); self } /// <p>The signed URL.</p> pub fn set_signed_url(mut self, input: std::option::Option<std::string::String>) -> Self { self.signed_url = input; self } /// <p>The time and date at which the signed URL expires, in ISO 8601 format.</p> pub fn signed_url_expires_at(mut self, input: aws_smithy_types::DateTime) -> Self { self.signed_url_expires_at = Some(input); self } /// <p>The time and date at which the signed URL expires, in ISO 8601 format.</p> pub fn set_signed_url_expires_at( mut self, input: std::option::Option<aws_smithy_types::DateTime>, ) -> Self { 
self.signed_url_expires_at = input; self } /// Consumes the builder and constructs a [`ImportAssetFromSignedUrlResponseDetails`](crate::model::ImportAssetFromSignedUrlResponseDetails) pub fn build(self) -> crate::model::ImportAssetFromSignedUrlResponseDetails { crate::model::ImportAssetFromSignedUrlResponseDetails { asset_name: self.asset_name, data_set_id: self.data_set_id, md5_hash: self.md5_hash, revision_id: self.revision_id, signed_url: self.signed_url, signed_url_expires_at: self.signed_url_expires_at, } } } } impl ImportAssetFromSignedUrlResponseDetails { /// Creates a new builder-style object to manufacture [`ImportAssetFromSignedUrlResponseDetails`](crate::model::ImportAssetFromSignedUrlResponseDetails) pub fn builder() -> crate::model::import_asset_from_signed_url_response_details::Builder { crate::model::import_asset_from_signed_url_response_details::Builder::default() } } /// <p>Details about the export revisions to Amazon S3 response.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct ExportRevisionsToS3ResponseDetails { /// <p>The unique identifier for the data set associated with this export job.</p> pub data_set_id: std::option::Option<std::string::String>, /// <p>Encryption configuration of the export job.</p> pub encryption: std::option::Option<crate::model::ExportServerSideEncryption>, /// <p>The destination in Amazon S3 where the revision is exported.</p> pub revision_destinations: std::option::Option<std::vec::Vec<crate::model::RevisionDestinationEntry>>, /// <p>The Amazon Resource Name (ARN) of the event action.</p> pub event_action_arn: std::option::Option<std::string::String>, } impl ExportRevisionsToS3ResponseDetails { /// <p>The unique identifier for the data set associated with this export job.</p> pub fn data_set_id(&self) -> std::option::Option<&str> { self.data_set_id.as_deref() } /// <p>Encryption configuration of the export job.</p> pub fn encryption(&self) -> 
std::option::Option<&crate::model::ExportServerSideEncryption> { self.encryption.as_ref() } /// <p>The destination in Amazon S3 where the revision is exported.</p> pub fn revision_destinations( &self, ) -> std::option::Option<&[crate::model::RevisionDestinationEntry]> { self.revision_destinations.as_deref() } /// <p>The Amazon Resource Name (ARN) of the event action.</p> pub fn event_action_arn(&self) -> std::option::Option<&str> { self.event_action_arn.as_deref() } } impl std::fmt::Debug for ExportRevisionsToS3ResponseDetails { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("ExportRevisionsToS3ResponseDetails"); formatter.field("data_set_id", &self.data_set_id); formatter.field("encryption", &self.encryption); formatter.field("revision_destinations", &self.revision_destinations); formatter.field("event_action_arn", &self.event_action_arn); formatter.finish() } } /// See [`ExportRevisionsToS3ResponseDetails`](crate::model::ExportRevisionsToS3ResponseDetails) pub mod export_revisions_to_s3_response_details { /// A builder for [`ExportRevisionsToS3ResponseDetails`](crate::model::ExportRevisionsToS3ResponseDetails) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) data_set_id: std::option::Option<std::string::String>, pub(crate) encryption: std::option::Option<crate::model::ExportServerSideEncryption>, pub(crate) revision_destinations: std::option::Option<std::vec::Vec<crate::model::RevisionDestinationEntry>>, pub(crate) event_action_arn: std::option::Option<std::string::String>, } impl Builder { /// <p>The unique identifier for the data set associated with this export job.</p> pub fn data_set_id(mut self, input: impl Into<std::string::String>) -> Self { self.data_set_id = Some(input.into()); self } /// <p>The unique identifier for the data set associated with this export job.</p> pub fn set_data_set_id(mut self, input: 
std::option::Option<std::string::String>) -> Self { self.data_set_id = input; self } /// <p>Encryption configuration of the export job.</p> pub fn encryption(mut self, input: crate::model::ExportServerSideEncryption) -> Self { self.encryption = Some(input); self } /// <p>Encryption configuration of the export job.</p> pub fn set_encryption( mut self, input: std::option::Option<crate::model::ExportServerSideEncryption>, ) -> Self { self.encryption = input; self } /// Appends an item to `revision_destinations`. /// /// To override the contents of this collection use [`set_revision_destinations`](Self::set_revision_destinations). /// /// <p>The destination in Amazon S3 where the revision is exported.</p> pub fn revision_destinations( mut self, input: impl Into<crate::model::RevisionDestinationEntry>, ) -> Self { let mut v = self.revision_destinations.unwrap_or_default(); v.push(input.into()); self.revision_destinations = Some(v); self } /// <p>The destination in Amazon S3 where the revision is exported.</p> pub fn set_revision_destinations( mut self, input: std::option::Option<std::vec::Vec<crate::model::RevisionDestinationEntry>>, ) -> Self { self.revision_destinations = input; self } /// <p>The Amazon Resource Name (ARN) of the event action.</p> pub fn event_action_arn(mut self, input: impl Into<std::string::String>) -> Self { self.event_action_arn = Some(input.into()); self } /// <p>The Amazon Resource Name (ARN) of the event action.</p> pub fn set_event_action_arn( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.event_action_arn = input; self } /// Consumes the builder and constructs a [`ExportRevisionsToS3ResponseDetails`](crate::model::ExportRevisionsToS3ResponseDetails) pub fn build(self) -> crate::model::ExportRevisionsToS3ResponseDetails { crate::model::ExportRevisionsToS3ResponseDetails { data_set_id: self.data_set_id, encryption: self.encryption, revision_destinations: self.revision_destinations, event_action_arn: 
self.event_action_arn, } } } } impl ExportRevisionsToS3ResponseDetails { /// Creates a new builder-style object to manufacture [`ExportRevisionsToS3ResponseDetails`](crate::model::ExportRevisionsToS3ResponseDetails) pub fn builder() -> crate::model::export_revisions_to_s3_response_details::Builder { crate::model::export_revisions_to_s3_response_details::Builder::default() } } /// <p>The destination where the assets in the revision will be exported.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct RevisionDestinationEntry { /// <p>The S3 bucket that is the destination for the assets in the revision.</p> pub bucket: std::option::Option<std::string::String>, /// <p>A string representing the pattern for generated names of the individual assets in the revision. For more information about key patterns, see <a href="https://docs.aws.amazon.com/data-exchange/latest/userguide/jobs.html#revision-export-keypatterns">Key patterns when exporting revisions</a>.</p> pub key_pattern: std::option::Option<std::string::String>, /// <p>The unique identifier for the revision.</p> pub revision_id: std::option::Option<std::string::String>, } impl RevisionDestinationEntry { /// <p>The S3 bucket that is the destination for the assets in the revision.</p> pub fn bucket(&self) -> std::option::Option<&str> { self.bucket.as_deref() } /// <p>A string representing the pattern for generated names of the individual assets in the revision. 
For more information about key patterns, see <a href="https://docs.aws.amazon.com/data-exchange/latest/userguide/jobs.html#revision-export-keypatterns">Key patterns when exporting revisions</a>.</p> pub fn key_pattern(&self) -> std::option::Option<&str> { self.key_pattern.as_deref() } /// <p>The unique identifier for the revision.</p> pub fn revision_id(&self) -> std::option::Option<&str> { self.revision_id.as_deref() } } impl std::fmt::Debug for RevisionDestinationEntry { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("RevisionDestinationEntry"); formatter.field("bucket", &self.bucket); formatter.field("key_pattern", &self.key_pattern); formatter.field("revision_id", &self.revision_id); formatter.finish() } } /// See [`RevisionDestinationEntry`](crate::model::RevisionDestinationEntry) pub mod revision_destination_entry { /// A builder for [`RevisionDestinationEntry`](crate::model::RevisionDestinationEntry) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) bucket: std::option::Option<std::string::String>, pub(crate) key_pattern: std::option::Option<std::string::String>, pub(crate) revision_id: std::option::Option<std::string::String>, } impl Builder { /// <p>The S3 bucket that is the destination for the assets in the revision.</p> pub fn bucket(mut self, input: impl Into<std::string::String>) -> Self { self.bucket = Some(input.into()); self } /// <p>The S3 bucket that is the destination for the assets in the revision.</p> pub fn set_bucket(mut self, input: std::option::Option<std::string::String>) -> Self { self.bucket = input; self } /// <p>A string representing the pattern for generated names of the individual assets in the revision. 
For more information about key patterns, see <a href="https://docs.aws.amazon.com/data-exchange/latest/userguide/jobs.html#revision-export-keypatterns">Key patterns when exporting revisions</a>.</p> pub fn key_pattern(mut self, input: impl Into<std::string::String>) -> Self { self.key_pattern = Some(input.into()); self } /// <p>A string representing the pattern for generated names of the individual assets in the revision. For more information about key patterns, see <a href="https://docs.aws.amazon.com/data-exchange/latest/userguide/jobs.html#revision-export-keypatterns">Key patterns when exporting revisions</a>.</p> pub fn set_key_pattern(mut self, input: std::option::Option<std::string::String>) -> Self { self.key_pattern = input; self } /// <p>The unique identifier for the revision.</p> pub fn revision_id(mut self, input: impl Into<std::string::String>) -> Self { self.revision_id = Some(input.into()); self } /// <p>The unique identifier for the revision.</p> pub fn set_revision_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.revision_id = input; self } /// Consumes the builder and constructs a [`RevisionDestinationEntry`](crate::model::RevisionDestinationEntry) pub fn build(self) -> crate::model::RevisionDestinationEntry { crate::model::RevisionDestinationEntry { bucket: self.bucket, key_pattern: self.key_pattern, revision_id: self.revision_id, } } } } impl RevisionDestinationEntry { /// Creates a new builder-style object to manufacture [`RevisionDestinationEntry`](crate::model::RevisionDestinationEntry) pub fn builder() -> crate::model::revision_destination_entry::Builder { crate::model::revision_destination_entry::Builder::default() } } /// <p>Details about the export to Amazon S3 response.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct ExportAssetsToS3ResponseDetails { /// <p>The destination in Amazon S3 where the asset is exported.</p> pub asset_destinations: 
std::option::Option<std::vec::Vec<crate::model::AssetDestinationEntry>>, /// <p>The unique identifier for the data set associated with this export job.</p> pub data_set_id: std::option::Option<std::string::String>, /// <p>Encryption configuration of the export job.</p> pub encryption: std::option::Option<crate::model::ExportServerSideEncryption>, /// <p>The unique identifier for the revision associated with this export response.</p> pub revision_id: std::option::Option<std::string::String>, } impl ExportAssetsToS3ResponseDetails { /// <p>The destination in Amazon S3 where the asset is exported.</p> pub fn asset_destinations( &self, ) -> std::option::Option<&[crate::model::AssetDestinationEntry]> { self.asset_destinations.as_deref() } /// <p>The unique identifier for the data set associated with this export job.</p> pub fn data_set_id(&self) -> std::option::Option<&str> { self.data_set_id.as_deref() } /// <p>Encryption configuration of the export job.</p> pub fn encryption(&self) -> std::option::Option<&crate::model::ExportServerSideEncryption> { self.encryption.as_ref() } /// <p>The unique identifier for the revision associated with this export response.</p> pub fn revision_id(&self) -> std::option::Option<&str> { self.revision_id.as_deref() } } impl std::fmt::Debug for ExportAssetsToS3ResponseDetails { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("ExportAssetsToS3ResponseDetails"); formatter.field("asset_destinations", &self.asset_destinations); formatter.field("data_set_id", &self.data_set_id); formatter.field("encryption", &self.encryption); formatter.field("revision_id", &self.revision_id); formatter.finish() } } /// See [`ExportAssetsToS3ResponseDetails`](crate::model::ExportAssetsToS3ResponseDetails) pub mod export_assets_to_s3_response_details { /// A builder for [`ExportAssetsToS3ResponseDetails`](crate::model::ExportAssetsToS3ResponseDetails) #[non_exhaustive] #[derive(std::default::Default, 
std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) asset_destinations: std::option::Option<std::vec::Vec<crate::model::AssetDestinationEntry>>, pub(crate) data_set_id: std::option::Option<std::string::String>, pub(crate) encryption: std::option::Option<crate::model::ExportServerSideEncryption>, pub(crate) revision_id: std::option::Option<std::string::String>, } impl Builder { /// Appends an item to `asset_destinations`. /// /// To override the contents of this collection use [`set_asset_destinations`](Self::set_asset_destinations). /// /// <p>The destination in Amazon S3 where the asset is exported.</p> pub fn asset_destinations( mut self, input: impl Into<crate::model::AssetDestinationEntry>, ) -> Self { let mut v = self.asset_destinations.unwrap_or_default(); v.push(input.into()); self.asset_destinations = Some(v); self } /// <p>The destination in Amazon S3 where the asset is exported.</p> pub fn set_asset_destinations( mut self, input: std::option::Option<std::vec::Vec<crate::model::AssetDestinationEntry>>, ) -> Self { self.asset_destinations = input; self } /// <p>The unique identifier for the data set associated with this export job.</p> pub fn data_set_id(mut self, input: impl Into<std::string::String>) -> Self { self.data_set_id = Some(input.into()); self } /// <p>The unique identifier for the data set associated with this export job.</p> pub fn set_data_set_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.data_set_id = input; self } /// <p>Encryption configuration of the export job.</p> pub fn encryption(mut self, input: crate::model::ExportServerSideEncryption) -> Self { self.encryption = Some(input); self } /// <p>Encryption configuration of the export job.</p> pub fn set_encryption( mut self, input: std::option::Option<crate::model::ExportServerSideEncryption>, ) -> Self { self.encryption = input; self } /// <p>The unique identifier for the revision associated with this export 
response.</p> pub fn revision_id(mut self, input: impl Into<std::string::String>) -> Self { self.revision_id = Some(input.into()); self } /// <p>The unique identifier for the revision associated with this export response.</p> pub fn set_revision_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.revision_id = input; self } /// Consumes the builder and constructs a [`ExportAssetsToS3ResponseDetails`](crate::model::ExportAssetsToS3ResponseDetails) pub fn build(self) -> crate::model::ExportAssetsToS3ResponseDetails { crate::model::ExportAssetsToS3ResponseDetails { asset_destinations: self.asset_destinations, data_set_id: self.data_set_id, encryption: self.encryption, revision_id: self.revision_id, } } } } impl ExportAssetsToS3ResponseDetails { /// Creates a new builder-style object to manufacture [`ExportAssetsToS3ResponseDetails`](crate::model::ExportAssetsToS3ResponseDetails) pub fn builder() -> crate::model::export_assets_to_s3_response_details::Builder { crate::model::export_assets_to_s3_response_details::Builder::default() } } /// <p>The destination for the asset.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct AssetDestinationEntry { /// <p>The unique identifier for the asset.</p> pub asset_id: std::option::Option<std::string::String>, /// <p>The S3 bucket that is the destination for the asset.</p> pub bucket: std::option::Option<std::string::String>, /// <p>The name of the object in Amazon S3 for the asset.</p> pub key: std::option::Option<std::string::String>, } impl AssetDestinationEntry { /// <p>The unique identifier for the asset.</p> pub fn asset_id(&self) -> std::option::Option<&str> { self.asset_id.as_deref() } /// <p>The S3 bucket that is the destination for the asset.</p> pub fn bucket(&self) -> std::option::Option<&str> { self.bucket.as_deref() } /// <p>The name of the object in Amazon S3 for the asset.</p> pub fn key(&self) -> std::option::Option<&str> { self.key.as_deref() } } impl 
std::fmt::Debug for AssetDestinationEntry { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("AssetDestinationEntry"); formatter.field("asset_id", &self.asset_id); formatter.field("bucket", &self.bucket); formatter.field("key", &self.key); formatter.finish() } } /// See [`AssetDestinationEntry`](crate::model::AssetDestinationEntry) pub mod asset_destination_entry { /// A builder for [`AssetDestinationEntry`](crate::model::AssetDestinationEntry) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) asset_id: std::option::Option<std::string::String>, pub(crate) bucket: std::option::Option<std::string::String>, pub(crate) key: std::option::Option<std::string::String>, } impl Builder { /// <p>The unique identifier for the asset.</p> pub fn asset_id(mut self, input: impl Into<std::string::String>) -> Self { self.asset_id = Some(input.into()); self } /// <p>The unique identifier for the asset.</p> pub fn set_asset_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.asset_id = input; self } /// <p>The S3 bucket that is the destination for the asset.</p> pub fn bucket(mut self, input: impl Into<std::string::String>) -> Self { self.bucket = Some(input.into()); self } /// <p>The S3 bucket that is the destination for the asset.</p> pub fn set_bucket(mut self, input: std::option::Option<std::string::String>) -> Self { self.bucket = input; self } /// <p>The name of the object in Amazon S3 for the asset.</p> pub fn key(mut self, input: impl Into<std::string::String>) -> Self { self.key = Some(input.into()); self } /// <p>The name of the object in Amazon S3 for the asset.</p> pub fn set_key(mut self, input: std::option::Option<std::string::String>) -> Self { self.key = input; self } /// Consumes the builder and constructs a [`AssetDestinationEntry`](crate::model::AssetDestinationEntry) pub fn build(self) -> 
crate::model::AssetDestinationEntry { crate::model::AssetDestinationEntry { asset_id: self.asset_id, bucket: self.bucket, key: self.key, } } } } impl AssetDestinationEntry { /// Creates a new builder-style object to manufacture [`AssetDestinationEntry`](crate::model::AssetDestinationEntry) pub fn builder() -> crate::model::asset_destination_entry::Builder { crate::model::asset_destination_entry::Builder::default() } } /// <p>The details of the export to signed URL response.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct ExportAssetToSignedUrlResponseDetails { /// <p>The unique identifier for the asset associated with this export job.</p> pub asset_id: std::option::Option<std::string::String>, /// <p>The unique identifier for the data set associated with this export job.</p> pub data_set_id: std::option::Option<std::string::String>, /// <p>The unique identifier for the revision associated with this export response.</p> pub revision_id: std::option::Option<std::string::String>, /// <p>The signed URL for the export request.</p> pub signed_url: std::option::Option<std::string::String>, /// <p>The date and time that the signed URL expires, in ISO 8601 format.</p> pub signed_url_expires_at: std::option::Option<aws_smithy_types::DateTime>, } impl ExportAssetToSignedUrlResponseDetails { /// <p>The unique identifier for the asset associated with this export job.</p> pub fn asset_id(&self) -> std::option::Option<&str> { self.asset_id.as_deref() } /// <p>The unique identifier for the data set associated with this export job.</p> pub fn data_set_id(&self) -> std::option::Option<&str> { self.data_set_id.as_deref() } /// <p>The unique identifier for the revision associated with this export response.</p> pub fn revision_id(&self) -> std::option::Option<&str> { self.revision_id.as_deref() } /// <p>The signed URL for the export request.</p> pub fn signed_url(&self) -> std::option::Option<&str> { self.signed_url.as_deref() } /// <p>The date and 
time that the signed URL expires, in ISO 8601 format.</p> pub fn signed_url_expires_at(&self) -> std::option::Option<&aws_smithy_types::DateTime> { self.signed_url_expires_at.as_ref() } } impl std::fmt::Debug for ExportAssetToSignedUrlResponseDetails { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("ExportAssetToSignedUrlResponseDetails"); formatter.field("asset_id", &self.asset_id); formatter.field("data_set_id", &self.data_set_id); formatter.field("revision_id", &self.revision_id); formatter.field("signed_url", &self.signed_url); formatter.field("signed_url_expires_at", &self.signed_url_expires_at); formatter.finish() } } /// See [`ExportAssetToSignedUrlResponseDetails`](crate::model::ExportAssetToSignedUrlResponseDetails) pub mod export_asset_to_signed_url_response_details { /// A builder for [`ExportAssetToSignedUrlResponseDetails`](crate::model::ExportAssetToSignedUrlResponseDetails) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) asset_id: std::option::Option<std::string::String>, pub(crate) data_set_id: std::option::Option<std::string::String>, pub(crate) revision_id: std::option::Option<std::string::String>, pub(crate) signed_url: std::option::Option<std::string::String>, pub(crate) signed_url_expires_at: std::option::Option<aws_smithy_types::DateTime>, } impl Builder { /// <p>The unique identifier for the asset associated with this export job.</p> pub fn asset_id(mut self, input: impl Into<std::string::String>) -> Self { self.asset_id = Some(input.into()); self } /// <p>The unique identifier for the asset associated with this export job.</p> pub fn set_asset_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.asset_id = input; self } /// <p>The unique identifier for the data set associated with this export job.</p> pub fn data_set_id(mut self, input: impl Into<std::string::String>) -> 
Self { self.data_set_id = Some(input.into()); self } /// <p>The unique identifier for the data set associated with this export job.</p> pub fn set_data_set_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.data_set_id = input; self } /// <p>The unique identifier for the revision associated with this export response.</p> pub fn revision_id(mut self, input: impl Into<std::string::String>) -> Self { self.revision_id = Some(input.into()); self } /// <p>The unique identifier for the revision associated with this export response.</p> pub fn set_revision_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.revision_id = input; self } /// <p>The signed URL for the export request.</p> pub fn signed_url(mut self, input: impl Into<std::string::String>) -> Self { self.signed_url = Some(input.into()); self } /// <p>The signed URL for the export request.</p> pub fn set_signed_url(mut self, input: std::option::Option<std::string::String>) -> Self { self.signed_url = input; self } /// <p>The date and time that the signed URL expires, in ISO 8601 format.</p> pub fn signed_url_expires_at(mut self, input: aws_smithy_types::DateTime) -> Self { self.signed_url_expires_at = Some(input); self } /// <p>The date and time that the signed URL expires, in ISO 8601 format.</p> pub fn set_signed_url_expires_at( mut self, input: std::option::Option<aws_smithy_types::DateTime>, ) -> Self { self.signed_url_expires_at = input; self } /// Consumes the builder and constructs a [`ExportAssetToSignedUrlResponseDetails`](crate::model::ExportAssetToSignedUrlResponseDetails) pub fn build(self) -> crate::model::ExportAssetToSignedUrlResponseDetails { crate::model::ExportAssetToSignedUrlResponseDetails { asset_id: self.asset_id, data_set_id: self.data_set_id, revision_id: self.revision_id, signed_url: self.signed_url, signed_url_expires_at: self.signed_url_expires_at, } } } } impl ExportAssetToSignedUrlResponseDetails { /// Creates a new builder-style 
object to manufacture [`ExportAssetToSignedUrlResponseDetails`](crate::model::ExportAssetToSignedUrlResponseDetails)
    pub fn builder() -> crate::model::export_asset_to_signed_url_response_details::Builder {
        crate::model::export_asset_to_signed_url_response_details::Builder::default()
    }
}
/// <p>An event action is an object that defines the relationship between a specific event and an automated action that will be taken on behalf of the customer.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct EventActionEntry {
    /// <p>What occurs after a certain event.</p>
    pub action: std::option::Option<crate::model::Action>,
    /// <p>The Amazon Resource Name (ARN) for the event action.</p>
    pub arn: std::option::Option<std::string::String>,
    /// <p>The date and time that the event action was created, in ISO 8601 format.</p>
    pub created_at: std::option::Option<aws_smithy_types::DateTime>,
    /// <p>What occurs to start an action.</p>
    pub event: std::option::Option<crate::model::Event>,
    /// <p>The unique identifier for the event action.</p>
    pub id: std::option::Option<std::string::String>,
    /// <p>The date and time that the event action was last updated, in ISO 8601 format.</p>
    pub updated_at: std::option::Option<aws_smithy_types::DateTime>,
}
// Borrowing accessors: each getter returns a reference into the entry, never a clone.
impl EventActionEntry {
    /// <p>What occurs after a certain event.</p>
    pub fn action(&self) -> std::option::Option<&crate::model::Action> {
        self.action.as_ref()
    }
    /// <p>The Amazon Resource Name (ARN) for the event action.</p>
    pub fn arn(&self) -> std::option::Option<&str> {
        self.arn.as_deref()
    }
    /// <p>The date and time that the event action was created, in ISO 8601 format.</p>
    pub fn created_at(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
        self.created_at.as_ref()
    }
    /// <p>What occurs to start an action.</p>
    pub fn event(&self) -> std::option::Option<&crate::model::Event> {
        self.event.as_ref()
    }
    /// <p>The unique identifier for the event action.</p>
    pub fn id(&self) -> std::option::Option<&str> {
        self.id.as_deref()
    }
    /// <p>The date and time that the event action was last updated, in ISO 8601 format.</p>
    pub fn updated_at(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
        self.updated_at.as_ref()
    }
}
impl std::fmt::Debug for EventActionEntry {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("EventActionEntry");
        formatter.field("action", &self.action);
        formatter.field("arn", &self.arn);
        formatter.field("created_at", &self.created_at);
        formatter.field("event", &self.event);
        formatter.field("id", &self.id);
        formatter.field("updated_at", &self.updated_at);
        formatter.finish()
    }
}
/// See [`EventActionEntry`](crate::model::EventActionEntry)
pub mod event_action_entry {
    /// A builder for [`EventActionEntry`](crate::model::EventActionEntry)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) action: std::option::Option<crate::model::Action>,
        pub(crate) arn: std::option::Option<std::string::String>,
        pub(crate) created_at: std::option::Option<aws_smithy_types::DateTime>,
        pub(crate) event: std::option::Option<crate::model::Event>,
        pub(crate) id: std::option::Option<std::string::String>,
        pub(crate) updated_at: std::option::Option<aws_smithy_types::DateTime>,
    }
    impl Builder {
        /// <p>What occurs after a certain event.</p>
        pub fn action(mut self, input: crate::model::Action) -> Self {
            self.action = Some(input);
            self
        }
        /// <p>What occurs after a certain event.</p>
        pub fn set_action(mut self, input: std::option::Option<crate::model::Action>) -> Self {
            self.action = input;
            self
        }
        /// <p>The Amazon Resource Name (ARN) for the event action.</p>
        pub fn arn(mut self, input: impl Into<std::string::String>) -> Self {
            self.arn = Some(input.into());
            self
        }
        /// <p>The Amazon Resource Name (ARN) for the event action.</p>
        pub fn set_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.arn = input;
            self
        }
        /// <p>The date and time that the event action was created, in ISO 8601 format.</p>
        pub fn created_at(mut self, input: aws_smithy_types::DateTime) -> Self {
            self.created_at = Some(input);
            self
        }
        /// <p>The date and time that the event action was created, in ISO 8601 format.</p>
        pub fn set_created_at(
            mut self,
            input: std::option::Option<aws_smithy_types::DateTime>,
        ) -> Self {
            self.created_at = input;
            self
        }
        /// <p>What occurs to start an action.</p>
        pub fn event(mut self, input: crate::model::Event) -> Self {
            self.event = Some(input);
            self
        }
        /// <p>What occurs to start an action.</p>
        pub fn set_event(mut self, input: std::option::Option<crate::model::Event>) -> Self {
            self.event = input;
            self
        }
        /// <p>The unique identifier for the event action.</p>
        pub fn id(mut self, input: impl Into<std::string::String>) -> Self {
            self.id = Some(input.into());
            self
        }
        /// <p>The unique identifier for the event action.</p>
        pub fn set_id(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.id = input;
            self
        }
        /// <p>The date and time that the event action was last updated, in ISO 8601 format.</p>
        pub fn updated_at(mut self, input: aws_smithy_types::DateTime) -> Self {
            self.updated_at = Some(input);
            self
        }
        /// <p>The date and time that the event action was last updated, in ISO 8601 format.</p>
        pub fn set_updated_at(
            mut self,
            input: std::option::Option<aws_smithy_types::DateTime>,
        ) -> Self {
            self.updated_at = input;
            self
        }
        /// Consumes the builder and constructs a [`EventActionEntry`](crate::model::EventActionEntry)
        // Infallible: every field is optional and is moved through unchanged.
        pub fn build(self) -> crate::model::EventActionEntry {
            crate::model::EventActionEntry {
                action: self.action,
                arn: self.arn,
                created_at: self.created_at,
                event: self.event,
                id: self.id,
                updated_at: self.updated_at,
            }
        }
    }
}
impl EventActionEntry {
    /// Creates a new builder-style object to manufacture [`EventActionEntry`](crate::model::EventActionEntry)
    pub fn builder() -> crate::model::event_action_entry::Builder {
crate::model::event_action_entry::Builder::default()
    }
}
/// <p>A data set is an AWS resource with one or more revisions.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DataSetEntry {
    /// <p>The ARN for the data set.</p>
    pub arn: std::option::Option<std::string::String>,
    /// <p>The type of asset that is added to a data set.</p>
    pub asset_type: std::option::Option<crate::model::AssetType>,
    /// <p>The date and time that the data set was created, in ISO 8601 format.</p>
    pub created_at: std::option::Option<aws_smithy_types::DateTime>,
    /// <p>The description for the data set.</p>
    pub description: std::option::Option<std::string::String>,
    /// <p>The unique identifier for the data set.</p>
    pub id: std::option::Option<std::string::String>,
    /// <p>The name of the data set.</p>
    pub name: std::option::Option<std::string::String>,
    /// <p>A property that defines the data set as OWNED by the account (for providers) or ENTITLED to the account (for subscribers).</p>
    pub origin: std::option::Option<crate::model::Origin>,
    /// <p>If the origin of this data set is ENTITLED, includes the details for the product on AWS Marketplace.</p>
    pub origin_details: std::option::Option<crate::model::OriginDetails>,
    /// <p>The data set ID of the owned data set corresponding to the entitled data set being viewed. This parameter is returned when a data set owner is viewing the entitled copy of its owned data set.</p>
    pub source_id: std::option::Option<std::string::String>,
    /// <p>The date and time that the data set was last updated, in ISO 8601 format.</p>
    pub updated_at: std::option::Option<aws_smithy_types::DateTime>,
}
// Borrowing accessors: each getter returns a reference into the entry, never a clone.
impl DataSetEntry {
    /// <p>The ARN for the data set.</p>
    pub fn arn(&self) -> std::option::Option<&str> {
        self.arn.as_deref()
    }
    /// <p>The type of asset that is added to a data set.</p>
    pub fn asset_type(&self) -> std::option::Option<&crate::model::AssetType> {
        self.asset_type.as_ref()
    }
    /// <p>The date and time that the data set was created, in ISO 8601 format.</p>
    pub fn created_at(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
        self.created_at.as_ref()
    }
    /// <p>The description for the data set.</p>
    pub fn description(&self) -> std::option::Option<&str> {
        self.description.as_deref()
    }
    /// <p>The unique identifier for the data set.</p>
    pub fn id(&self) -> std::option::Option<&str> {
        self.id.as_deref()
    }
    /// <p>The name of the data set.</p>
    pub fn name(&self) -> std::option::Option<&str> {
        self.name.as_deref()
    }
    /// <p>A property that defines the data set as OWNED by the account (for providers) or ENTITLED to the account (for subscribers).</p>
    pub fn origin(&self) -> std::option::Option<&crate::model::Origin> {
        self.origin.as_ref()
    }
    /// <p>If the origin of this data set is ENTITLED, includes the details for the product on AWS Marketplace.</p>
    pub fn origin_details(&self) -> std::option::Option<&crate::model::OriginDetails> {
        self.origin_details.as_ref()
    }
    /// <p>The data set ID of the owned data set corresponding to the entitled data set being viewed. This parameter is returned when a data set owner is viewing the entitled copy of its owned data set.</p>
    pub fn source_id(&self) -> std::option::Option<&str> {
        self.source_id.as_deref()
    }
    /// <p>The date and time that the data set was last updated, in ISO 8601 format.</p>
    pub fn updated_at(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
        self.updated_at.as_ref()
    }
}
impl std::fmt::Debug for DataSetEntry {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("DataSetEntry");
        formatter.field("arn", &self.arn);
        formatter.field("asset_type", &self.asset_type);
        formatter.field("created_at", &self.created_at);
        formatter.field("description", &self.description);
        formatter.field("id", &self.id);
        formatter.field("name", &self.name);
        formatter.field("origin", &self.origin);
        formatter.field("origin_details", &self.origin_details);
        formatter.field("source_id", &self.source_id);
        formatter.field("updated_at", &self.updated_at);
        formatter.finish()
    }
}
/// See [`DataSetEntry`](crate::model::DataSetEntry)
pub mod data_set_entry {
    /// A builder for [`DataSetEntry`](crate::model::DataSetEntry)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) arn: std::option::Option<std::string::String>,
        pub(crate) asset_type: std::option::Option<crate::model::AssetType>,
        pub(crate) created_at: std::option::Option<aws_smithy_types::DateTime>,
        pub(crate) description: std::option::Option<std::string::String>,
        pub(crate) id: std::option::Option<std::string::String>,
        pub(crate) name: std::option::Option<std::string::String>,
        pub(crate) origin: std::option::Option<crate::model::Origin>,
        pub(crate) origin_details: std::option::Option<crate::model::OriginDetails>,
        pub(crate) source_id: std::option::Option<std::string::String>,
        pub(crate) updated_at: std::option::Option<aws_smithy_types::DateTime>,
    }
    impl Builder {
        /// <p>The ARN for the data set.</p>
        pub fn arn(mut self, input: impl Into<std::string::String>) -> Self {
            self.arn = Some(input.into());
            self
        }
        /// <p>The ARN for the data set.</p>
        pub fn set_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.arn = input;
            self
        }
        /// <p>The type of asset that is added to a data set.</p>
        pub fn asset_type(mut self, input: crate::model::AssetType) -> Self {
            self.asset_type = Some(input);
            self
        }
        /// <p>The type of asset that is added to a data set.</p>
        pub fn set_asset_type(
            mut self,
            input: std::option::Option<crate::model::AssetType>,
        ) -> Self {
            self.asset_type = input;
            self
        }
        /// <p>The date and time that the data set was created, in ISO 8601 format.</p>
        pub fn created_at(mut self, input: aws_smithy_types::DateTime) -> Self {
            self.created_at = Some(input);
            self
        }
        /// <p>The date and time that the data set was created, in ISO 8601 format.</p>
        pub fn set_created_at(
            mut self,
            input: std::option::Option<aws_smithy_types::DateTime>,
        ) -> Self {
            self.created_at = input;
            self
        }
        /// <p>The description for the data set.</p>
        pub fn description(mut self, input: impl Into<std::string::String>) -> Self {
            self.description = Some(input.into());
            self
        }
        /// <p>The description for the data set.</p>
        pub fn set_description(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.description = input;
            self
        }
        /// <p>The unique identifier for the data set.</p>
        pub fn id(mut self, input: impl Into<std::string::String>) -> Self {
            self.id = Some(input.into());
            self
        }
        /// <p>The unique identifier for the data set.</p>
        pub fn set_id(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.id = input;
            self
        }
        /// <p>The name of the data set.</p>
        pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
            self.name = Some(input.into());
            self
        }
        /// <p>The name of the data set.</p>
        pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.name = input;
            self
        }
        /// <p>A property that defines the data set as OWNED by the account (for providers) or ENTITLED to the account (for subscribers).</p>
        pub fn origin(mut self, input: crate::model::Origin) -> Self {
            self.origin = Some(input);
            self
        }
        /// <p>A property that defines the data set as OWNED by the account (for providers) or ENTITLED to the account (for subscribers).</p>
        pub fn set_origin(mut self, input: std::option::Option<crate::model::Origin>) -> Self {
            self.origin = input;
            self
        }
        /// <p>If the origin of this data set is ENTITLED, includes the details for the product on AWS Marketplace.</p>
        pub fn origin_details(mut self, input: crate::model::OriginDetails) -> Self {
            self.origin_details = Some(input);
            self
        }
        /// <p>If the origin of this data set is ENTITLED, includes the details for the product on AWS Marketplace.</p>
        pub fn set_origin_details(
            mut self,
            input: std::option::Option<crate::model::OriginDetails>,
        ) -> Self {
            self.origin_details = input;
            self
        }
        /// <p>The data set ID of the owned data set corresponding to the entitled data set being viewed. This parameter is returned when a data set owner is viewing the entitled copy of its owned data set.</p>
        pub fn source_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.source_id = Some(input.into());
            self
        }
        /// <p>The data set ID of the owned data set corresponding to the entitled data set being viewed. This parameter is returned when a data set owner is viewing the entitled copy of its owned data set.</p>
        pub fn set_source_id(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.source_id = input;
            self
        }
        /// <p>The date and time that the data set was last updated, in ISO 8601 format.</p>
        pub fn updated_at(mut self, input: aws_smithy_types::DateTime) -> Self {
            self.updated_at = Some(input);
            self
        }
        /// <p>The date and time that the data set was last updated, in ISO 8601 format.</p>
        pub fn set_updated_at(
            mut self,
            input: std::option::Option<aws_smithy_types::DateTime>,
        ) -> Self {
            self.updated_at = input;
            self
        }
        /// Consumes the builder and constructs a [`DataSetEntry`](crate::model::DataSetEntry)
        // Infallible: every field is optional and is moved through unchanged.
        pub fn build(self) -> crate::model::DataSetEntry {
            crate::model::DataSetEntry {
                arn: self.arn,
                asset_type: self.asset_type,
                created_at: self.created_at,
                description: self.description,
                id: self.id,
                name: self.name,
                origin: self.origin,
                origin_details: self.origin_details,
                source_id: self.source_id,
                updated_at: self.updated_at,
            }
        }
    }
}
impl DataSetEntry {
    /// Creates a new builder-style object to manufacture [`DataSetEntry`](crate::model::DataSetEntry)
    pub fn builder() -> crate::model::data_set_entry::Builder {
        crate::model::data_set_entry::Builder::default()
    }
}
/// <p>A revision is a container for one or more assets.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct RevisionEntry {
    /// <p>The ARN for the revision.</p>
    pub arn: std::option::Option<std::string::String>,
    /// <p>An optional comment about the revision.</p>
    pub comment: std::option::Option<std::string::String>,
    /// <p>The date and time that the revision was created, in ISO 8601 format.</p>
    pub created_at: std::option::Option<aws_smithy_types::DateTime>,
    /// <p>The unique identifier for the data set associated with this revision.</p>
    pub data_set_id: std::option::Option<std::string::String>,
    /// <p>To publish a revision to a data set in a product, the revision
must first be finalized. Finalizing a revision tells AWS Data Exchange that your changes to the assets in the revision are complete. After it's in this read-only state, you can publish the revision to your products.</p> <p>Finalized revisions can be published through the AWS Data Exchange console or the AWS Marketplace Catalog API, using the StartChangeSet AWS Marketplace Catalog API action. When using the API, revisions are uniquely identified by their ARN.</p>
    pub finalized: bool,
    /// <p>The unique identifier for the revision.</p>
    pub id: std::option::Option<std::string::String>,
    /// <p>The revision ID of the owned revision corresponding to the entitled revision being viewed. This parameter is returned when a revision owner is viewing the entitled copy of its owned revision.</p>
    pub source_id: std::option::Option<std::string::String>,
    /// <p>The date and time that the revision was last updated, in ISO 8601 format.</p>
    pub updated_at: std::option::Option<aws_smithy_types::DateTime>,
}
// Borrowing accessors; `finalized` is the lone plain bool and is returned by value.
impl RevisionEntry {
    /// <p>The ARN for the revision.</p>
    pub fn arn(&self) -> std::option::Option<&str> {
        self.arn.as_deref()
    }
    /// <p>An optional comment about the revision.</p>
    pub fn comment(&self) -> std::option::Option<&str> {
        self.comment.as_deref()
    }
    /// <p>The date and time that the revision was created, in ISO 8601 format.</p>
    pub fn created_at(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
        self.created_at.as_ref()
    }
    /// <p>The unique identifier for the data set associated with this revision.</p>
    pub fn data_set_id(&self) -> std::option::Option<&str> {
        self.data_set_id.as_deref()
    }
    /// <p>To publish a revision to a data set in a product, the revision must first be finalized. Finalizing a revision tells AWS Data Exchange that your changes to the assets in the revision are complete. After it's in this read-only state, you can publish the revision to your products.</p> <p>Finalized revisions can be published through the AWS Data Exchange console or the AWS Marketplace Catalog API, using the StartChangeSet AWS Marketplace Catalog API action. When using the API, revisions are uniquely identified by their ARN.</p>
    pub fn finalized(&self) -> bool {
        self.finalized
    }
    /// <p>The unique identifier for the revision.</p>
    pub fn id(&self) -> std::option::Option<&str> {
        self.id.as_deref()
    }
    /// <p>The revision ID of the owned revision corresponding to the entitled revision being viewed. This parameter is returned when a revision owner is viewing the entitled copy of its owned revision.</p>
    pub fn source_id(&self) -> std::option::Option<&str> {
        self.source_id.as_deref()
    }
    /// <p>The date and time that the revision was last updated, in ISO 8601 format.</p>
    pub fn updated_at(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
        self.updated_at.as_ref()
    }
}
impl std::fmt::Debug for RevisionEntry {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("RevisionEntry");
        formatter.field("arn", &self.arn);
        formatter.field("comment", &self.comment);
        formatter.field("created_at", &self.created_at);
        formatter.field("data_set_id", &self.data_set_id);
        formatter.field("finalized", &self.finalized);
        formatter.field("id", &self.id);
        formatter.field("source_id", &self.source_id);
        formatter.field("updated_at", &self.updated_at);
        formatter.finish()
    }
}
/// See [`RevisionEntry`](crate::model::RevisionEntry)
pub mod revision_entry {
    /// A builder for [`RevisionEntry`](crate::model::RevisionEntry)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) arn: std::option::Option<std::string::String>,
        pub(crate) comment: std::option::Option<std::string::String>,
        pub(crate) created_at: std::option::Option<aws_smithy_types::DateTime>,
        pub(crate) data_set_id: std::option::Option<std::string::String>,
        // Tracked as Option<bool> so `build` can distinguish "never set" (defaults to false).
        pub(crate) finalized: std::option::Option<bool>,
        pub(crate) id: std::option::Option<std::string::String>,
        pub(crate) source_id: std::option::Option<std::string::String>,
        pub(crate) updated_at: std::option::Option<aws_smithy_types::DateTime>,
    }
    impl Builder {
        /// <p>The ARN for the revision.</p>
        pub fn arn(mut self, input: impl Into<std::string::String>) -> Self {
            self.arn = Some(input.into());
            self
        }
        /// <p>The ARN for the revision.</p>
        pub fn set_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.arn = input;
            self
        }
        /// <p>An optional comment about the revision.</p>
        pub fn comment(mut self, input: impl Into<std::string::String>) -> Self {
            self.comment = Some(input.into());
            self
        }
        /// <p>An optional comment about the revision.</p>
        pub fn set_comment(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.comment = input;
            self
        }
        /// <p>The date and time that the revision was created, in ISO 8601 format.</p>
        pub fn created_at(mut self, input: aws_smithy_types::DateTime) -> Self {
            self.created_at = Some(input);
            self
        }
        /// <p>The date and time that the revision was created, in ISO 8601 format.</p>
        pub fn set_created_at(
            mut self,
            input: std::option::Option<aws_smithy_types::DateTime>,
        ) -> Self {
            self.created_at = input;
            self
        }
        /// <p>The unique identifier for the data set associated with this revision.</p>
        pub fn data_set_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.data_set_id = Some(input.into());
            self
        }
        /// <p>The unique identifier for the data set associated with this revision.</p>
        pub fn set_data_set_id(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.data_set_id = input;
            self
        }
        /// <p>To publish a revision to a data set in a product, the revision must first be finalized. Finalizing a revision tells AWS Data Exchange that your changes to the assets in the revision are complete. After it's in this read-only state, you can publish the revision to your products.</p> <p>Finalized revisions can be published through the AWS Data Exchange console or the AWS Marketplace Catalog API, using the StartChangeSet AWS Marketplace Catalog API action. When using the API, revisions are uniquely identified by their ARN.</p>
        pub fn finalized(mut self, input: bool) -> Self {
            self.finalized = Some(input);
            self
        }
        /// <p>To publish a revision to a data set in a product, the revision must first be finalized. Finalizing a revision tells AWS Data Exchange that your changes to the assets in the revision are complete. After it's in this read-only state, you can publish the revision to your products.</p> <p>Finalized revisions can be published through the AWS Data Exchange console or the AWS Marketplace Catalog API, using the StartChangeSet AWS Marketplace Catalog API action. When using the API, revisions are uniquely identified by their ARN.</p>
        pub fn set_finalized(mut self, input: std::option::Option<bool>) -> Self {
            self.finalized = input;
            self
        }
        /// <p>The unique identifier for the revision.</p>
        pub fn id(mut self, input: impl Into<std::string::String>) -> Self {
            self.id = Some(input.into());
            self
        }
        /// <p>The unique identifier for the revision.</p>
        pub fn set_id(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.id = input;
            self
        }
        /// <p>The revision ID of the owned revision corresponding to the entitled revision being viewed. This parameter is returned when a revision owner is viewing the entitled copy of its owned revision.</p>
        pub fn source_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.source_id = Some(input.into());
            self
        }
        /// <p>The revision ID of the owned revision corresponding to the entitled revision being viewed. This parameter is returned when a revision owner is viewing the entitled copy of its owned revision.</p>
        pub fn set_source_id(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.source_id = input;
            self
        }
        /// <p>The date and time that the revision was last updated, in ISO 8601 format.</p>
        pub fn updated_at(mut self, input: aws_smithy_types::DateTime) -> Self {
            self.updated_at = Some(input);
            self
        }
        /// <p>The date and time that the revision was last updated, in ISO 8601 format.</p>
        pub fn set_updated_at(
            mut self,
            input: std::option::Option<aws_smithy_types::DateTime>,
        ) -> Self {
            self.updated_at = input;
            self
        }
        /// Consumes the builder and constructs a [`RevisionEntry`](crate::model::RevisionEntry)
        // Infallible; an unset `finalized` becomes `false` via `unwrap_or_default()`.
        pub fn build(self) -> crate::model::RevisionEntry {
            crate::model::RevisionEntry {
                arn: self.arn,
                comment: self.comment,
                created_at: self.created_at,
                data_set_id: self.data_set_id,
                finalized: self.finalized.unwrap_or_default(),
                id: self.id,
                source_id: self.source_id,
                updated_at: self.updated_at,
            }
        }
    }
}
impl RevisionEntry {
    /// Creates a new builder-style object to manufacture [`RevisionEntry`](crate::model::RevisionEntry)
    pub fn builder() -> crate::model::revision_entry::Builder {
        crate::model::revision_entry::Builder::default()
    }
}
/// <p>The details for the request.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct RequestDetails {
    /// <p>Details about the export to signed URL request.</p>
    pub export_asset_to_signed_url:
        std::option::Option<crate::model::ExportAssetToSignedUrlRequestDetails>,
    /// <p>Details about the export to Amazon S3 request.</p>
    pub export_assets_to_s3: std::option::Option<crate::model::ExportAssetsToS3RequestDetails>,
    /// <p>Details about the export to Amazon S3 request.</p>
    pub export_revisions_to_s3:
        std::option::Option<crate::model::ExportRevisionsToS3RequestDetails>,
    /// <p>Details about the import from signed URL request.</p>
    pub import_asset_from_signed_url:
std::option::Option<crate::model::ImportAssetFromSignedUrlRequestDetails>,
    /// <p>Details about the import from Amazon S3 request.</p>
    pub import_assets_from_s3: std::option::Option<crate::model::ImportAssetsFromS3RequestDetails>,
    /// <p>Details from an import from Amazon Redshift datashare request.</p>
    pub import_assets_from_redshift_data_shares:
        std::option::Option<crate::model::ImportAssetsFromRedshiftDataSharesRequestDetails>,
}
// Borrowing accessors: one getter per request-detail variant; at most one is
// expected to be populated per request (union-style payload) — TODO confirm with callers.
impl RequestDetails {
    /// <p>Details about the export to signed URL request.</p>
    pub fn export_asset_to_signed_url(
        &self,
    ) -> std::option::Option<&crate::model::ExportAssetToSignedUrlRequestDetails> {
        self.export_asset_to_signed_url.as_ref()
    }
    /// <p>Details about the export to Amazon S3 request.</p>
    pub fn export_assets_to_s3(
        &self,
    ) -> std::option::Option<&crate::model::ExportAssetsToS3RequestDetails> {
        self.export_assets_to_s3.as_ref()
    }
    /// <p>Details about the export to Amazon S3 request.</p>
    pub fn export_revisions_to_s3(
        &self,
    ) -> std::option::Option<&crate::model::ExportRevisionsToS3RequestDetails> {
        self.export_revisions_to_s3.as_ref()
    }
    /// <p>Details about the import from signed URL request.</p>
    pub fn import_asset_from_signed_url(
        &self,
    ) -> std::option::Option<&crate::model::ImportAssetFromSignedUrlRequestDetails> {
        self.import_asset_from_signed_url.as_ref()
    }
    /// <p>Details about the import from Amazon S3 request.</p>
    pub fn import_assets_from_s3(
        &self,
    ) -> std::option::Option<&crate::model::ImportAssetsFromS3RequestDetails> {
        self.import_assets_from_s3.as_ref()
    }
    /// <p>Details from an import from Amazon Redshift datashare request.</p>
    pub fn import_assets_from_redshift_data_shares(
        &self,
    ) -> std::option::Option<&crate::model::ImportAssetsFromRedshiftDataSharesRequestDetails> {
        self.import_assets_from_redshift_data_shares.as_ref()
    }
}
impl std::fmt::Debug for RequestDetails {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("RequestDetails");
        formatter.field(
            "export_asset_to_signed_url",
            &self.export_asset_to_signed_url,
        );
        formatter.field("export_assets_to_s3", &self.export_assets_to_s3);
        formatter.field("export_revisions_to_s3", &self.export_revisions_to_s3);
        formatter.field(
            "import_asset_from_signed_url",
            &self.import_asset_from_signed_url,
        );
        formatter.field("import_assets_from_s3", &self.import_assets_from_s3);
        formatter.field(
            "import_assets_from_redshift_data_shares",
            &self.import_assets_from_redshift_data_shares,
        );
        formatter.finish()
    }
}
/// See [`RequestDetails`](crate::model::RequestDetails)
pub mod request_details {
    /// A builder for [`RequestDetails`](crate::model::RequestDetails)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) export_asset_to_signed_url:
            std::option::Option<crate::model::ExportAssetToSignedUrlRequestDetails>,
        pub(crate) export_assets_to_s3:
            std::option::Option<crate::model::ExportAssetsToS3RequestDetails>,
        pub(crate) export_revisions_to_s3:
            std::option::Option<crate::model::ExportRevisionsToS3RequestDetails>,
        pub(crate) import_asset_from_signed_url:
            std::option::Option<crate::model::ImportAssetFromSignedUrlRequestDetails>,
        pub(crate) import_assets_from_s3:
            std::option::Option<crate::model::ImportAssetsFromS3RequestDetails>,
        pub(crate) import_assets_from_redshift_data_shares:
            std::option::Option<crate::model::ImportAssetsFromRedshiftDataSharesRequestDetails>,
    }
    impl Builder {
        /// <p>Details about the export to signed URL request.</p>
        pub fn export_asset_to_signed_url(
            mut self,
            input: crate::model::ExportAssetToSignedUrlRequestDetails,
        ) -> Self {
            self.export_asset_to_signed_url = Some(input);
            self
        }
        /// <p>Details about the export to signed URL request.</p>
        pub fn set_export_asset_to_signed_url(
            mut self,
            input: std::option::Option<crate::model::ExportAssetToSignedUrlRequestDetails>,
        ) -> Self {
            self.export_asset_to_signed_url = input;
            self
        }
        /// <p>Details about the export to Amazon S3 request.</p>
        pub fn export_assets_to_s3(
            mut self,
            input: crate::model::ExportAssetsToS3RequestDetails,
        ) -> Self {
            self.export_assets_to_s3 = Some(input);
            self
        }
        /// <p>Details about the export to Amazon S3 request.</p>
        pub fn set_export_assets_to_s3(
            mut self,
            input: std::option::Option<crate::model::ExportAssetsToS3RequestDetails>,
        ) -> Self {
            self.export_assets_to_s3 = input;
            self
        }
        /// <p>Details about the export to Amazon S3 request.</p>
        pub fn export_revisions_to_s3(
            mut self,
            input: crate::model::ExportRevisionsToS3RequestDetails,
        ) -> Self {
            self.export_revisions_to_s3 = Some(input);
            self
        }
        /// <p>Details about the export to Amazon S3 request.</p>
        pub fn set_export_revisions_to_s3(
            mut self,
            input: std::option::Option<crate::model::ExportRevisionsToS3RequestDetails>,
        ) -> Self {
            self.export_revisions_to_s3 = input;
            self
        }
        /// <p>Details about the import from signed URL request.</p>
        pub fn import_asset_from_signed_url(
            mut self,
            input: crate::model::ImportAssetFromSignedUrlRequestDetails,
        ) -> Self {
            self.import_asset_from_signed_url = Some(input);
            self
        }
        /// <p>Details about the import from signed URL request.</p>
        pub fn set_import_asset_from_signed_url(
            mut self,
            input: std::option::Option<crate::model::ImportAssetFromSignedUrlRequestDetails>,
        ) -> Self {
            self.import_asset_from_signed_url = input;
            self
        }
        /// <p>Details about the import from Amazon S3 request.</p>
        pub fn import_assets_from_s3(
            mut self,
            input: crate::model::ImportAssetsFromS3RequestDetails,
        ) -> Self {
            self.import_assets_from_s3 = Some(input);
            self
        }
        /// <p>Details about the import from Amazon S3 request.</p>
        pub fn set_import_assets_from_s3(
            mut self,
            input: std::option::Option<crate::model::ImportAssetsFromS3RequestDetails>,
        ) -> Self {
            self.import_assets_from_s3 = input;
            self
        }
        /// <p>Details from an import from Amazon Redshift datashare request.</p>
        pub fn import_assets_from_redshift_data_shares(
            mut self,
            input: crate::model::ImportAssetsFromRedshiftDataSharesRequestDetails,
        ) -> Self {
            self.import_assets_from_redshift_data_shares = Some(input);
            self
        }
        /// <p>Details from an import from Amazon Redshift datashare request.</p>
        pub fn set_import_assets_from_redshift_data_shares(
            mut self,
            input: std::option::Option<
                crate::model::ImportAssetsFromRedshiftDataSharesRequestDetails,
            >,
        ) -> Self {
            self.import_assets_from_redshift_data_shares = input;
            self
        }
        /// Consumes the builder and constructs a [`RequestDetails`](crate::model::RequestDetails)
        // Infallible: every field is optional and is moved through unchanged.
        pub fn build(self) -> crate::model::RequestDetails {
            crate::model::RequestDetails {
                export_asset_to_signed_url: self.export_asset_to_signed_url,
                export_assets_to_s3: self.export_assets_to_s3,
                export_revisions_to_s3: self.export_revisions_to_s3,
                import_asset_from_signed_url: self.import_asset_from_signed_url,
                import_assets_from_s3: self.import_assets_from_s3,
                import_assets_from_redshift_data_shares: self
                    .import_assets_from_redshift_data_shares,
            }
        }
    }
}
impl RequestDetails {
    /// Creates a new builder-style object to manufacture [`RequestDetails`](crate::model::RequestDetails)
    pub fn builder() -> crate::model::request_details::Builder {
        crate::model::request_details::Builder::default()
    }
}
/// Details from an import from Amazon Redshift datashare request.
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ImportAssetsFromRedshiftDataSharesRequestDetails {
    /// A list of Amazon Redshift datashare assets.
    pub asset_sources:
        std::option::Option<std::vec::Vec<crate::model::RedshiftDataShareAssetSourceEntry>>,
    /// The unique identifier for the data set associated with this import job.
    pub data_set_id: std::option::Option<std::string::String>,
    /// The unique identifier for the revision associated with this import job.
    pub revision_id: std::option::Option<std::string::String>,
}
impl ImportAssetsFromRedshiftDataSharesRequestDetails {
    /// A list of Amazon Redshift datashare assets.
pub fn asset_sources( &self, ) -> std::option::Option<&[crate::model::RedshiftDataShareAssetSourceEntry]> { self.asset_sources.as_deref() } /// The unique identifier for the data set associated with this import job. pub fn data_set_id(&self) -> std::option::Option<&str> { self.data_set_id.as_deref() } /// The unique identifier for the revision associated with this import job. pub fn revision_id(&self) -> std::option::Option<&str> { self.revision_id.as_deref() } } impl std::fmt::Debug for ImportAssetsFromRedshiftDataSharesRequestDetails { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("ImportAssetsFromRedshiftDataSharesRequestDetails"); formatter.field("asset_sources", &self.asset_sources); formatter.field("data_set_id", &self.data_set_id); formatter.field("revision_id", &self.revision_id); formatter.finish() } } /// See [`ImportAssetsFromRedshiftDataSharesRequestDetails`](crate::model::ImportAssetsFromRedshiftDataSharesRequestDetails) pub mod import_assets_from_redshift_data_shares_request_details { /// A builder for [`ImportAssetsFromRedshiftDataSharesRequestDetails`](crate::model::ImportAssetsFromRedshiftDataSharesRequestDetails) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) asset_sources: std::option::Option<std::vec::Vec<crate::model::RedshiftDataShareAssetSourceEntry>>, pub(crate) data_set_id: std::option::Option<std::string::String>, pub(crate) revision_id: std::option::Option<std::string::String>, } impl Builder { /// Appends an item to `asset_sources`. /// /// To override the contents of this collection use [`set_asset_sources`](Self::set_asset_sources). /// /// A list of Amazon Redshift datashare assets. 
pub fn asset_sources( mut self, input: impl Into<crate::model::RedshiftDataShareAssetSourceEntry>, ) -> Self { let mut v = self.asset_sources.unwrap_or_default(); v.push(input.into()); self.asset_sources = Some(v); self } /// A list of Amazon Redshift datashare assets. pub fn set_asset_sources( mut self, input: std::option::Option< std::vec::Vec<crate::model::RedshiftDataShareAssetSourceEntry>, >, ) -> Self { self.asset_sources = input; self } /// The unique identifier for the data set associated with this import job. pub fn data_set_id(mut self, input: impl Into<std::string::String>) -> Self { self.data_set_id = Some(input.into()); self } /// The unique identifier for the data set associated with this import job. pub fn set_data_set_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.data_set_id = input; self } /// The unique identifier for the revision associated with this import job. pub fn revision_id(mut self, input: impl Into<std::string::String>) -> Self { self.revision_id = Some(input.into()); self } /// The unique identifier for the revision associated with this import job. 
pub fn set_revision_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.revision_id = input; self } /// Consumes the builder and constructs a [`ImportAssetsFromRedshiftDataSharesRequestDetails`](crate::model::ImportAssetsFromRedshiftDataSharesRequestDetails) pub fn build(self) -> crate::model::ImportAssetsFromRedshiftDataSharesRequestDetails { crate::model::ImportAssetsFromRedshiftDataSharesRequestDetails { asset_sources: self.asset_sources, data_set_id: self.data_set_id, revision_id: self.revision_id, } } } } impl ImportAssetsFromRedshiftDataSharesRequestDetails { /// Creates a new builder-style object to manufacture [`ImportAssetsFromRedshiftDataSharesRequestDetails`](crate::model::ImportAssetsFromRedshiftDataSharesRequestDetails) pub fn builder( ) -> crate::model::import_assets_from_redshift_data_shares_request_details::Builder { crate::model::import_assets_from_redshift_data_shares_request_details::Builder::default() } } /// <p>Details of the operation to be performed by the job.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct ImportAssetsFromS3RequestDetails { /// <p>Is a list of S3 bucket and object key pairs.</p> pub asset_sources: std::option::Option<std::vec::Vec<crate::model::AssetSourceEntry>>, /// <p>The unique identifier for the data set associated with this import job.</p> pub data_set_id: std::option::Option<std::string::String>, /// <p>The unique identifier for the revision associated with this import request.</p> pub revision_id: std::option::Option<std::string::String>, } impl ImportAssetsFromS3RequestDetails { /// <p>Is a list of S3 bucket and object key pairs.</p> pub fn asset_sources(&self) -> std::option::Option<&[crate::model::AssetSourceEntry]> { self.asset_sources.as_deref() } /// <p>The unique identifier for the data set associated with this import job.</p> pub fn data_set_id(&self) -> std::option::Option<&str> { self.data_set_id.as_deref() } /// <p>The unique identifier for the 
revision associated with this import request.</p> pub fn revision_id(&self) -> std::option::Option<&str> { self.revision_id.as_deref() } } impl std::fmt::Debug for ImportAssetsFromS3RequestDetails { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("ImportAssetsFromS3RequestDetails"); formatter.field("asset_sources", &self.asset_sources); formatter.field("data_set_id", &self.data_set_id); formatter.field("revision_id", &self.revision_id); formatter.finish() } } /// See [`ImportAssetsFromS3RequestDetails`](crate::model::ImportAssetsFromS3RequestDetails) pub mod import_assets_from_s3_request_details { /// A builder for [`ImportAssetsFromS3RequestDetails`](crate::model::ImportAssetsFromS3RequestDetails) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) asset_sources: std::option::Option<std::vec::Vec<crate::model::AssetSourceEntry>>, pub(crate) data_set_id: std::option::Option<std::string::String>, pub(crate) revision_id: std::option::Option<std::string::String>, } impl Builder { /// Appends an item to `asset_sources`. /// /// To override the contents of this collection use [`set_asset_sources`](Self::set_asset_sources). 
/// /// <p>Is a list of S3 bucket and object key pairs.</p> pub fn asset_sources(mut self, input: impl Into<crate::model::AssetSourceEntry>) -> Self { let mut v = self.asset_sources.unwrap_or_default(); v.push(input.into()); self.asset_sources = Some(v); self } /// <p>Is a list of S3 bucket and object key pairs.</p> pub fn set_asset_sources( mut self, input: std::option::Option<std::vec::Vec<crate::model::AssetSourceEntry>>, ) -> Self { self.asset_sources = input; self } /// <p>The unique identifier for the data set associated with this import job.</p> pub fn data_set_id(mut self, input: impl Into<std::string::String>) -> Self { self.data_set_id = Some(input.into()); self } /// <p>The unique identifier for the data set associated with this import job.</p> pub fn set_data_set_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.data_set_id = input; self } /// <p>The unique identifier for the revision associated with this import request.</p> pub fn revision_id(mut self, input: impl Into<std::string::String>) -> Self { self.revision_id = Some(input.into()); self } /// <p>The unique identifier for the revision associated with this import request.</p> pub fn set_revision_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.revision_id = input; self } /// Consumes the builder and constructs a [`ImportAssetsFromS3RequestDetails`](crate::model::ImportAssetsFromS3RequestDetails) pub fn build(self) -> crate::model::ImportAssetsFromS3RequestDetails { crate::model::ImportAssetsFromS3RequestDetails { asset_sources: self.asset_sources, data_set_id: self.data_set_id, revision_id: self.revision_id, } } } } impl ImportAssetsFromS3RequestDetails { /// Creates a new builder-style object to manufacture [`ImportAssetsFromS3RequestDetails`](crate::model::ImportAssetsFromS3RequestDetails) pub fn builder() -> crate::model::import_assets_from_s3_request_details::Builder { crate::model::import_assets_from_s3_request_details::Builder::default() 
}
}
/// <p>Details of the operation to be performed by the job.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ImportAssetFromSignedUrlRequestDetails {
    /// <p>The name of the asset. When importing from Amazon S3, the S3 object key is used as the asset name.</p>
    pub asset_name: std::option::Option<std::string::String>,
    /// <p>The unique identifier for the data set associated with this import job.</p>
    pub data_set_id: std::option::Option<std::string::String>,
    /// <p>The Base64-encoded Md5 hash for the asset, used to ensure the integrity of the file at that location.</p>
    pub md5_hash: std::option::Option<std::string::String>,
    /// <p>The unique identifier for the revision associated with this import request.</p>
    pub revision_id: std::option::Option<std::string::String>,
}
impl ImportAssetFromSignedUrlRequestDetails {
    /// <p>The name of the asset. When importing from Amazon S3, the S3 object key is used as the asset name.</p>
    pub fn asset_name(&self) -> std::option::Option<&str> {
        self.asset_name.as_deref()
    }
    /// <p>The unique identifier for the data set associated with this import job.</p>
    pub fn data_set_id(&self) -> std::option::Option<&str> {
        self.data_set_id.as_deref()
    }
    /// <p>The Base64-encoded Md5 hash for the asset, used to ensure the integrity of the file at that location.</p>
    pub fn md5_hash(&self) -> std::option::Option<&str> {
        self.md5_hash.as_deref()
    }
    /// <p>The unique identifier for the revision associated with this import request.</p>
    pub fn revision_id(&self) -> std::option::Option<&str> {
        self.revision_id.as_deref()
    }
}
// Hand-written Debug impl: emits fields in declaration order.
impl std::fmt::Debug for ImportAssetFromSignedUrlRequestDetails {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("ImportAssetFromSignedUrlRequestDetails");
        formatter.field("asset_name", &self.asset_name);
        formatter.field("data_set_id", &self.data_set_id);
        formatter.field("md5_hash", &self.md5_hash);
        formatter.field("revision_id", &self.revision_id);
        formatter.finish()
    }
}
/// See [`ImportAssetFromSignedUrlRequestDetails`](crate::model::ImportAssetFromSignedUrlRequestDetails)
pub mod import_asset_from_signed_url_request_details {
    /// A builder for [`ImportAssetFromSignedUrlRequestDetails`](crate::model::ImportAssetFromSignedUrlRequestDetails)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) asset_name: std::option::Option<std::string::String>,
        pub(crate) data_set_id: std::option::Option<std::string::String>,
        pub(crate) md5_hash: std::option::Option<std::string::String>,
        pub(crate) revision_id: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// <p>The name of the asset. When importing from Amazon S3, the S3 object key is used as the asset name.</p>
        pub fn asset_name(mut self, input: impl Into<std::string::String>) -> Self {
            self.asset_name = Some(input.into());
            self
        }
        /// <p>The name of the asset. When importing from Amazon S3, the S3 object key is used as the asset name.</p>
        pub fn set_asset_name(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.asset_name = input;
            self
        }
        /// <p>The unique identifier for the data set associated with this import job.</p>
        pub fn data_set_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.data_set_id = Some(input.into());
            self
        }
        /// <p>The unique identifier for the data set associated with this import job.</p>
        pub fn set_data_set_id(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.data_set_id = input;
            self
        }
        /// <p>The Base64-encoded Md5 hash for the asset, used to ensure the integrity of the file at that location.</p>
        pub fn md5_hash(mut self, input: impl Into<std::string::String>) -> Self {
            self.md5_hash = Some(input.into());
            self
        }
        /// <p>The Base64-encoded Md5 hash for the asset, used to ensure the integrity of the file at that location.</p>
        pub fn set_md5_hash(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.md5_hash = input;
            self
        }
        /// <p>The unique identifier for the revision associated with this import request.</p>
        pub fn revision_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.revision_id = Some(input.into());
            self
        }
        /// <p>The unique identifier for the revision associated with this import request.</p>
        pub fn set_revision_id(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.revision_id = input;
            self
        }
        /// Consumes the builder and constructs a [`ImportAssetFromSignedUrlRequestDetails`](crate::model::ImportAssetFromSignedUrlRequestDetails)
        pub fn build(self) -> crate::model::ImportAssetFromSignedUrlRequestDetails {
            crate::model::ImportAssetFromSignedUrlRequestDetails {
                asset_name: self.asset_name,
                data_set_id: self.data_set_id,
                md5_hash: self.md5_hash,
                revision_id: self.revision_id,
            }
        }
    }
}
impl ImportAssetFromSignedUrlRequestDetails {
    /// Creates a new builder-style object to manufacture [`ImportAssetFromSignedUrlRequestDetails`](crate::model::ImportAssetFromSignedUrlRequestDetails)
    pub fn builder() -> crate::model::import_asset_from_signed_url_request_details::Builder {
        crate::model::import_asset_from_signed_url_request_details::Builder::default()
    }
}
/// <p>Details of the operation to be performed by the job.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ExportRevisionsToS3RequestDetails {
    /// <p>The unique identifier for the data set associated with this export job.</p>
    pub data_set_id: std::option::Option<std::string::String>,
    /// <p>Encryption configuration for the export job.</p>
    pub encryption: std::option::Option<crate::model::ExportServerSideEncryption>,
    /// <p>The destination for the revision.</p>
    pub revision_destinations:
        std::option::Option<std::vec::Vec<crate::model::RevisionDestinationEntry>>,
}
impl ExportRevisionsToS3RequestDetails {
    /// <p>The unique identifier for the data set associated with this export job.</p>
    pub fn data_set_id(&self) -> std::option::Option<&str> {
        self.data_set_id.as_deref()
    }
    /// <p>Encryption configuration for the export job.</p>
    pub fn encryption(&self) -> std::option::Option<&crate::model::ExportServerSideEncryption> {
        self.encryption.as_ref()
    }
    /// <p>The destination for the revision.</p>
    pub fn revision_destinations(
        &self,
    ) -> std::option::Option<&[crate::model::RevisionDestinationEntry]> {
        self.revision_destinations.as_deref()
    }
}
// Hand-written Debug impl: emits fields in declaration order.
impl std::fmt::Debug for ExportRevisionsToS3RequestDetails {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("ExportRevisionsToS3RequestDetails");
        formatter.field("data_set_id", &self.data_set_id);
        formatter.field("encryption", &self.encryption);
        formatter.field("revision_destinations", &self.revision_destinations);
        formatter.finish()
    }
}
/// See [`ExportRevisionsToS3RequestDetails`](crate::model::ExportRevisionsToS3RequestDetails)
pub mod export_revisions_to_s3_request_details {
    /// A builder for [`ExportRevisionsToS3RequestDetails`](crate::model::ExportRevisionsToS3RequestDetails)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) data_set_id: std::option::Option<std::string::String>,
        pub(crate) encryption: std::option::Option<crate::model::ExportServerSideEncryption>,
        pub(crate) revision_destinations:
            std::option::Option<std::vec::Vec<crate::model::RevisionDestinationEntry>>,
    }
    impl Builder {
        /// <p>The unique identifier for the data set associated with this export job.</p>
        pub fn data_set_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.data_set_id = Some(input.into());
            self
        }
        /// <p>The unique identifier for the data set associated with this export job.</p>
        pub fn set_data_set_id(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.data_set_id = input;
            self
        }
        /// <p>Encryption configuration for the export job.</p>
        pub fn encryption(mut self, input: crate::model::ExportServerSideEncryption) -> Self {
            self.encryption = Some(input);
            self
        }
        /// <p>Encryption configuration for the export job.</p>
        pub fn set_encryption(
            mut self,
            input: std::option::Option<crate::model::ExportServerSideEncryption>,
        ) -> Self {
            self.encryption = input;
            self
        }
        /// Appends an item to `revision_destinations`.
        ///
        /// To override the contents of this collection use [`set_revision_destinations`](Self::set_revision_destinations).
        ///
        /// <p>The destination for the revision.</p>
        pub fn revision_destinations(
            mut self,
            input: impl Into<crate::model::RevisionDestinationEntry>,
        ) -> Self {
            let mut v = self.revision_destinations.unwrap_or_default();
            v.push(input.into());
            self.revision_destinations = Some(v);
            self
        }
        /// <p>The destination for the revision.</p>
        pub fn set_revision_destinations(
            mut self,
            input: std::option::Option<std::vec::Vec<crate::model::RevisionDestinationEntry>>,
        ) -> Self {
            self.revision_destinations = input;
            self
        }
        /// Consumes the builder and constructs a [`ExportRevisionsToS3RequestDetails`](crate::model::ExportRevisionsToS3RequestDetails)
        pub fn build(self) -> crate::model::ExportRevisionsToS3RequestDetails {
            crate::model::ExportRevisionsToS3RequestDetails {
                data_set_id: self.data_set_id,
                encryption: self.encryption,
                revision_destinations: self.revision_destinations,
            }
        }
    }
}
impl ExportRevisionsToS3RequestDetails {
    /// Creates a new builder-style object to manufacture [`ExportRevisionsToS3RequestDetails`](crate::model::ExportRevisionsToS3RequestDetails)
    pub fn builder() -> crate::model::export_revisions_to_s3_request_details::Builder {
        crate::model::export_revisions_to_s3_request_details::Builder::default()
    }
}
/// <p>Details of the operation to be performed by the job.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ExportAssetsToS3RequestDetails {
    /// <p>The destination for the asset.</p>
    pub asset_destinations:
        std::option::Option<std::vec::Vec<crate::model::AssetDestinationEntry>>,
    /// <p>The unique identifier for the data set associated with this export job.</p>
    pub data_set_id: std::option::Option<std::string::String>,
    /// <p>Encryption configuration for the export job.</p>
    pub encryption: std::option::Option<crate::model::ExportServerSideEncryption>,
    /// <p>The unique identifier for the revision associated with this export request.</p>
    pub revision_id: std::option::Option<std::string::String>,
}
impl ExportAssetsToS3RequestDetails {
    /// <p>The destination for the asset.</p>
    pub fn asset_destinations(
        &self,
    ) -> std::option::Option<&[crate::model::AssetDestinationEntry]> {
        self.asset_destinations.as_deref()
    }
    /// <p>The unique identifier for the data set associated with this export job.</p>
    pub fn data_set_id(&self) -> std::option::Option<&str> {
        self.data_set_id.as_deref()
    }
    /// <p>Encryption configuration for the export job.</p>
    pub fn encryption(&self) -> std::option::Option<&crate::model::ExportServerSideEncryption> {
        self.encryption.as_ref()
    }
    /// <p>The unique identifier for the revision associated with this export request.</p>
    pub fn revision_id(&self) -> std::option::Option<&str> {
        self.revision_id.as_deref()
    }
}
// Hand-written Debug impl: emits fields in declaration order.
impl std::fmt::Debug for ExportAssetsToS3RequestDetails {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("ExportAssetsToS3RequestDetails");
        formatter.field("asset_destinations", &self.asset_destinations);
        formatter.field("data_set_id", &self.data_set_id);
        formatter.field("encryption", &self.encryption);
        formatter.field("revision_id", &self.revision_id);
        formatter.finish()
    }
}
/// See [`ExportAssetsToS3RequestDetails`](crate::model::ExportAssetsToS3RequestDetails)
pub mod export_assets_to_s3_request_details {
    /// A builder for [`ExportAssetsToS3RequestDetails`](crate::model::ExportAssetsToS3RequestDetails)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) asset_destinations:
            std::option::Option<std::vec::Vec<crate::model::AssetDestinationEntry>>,
        pub(crate) data_set_id: std::option::Option<std::string::String>,
        pub(crate) encryption: std::option::Option<crate::model::ExportServerSideEncryption>,
        pub(crate) revision_id: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// Appends an item to `asset_destinations`.
        ///
        /// To override the contents of this collection use [`set_asset_destinations`](Self::set_asset_destinations).
        ///
        /// <p>The destination for the asset.</p>
        pub fn asset_destinations(
            mut self,
            input: impl Into<crate::model::AssetDestinationEntry>,
        ) -> Self {
            let mut v = self.asset_destinations.unwrap_or_default();
            v.push(input.into());
            self.asset_destinations = Some(v);
            self
        }
        /// <p>The destination for the asset.</p>
        pub fn set_asset_destinations(
            mut self,
            input: std::option::Option<std::vec::Vec<crate::model::AssetDestinationEntry>>,
        ) -> Self {
            self.asset_destinations = input;
            self
        }
        /// <p>The unique identifier for the data set associated with this export job.</p>
        pub fn data_set_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.data_set_id = Some(input.into());
            self
        }
        /// <p>The unique identifier for the data set associated with this export job.</p>
        pub fn set_data_set_id(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.data_set_id = input;
            self
        }
        /// <p>Encryption configuration for the export job.</p>
        pub fn encryption(mut self, input: crate::model::ExportServerSideEncryption) -> Self {
            self.encryption = Some(input);
            self
        }
        /// <p>Encryption configuration for the export job.</p>
        pub fn set_encryption(
            mut self,
            input: std::option::Option<crate::model::ExportServerSideEncryption>,
        ) -> Self {
            self.encryption = input;
            self
        }
        /// <p>The unique identifier for the revision associated with this export request.</p>
        pub fn revision_id(mut self, input: impl Into<std::string::String>) -> Self {
self.revision_id = Some(input.into());
            self
        }
        /// <p>The unique identifier for the revision associated with this export request.</p>
        pub fn set_revision_id(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.revision_id = input;
            self
        }
        /// Consumes the builder and constructs a [`ExportAssetsToS3RequestDetails`](crate::model::ExportAssetsToS3RequestDetails)
        pub fn build(self) -> crate::model::ExportAssetsToS3RequestDetails {
            crate::model::ExportAssetsToS3RequestDetails {
                asset_destinations: self.asset_destinations,
                data_set_id: self.data_set_id,
                encryption: self.encryption,
                revision_id: self.revision_id,
            }
        }
    }
}
impl ExportAssetsToS3RequestDetails {
    /// Creates a new builder-style object to manufacture [`ExportAssetsToS3RequestDetails`](crate::model::ExportAssetsToS3RequestDetails)
    pub fn builder() -> crate::model::export_assets_to_s3_request_details::Builder {
        crate::model::export_assets_to_s3_request_details::Builder::default()
    }
}
/// <p>Details of the operation to be performed by the job.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ExportAssetToSignedUrlRequestDetails {
    /// <p>The unique identifier for the asset that is exported to a signed URL.</p>
    pub asset_id: std::option::Option<std::string::String>,
    /// <p>The unique identifier for the data set associated with this export job.</p>
    pub data_set_id: std::option::Option<std::string::String>,
    /// <p>The unique identifier for the revision associated with this export request.</p>
    pub revision_id: std::option::Option<std::string::String>,
}
impl ExportAssetToSignedUrlRequestDetails {
    /// <p>The unique identifier for the asset that is exported to a signed URL.</p>
    pub fn asset_id(&self) -> std::option::Option<&str> {
        self.asset_id.as_deref()
    }
    /// <p>The unique identifier for the data set associated with this export job.</p>
    pub fn data_set_id(&self) -> std::option::Option<&str> {
        self.data_set_id.as_deref()
    }
    /// <p>The unique identifier for the revision associated with this export request.</p>
    pub fn revision_id(&self) -> std::option::Option<&str> {
        self.revision_id.as_deref()
    }
}
// Hand-written Debug impl: emits fields in declaration order.
impl std::fmt::Debug for ExportAssetToSignedUrlRequestDetails {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("ExportAssetToSignedUrlRequestDetails");
        formatter.field("asset_id", &self.asset_id);
        formatter.field("data_set_id", &self.data_set_id);
        formatter.field("revision_id", &self.revision_id);
        formatter.finish()
    }
}
/// See [`ExportAssetToSignedUrlRequestDetails`](crate::model::ExportAssetToSignedUrlRequestDetails)
pub mod export_asset_to_signed_url_request_details {
    /// A builder for [`ExportAssetToSignedUrlRequestDetails`](crate::model::ExportAssetToSignedUrlRequestDetails)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) asset_id: std::option::Option<std::string::String>,
        pub(crate) data_set_id: std::option::Option<std::string::String>,
        pub(crate) revision_id: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// <p>The unique identifier for the asset that is exported to a signed URL.</p>
        pub fn asset_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.asset_id = Some(input.into());
            self
        }
        /// <p>The unique identifier for the asset that is exported to a signed URL.</p>
        pub fn set_asset_id(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.asset_id = input;
            self
        }
        /// <p>The unique identifier for the data set associated with this export job.</p>
        pub fn data_set_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.data_set_id = Some(input.into());
            self
        }
        /// <p>The unique identifier for the data set associated with this export job.</p>
        pub fn set_data_set_id(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.data_set_id = input;
            self
        }
        /// <p>The unique identifier for the revision associated with this export request.</p>
        pub fn revision_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.revision_id = Some(input.into());
            self
        }
        /// <p>The unique identifier for the revision associated with this export request.</p>
        pub fn set_revision_id(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.revision_id = input;
            self
        }
        /// Consumes the builder and constructs a [`ExportAssetToSignedUrlRequestDetails`](crate::model::ExportAssetToSignedUrlRequestDetails)
        pub fn build(self) -> crate::model::ExportAssetToSignedUrlRequestDetails {
            crate::model::ExportAssetToSignedUrlRequestDetails {
                asset_id: self.asset_id,
                data_set_id: self.data_set_id,
                revision_id: self.revision_id,
            }
        }
    }
}
impl ExportAssetToSignedUrlRequestDetails {
    /// Creates a new builder-style object to manufacture [`ExportAssetToSignedUrlRequestDetails`](crate::model::ExportAssetToSignedUrlRequestDetails)
    pub fn builder() -> crate::model::export_asset_to_signed_url_request_details::Builder {
        crate::model::export_asset_to_signed_url_request_details::Builder::default()
    }
}
// NOTE(review): variant docs are absent because the upstream service model
// carries none ("documentation missing in model"); the enum is
// #[non_exhaustive] so new service limits can be added without a break.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
    std::clone::Clone,
    std::cmp::Eq,
    std::cmp::Ord,
    std::cmp::PartialEq,
    std::cmp::PartialOrd,
    std::fmt::Debug,
    std::hash::Hash,
)]
pub enum LimitName {
    #[allow(missing_docs)] // documentation missing in model
    AmazonRedshiftDatashareAssetsPerImportJobFromRedshift,
    #[allow(missing_docs)] // documentation missing in model
    AmazonRedshiftDatashareAssetsPerRevision,
    #[allow(missing_docs)] // documentation missing in model
    AssetPerExportJobFromAmazonS3,
    #[allow(missing_docs)] // documentation missing in model
    AssetSizeInGb,
    #[allow(missing_docs)] // documentation missing in model
    AssetsPerImportJobFromAmazonS3,
    #[allow(missing_docs)] // documentation missing in model
    AssetsPerRevision,
    #[allow(missing_docs)] // documentation missing in model
    AutoExportEventActionsPerDataSet,
    #[allow(missing_docs)] // documentation missing in model
    ConcurrentInProgressJobsToExportAssetsToAmazonS3,
    #[allow(missing_docs)] // documentation missing in model
    ConcurrentInProgressJobsToExportAssetsToASignedUrl,
    #[allow(missing_docs)] // documentation missing in model
    ConcurrentInProgressJobsToExportRevisionsToAmazonS3,
    #[allow(missing_docs)] // documentation missing in model
    ConcurrentInProgressJobsToImportAssetsFromAmazonRedshiftDatashares,
    #[allow(missing_docs)] // documentation missing in model
    ConcurrentInProgressJobsToImportAssetsFromAmazonS3,
    #[allow(missing_docs)] // documentation missing in model
    ConcurrentInProgressJobsToImportAssetsFromASignedUrl,
    #[allow(missing_docs)] // documentation missing in model
    DataSetsPerAccount,
    #[allow(missing_docs)] // documentation missing in model
    DataSetsPerProduct,
    #[allow(missing_docs)] // documentation missing in model
    EventActionsPerAccount,
    #[allow(missing_docs)] // documentation missing in model
    ProductsPerAccount,
    #[allow(missing_docs)] // documentation missing in model
    RevisionsPerAmazonRedshiftDatashareDataSet,
    #[allow(missing_docs)] // documentation missing in model
    RevisionsPerDataSet,
    /// Unknown contains new variants that have been added since this code was generated.
Unknown(String), } impl std::convert::From<&str> for LimitName { fn from(s: &str) -> Self { match s { "Amazon Redshift datashare assets per import job from Redshift" => { LimitName::AmazonRedshiftDatashareAssetsPerImportJobFromRedshift } "Amazon Redshift datashare assets per revision" => { LimitName::AmazonRedshiftDatashareAssetsPerRevision } "Asset per export job from Amazon S3" => LimitName::AssetPerExportJobFromAmazonS3, "Asset size in GB" => LimitName::AssetSizeInGb, "Assets per import job from Amazon S3" => LimitName::AssetsPerImportJobFromAmazonS3, "Assets per revision" => LimitName::AssetsPerRevision, "Auto export event actions per data set" => LimitName::AutoExportEventActionsPerDataSet, "Concurrent in progress jobs to export assets to Amazon S3" => { LimitName::ConcurrentInProgressJobsToExportAssetsToAmazonS3 } "Concurrent in progress jobs to export assets to a signed URL" => { LimitName::ConcurrentInProgressJobsToExportAssetsToASignedUrl } "Concurrent in progress jobs to export revisions to Amazon S3" => { LimitName::ConcurrentInProgressJobsToExportRevisionsToAmazonS3 } "Concurrent in progress jobs to import assets from Amazon Redshift datashares" => { LimitName::ConcurrentInProgressJobsToImportAssetsFromAmazonRedshiftDatashares } "Concurrent in progress jobs to import assets from Amazon S3" => { LimitName::ConcurrentInProgressJobsToImportAssetsFromAmazonS3 } "Concurrent in progress jobs to import assets from a signed URL" => { LimitName::ConcurrentInProgressJobsToImportAssetsFromASignedUrl } "Data sets per account" => LimitName::DataSetsPerAccount, "Data sets per product" => LimitName::DataSetsPerProduct, "Event actions per account" => LimitName::EventActionsPerAccount, "Products per account" => LimitName::ProductsPerAccount, "Revisions per Amazon Redshift datashare data set" => { LimitName::RevisionsPerAmazonRedshiftDatashareDataSet } "Revisions per data set" => LimitName::RevisionsPerDataSet, other => LimitName::Unknown(other.to_owned()), } } } impl 
std::str::FromStr for LimitName { type Err = std::convert::Infallible; fn from_str(s: &str) -> std::result::Result<Self, Self::Err> { Ok(LimitName::from(s)) } } impl LimitName { /// Returns the `&str` value of the enum member. pub fn as_str(&self) -> &str { match self { LimitName::AmazonRedshiftDatashareAssetsPerImportJobFromRedshift => { "Amazon Redshift datashare assets per import job from Redshift" } LimitName::AmazonRedshiftDatashareAssetsPerRevision => { "Amazon Redshift datashare assets per revision" } LimitName::AssetPerExportJobFromAmazonS3 => "Asset per export job from Amazon S3", LimitName::AssetSizeInGb => "Asset size in GB", LimitName::AssetsPerImportJobFromAmazonS3 => "Assets per import job from Amazon S3", LimitName::AssetsPerRevision => "Assets per revision", LimitName::AutoExportEventActionsPerDataSet => "Auto export event actions per data set", LimitName::ConcurrentInProgressJobsToExportAssetsToAmazonS3 => { "Concurrent in progress jobs to export assets to Amazon S3" } LimitName::ConcurrentInProgressJobsToExportAssetsToASignedUrl => { "Concurrent in progress jobs to export assets to a signed URL" } LimitName::ConcurrentInProgressJobsToExportRevisionsToAmazonS3 => { "Concurrent in progress jobs to export revisions to Amazon S3" } LimitName::ConcurrentInProgressJobsToImportAssetsFromAmazonRedshiftDatashares => { "Concurrent in progress jobs to import assets from Amazon Redshift datashares" } LimitName::ConcurrentInProgressJobsToImportAssetsFromAmazonS3 => { "Concurrent in progress jobs to import assets from Amazon S3" } LimitName::ConcurrentInProgressJobsToImportAssetsFromASignedUrl => { "Concurrent in progress jobs to import assets from a signed URL" } LimitName::DataSetsPerAccount => "Data sets per account", LimitName::DataSetsPerProduct => "Data sets per product", LimitName::EventActionsPerAccount => "Event actions per account", LimitName::ProductsPerAccount => "Products per account", LimitName::RevisionsPerAmazonRedshiftDatashareDataSet => { 
"Revisions per Amazon Redshift datashare data set" } LimitName::RevisionsPerDataSet => "Revisions per data set", LimitName::Unknown(s) => s.as_ref(), } } /// Returns all the `&str` values of the enum members. pub fn values() -> &'static [&'static str] { &[ "Amazon Redshift datashare assets per import job from Redshift", "Amazon Redshift datashare assets per revision", "Asset per export job from Amazon S3", "Asset size in GB", "Assets per import job from Amazon S3", "Assets per revision", "Auto export event actions per data set", "Concurrent in progress jobs to export assets to Amazon S3", "Concurrent in progress jobs to export assets to a signed URL", "Concurrent in progress jobs to export revisions to Amazon S3", "Concurrent in progress jobs to import assets from Amazon Redshift datashares", "Concurrent in progress jobs to import assets from Amazon S3", "Concurrent in progress jobs to import assets from a signed URL", "Data sets per account", "Data sets per product", "Event actions per account", "Products per account", "Revisions per Amazon Redshift datashare data set", "Revisions per data set", ] } } impl AsRef<str> for LimitName { fn as_ref(&self) -> &str { self.as_str() } }
45.264448
544
0.635308
eda2b49ce95fdd27cc50b91f57b8ff275f983fb3
4,279
#![no_main] #![no_std] #![warn(rust_2018_idioms, unsafe_op_in_unsafe_fn)] #![feature(panic_info_message)] use core::{fmt::Write, mem::MaybeUninit, panic::PanicInfo, sync::atomic::Ordering}; use atomic_maybe_uninit::*; use cortex_m::asm; use cortex_m_rt::entry; use cortex_m_semihosting as semihosting; macro_rules! __test_atomic { ($int_type:ident) => { load_store(); fn load_store() { unsafe { static VAR: AtomicMaybeUninit<$int_type> = AtomicMaybeUninit::<$int_type>::const_new(MaybeUninit::new(10)); for (load_order, store_order) in load_orderings().into_iter().zip(store_orderings()) { assert_eq!(VAR.load(load_order).assume_init(), 10); VAR.store(MaybeUninit::new(5), store_order); assert_eq!(VAR.load(load_order).assume_init(), 5); VAR.store(MaybeUninit::uninit(), store_order); let _v = VAR.load(load_order); VAR.store(MaybeUninit::new(10), store_order); let a = AtomicMaybeUninit::<$int_type>::new(MaybeUninit::new(1)); assert_eq!(a.load(load_order).assume_init(), 1); a.store(MaybeUninit::new(2), store_order); assert_eq!(a.load(load_order).assume_init(), 2); let a = AtomicMaybeUninit::<$int_type>::new(MaybeUninit::uninit()); let _v = a.load(load_order); a.store(MaybeUninit::new(2), store_order); assert_eq!(a.load(load_order).assume_init(), 2); a.store(MaybeUninit::uninit(), store_order); let _v = a.load(load_order); } } } swap(); fn swap() { unsafe { for order in swap_orderings() { let a = AtomicMaybeUninit::<$int_type>::new(MaybeUninit::new(5)); assert_eq!(a.swap(MaybeUninit::new(10), order).assume_init(), 5); assert_eq!(a.swap(MaybeUninit::uninit(), order).assume_init(), 10); let _v = a.swap(MaybeUninit::new(15), order); let a = AtomicMaybeUninit::<$int_type>::new(MaybeUninit::uninit()); let _v = a.swap(MaybeUninit::new(10), order); assert_eq!(a.swap(MaybeUninit::uninit(), order).assume_init(), 10); } } } }; } fn load_orderings() -> [Ordering; 3] { [Ordering::Relaxed, Ordering::Acquire, Ordering::SeqCst] } fn store_orderings() -> [Ordering; 3] { [Ordering::Relaxed, 
Ordering::Release, Ordering::SeqCst] } fn swap_orderings() -> [Ordering; 5] { [Ordering::Relaxed, Ordering::Release, Ordering::Acquire, Ordering::AcqRel, Ordering::SeqCst] } #[entry] fn main() -> ! { asm::nop(); let mut hstdout = semihosting::hio::hstdout().unwrap(); macro_rules! test_atomic { ($int_type:ident) => { paste::paste! { fn [<test_atomic_ $int_type>]() { __test_atomic!($int_type); } let _ = write!(hstdout, "test test_atomic_{} ...", stringify!($int_type)); [<test_atomic_ $int_type>](); let _ = write!(hstdout, " ok\n"); } }; } test_atomic!(isize); test_atomic!(usize); test_atomic!(i8); test_atomic!(u8); test_atomic!(i16); test_atomic!(u16); test_atomic!(i32); test_atomic!(u32); loop { semihosting::debug::exit(semihosting::debug::EXIT_SUCCESS); } } #[inline(never)] #[panic_handler] fn panic(info: &PanicInfo<'_>) -> ! { if let Ok(mut hstdout) = semihosting::hio::hstdout() { if let Some(s) = info.message() { if let Some(l) = info.location() { let _ = writeln!(hstdout, "panicked at '{:?}', {}", s, l); } else { let _ = writeln!(hstdout, "panicked at '{:?}' (no location info)", s); } } else { let _ = writeln!(hstdout, "panic occurred (no message)"); } } loop { semihosting::debug::exit(semihosting::debug::EXIT_FAILURE); } }
35.363636
100
0.530965
1e6ae9d7445296f7151d9c103b3f4fcd55a866f4
206
// This shopping list program isn't compiling! // Use your knowledge of generics to fix it. // I AM NOT DONE fn main() { let mut shopping_list: Vec<?> = Vec::new(); shopping_list.push("milk"); }
18.727273
47
0.650485
9bda2c5abddf40b6f3a562516249e15b81fda5d1
6,988
/* * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ //! Utilities for providing the goto definition feature use crate::{ location::to_lsp_location_of_graphql_literal, lsp::GotoDefinitionResponse, lsp_runtime_error::{LSPRuntimeError, LSPRuntimeResult}, resolution_path::utils::find_selection_parent_type, resolution_path::{ IdentParent, IdentPath, LinkedFieldPath, ResolutionPath, ResolvePosition, ScalarFieldPath, SelectionParent, TypeConditionPath, }, server::LSPState, LSPExtraDataProvider, }; use common::PerfLogger; use fnv::FnvHashMap; use graphql_ir::Program; use interner::StringKey; use lsp_types::{ request::{GotoDefinition, Request}, Url, }; use schema::Schema; use std::{ path::PathBuf, str, sync::{Arc, RwLock}, }; fn get_goto_definition_response<'a>( node_path: ResolutionPath<'a>, project_name: StringKey, source_programs: &Arc<RwLock<FnvHashMap<StringKey, Program>>>, root_dir: &PathBuf, // https://github.com/rust-lang/rust-clippy/issues/3971 #[allow(clippy::borrowed_box)] extra_data_provider: &Box<dyn LSPExtraDataProvider + 'static>, ) -> LSPRuntimeResult<GotoDefinitionResponse> { match node_path { ResolutionPath::Ident(IdentPath { inner: fragment_name, parent: IdentParent::FragmentSpreadName(_), }) => { if let Some(source_program) = source_programs .read() .expect("get_goto_definition_response: expect to acquire a read lock on programs") .get(&project_name) { let fragment = source_program .fragment(fragment_name.value) .ok_or_else(|| { LSPRuntimeError::UnexpectedError(format!( "Could not find fragment with name {}", fragment_name )) })?; Ok(GotoDefinitionResponse::Scalar( to_lsp_location_of_graphql_literal(fragment.name.location, root_dir)?, )) } else { Err(LSPRuntimeError::UnexpectedError(format!( "Project name {} not found", project_name ))) } } ResolutionPath::Ident(IdentPath { inner: field_name, parent: 
IdentParent::LinkedFieldName(LinkedFieldPath { inner: _, parent: selection_path, }), }) => resolve_field( field_name.value.to_string(), selection_path.parent, project_name, source_programs, root_dir, extra_data_provider, ), ResolutionPath::Ident(IdentPath { inner: field_name, parent: IdentParent::ScalarFieldName(ScalarFieldPath { inner: _, parent: selection_path, }), }) => resolve_field( field_name.value.to_string(), selection_path.parent, project_name, source_programs, root_dir, extra_data_provider, ), ResolutionPath::Ident(IdentPath { inner: _, parent: IdentParent::TypeConditionType(TypeConditionPath { inner: type_condition, parent: _, }), }) => { let provider_response = extra_data_provider .resolve_field_definition( project_name.to_string(), root_dir, type_condition.type_.value.to_string(), None, ) .ok_or(LSPRuntimeError::ExpectedError)?; let (path, line) = provider_response.map_err(|e| -> LSPRuntimeError { LSPRuntimeError::UnexpectedError(format!( "Error resolving field definition location: {}", e )) })?; Ok(GotoDefinitionResponse::Scalar(get_location(&path, line)?)) } _ => Err(LSPRuntimeError::ExpectedError), } } fn resolve_field<'a>( field_name: String, selection_parent: SelectionParent<'a>, project_name: StringKey, source_programs: &Arc<RwLock<FnvHashMap<StringKey, Program>>>, root_dir: &PathBuf, // https://github.com/rust-lang/rust-clippy/issues/3971 #[allow(clippy::borrowed_box)] extra_data_provider: &Box<dyn LSPExtraDataProvider + 'static>, ) -> LSPRuntimeResult<GotoDefinitionResponse> { let programs = source_programs .read() .expect("get_goto_definition_response: expect to acquire a read lock on programs"); let source_program = programs.get(&project_name).ok_or_else(|| { LSPRuntimeError::UnexpectedError(format!("Project name {} not found", project_name)) })?; let parent_type = find_selection_parent_type(selection_parent, &source_program.schema) .ok_or(LSPRuntimeError::ExpectedError)?; let parent_name = source_program.schema.get_type_name(parent_type); 
let provider_response = extra_data_provider .resolve_field_definition( project_name.to_string(), root_dir, parent_name.to_string(), Some(field_name), ) .ok_or(LSPRuntimeError::ExpectedError)?; let (path, line) = provider_response.map_err(|e| -> LSPRuntimeError { LSPRuntimeError::UnexpectedError(format!( "Error resolving field definition location: {}", e )) })?; Ok(GotoDefinitionResponse::Scalar(get_location(&path, line)?)) } pub(crate) fn on_goto_definition<TPerfLogger: PerfLogger + 'static>( state: &mut LSPState<TPerfLogger>, params: <GotoDefinition as Request>::Params, ) -> LSPRuntimeResult<<GotoDefinition as Request>::Result> { let (document, position_span, project_name) = state.extract_executable_document_from_text(params, 1)?; let path = document.resolve((), position_span); let goto_definition_response = get_goto_definition_response( path, project_name, state.get_source_programs_ref(), state.root_dir(), &state.extra_data_provider, )?; Ok(Some(goto_definition_response)) } fn get_location(path: &str, line: u64) -> Result<lsp_types::Location, LSPRuntimeError> { let start = lsp_types::Position { line, character: 0 }; let range = lsp_types::Range { start, end: start }; let uri = Url::parse(&format!("file://{}", path)).map_err(|e| { LSPRuntimeError::UnexpectedError(format!("Could not parse path as URL: {}", e)) })?; Ok(lsp_types::Location { uri, range }) }
35.115578
98
0.59502
380c0e57ac72ddc3b126d04d053e1228a6000de9
16,139
use crate::{ format_sso, instruction::{InstructionKind, InstructionStreamPair}, managers::{ CameraManager, DirectionalLightManager, InternalTexture, MaterialManager, MeshManager, ObjectManager, TextureManager, }, types::{ Camera, DirectionalLight, DirectionalLightChange, DirectionalLightHandle, MaterialHandle, Mesh, MeshHandle, Object, ObjectHandle, Texture, TextureHandle, }, util::mipmap::MipmapGenerator, ExtendedAdapterInfo, InstanceAdapterDevice, ReadyData, RendererInitializationError, RendererMode, }; use glam::Mat4; use parking_lot::{Mutex, RwLock}; use rend3_types::{Material, MipmapCount, MipmapSource, TextureFormat, TextureFromTexture, TextureUsages}; use std::{num::NonZeroU32, panic::Location, sync::Arc}; use wgpu::{ util::DeviceExt, CommandBuffer, CommandEncoderDescriptor, Device, Extent3d, ImageCopyTexture, ImageDataLayout, Origin3d, Queue, TextureAspect, TextureDescriptor, TextureDimension, TextureSampleType, TextureViewDescriptor, TextureViewDimension, }; use wgpu_profiler::GpuProfiler; pub mod error; mod ready; mod setup; /// Core struct which contains the renderer world. Primary way to interact with the world. pub struct Renderer { instructions: InstructionStreamPair, /// The culling mode used. pub mode: RendererMode, /// Information about the adapter. pub adapter_info: ExtendedAdapterInfo, /// Queue all command buffers will be submitted to. pub queue: Arc<Queue>, /// Device all objects will be created with. pub device: Arc<Device>, /// Position and settings of the camera. pub camera_manager: RwLock<CameraManager>, /// Manages all vertex and index data. pub mesh_manager: RwLock<MeshManager>, /// Manages all 2D textures, including bindless bind group. pub d2_texture_manager: RwLock<TextureManager>, /// Manages all Cube textures, including bindless bind groups. pub d2c_texture_manager: RwLock<TextureManager>, /// Manages all materials, including material bind groups in CPU mode. pub material_manager: RwLock<MaterialManager>, /// Manages all objects. 
pub object_manager: RwLock<ObjectManager>, /// Manages all directional lights, including their shadow maps. pub directional_light_manager: RwLock<DirectionalLightManager>, /// Tool which generates mipmaps from a texture. pub mipmap_generator: MipmapGenerator, /// Stores gpu timing and debug scopes. pub profiler: Mutex<GpuProfiler>, } impl Renderer { /// Create a new renderer with the given IAD. /// /// You can create your own IAD or call [`create_iad`](crate::create_iad). /// /// The aspect ratio is that of the window. This automatically configures the camera. If None is passed, an aspect ratio of 1.0 is assumed. pub fn new( iad: InstanceAdapterDevice, aspect_ratio: Option<f32>, ) -> Result<Arc<Self>, RendererInitializationError> { setup::create_renderer(iad, aspect_ratio) } /// Adds a 3D mesh to the renderer. This doesn't instantiate it to world. To show this in the world, you need to create an [`Object`] using this mesh. /// /// The handle will keep the mesh alive. All objects created will also keep the mesh alive. #[track_caller] pub fn add_mesh(&self, mesh: Mesh) -> MeshHandle { let handle = self.mesh_manager.read().allocate(); self.instructions.push( InstructionKind::AddMesh { handle: handle.clone(), mesh, }, *Location::caller(), ); handle } /// Add a 2D texture to the renderer. This can be used in a [`Material`]. /// /// The handle will keep the texture alive. All materials created with this texture will also keep the texture alive. 
#[track_caller] pub fn add_texture_2d(&self, texture: Texture) -> TextureHandle { profiling::scope!("Add Texture 2D"); Self::validation_texture_format(texture.format); let handle = self.d2_texture_manager.read().allocate(); let size = Extent3d { width: texture.size.x, height: texture.size.y, depth_or_array_layers: 1, }; let mip_level_count = match texture.mip_count { MipmapCount::Specific(v) => v.get(), MipmapCount::Maximum => size.max_mips(), }; let desc = TextureDescriptor { label: None, size, mip_level_count, sample_count: 1, dimension: TextureDimension::D2, format: texture.format, usage: TextureUsages::TEXTURE_BINDING | TextureUsages::COPY_SRC | TextureUsages::COPY_DST, }; let (buffer, tex) = match texture.mip_source { MipmapSource::Uploaded => ( None, self.device.create_texture_with_data(&self.queue, &desc, &texture.data), ), MipmapSource::Generated => { let desc = TextureDescriptor { usage: desc.usage | TextureUsages::RENDER_ATTACHMENT, ..desc }; let tex = self.device.create_texture(&desc); let format_desc = texture.format.describe(); // write first level self.queue.write_texture( ImageCopyTexture { texture: &tex, mip_level: 0, origin: Origin3d::ZERO, aspect: TextureAspect::All, }, &texture.data, ImageDataLayout { offset: 0, bytes_per_row: NonZeroU32::new( format_desc.block_size as u32 * (size.width / format_desc.block_dimensions.0 as u32), ), rows_per_image: None, }, size, ); let mut encoder = self.device.create_command_encoder(&CommandEncoderDescriptor::default()); // generate mipmaps self.mipmap_generator .generate_mipmaps(&self.device, &self.profiler, &mut encoder, &tex, &desc); (Some(encoder.finish()), tex) } }; let view = tex.create_view(&TextureViewDescriptor::default()); self.instructions.push( InstructionKind::AddTexture { handle: handle.clone(), desc, texture: tex, view, buffer, cube: false, }, *Location::caller(), ); handle } /// Add a 2D texture to the renderer by copying a set of mipmaps from an existing texture. 
This new can be used in a [`Material`]. /// /// The handle will keep the texture alive. All materials created with this texture will also keep the texture alive. #[track_caller] pub fn add_texture_2d_from_texture(&self, texture: TextureFromTexture) -> TextureHandle { profiling::scope!("Add Texture 2D From Texture"); let mut encoder = self.device.create_command_encoder(&CommandEncoderDescriptor::default()); let d2_manager = self.d2_texture_manager.read(); let handle = d2_manager.allocate(); // self.profiler // .lock() // .begin_scope("Add Texture 2D From Texture", &mut encoder, &self.device); let InternalTexture { texture: old_texture, desc: old_texture_desc, } = d2_manager.get_internal(texture.src.get_raw()); let new_size = old_texture_desc.mip_level_size(texture.start_mip).unwrap(); let mip_level_count = texture .mip_count .map_or_else(|| old_texture_desc.mip_level_count - texture.start_mip, |c| c.get()); let desc = TextureDescriptor { size: new_size, mip_level_count, ..old_texture_desc.clone() }; let tex = self.device.create_texture(&desc); let view = tex.create_view(&TextureViewDescriptor::default()); for new_mip in 0..mip_level_count { let old_mip = new_mip + texture.start_mip; let _label = format_sso!("mip {} to {}", old_mip, new_mip); profiling::scope!(&_label); // self.profiler.lock().begin_scope(&label, &mut encoder, &self.device); encoder.copy_texture_to_texture( ImageCopyTexture { texture: old_texture, mip_level: old_mip, origin: Origin3d::ZERO, aspect: TextureAspect::All, }, ImageCopyTexture { texture: &tex, mip_level: new_mip, origin: Origin3d::ZERO, aspect: TextureAspect::All, }, old_texture_desc.mip_level_size(old_mip).unwrap(), ); // self.profiler.lock().end_scope(&mut encoder); } // self.profiler.lock().end_scope(&mut encoder); self.instructions.push( InstructionKind::AddTexture { handle: handle.clone(), texture: tex, desc, view, buffer: Some(encoder.finish()), cube: false, }, *Location::caller(), ); handle } /// Adds a Cube texture to the renderer. 
This can be used as a cube environment map by a render routine. /// /// The handle will keep the texture alive. #[track_caller] pub fn add_texture_cube(&self, texture: Texture) -> TextureHandle { profiling::scope!("Add Texture Cube"); Self::validation_texture_format(texture.format); let handle = self.d2c_texture_manager.read().allocate(); let size = Extent3d { width: texture.size.x, height: texture.size.y, depth_or_array_layers: 6, }; let mip_level_count = match texture.mip_count { MipmapCount::Specific(v) => v.get(), MipmapCount::Maximum => size.max_mips(), }; let desc = TextureDescriptor { label: None, size, mip_level_count, sample_count: 1, dimension: TextureDimension::D2, format: texture.format, usage: TextureUsages::TEXTURE_BINDING | TextureUsages::COPY_DST, }; let tex = self.device.create_texture_with_data(&self.queue, &desc, &texture.data); let view = tex.create_view(&TextureViewDescriptor { dimension: Some(TextureViewDimension::Cube), ..TextureViewDescriptor::default() }); self.instructions.push( InstructionKind::AddTexture { handle: handle.clone(), texture: tex, desc, view, buffer: None, cube: true, }, *Location::caller(), ); handle } fn validation_texture_format(format: TextureFormat) { let sample_type = format.describe().sample_type; if let TextureSampleType::Float { filterable } = sample_type { if !filterable { panic!( "Textures formats must allow filtering with a linear filter. {:?} has sample type {:?} which does not.", format, sample_type ) } } else { panic!( "Textures formats must be sample-able as floating point. {:?} has sample type {:?}.", format, sample_type ) } } /// Adds a material to the renderer. This can be used in an [`Object`]. /// /// The handle will keep the material alive. All objects created with this material will also keep this material alive. /// /// The material will keep the inside textures alive. 
#[track_caller] pub fn add_material<M: Material>(&self, material: M) -> MaterialHandle { let handle = self.material_manager.read().allocate(); self.instructions.push( InstructionKind::AddMaterial { handle: handle.clone(), fill_invoke: Box::new(move |material_manager, device, mode, d2_manager, mat_handle| { material_manager.fill(device, mode, d2_manager, mat_handle, material) }), }, *Location::caller(), ); handle } /// Updates a given material. Old references will be dropped. #[track_caller] pub fn update_material<M: Material>(&self, handle: &MaterialHandle, material: M) { self.instructions.push( InstructionKind::ChangeMaterial { handle: handle.clone(), change_invoke: Box::new( move |material_manager, device, mode, d2_manager, object_manager, mat_handle| { material_manager.update(device, mode, d2_manager, object_manager, mat_handle, material) }, ), }, *Location::caller(), ) } /// Adds an object to the renderer. This will create a visible object using the given mesh and materal. /// /// The handle will keep the material alive. /// /// The object will keep all materials, textures, and meshes alive. #[track_caller] pub fn add_object(&self, object: Object) -> ObjectHandle { let handle = self.object_manager.read().allocate(); self.instructions.push( InstructionKind::AddObject { handle: handle.clone(), object, }, *Location::caller(), ); handle } /// Move the given object to a new transform location. #[track_caller] pub fn set_object_transform(&self, handle: &ObjectHandle, transform: Mat4) { self.instructions.push( InstructionKind::SetObjectTransform { handle: handle.get_raw(), transform, }, *Location::caller(), ); } /// Add a sun-like light into the world. /// /// The handle will keep the light alive. 
#[track_caller] pub fn add_directional_light(&self, light: DirectionalLight) -> DirectionalLightHandle { let handle = self.directional_light_manager.read().allocate(); self.instructions.push( InstructionKind::AddDirectionalLight { handle: handle.clone(), light, }, *Location::caller(), ); handle } /// Updates the settings for given directional light. #[track_caller] pub fn update_directional_light(&self, handle: &DirectionalLightHandle, change: DirectionalLightChange) { self.instructions.push( InstructionKind::ChangeDirectionalLight { handle: handle.get_raw(), change, }, *Location::caller(), ) } /// Sets the aspect ratio of the camera. This should correspond with the aspect ratio of the user. #[track_caller] pub fn set_aspect_ratio(&self, ratio: f32) { self.instructions .push(InstructionKind::SetAspectRatio { ratio }, *Location::caller()) } /// Sets the position, pov, or projection mode of the camera. #[track_caller] pub fn set_camera_data(&self, data: Camera) { self.instructions .push(InstructionKind::SetCameraData { data }, *Location::caller()) } /// Render a frame of the scene onto the given output, using the given RenderRoutine. /// /// The RendererStatistics may not be the results from this frame, but might be the results from multiple frames ago. pub fn ready(&self) -> (Vec<CommandBuffer>, ReadyData) { ready::ready(self) } }
36.267416
154
0.58213
878f0268fcf371ac00a561557fe0b60c1dc87a2c
3,213
use crate::file::save_keys; use crate::offline::sign::read_key; use crate::{MasterKeyOutput, PrivateMasterKey}; use bitcoin::secp256k1::Secp256k1; use bitcoin::util::bip32::ChildNumber; use bitcoin::Network; use std::path::PathBuf; use structopt::StructOpt; /// Restore a master key from the secret component #[derive(StructOpt, Debug, Clone)] #[structopt(name = "derive_key")] pub struct DeriveKeyOptions { /// Name of the master^2 key #[structopt(short, long)] from_key_file: PathBuf, /// Name of the generated master key, used as path to generate the child key #[structopt(short, long)] to_key_name: String, /// QR code max version to use (max size) #[structopt(long, default_value = "14")] pub qr_version: i16, } pub fn start( datadir: &str, network: Network, opt: &DeriveKeyOptions, ) -> crate::Result<MasterKeyOutput> { if opt.to_key_name.is_empty() { return Err("--to-key-name must have 1 or more characters".into()); } let secp = Secp256k1::signing_only(); let from_key_json = read_key(&opt.from_key_file)?; let mut child_key = from_key_json.xprv; let bytes = opt.to_key_name.as_bytes(); for byte in bytes { let path = [ChildNumber::from_hardened_idx(*byte as u32)?]; child_key = child_key.derive_priv(&secp, &path)?; } let child_key_json = PrivateMasterKey::from_xprv(child_key, &opt.to_key_name); let output = save_keys( datadir, network, &opt.to_key_name, child_key_json, opt.qr_version, )?; Ok(output) } #[cfg(test)] mod tests { use crate::offline::derive_key::DeriveKeyOptions; use crate::offline::random::RandomOptions; use bitcoin::Network; use tempfile::TempDir; #[test] fn test_derive_key() -> crate::Result<()> { let temp_dir = TempDir::new().unwrap(); let temp_dir_str = format!("{}/", temp_dir.path().display()); let key_name = "random".to_string(); let rand_opts = RandomOptions::new(key_name); let key = crate::offline::random::create_key(&temp_dir_str, Network::Testnet, &rand_opts) .unwrap(); let to_key_name = "derived".to_string(); let mut der_opts = DeriveKeyOptions { 
from_key_file: key.private_file.clone(), to_key_name, qr_version: 14, }; let derived = crate::offline::derive_key::start(&temp_dir_str, Network::Testnet, &der_opts.clone()) .unwrap(); assert_ne!(key.key, derived.key); let temp_dir_2 = TempDir::new().unwrap(); let temp_dir_str_2 = format!("{}/", temp_dir_2.path().display()); let derived_2 = crate::offline::derive_key::start(&temp_dir_str_2, Network::Testnet, &der_opts) .unwrap(); assert_eq!(derived.key, derived_2.key); der_opts.to_key_name = "".to_string(); let key = crate::offline::derive_key::start(&temp_dir_str, Network::Testnet, &der_opts); assert!(key.is_err()); assert_eq!( key.unwrap_err().to_string(), "--to-key-name must have 1 or more characters" ); Ok(()) } }
31.194175
97
0.622782