hexsha
stringlengths
40
40
size
int64
4
1.05M
content
stringlengths
4
1.05M
avg_line_length
float64
1.33
100
max_line_length
int64
1
1k
alphanum_fraction
float64
0.25
1
1dd0fcc89488ada05b6a705dd6cf76b5b51697df
18,569
// Copyright (c) 2019 Cloudflare, Inc. All rights reserved. // SPDX-License-Identifier: BSD-3-Clause #[cfg(test)] mod tests { use super::super::*; use crate::crypto::x25519::*; use base64::encode; use slog::*; use std::fs; use std::fs::File; use std::io::prelude::Write; use std::net::UdpSocket; use std::process::Command; use std::str; use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; use std::sync::Arc; use std::thread; use std::time::Duration; // Simple counter, atomically increasing by one each call struct AtomicCounter { ctr: AtomicUsize, } impl AtomicCounter { pub fn next(&self) -> usize { self.ctr.fetch_add(1, Ordering::Relaxed) } } // Very dumb spin lock struct SpinLock { lock: AtomicBool, } impl SpinLock { pub fn lock(&self) { loop { if self.lock.compare_and_swap(true, false, Ordering::Relaxed) { break; } } } pub fn unlock(&self) { self.lock.store(true, Ordering::Relaxed); } } const MAX_PACKET: usize = 65536; // Next unused port static NEXT_PORT: AtomicCounter = AtomicCounter { ctr: AtomicUsize::new(30000), }; // Next WG conf file name to use static NEXT_CONF: AtomicCounter = AtomicCounter { ctr: AtomicUsize::new(1), }; // Next ip address to use for WG interface, of the form 192.168.2.NEXT_IP static NEXT_IP: AtomicCounter = AtomicCounter { ctr: AtomicUsize::new(3), }; // Locks the use of wg-quick to a single thread static WG_LOCK: SpinLock = SpinLock { lock: AtomicBool::new(true), }; // Reads a decapsulated packet and strips its IPv4 header fn read_ipv4_packet(socket: &UdpSocket) -> Vec<u8> { let mut data = [0u8; MAX_PACKET]; let mut packet = Vec::new(); let len = socket.recv(&mut data).unwrap(); packet.extend_from_slice(&data[IPV4_MIN_HEADER_SIZE..len]); packet } // Appends an IPv4 header to a buffer and writes the resulting "packet" fn write_ipv4_packet(socket: &UdpSocket, data: &[u8]) { let mut header = [0u8; IPV4_MIN_HEADER_SIZE]; let mut packet = Vec::new(); let packet_len = data.len() + header.len(); header[0] = 4 << 4; header[IPV4_LEN_OFF] = 
(packet_len >> 8) as u8; header[IPV4_LEN_OFF + 1] = packet_len as u8; packet.extend_from_slice(&header); packet.extend_from_slice(&data); socket.send(&packet).unwrap(); } fn write_u16_be(val: u16, buf: &mut [u8]) { assert!(buf.len() >= 2); buf[0] = (val >> 8) as u8; buf[1] = val as u8; } // Compute the internet checksum of a buffer fn ipv4_checksum(buf: &[u8]) -> u16 { let mut sum = 0u32; for i in 0..buf.len() / 2 { sum += u16::from_be_bytes([buf[i * 2], buf[i * 2 + 1]]) as u32; } if buf.len() % 2 == 1 { sum += (buf[buf.len() - 1] as u32) << 8; } while sum > 0xffff { sum = (sum >> 16) + sum & 0xffff; } !(sum as u16) } // Generate a simple ping request packet from 192.168.2.2 to 192.168.2.ip fn write_ipv4_ping(socket: &UdpSocket, data: &[u8], seq: u16, ip: u8) { let mut ipv4_header = [0u8; IPV4_MIN_HEADER_SIZE]; let mut icmp_header = [0u8; 8]; let packet_len = ipv4_header.len() + icmp_header.len() + data.len(); ipv4_header[0] = (4 << 4) + 5; // version = 4, header length = 5 * 4 write_u16_be(packet_len as u16, &mut ipv4_header[2..]); // packet length ipv4_header[8] = 64; // TTL ipv4_header[9] = 1; // ICMP ipv4_header[12..16].copy_from_slice(&0xC0A80202u32.to_be_bytes()); // src ip = 192.168.2.2 ipv4_header[16..20].copy_from_slice(&(0xC0A80200u32 + ip as u32).to_be_bytes()); // dst ip = 192.168.2.ip let checksum = ipv4_checksum(&ipv4_header); write_u16_be(checksum, &mut ipv4_header[10..]); icmp_header[0] = 8; // PING write_u16_be(654, &mut icmp_header[4..]); // identifier write_u16_be(seq, &mut icmp_header[6..]); // sequence number let mut packet = Vec::new(); packet.extend_from_slice(&ipv4_header); packet.extend_from_slice(&icmp_header); packet.extend_from_slice(&data); // Compute the checksum of the icmp header + payload let icmp_checksum = ipv4_checksum(&packet[20..]); write_u16_be(icmp_checksum, &mut packet[20 + 2..]); socket.send(&packet).unwrap(); } // Validate a ping reply packet fn read_ipv4_ping(socket: &UdpSocket, want_seq: u16) -> Vec<u8> { let mut data = 
[0u8; MAX_PACKET]; let mut packet = Vec::new(); if let Ok(len) = socket.recv(&mut data) { assert!(len >= IPV4_MIN_HEADER_SIZE); assert_eq!(data[0] >> 4, 4); let hdr_len = ((data[0] & 15) * 4) as usize; assert!(len >= hdr_len + 8); let ipv4_header = &data[..hdr_len]; assert_eq!(ipv4_header[9], 1); // ICMP let icmp_header = &data[hdr_len..hdr_len + 8]; let seq = u16::from_be_bytes([icmp_header[6], icmp_header[7]]); assert_eq!(seq, want_seq); packet.extend_from_slice(&data[hdr_len + 8..len]); } else { println!("skip {}", want_seq); } packet } // Start a WireGuard peer fn wireguard_test_peer( network_socket: UdpSocket, static_private: &str, peer_static_public: &str, logger: Logger, close: Arc<AtomicBool>, ) -> UdpSocket { let static_private = static_private.parse().unwrap(); let peer_static_public = peer_static_public.parse().unwrap(); let mut peer = Tunn::new( Arc::new(static_private), Arc::new(peer_static_public), None, None, 100, None, None, ) .unwrap(); peer.set_logger(logger); let peer: Arc<Box<Tunn>> = Arc::from(peer); let (iface_socket_ret, iface_socket) = connected_sock_pair(); network_socket .set_read_timeout(Some(Duration::from_millis(1000))) .unwrap(); iface_socket .set_read_timeout(Some(Duration::from_millis(1000))) .unwrap(); // The peer has three threads: // 1) listens on the network for encapsulated packets and decapsulates them // 2) listens on the iface for raw packets and encapsulates them // 3) times maintenance function responsible for state expiration { let network_socket = network_socket.try_clone().unwrap(); let iface_socket = iface_socket.try_clone().unwrap(); let peer = peer.clone(); let close = close.clone(); thread::spawn(move || loop { // Listen on the network let mut recv_buf = [0u8; MAX_PACKET]; let mut send_buf = [0u8; MAX_PACKET]; let n = match network_socket.recv(&mut recv_buf) { Ok(n) => n, Err(_) => { if close.load(Ordering::Relaxed) { return; } continue; } }; match peer.decapsulate(None, &recv_buf[..n], &mut send_buf) { 
TunnResult::WriteToNetwork(packet) => { network_socket.send(packet).unwrap(); // Send form queue? loop { let mut send_buf = [0u8; MAX_PACKET]; match peer.decapsulate(None, &[], &mut send_buf) { TunnResult::WriteToNetwork(packet) => { network_socket.send(packet).unwrap(); } _ => { break; } } } } TunnResult::WriteToTunnelV4(packet, _) => { iface_socket.send(packet).unwrap(); } TunnResult::WriteToTunnelV6(packet, _) => { iface_socket.send(packet).unwrap(); } _ => {} } }); } { let network_socket = network_socket.try_clone().unwrap(); let iface_socket = iface_socket.try_clone().unwrap(); let peer = peer.clone(); let close = close.clone(); thread::spawn(move || loop { let mut recv_buf = [0u8; MAX_PACKET]; let mut send_buf = [0u8; MAX_PACKET]; let n = match iface_socket.recv(&mut recv_buf) { Ok(n) => n, Err(_) => { if close.load(Ordering::Relaxed) { return; } continue; } }; match peer.encapsulate(&recv_buf[..n], &mut send_buf) { TunnResult::WriteToNetwork(packet) => { network_socket.send(packet).unwrap(); } _ => {} } }); } thread::spawn(move || loop { if close.load(Ordering::Relaxed) { return; } let mut send_buf = [0u8; MAX_PACKET]; match peer.update_timers(&mut send_buf) { TunnResult::WriteToNetwork(packet) => { network_socket.send(packet).unwrap(); } _ => {} } thread::sleep(Duration::from_millis(200)); }); iface_socket_ret } fn connected_sock_pair() -> (UdpSocket, UdpSocket) { let addr_a = format!("localhost:{}", NEXT_PORT.next()); let addr_b = format!("localhost:{}", NEXT_PORT.next()); let sock_a = UdpSocket::bind(&addr_a).unwrap(); let sock_b = UdpSocket::bind(&addr_b).unwrap(); sock_a.connect(&addr_b).unwrap(); sock_b.connect(&addr_a).unwrap(); (sock_a, sock_b) } fn key_pair() -> (String, String) { let secret_key = X25519SecretKey::new(); let public_key = secret_key.public_key(); (encode(secret_key.as_bytes()), encode(public_key.as_bytes())) } fn wireguard_test_pair() -> (UdpSocket, UdpSocket, Arc<AtomicBool>) { let (s_sock, c_sock) = connected_sock_pair(); let close 
= Arc::new(AtomicBool::new(false)); let server_pair = key_pair(); let client_pair = key_pair(); let logger = Logger::root( slog_term::FullFormat::new(slog_term::PlainSyncDecorator::new(std::io::stdout())) .build() .fuse(), slog::o!(), ); let s_iface = wireguard_test_peer( s_sock, &server_pair.0, &client_pair.1, logger.new(o!("server" => "")), close.clone(), ); let c_iface = wireguard_test_peer( c_sock, &client_pair.0, &server_pair.1, logger.new(o!("client" => "")), close.clone(), ); (s_iface, c_iface, close) } #[test] fn wireguard_handshake() { // Test the connection is successfully established and some packets are passed around { let (peer_iface_socket_sender, client_iface_socket_sender, close) = wireguard_test_pair(); client_iface_socket_sender .set_read_timeout(Some(Duration::from_millis(1000))) .unwrap(); client_iface_socket_sender .set_write_timeout(Some(Duration::from_millis(1000))) .unwrap(); thread::spawn(move || loop { let data = read_ipv4_packet(&peer_iface_socket_sender); let data_string = str::from_utf8(&data).unwrap().to_uppercase().into_bytes(); write_ipv4_packet(&peer_iface_socket_sender, &data_string); }); for _i in 0..64 { write_ipv4_packet(&client_iface_socket_sender, b"test"); let response = read_ipv4_packet(&client_iface_socket_sender); assert_eq!(&response, b"TEST"); } for _i in 0..64 { write_ipv4_packet(&client_iface_socket_sender, b"check"); let response = read_ipv4_packet(&client_iface_socket_sender); assert_eq!(&response, b"CHECK"); } close.store(true, Ordering::Relaxed); } } struct WireGuardExt { conf_file_name: String, port: u16, public_key: String, ip: u8, // Last byte of ip } impl WireGuardExt { // Start an instance of wireguard using wg-quick pub fn start(endpoint: u16, public_key: &str) -> WireGuardExt { WG_LOCK.lock(); let conf_file_name = format!("./wg{}.conf", NEXT_CONF.next()); let mut file = File::create(&conf_file_name).unwrap(); let port = NEXT_PORT.next() as u16; let ip = NEXT_IP.next() as u8; let key_pair = key_pair(); 
file.write_all( format!( r#"[Interface] Address = 192.168.2.{} ListenPort = {} PrivateKey = {} [Peer] PublicKey = {} AllowedIPs = 192.168.2.2/32 Endpoint = localhost:{}"#, ip, port, key_pair.0, public_key, endpoint, ) .as_bytes(), ) .unwrap(); // Start wireguard Command::new("wg-quick") .env("WG_I_PREFER_BUGGY_USERSPACE_TO_POLISHED_KMOD", "1") .args(&["up", &conf_file_name]) .status() .expect("Failed to run wg-quick"); WireGuardExt { conf_file_name, port, public_key: key_pair.1, ip, } } } impl Drop for WireGuardExt { fn drop(&mut self) { // Stop wireguard Command::new("wg-quick") .args(&["down", &self.conf_file_name]) .status() .expect("Failed to run wg-quick"); fs::remove_file(&self.conf_file_name).unwrap(); WG_LOCK.unlock(); } } #[test] #[ignore] fn wireguard_interop() { // Test the connection with wireguard-go is successfully established // and we are getting ping from server let c_key_pair = key_pair(); let itr = 1000; let endpoint = NEXT_PORT.next() as u16; let wg = WireGuardExt::start(endpoint, &c_key_pair.1); let c_addr = format!("localhost:{}", endpoint); let w_addr = format!("localhost:{}", wg.port); let client_socket = UdpSocket::bind(&c_addr).unwrap_or_else(|e| panic!("UdpSocket {}: {}", c_addr, e)); client_socket .connect(&w_addr) .unwrap_or_else(|e| panic!("connect {}: {}", w_addr, e)); let close = Arc::new(AtomicBool::new(false)); let logger = Logger::root( slog_term::FullFormat::new(slog_term::PlainSyncDecorator::new(std::io::stdout())) .build() .fuse(), slog::o!(), ); let c_iface = wireguard_test_peer( client_socket, &c_key_pair.0, &wg.public_key, logger.new(o!()), close.clone(), ); c_iface .set_read_timeout(Some(Duration::from_millis(1000))) .unwrap(); for i in 0..itr { write_ipv4_ping(&c_iface, b"test_ping", i as u16, wg.ip); assert_eq!(read_ipv4_ping(&c_iface, i as u16), b"test_ping",); thread::sleep(Duration::from_millis(30)); } close.store(true, Ordering::Relaxed); } #[test] #[ignore] fn wireguard_receiver() { // Test the connection with 
wireguard-go is successfully established // when go is the initiator let c_key_pair = key_pair(); let itr = 1000; let endpoint = NEXT_PORT.next() as u16; let wg = WireGuardExt::start(endpoint, &c_key_pair.1); let c_addr = format!("localhost:{}", endpoint); let w_addr = format!("localhost:{}", wg.port); let client_socket = UdpSocket::bind(c_addr).unwrap(); client_socket.connect(w_addr).unwrap(); let close = Arc::new(AtomicBool::new(false)); let logger = Logger::root( slog_term::FullFormat::new(slog_term::PlainSyncDecorator::new(std::io::stdout())) .build() .fuse(), slog::o!(), ); let c_iface = wireguard_test_peer( client_socket, &c_key_pair.0, &wg.public_key, logger, close.clone(), ); c_iface .set_read_timeout(Some(Duration::from_millis(1000))) .unwrap(); let t_addr = format!("192.168.2.{}:{}", wg.ip, NEXT_PORT.next()); let test_socket = UdpSocket::bind(t_addr).unwrap(); test_socket.connect("192.168.2.2:30000").unwrap(); thread::spawn(move || { for i in 0..itr { test_socket .send(format!("This is a test message {}", i).as_bytes()) .unwrap(); thread::sleep(Duration::from_millis(10)); } }); let mut src = [0u8; MAX_PACKET]; for i in 0..itr { let m = c_iface.recv(&mut src).unwrap(); assert_eq!( &src[28..m], // Strip ip and udp headers format!("This is a test message {}", i).as_bytes() ); } } }
33.457658
113
0.507243
0e9d8093a21bb6dec59d6ad2b59cba178faf83cf
598
use crate::syntax::Stmt; #[derive(Clone, Debug, PartialEq)] pub struct Block<'s> { pub(crate) indent: u32, stmts: Vec<Stmt<'s>>, } impl<'s> Block<'s> { pub fn new(indent: u32, stmts: Vec<Stmt<'s>>) -> Self { Block { indent, stmts, } } pub fn stmts(&self) -> &[Stmt<'s>] { &self.stmts } pub fn stmts_mut(&mut self) -> &mut [Stmt<'s>] { &mut self.stmts } pub fn append_stmt(&mut self, stmt: Stmt<'s>) { self.stmts.push(stmt); } pub fn indent(&self) -> u32 { self.indent } }
18.121212
59
0.493311
72563c7ed837e5551acaa8e5638f2d420320673d
11,300
//! This program shows how to render two simple triangles and is the hello world of luminance. //! //! The direct / indexed methods just show you how you’re supposed to use them (don’t try and find //! any differences in the rendered images, because there’s none!). //! //! Press <space> to switch between direct tessellation and indexed tessellation. //! Press <escape> to quit or close the window. //! //! https://docs.rs/luminance use glutin::{ dpi::PhysicalSize, event::{ElementState, Event, KeyboardInput, StartCause, VirtualKeyCode, WindowEvent}, event_loop::ControlFlow, }; use luminance::context::GraphicsContext; use luminance::pipeline::PipelineState; use luminance::render_state::RenderState; use luminance::tess::Mode; use luminance_derive::{Semantics, Vertex}; use luminance_glutin::GlutinSurface; // We get the shader at compile time from local files const VS: &'static str = include_str!("simple-vs.glsl"); const FS: &'static str = include_str!("simple-fs.glsl"); // Vertex semantics. Those are needed to instruct the GPU how to select vertex’s attributes from // the memory we fill at render time, in shaders. You don’t have to worry about them; just keep in // mind they’re mandatory and act as “protocol” between GPU’s memory regions and shaders. // // We derive Semantics automatically and provide the mapping as field attributes. #[derive(Clone, Copy, Debug, Eq, PartialEq, Semantics)] pub enum Semantics { // - Reference vertex positions with the "co" variable in vertex shaders. // - The underlying representation is [f32; 2], which is a vec2 in GLSL. // - The wrapper type you can use to handle such a semantics is VertexPosition. #[sem(name = "co", repr = "[f32; 2]", wrapper = "VertexPosition")] Position, // - Reference vertex colors with the "color" variable in vertex shaders. // - The underlying representation is [u8; 3], which is a uvec3 in GLSL. // - The wrapper type you can use to handle such a semantics is VertexColor. 
#[sem(name = "color", repr = "[u8; 3]", wrapper = "VertexColor")] Color, } // Our vertex type. // // We derive the Vertex trait automatically and we associate to each field the semantics that must // be used on the GPU. The proc-macro derive Vertex will make sur for us every field we use have a // mapping to the type you specified as semantics. // // Currently, we need to use #[repr(C))] to ensure Rust is not going to move struct’s fields around. #[repr(C)] #[derive(Clone, Copy, Debug, PartialEq, Vertex)] #[vertex(sem = "Semantics")] struct Vertex { pos: VertexPosition, // Here, we can use the special normalized = <bool> construct to state whether we want integral // vertex attributes to be available as normalized floats in the shaders, when fetching them from // the vertex buffers. If you set it to "false" or ignore it, you will get non-normalized integer // values (i.e. value ranging from 0 to 255 for u8, for instance). #[vertex(normalized = "true")] rgb: VertexColor, } // The vertices. We define two triangles. const TRI_VERTICES: [Vertex; 6] = [ // First triangle – an RGB one. Vertex { pos: VertexPosition::new([0.5, -0.5]), rgb: VertexColor::new([0, 255, 0]), }, Vertex { pos: VertexPosition::new([0.0, 0.5]), rgb: VertexColor::new([0, 0, 255]), }, Vertex { pos: VertexPosition::new([-0.5, -0.5]), rgb: VertexColor::new([255, 0, 0]), }, // Second triangle, a purple one, positioned differently. Vertex { pos: VertexPosition::new([-0.5, 0.5]), rgb: VertexColor::new([255, 51, 255]), }, Vertex { pos: VertexPosition::new([0.0, -0.5]), rgb: VertexColor::new([51, 255, 255]), }, Vertex { pos: VertexPosition::new([0.5, 0.5]), rgb: VertexColor::new([51, 51, 255]), }, ]; // A small struct wrapper used to deinterleave positions. #[repr(C)] #[derive(Clone, Copy, Debug, PartialEq, Vertex)] #[vertex(sem = "Semantics")] struct Positions { pos: VertexPosition, } // A small struct wrapper used to deinterleave colors. 
#[repr(C)] #[derive(Clone, Copy, Debug, PartialEq, Vertex)] #[vertex(sem = "Semantics")] struct Colors { #[vertex(normalized = "true")] color: VertexColor, } // The vertices, deinterleaved versions. We still define two triangles. const TRI_DEINT_POS_VERTICES: &[Positions] = &[ Positions { pos: VertexPosition::new([0.5, -0.5]), }, Positions { pos: VertexPosition::new([0.0, 0.5]), }, Positions { pos: VertexPosition::new([-0.5, -0.5]), }, Positions { pos: VertexPosition::new([-0.5, 0.5]), }, Positions { pos: VertexPosition::new([0.0, -0.5]), }, Positions { pos: VertexPosition::new([0.5, 0.5]), }, ]; const TRI_DEINT_COLOR_VERTICES: &[Colors] = &[ Colors { color: VertexColor::new([0, 255, 0]), }, Colors { color: VertexColor::new([0, 0, 255]), }, Colors { color: VertexColor::new([255, 0, 0]), }, Colors { color: VertexColor::new([255, 51, 255]), }, Colors { color: VertexColor::new([51, 255, 255]), }, Colors { color: VertexColor::new([51, 51, 255]), }, ]; // Indices into TRI_VERTICES to use to build up the triangles. const TRI_INDICES: [u32; 6] = [ 0, 1, 2, // First triangle. 3, 4, 5, // Second triangle. ]; // Convenience type to demonstrate the difference between direct geometry and indirect (indexed) // one. #[derive(Copy, Clone, Debug)] enum TessMethod { Direct, Indexed, DirectDeinterleaved, IndexedDeinterleaved, } impl TessMethod { fn toggle(self) -> Self { match self { TessMethod::Direct => TessMethod::Indexed, TessMethod::Indexed => TessMethod::DirectDeinterleaved, TessMethod::DirectDeinterleaved => TessMethod::IndexedDeinterleaved, TessMethod::IndexedDeinterleaved => TessMethod::Direct, } } } fn main() { // First thing first: we create a new surface to render to and get events from. // We use the `GlutinSurface::from_builders` to build a custom window and context // to use. 
let (mut surface, event_loop) = GlutinSurface::from_builders( |win_builder| { win_builder .with_title("Hello, world!") .with_inner_size(PhysicalSize::new(960, 540)) }, |ctx_builder| ctx_builder.with_double_buffer(Some(true)), ) .expect("Glutin surface creation"); // We need a program to “shade” our triangles and to tell luminance which is the input vertex // type, and we’re not interested in the other two type variables for this sample. let mut program = surface .new_shader_program::<Semantics, (), ()>() .from_strings(VS, None, None, FS) .expect("program creation") .ignore_warnings(); // Create tessellation for direct geometry; that is, tessellation that will render vertices by // taking one after another in the provided slice. let direct_triangles = surface .new_tess() .and_then(|b| b.add_vertices(TRI_VERTICES)) .and_then(|b| b.set_mode(Mode::Triangle)) .and_then(|b| b.build()) .unwrap(); // Create indexed tessellation; that is, the vertices will be picked by using the indexes provided // by the second slice and this indexes will reference the first slice (useful not to duplicate // vertices on more complex objects than just two triangles). let indexed_triangles = surface .new_tess() .and_then(|b| b.add_vertices(TRI_VERTICES)) .and_then(|b| b.set_indices(TRI_INDICES)) .and_then(|b| b.set_mode(Mode::Triangle)) .and_then(|b| b.build()) .unwrap(); // Create direct, deinterleaved tesselations; such tessellations allow to separate vertex // attributes in several contiguous regions of memory. let direct_deinterleaved_triangles = surface .new_tess() .and_then(|b| b.add_vertices(TRI_DEINT_POS_VERTICES)) .and_then(|b| b.add_vertices(TRI_DEINT_COLOR_VERTICES)) .and_then(|b| b.set_mode(Mode::Triangle)) .and_then(|b| b.build()) .unwrap(); // Create indexed, deinterleaved tessellations; have your cake and fucking eat it, now. 
let indexed_deinterleaved_triangles = surface .new_tess() .and_then(|b| b.add_vertices(TRI_DEINT_POS_VERTICES)) .and_then(|b| b.add_vertices(TRI_DEINT_COLOR_VERTICES)) .and_then(|b| b.set_indices(TRI_INDICES)) .and_then(|b| b.set_mode(Mode::Triangle)) .and_then(|b| b.build()) .unwrap(); // The back buffer, which we will make our render into (we make it mutable so that we can change // it whenever the window dimensions change). let mut back_buffer = surface.back_buffer().unwrap(); let mut demo = TessMethod::Direct; println!("now rendering {:?}", demo); event_loop.run(move |event, _, control_flow| { match event { Event::NewEvents(StartCause::Init) => *control_flow = ControlFlow::Wait, Event::WindowEvent { event, .. } => match event { // If we hit the spacebar, change the kind of tessellation. WindowEvent::KeyboardInput { input: KeyboardInput { state: ElementState::Released, virtual_keycode: Some(VirtualKeyCode::Space), .. }, .. } => { demo = demo.toggle(); println!("now rendering {:?}", demo); } // Handle window resizing. WindowEvent::Resized(_) | WindowEvent::ScaleFactorChanged { .. } => { back_buffer = surface.back_buffer().unwrap(); } WindowEvent::CloseRequested | WindowEvent::Destroyed | WindowEvent::KeyboardInput { input: KeyboardInput { state: ElementState::Released, virtual_keycode: Some(VirtualKeyCode::Escape), .. }, .. } => { *control_flow = ControlFlow::Exit; } _ => {} }, Event::MainEventsCleared => { surface.ctx.window().request_redraw(); } Event::RedrawRequested(_) => { // Create a new dynamic pipeline that will render to the back buffer and must clear it with // pitch black prior to do any render to it. let render = surface.new_pipeline_gate().pipeline( &back_buffer, &PipelineState::default(), |_, mut shd_gate| { // Start shading with our program. shd_gate.shade(&mut program, |_, _, mut rdr_gate| { // Start rendering things with the default render state provided by luminance. 
rdr_gate.render(&RenderState::default(), |mut tess_gate| { // Pick the right tessellation to use depending on the mode chosen. let tess = match demo { TessMethod::Direct => &direct_triangles, TessMethod::Indexed => &indexed_triangles, TessMethod::DirectDeinterleaved => &direct_deinterleaved_triangles, TessMethod::IndexedDeinterleaved => &indexed_deinterleaved_triangles, }; // Render the tessellation to the surface. tess_gate.render(tess); }); }); }, ); // Finally, swap the backbuffer with the frontbuffer in order to render our triangles onto your // screen. if render.is_ok() { surface.swap_buffers(); } } _ => {} }; }); }
34.556575
103
0.648142
e2cffd66e36170495de92677b88b6e6edc88d16d
39,465
use crate::resources::pipeline_cache::GraphicsPipelineRenderTargetMeta; use crate::resources::resource_arc::{ResourceId, ResourceWithHash, WeakResourceArc}; use crate::resources::DescriptorSetLayout; use crate::resources::ResourceArc; use crate::{CookedShaderPackage, ReflectedEntryPoint, ResourceDropSink}; use crossbeam_channel::{Receiver, Sender}; use fnv::{FnvHashMap, FnvHasher}; use rafx_api::RafxTexture; use rafx_api::*; use serde::{Deserialize, Serialize}; use std::hash::{Hash, Hasher}; use std::marker::PhantomData; use std::sync::atomic::AtomicU64; use std::sync::atomic::Ordering; use std::sync::{Arc, Mutex}; // Hash of a GPU resource #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] pub struct ResourceHash(u64); impl ResourceHash { pub fn from_key<KeyT: Hash>(key: &KeyT) -> ResourceHash { let mut hasher = FnvHasher::default(); key.hash(&mut hasher); ResourceHash(hasher.finish()) } } impl From<ResourceId> for ResourceHash { fn from(resource_id: ResourceId) -> Self { ResourceHash(resource_id.0) } } impl Into<ResourceId> for ResourceHash { fn into(self) -> ResourceId { ResourceId(self.0) } } // // A lookup of resources. They reference count using Arcs internally and send a signal when they // drop. This allows the resources to be collected and disposed of // pub struct ResourceLookupInner<KeyT, ResourceT> where KeyT: Eq + Hash + Clone, ResourceT: Clone, { resources: FnvHashMap<ResourceHash, WeakResourceArc<ResourceT>>, //TODO: Add support for "cancelling" dropping stuff. This would likely be a ring of hashmaps. // that gets cycled. drop_sink: ResourceDropSink<ResourceT>, drop_tx: Sender<ResourceWithHash<ResourceT>>, drop_rx: Receiver<ResourceWithHash<ResourceT>>, phantom_data: PhantomData<KeyT>, #[cfg(debug_assertions)] keys: FnvHashMap<ResourceHash, KeyT>, #[cfg(debug_assertions)] lock_call_count_previous_frame: u64, #[cfg(debug_assertions)] lock_call_count: u64, create_count_previous_frame: u64, create_count: u64, } //TODO: Don't love using a mutex here. 
If this becomes a performance bottleneck: // - Try making locks more granular (something like dashmap) // - Have a read-only hashmap that's checked first and then a read/write map that's checked if the // read-only fails. At a later sync point, copy new data from the read-write into the read. This // could occur during the extract phase. Or could potentially double-buffer the read-only map // and swap them. pub struct ResourceLookup<KeyT, ResourceT> where KeyT: Eq + Hash + Clone, ResourceT: Clone, { inner: Mutex<ResourceLookupInner<KeyT, ResourceT>>, } impl<KeyT, ResourceT> ResourceLookup<KeyT, ResourceT> where KeyT: Eq + Hash + Clone, ResourceT: Clone + std::fmt::Debug, { pub fn new(max_frames_in_flight: u32) -> Self { let (drop_tx, drop_rx) = crossbeam_channel::unbounded(); let inner = ResourceLookupInner { resources: Default::default(), drop_sink: ResourceDropSink::new(max_frames_in_flight), drop_tx, drop_rx, phantom_data: Default::default(), #[cfg(debug_assertions)] keys: Default::default(), #[cfg(debug_assertions)] lock_call_count_previous_frame: 0, #[cfg(debug_assertions)] lock_call_count: 0, create_count_previous_frame: 0, create_count: 0, }; ResourceLookup { inner: Mutex::new(inner), } } fn do_get( inner: &mut ResourceLookupInner<KeyT, ResourceT>, hash: ResourceHash, _key: &KeyT, ) -> Option<ResourceArc<ResourceT>> { let resource = inner.resources.get(&hash); if let Some(resource) = resource { let upgrade = resource.upgrade(); #[cfg(debug_assertions)] if upgrade.is_some() { debug_assert!(inner.keys.get(&hash).unwrap() == _key); } upgrade } else { None } } fn do_create<F>( inner: &mut ResourceLookupInner<KeyT, ResourceT>, hash: ResourceHash, _key: &KeyT, create_resource_fn: F, ) -> RafxResult<ResourceArc<ResourceT>> where F: FnOnce() -> RafxResult<ResourceT>, { // Process any pending drops. 
If we don't do this, it's possible that the pending drop could // wipe out the state we're about to set Self::handle_dropped_resources(inner); inner.create_count += 1; let resource = (create_resource_fn)()?; log::trace!( "insert resource {} {:?}", core::any::type_name::<ResourceT>(), resource ); let arc = ResourceArc::new(resource, hash.into(), inner.drop_tx.clone()); let downgraded = arc.downgrade(); let old = inner.resources.insert(hash, downgraded); assert!(old.is_none()); #[cfg(debug_assertions)] { inner.keys.insert(hash, _key.clone()); assert!(old.is_none()); } Ok(arc) } #[allow(dead_code)] pub fn get( &self, key: &KeyT, ) -> Option<ResourceArc<ResourceT>> { let hash = ResourceHash::from_key(key); let mut guard = self.inner.lock().unwrap(); #[cfg(debug_assertions)] { guard.lock_call_count += 1; } Self::do_get(&mut *guard, hash, key) } pub fn create<F>( &self, key: &KeyT, create_resource_fn: F, ) -> RafxResult<ResourceArc<ResourceT>> where F: FnOnce() -> RafxResult<ResourceT>, { let hash = ResourceHash::from_key(key); let mut guard = self.inner.lock().unwrap(); #[cfg(debug_assertions)] { guard.lock_call_count += 1; } Self::do_create(&mut *guard, hash, key, create_resource_fn) } pub fn get_or_create<F>( &self, key: &KeyT, create_resource_fn: F, ) -> RafxResult<ResourceArc<ResourceT>> where F: FnOnce() -> RafxResult<ResourceT>, { let hash = ResourceHash::from_key(key); let mut guard = self.inner.lock().unwrap(); #[cfg(debug_assertions)] { guard.lock_call_count += 1; } if let Some(resource) = Self::do_get(&mut *guard, hash, key) { //println!("get {} {:?}", core::any::type_name::<ResourceT>(), hash); Ok(resource) } else { //println!("create {} {:?}", core::any::type_name::<ResourceT>(), hash); Self::do_create(&mut *guard, hash, key, create_resource_fn) } } fn handle_dropped_resources(inner: &mut ResourceLookupInner<KeyT, ResourceT>) { for dropped in inner.drop_rx.try_iter() { log::trace!( "dropping {} {:?}", core::any::type_name::<ResourceT>(), dropped.resource ); 
inner.drop_sink.retire(dropped.resource); inner.resources.remove(&dropped.resource_hash.into()); #[cfg(debug_assertions)] { inner.keys.remove(&dropped.resource_hash.into()); } } } fn on_frame_complete(&self) -> RafxResult<()> { let mut guard = self.inner.lock().unwrap(); #[cfg(debug_assertions)] { guard.lock_call_count_previous_frame = guard.lock_call_count + 1; guard.lock_call_count = 0; } guard.create_count_previous_frame = guard.create_count; guard.create_count = 0; Self::handle_dropped_resources(&mut guard); guard.drop_sink.on_frame_complete()?; Ok(()) } fn metrics(&self) -> ResourceLookupMetric { let guard = self.inner.lock().unwrap(); ResourceLookupMetric { count: guard.resources.len(), previous_frame_create_count: guard.create_count_previous_frame, #[cfg(debug_assertions)] previous_frame_lock_call_count: guard.lock_call_count_previous_frame, } } fn destroy(&self) -> RafxResult<()> { let mut guard = self.inner.lock().unwrap(); #[cfg(debug_assertions)] { guard.lock_call_count += 1; } Self::handle_dropped_resources(&mut guard); if !guard.resources.is_empty() { log::warn!( "{} resource count {} > 0, resources will leak", core::any::type_name::<ResourceT>(), guard.resources.len() ); } guard.drop_sink.destroy()?; Ok(()) } } // // Keys for each resource type. (Some keys are simple and use types from crate::pipeline_description // and some are a combination of the definitions and runtime state. 
(For example, combining a // renderpass with the swapchain surface it would be applied to) // #[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] pub struct FixedFunctionState { pub blend_state: RafxBlendState, pub depth_state: RafxDepthState, pub rasterizer_state: RafxRasterizerState, } #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, Default, Serialize, Deserialize)] pub struct ShaderModuleHash(u64); impl ShaderModuleHash { pub fn new(shader_package: &RafxShaderPackage) -> Self { let mut hasher = FnvHasher::default(); shader_package.hash(&mut hasher); let hash = hasher.finish(); ShaderModuleHash(hash) } } #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, Default, Serialize, Deserialize)] pub struct ShaderHash(u64); impl ShaderHash { pub fn new( entry_points: &[&ReflectedEntryPoint], shader_module_hashes: &[ShaderModuleHash], ) -> Self { let reflection_data: Vec<_> = entry_points .iter() .map(|x| &x.rafx_api_reflection) .collect(); let mut hasher = FnvHasher::default(); RafxShaderStageDef::hash_definition(&mut hasher, &reflection_data, shader_module_hashes); let hash = hasher.finish(); ShaderHash(hash) } } #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, Default, Serialize, Deserialize)] pub struct SamplerHash(u64); impl SamplerHash { pub fn new(sampler_def: &RafxSamplerDef) -> Self { let mut hasher = FnvHasher::default(); sampler_def.hash(&mut hasher); let hash = hasher.finish(); SamplerHash(hash) } } #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, Default, Serialize, Deserialize)] pub struct RootSignatureHash(u64); impl RootSignatureHash { pub fn new( shader_hashes: &[ShaderHash], immutable_sampler_keys: &[RafxImmutableSamplerKey], immutable_sampler_hashes: &[Vec<SamplerHash>], ) -> Self { let mut hasher = FnvHasher::default(); RafxRootSignatureDef::hash_definition( &mut hasher, shader_hashes, immutable_sampler_keys, immutable_sampler_hashes, ); let hash = hasher.finish(); RootSignatureHash(hash) } } #[derive(Debug, Copy, Clone, PartialEq, 
Eq, Hash, Default, Serialize, Deserialize)] pub struct DescriptorSetLayoutHash(u64); impl DescriptorSetLayoutHash { pub fn new( root_signature_hash: RootSignatureHash, set_index: u32, bindings: &DescriptorSetLayout, ) -> Self { let mut hasher = FnvHasher::default(); root_signature_hash.hash(&mut hasher); set_index.hash(&mut hasher); bindings.hash(&mut hasher); let hash = hasher.finish(); DescriptorSetLayoutHash(hash) } } #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, Default, Serialize, Deserialize)] pub struct MaterialPassHash(u64); impl MaterialPassHash { pub fn new( shader_hash: ShaderHash, root_signature_hash: RootSignatureHash, descriptor_set_layout_hashes: &[DescriptorSetLayoutHash], fixed_function_state: &FixedFunctionState, vertex_inputs: &[MaterialPassVertexInput], ) -> Self { let mut hasher = FnvHasher::default(); shader_hash.hash(&mut hasher); root_signature_hash.hash(&mut hasher); descriptor_set_layout_hashes.hash(&mut hasher); fixed_function_state.hash(&mut hasher); for vertex_input in vertex_inputs { vertex_input.hash(&mut hasher); } let hash = hasher.finish(); MaterialPassHash(hash) } } #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, Default, Serialize, Deserialize)] pub struct GraphicsPipelineHash(u64); impl GraphicsPipelineHash { pub fn new( material_pass_key: MaterialPassHash, render_target_meta: &GraphicsPipelineRenderTargetMeta, primitive_topology: RafxPrimitiveTopology, vertex_layout: &RafxVertexLayout, ) -> Self { let mut hasher = FnvHasher::default(); material_pass_key.hash(&mut hasher); render_target_meta .render_target_meta_hash() .hash(&mut hasher); primitive_topology.hash(&mut hasher); vertex_layout.hash(&mut hasher); let hash = hasher.finish(); GraphicsPipelineHash(hash) } } #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, Default, Serialize, Deserialize)] pub struct ComputePipelineHash(u64); impl ComputePipelineHash { pub fn new( shader_hash: ShaderHash, root_signature_hash: RootSignatureHash, descriptor_set_layout_hashes: 
&[DescriptorSetLayoutHash], ) -> Self { let mut hasher = FnvHasher::default(); shader_hash.hash(&mut hasher); root_signature_hash.hash(&mut hasher); descriptor_set_layout_hashes.hash(&mut hasher); let hash = hasher.finish(); ComputePipelineHash(hash) } } #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] pub struct ShaderModuleKey { hash: ShaderModuleHash, } #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] pub struct ShaderKey { hash: ShaderHash, } #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] pub struct RootSignatureKey { // hash is based on shader code hash, stage, and entry point hash: RootSignatureHash, } #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] pub struct DescriptorSetLayoutKey { hash: DescriptorSetLayoutHash, } #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct MaterialPassVertexInput { pub semantic: String, pub location: u32, pub gl_attribute_name: String, } #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct MaterialPassKey { hash: MaterialPassHash, } #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct GraphicsPipelineKey { hash: GraphicsPipelineHash, } #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct ComputePipelineKey { hash: ComputePipelineHash, } #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] pub struct ImageKey { id: u64, } #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] pub struct BufferKey { id: u64, } #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct SamplerKey { hash: SamplerHash, } #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct ImageViewKey { image_key: ImageKey, texture_bind_type: Option<RafxTextureBindType>, } #[derive(Debug)] pub struct ResourceLookupMetric { pub count: usize, pub previous_frame_create_count: u64, #[cfg(debug_assertions)] pub previous_frame_lock_call_count: u64, } #[derive(Debug)] pub struct ResourceMetrics { pub shader_module_metrics: ResourceLookupMetric, pub shader_metrics: ResourceLookupMetric, pub root_signature_metrics: ResourceLookupMetric, pub 
descriptor_set_layout_metrics: ResourceLookupMetric, pub material_pass_metrics: ResourceLookupMetric, pub graphics_pipeline_metrics: ResourceLookupMetric, pub compute_pipeline_metrics: ResourceLookupMetric, pub image_metrics: ResourceLookupMetric, pub image_view_metrics: ResourceLookupMetric, pub sampler_metrics: ResourceLookupMetric, pub buffer_metrics: ResourceLookupMetric, } #[derive(Debug, Clone)] pub struct ShaderModuleResource { pub shader_module_key: ShaderModuleKey, pub shader_package: Arc<RafxShaderPackage>, pub shader_module: RafxShaderModule, } #[derive(Debug, Clone)] pub struct ShaderResource { pub key: ShaderKey, pub shader_modules: Vec<ResourceArc<ShaderModuleResource>>, pub shader: RafxShader, } #[derive(Debug, Clone)] pub struct RootSignatureResource { pub key: RootSignatureKey, pub shaders: Vec<ResourceArc<ShaderResource>>, pub immutable_samplers: Vec<ResourceArc<SamplerResource>>, pub root_signature: RafxRootSignature, } #[derive(Debug, Clone)] pub struct DescriptorSetLayoutResource { // Just keep it in scope pub root_signature_arc: ResourceArc<RootSignatureResource>, pub root_signature: RafxRootSignature, pub set_index: u32, pub descriptor_set_layout_def: Arc<DescriptorSetLayout>, pub key: DescriptorSetLayoutKey, } #[derive(Debug, Clone)] pub struct MaterialPassResource { pub material_pass_key: MaterialPassKey, pub shader: ResourceArc<ShaderResource>, pub root_signature: ResourceArc<RootSignatureResource>, pub descriptor_set_layouts: Arc<Vec<ResourceArc<DescriptorSetLayoutResource>>>, pub fixed_function_state: Arc<FixedFunctionState>, pub vertex_inputs: Arc<Vec<MaterialPassVertexInput>>, } #[derive(Debug, Clone)] pub struct GraphicsPipelineResource { pub render_target_meta: GraphicsPipelineRenderTargetMeta, pub pipeline: Arc<RafxPipeline>, pub descriptor_set_layouts: Arc<Vec<ResourceArc<DescriptorSetLayoutResource>>>, } #[derive(Debug, Clone)] pub struct ComputePipelineResource { pub root_signature: ResourceArc<RootSignatureResource>, pub 
pipeline: Arc<RafxPipeline>, pub descriptor_set_layouts: Arc<Vec<ResourceArc<DescriptorSetLayoutResource>>>, } #[derive(Debug, Clone)] pub struct ImageResource { pub image: RafxTexture, // Dynamic resources have no key pub image_key: Option<ImageKey>, } #[derive(Debug, Clone)] pub struct ImageViewResource { pub image: ResourceArc<ImageResource>, // Dynamic resources have no key pub image_view_key: Option<ImageViewKey>, pub texture_bind_type: Option<RafxTextureBindType>, } #[derive(Debug, Clone)] pub struct SamplerResource { pub sampler: RafxSampler, pub sampler_key: SamplerKey, } #[derive(Debug, Clone)] pub struct BufferResource { pub buffer: Arc<RafxBuffer>, // Dynamic resources have no key pub buffer_key: Option<BufferKey>, } // // Handles raw lookup and destruction of GPU resources. Everything is reference counted. No safety // is provided for dependencies/order of destruction. The general expectation is that anything // dropped can safely be destroyed after a few frames have passed (based on max number of frames // that can be submitted to the GPU) // //TODO: Some of the resources like buffers and images don't need to be "keyed" and could probably // be kept in a slab. We *do* need a way to access and quickly remove elements though, and whatever // key we use is sent through a Sender/Receiver pair to be dropped later. 
pub struct ResourceLookupSetInner { device_context: RafxDeviceContext, shader_modules: ResourceLookup<ShaderModuleKey, ShaderModuleResource>, shaders: ResourceLookup<ShaderKey, ShaderResource>, root_signatures: ResourceLookup<RootSignatureKey, RootSignatureResource>, descriptor_set_layouts: ResourceLookup<DescriptorSetLayoutKey, DescriptorSetLayoutResource>, material_passes: ResourceLookup<MaterialPassKey, MaterialPassResource>, graphics_pipelines: ResourceLookup<GraphicsPipelineKey, GraphicsPipelineResource>, compute_pipelines: ResourceLookup<ComputePipelineKey, ComputePipelineResource>, images: ResourceLookup<ImageKey, ImageResource>, image_views: ResourceLookup<ImageViewKey, ImageViewResource>, samplers: ResourceLookup<SamplerKey, SamplerResource>, buffers: ResourceLookup<BufferKey, BufferResource>, // Used to generate keys for images/buffers next_image_id: AtomicU64, next_buffer_id: AtomicU64, } #[derive(Clone)] pub struct ResourceLookupSet { inner: Arc<ResourceLookupSetInner>, } impl ResourceLookupSet { pub fn new( device_context: &RafxDeviceContext, max_frames_in_flight: u32, ) -> Self { let set = ResourceLookupSetInner { device_context: device_context.clone(), shader_modules: ResourceLookup::new(max_frames_in_flight), shaders: ResourceLookup::new(max_frames_in_flight), root_signatures: ResourceLookup::new(max_frames_in_flight), descriptor_set_layouts: ResourceLookup::new(max_frames_in_flight), material_passes: ResourceLookup::new(max_frames_in_flight), graphics_pipelines: ResourceLookup::new(max_frames_in_flight), compute_pipelines: ResourceLookup::new(max_frames_in_flight), images: ResourceLookup::new(max_frames_in_flight), image_views: ResourceLookup::new(max_frames_in_flight), samplers: ResourceLookup::new(max_frames_in_flight), buffers: ResourceLookup::new(max_frames_in_flight), next_image_id: AtomicU64::new(0), next_buffer_id: AtomicU64::new(0), }; ResourceLookupSet { inner: Arc::new(set), } } pub fn device_context(&self) -> &RafxDeviceContext { 
&self.inner.device_context } #[profiling::function] pub fn on_frame_complete(&self) -> RafxResult<()> { self.inner.images.on_frame_complete()?; self.inner.image_views.on_frame_complete()?; self.inner.buffers.on_frame_complete()?; self.inner.shader_modules.on_frame_complete()?; self.inner.shaders.on_frame_complete()?; self.inner.samplers.on_frame_complete()?; self.inner.root_signatures.on_frame_complete()?; self.inner.descriptor_set_layouts.on_frame_complete()?; self.inner.material_passes.on_frame_complete()?; self.inner.graphics_pipelines.on_frame_complete()?; self.inner.compute_pipelines.on_frame_complete()?; Ok(()) } // This assumes that no GPU work remains that relies on these resources. Use // RafxQueue::wait_for_queue_idle pub fn destroy(&self) -> RafxResult<()> { //WARNING: These need to be in order of dependencies to avoid frame-delays on destroying // resources. self.inner.compute_pipelines.destroy()?; self.inner.graphics_pipelines.destroy()?; self.inner.material_passes.destroy()?; self.inner.descriptor_set_layouts.destroy()?; self.inner.root_signatures.destroy()?; self.inner.samplers.destroy()?; self.inner.shaders.destroy()?; self.inner.shader_modules.destroy()?; self.inner.buffers.destroy()?; self.inner.image_views.destroy()?; self.inner.images.destroy()?; Ok(()) } pub fn metrics(&self) -> ResourceMetrics { ResourceMetrics { shader_module_metrics: self.inner.shader_modules.metrics(), shader_metrics: self.inner.shaders.metrics(), root_signature_metrics: self.inner.root_signatures.metrics(), descriptor_set_layout_metrics: self.inner.descriptor_set_layouts.metrics(), material_pass_metrics: self.inner.material_passes.metrics(), graphics_pipeline_metrics: self.inner.graphics_pipelines.metrics(), compute_pipeline_metrics: self.inner.compute_pipelines.metrics(), image_metrics: self.inner.images.metrics(), image_view_metrics: self.inner.image_views.metrics(), sampler_metrics: self.inner.samplers.metrics(), buffer_metrics: self.inner.buffers.metrics(), } } pub fn 
get_or_create_shader_module_from_cooked_package( &self, package: &CookedShaderPackage, ) -> RafxResult<ResourceArc<ShaderModuleResource>> { self.get_or_create_shader_module(&package.shader_package, Some(package.hash)) } pub fn get_or_create_shader_module( &self, shader_package: &RafxShaderPackage, shader_module_hash: Option<ShaderModuleHash>, ) -> RafxResult<ResourceArc<ShaderModuleResource>> { let shader_module_hash = shader_module_hash.unwrap_or_else(|| ShaderModuleHash::new(shader_package)); let shader_module_key = ShaderModuleKey { hash: shader_module_hash, }; self.inner .shader_modules .get_or_create(&shader_module_key, || { log::trace!( "Creating shader module\n[hash: {:?}]", shader_module_key.hash, ); let shader_module = self .inner .device_context .create_shader_module(shader_package.module_def())?; let resource = ShaderModuleResource { shader_module, shader_package: Arc::new(shader_package.clone()), shader_module_key: shader_module_key.clone(), }; log::trace!("Created shader module {:?}", resource); Ok(resource) }) } pub fn get_or_create_sampler( &self, sampler_def: &RafxSamplerDef, ) -> RafxResult<ResourceArc<SamplerResource>> { let hash = SamplerHash::new(sampler_def); let sampler_key = SamplerKey { hash }; self.inner.samplers.get_or_create(&sampler_key, || { log::trace!("Creating sampler\n{:#?}", sampler_def); let sampler = self.inner.device_context.create_sampler(sampler_def)?; let resource = SamplerResource { sampler, sampler_key: sampler_key.clone(), }; log::trace!("Created sampler {:?}", resource); Ok(resource) }) } pub fn get_or_create_shader( &self, shader_modules: &[ResourceArc<ShaderModuleResource>], entry_points: &[&ReflectedEntryPoint], ) -> RafxResult<ResourceArc<ShaderResource>> { let shader_module_hashes: Vec<_> = shader_modules .iter() .map(|x| x.get_raw().shader_module_key.hash) .collect(); let hash = ShaderHash::new(entry_points, &shader_module_hashes); let key = ShaderKey { hash }; self.inner.shaders.get_or_create(&key, || { 
log::trace!("Creating shader\n"); let mut shader_defs = Vec::with_capacity(entry_points.len()); for (entry_point, module) in entry_points.iter().zip(shader_modules) { shader_defs.push(RafxShaderStageDef { shader_module: module.get_raw().shader_module.clone(), reflection: entry_point.rafx_api_reflection.clone(), }); } let shader = self.inner.device_context.create_shader(shader_defs)?; let resource = ShaderResource { key, shader, shader_modules: shader_modules.iter().cloned().collect(), }; log::trace!("Created shader {:?}", resource); Ok(resource) }) } pub fn get_or_create_root_signature( &self, shader_resources: &[ResourceArc<ShaderResource>], immutable_sampler_keys: &[RafxImmutableSamplerKey], immutable_sampler_resources: &[Vec<ResourceArc<SamplerResource>>], ) -> RafxResult<ResourceArc<RootSignatureResource>> { let shader_hashes: Vec<_> = shader_resources .iter() .map(|x| x.get_raw().key.hash) .collect(); let mut sampler_hashes = Vec::with_capacity(immutable_sampler_resources.len()); for sampler_list in immutable_sampler_resources { let hashes: Vec<_> = sampler_list .iter() .map(|x| x.get_raw().sampler_key.hash) .collect(); sampler_hashes.push(hashes); } let hash = RootSignatureHash::new(&shader_hashes, immutable_sampler_keys, &sampler_hashes); let key = RootSignatureKey { hash }; self.inner.root_signatures.get_or_create(&key, || { let mut samplers = Vec::with_capacity(immutable_sampler_resources.len()); for sampler_list in immutable_sampler_resources { let cloned_sampler_list: Vec<_> = sampler_list .iter() .map(|x| x.get_raw().sampler.clone()) .collect(); samplers.push(cloned_sampler_list); } let mut immutable_samplers = Vec::with_capacity(samplers.len()); for i in 0..samplers.len() { immutable_samplers.push(RafxImmutableSamplers { key: immutable_sampler_keys[i].clone(), samplers: &samplers[i], }); } log::trace!("Creating root signature\n{:#?}", key); let shaders: Vec<_> = shader_resources .iter() .map(|x| x.get_raw().shader.clone()) .collect(); let 
root_signature = self.inner .device_context .create_root_signature(&RafxRootSignatureDef { shaders: &shaders, immutable_samplers: &immutable_samplers, })?; let shaders = shader_resources.iter().cloned().collect(); let mut immutable_samplers = vec![]; for resource_list in immutable_sampler_resources { for resource in resource_list { immutable_samplers.push(resource.clone()); } } let resource = RootSignatureResource { key, root_signature, shaders, immutable_samplers, }; log::trace!("Created root signature"); Ok(resource) }) } pub fn get_or_create_descriptor_set_layout( &self, root_signature: &ResourceArc<RootSignatureResource>, set_index: u32, descriptor_set_layout_def: &DescriptorSetLayout, ) -> RafxResult<ResourceArc<DescriptorSetLayoutResource>> { let hash = DescriptorSetLayoutHash::new( root_signature.get_raw().key.hash, set_index, descriptor_set_layout_def, ); let key = DescriptorSetLayoutKey { hash }; self.inner.descriptor_set_layouts.get_or_create(&key, || { log::trace!( "Creating descriptor set layout set_index={}, root_signature:\n{:#?}", set_index, root_signature ); // Create the resource object, which contains the descriptor set layout we created plus // ResourceArcs to the samplers, which must remain alive for the lifetime of the descriptor set let resource = DescriptorSetLayoutResource { root_signature_arc: root_signature.clone(), root_signature: root_signature.get_raw().root_signature.clone(), set_index, descriptor_set_layout_def: Arc::new(descriptor_set_layout_def.clone()), key: key.clone(), }; log::trace!("Created descriptor set layout {:?}", resource); Ok(resource) }) } pub fn get_or_create_material_pass( &self, shader: ResourceArc<ShaderResource>, root_signature: ResourceArc<RootSignatureResource>, descriptor_sets: Vec<ResourceArc<DescriptorSetLayoutResource>>, fixed_function_state: Arc<FixedFunctionState>, vertex_inputs: Arc<Vec<MaterialPassVertexInput>>, ) -> RafxResult<ResourceArc<MaterialPassResource>> { let descriptor_set_hashes: Vec<_> = 
descriptor_sets .iter() .map(|x| x.get_raw().key.hash) .collect(); let hash = MaterialPassHash::new( shader.get_raw().key.hash, root_signature.get_raw().key.hash, &descriptor_set_hashes, &*fixed_function_state, &*vertex_inputs, ); let material_pass_key = MaterialPassKey { hash }; self.inner .material_passes .get_or_create(&material_pass_key, || { log::trace!("Creating material pass\n{:#?}", material_pass_key); let resource = MaterialPassResource { material_pass_key: material_pass_key.clone(), root_signature, descriptor_set_layouts: Arc::new(descriptor_sets), shader, fixed_function_state, vertex_inputs, }; Ok(resource) }) } pub fn get_or_create_graphics_pipeline( &self, material_pass: &ResourceArc<MaterialPassResource>, render_target_meta: &GraphicsPipelineRenderTargetMeta, primitive_topology: RafxPrimitiveTopology, vertex_layout: &RafxVertexLayout, ) -> RafxResult<ResourceArc<GraphicsPipelineResource>> { let hash = GraphicsPipelineHash::new( material_pass.get_raw().material_pass_key.hash, render_target_meta, primitive_topology, vertex_layout, ); let pipeline_key = GraphicsPipelineKey { hash }; self.inner .graphics_pipelines .get_or_create(&pipeline_key, || { log::trace!("Creating graphics pipeline\n{:#?}", pipeline_key); let fixed_function_state = &material_pass.get_raw().fixed_function_state; let pipeline = self.inner.device_context.create_graphics_pipeline( &RafxGraphicsPipelineDef { root_signature: &material_pass .get_raw() .root_signature .get_raw() .root_signature, shader: &material_pass.get_raw().shader.get_raw().shader, blend_state: &fixed_function_state.blend_state, depth_state: &fixed_function_state.depth_state, rasterizer_state: &fixed_function_state.rasterizer_state, primitive_topology, vertex_layout: &vertex_layout, color_formats: &render_target_meta.color_formats(), depth_stencil_format: render_target_meta.depth_stencil_format(), sample_count: render_target_meta.sample_count(), }, )?; let resource = GraphicsPipelineResource { render_target_meta: 
render_target_meta.clone(), pipeline: Arc::new(pipeline), descriptor_set_layouts: material_pass.get_raw().descriptor_set_layouts.clone(), }; Ok(resource) }) } pub fn get_or_create_compute_pipeline( &self, shader: &ResourceArc<ShaderResource>, root_signature: &ResourceArc<RootSignatureResource>, descriptor_set_layouts: Vec<ResourceArc<DescriptorSetLayoutResource>>, ) -> RafxResult<ResourceArc<ComputePipelineResource>> { let descriptor_set_hashes: Vec<_> = descriptor_set_layouts .iter() .map(|x| x.get_raw().key.hash) .collect(); let hash = ComputePipelineHash::new( shader.get_raw().key.hash, root_signature.get_raw().key.hash, &descriptor_set_hashes, ); let pipeline_key = ComputePipelineKey { hash }; self.inner .compute_pipelines .get_or_create(&pipeline_key, || { log::trace!("Creating compute pipeline\n{:#?}", pipeline_key); let rafx_pipeline = self.inner .device_context .create_compute_pipeline(&RafxComputePipelineDef { root_signature: &root_signature.get_raw().root_signature, shader: &shader.get_raw().shader, })?; log::trace!("Created compute pipeline {:?}", rafx_pipeline); let resource = ComputePipelineResource { root_signature: root_signature.clone(), pipeline: Arc::new(rafx_pipeline), descriptor_set_layouts: Arc::new(descriptor_set_layouts), }; Ok(resource) }) } pub fn insert_image( &self, image: RafxTexture, ) -> ResourceArc<ImageResource> { let image_id = self.inner.next_image_id.fetch_add(1, Ordering::Relaxed); let image_key = ImageKey { id: image_id }; let resource = ImageResource { image, image_key: Some(image_key), }; self.inner .images .create(&image_key, || Ok(resource)) .unwrap() } //TODO: Support direct removal of raw images with verification that no references remain // A key difference between this insert_buffer and the insert_buffer in a DynResourceAllocator // is that these can be retrieved. 
This one is more appropriate to use with loaded assets, and // DynResourceAllocator with runtime assets pub fn insert_buffer( &self, buffer: RafxBuffer, ) -> ResourceArc<BufferResource> { let buffer_id = self.inner.next_buffer_id.fetch_add(1, Ordering::Relaxed); let buffer_key = BufferKey { id: buffer_id }; let resource = BufferResource { buffer: Arc::new(buffer), buffer_key: Some(buffer_key), }; self.inner .buffers .create(&buffer_key, || Ok(resource)) .unwrap() } pub fn get_or_create_image_view( &self, image: &ResourceArc<ImageResource>, texture_bind_type: Option<RafxTextureBindType>, ) -> RafxResult<ResourceArc<ImageViewResource>> { if image.get_raw().image_key.is_none() { log::error!("Tried to create an image view resource with a dynamic image"); return Err("Tried to create an image view resource with a dynamic image")?; } let image_view_key = ImageViewKey { image_key: image.get_raw().image_key.unwrap(), texture_bind_type, }; self.inner.image_views.get_or_create(&image_view_key, || { log::trace!("Creating image view\n{:#?}", image_view_key); let resource = ImageViewResource { image: image.clone(), texture_bind_type, image_view_key: Some(image_view_key.clone()), }; log::trace!("Created image view\n{:#?}", resource); Ok(resource) }) } }
34.618421
107
0.625035
38e36042efb3ad071aded7c9e4284ef6bfdf3ff6
1,170
extern crate bricks; extern crate structopt; use cli::Spaces; use std::io; use std::path::PathBuf; use structopt::StructOpt; mod cli { use super::*; #[derive(structopt::StructOpt, Debug)] #[structopt(about = "Replace tabs with spaces")] #[structopt(setting = structopt::clap::AppSettings::ColoredHelp)] pub struct Spaces { #[structopt( name = "file", help = "file to process, if none is specified stdin will be processed", parse(from_os_str) )] pub file: Option<PathBuf>, #[structopt( short = "n", long = "number-of-spaces", default_value = "4", help = "of spaces which shall be used to replace one tab" )] pub number_of_spaces: usize, } } fn main() -> io::Result<()> { let config = Spaces::from_args(); let _input_file: String = if let Some(path) = config.file { path.to_str().unwrap().to_string() } else { String::from("stdin") }; // TODO: Add actual implementation/functionality // let mut reader = bricks::create_reader(&input_file)?; std::process::exit(1); }
26
83
0.582051
67366b63a3b407c14210467b0b31f59fea7d8c30
2,452
use crate::platform::unix::x11::XConnection; use glutin_glx_sys as ffi; use std::sync::Arc; /// A guard for when you want to make the context current. Destroying the guard /// restores the previously-current context. #[derive(Debug)] pub struct MakeCurrentGuard { old_display: *mut ffi::Display, display: *mut ffi::Display, xconn: Arc<XConnection>, possibly_invalid: Option<MakeCurrentGuardInner>, } #[derive(Debug)] struct MakeCurrentGuardInner { old_drawable: ffi::glx::types::GLXDrawable, old_context: ffi::GLXContext, } impl MakeCurrentGuard { pub fn new( xconn: &Arc<XConnection>, drawable: ffi::glx::types::GLXDrawable, context: ffi::GLXContext, ) -> Result<Self, String> { unsafe { let glx = super::GLX.as_ref().unwrap(); let ret = MakeCurrentGuard { old_display: glx.GetCurrentDisplay() as *mut _, display: xconn.display as *mut _, xconn: Arc::clone(xconn), possibly_invalid: Some(MakeCurrentGuardInner { old_drawable: glx.GetCurrentDrawable(), old_context: glx.GetCurrentContext(), }), }; let res = glx.MakeCurrent(xconn.display as *mut _, drawable, context); if res == 0 { let err = xconn.check_errors(); Err(format!("`glXMakeCurrent` failed: {:?}", err)) } else { Ok(ret) } } } pub fn old_context(&mut self) -> Option<ffi::GLXContext> { self.possibly_invalid.as_ref().map(|pi| pi.old_context) } pub fn invalidate(&mut self) { self.possibly_invalid.take(); } } impl Drop for MakeCurrentGuard { fn drop(&mut self) { let glx = super::GLX.as_ref().unwrap(); let (drawable, context) = match self.possibly_invalid.take() { Some(inner) => (inner.old_drawable, inner.old_context), None => (0, std::ptr::null()), }; let display = match self.old_display { old_display if old_display == std::ptr::null_mut() => self.display, old_display => old_display, }; let res = unsafe { glx.MakeCurrent(display as *mut _, drawable, context) }; if res == 0 { let err = self.xconn.check_errors(); panic!("`glXMakeCurrent` failed: {:?}", err); } } }
29.902439
83
0.569331
14691712875f85ac4ba68696e653c6a2e7822a40
1,545
use std::collections::HashMap; pub fn f() { let input = std::fs::read_to_string("input/12").unwrap(); let mut caves: HashMap<&str, Vec<&str>> = HashMap::new(); for line in input.lines() { let (from, to) = line.split_once("-").unwrap(); if let Some(p) = caves.get_mut(from) { p.push(to); } else { caves.insert(from, vec![to]); } if let Some(p) = caves.get_mut(to) { p.push(from); } else { caves.insert(to, vec![from]); } } let paths = traverse(&caves); println!("{:?}", paths.len()); } fn traverse(caves: &HashMap<&str, Vec<&str>>) -> Vec<String> { derp(caves, "start", "start", false).unwrap() } fn derp(caves: &HashMap<&str, Vec<&str>>, past_path: &str, current_position: &str, double_dipped: bool) -> Option<Vec<String>> { if current_position == "end" { return Some(vec![past_path.to_string()]); } let next_moves = &caves[current_position]; let mut paths = Vec::new(); for m in next_moves { if m == &"start" { continue; } let mut dd = double_dipped; if m.chars().all(|x| x.is_lowercase()) && past_path.contains(*m) { if double_dipped { continue; } dd = true; } if let Some(p) = derp(caves, &format!("{},{}", past_path, m), m, dd).as_mut() { paths.append(p); } } if paths.len() > 0 { return Some(paths); } None }
25.327869
128
0.504207
bb7b98b48f4ba4bebb09ac4aed9e131117a7fe48
4,381
use std::borrow::Cow; use crate::requests::*; use crate::types::*; /// Use this method to send an audio #[derive(Debug, Clone, PartialEq, PartialOrd, Serialize)] #[must_use = "requests do nothing unless sent"] pub struct SendAudio<'c, 'p, 't> { chat_id: ChatRef, #[serde(skip)] audio: InputFile, caption: Option<Cow<'c, str>>, parse_mode: Option<ParseMode>, duration: Option<Integer>, performer: Option<Cow<'p, str>>, title: Option<Cow<'t, str>>, #[serde(skip)] thumb: Option<InputFile>, reply_to_message_id: Option<MessageId>, disable_notification: bool, reply_markup: Option<ReplyMarkup>, } impl<'c, 'p, 't> ToMultipart for SendAudio<'c, 'p, 't> { fn to_multipart(&self) -> Result<Multipart, Error> { multipart_map! { self, (chat_id (text)); (audio (raw)); (caption (text), optional); (parse_mode (text), optional); (duration (text), optional); (performer (text), optional); (title (text), optional); (thumb (raw), optional); (reply_to_message_id (text), optional); (disable_notification (text), when_true); (reply_markup (json), optional); } } } impl<'c, 'p, 't> Request for SendAudio<'c, 'p, 't> { type Type = MultipartRequestType<Self>; type Response = JsonIdResponse<Message>; fn serialize(&self) -> Result<HttpRequest, Error> { Self::Type::serialize(RequestUrl::method("sendAudio"), self) } } impl<'c, 'p, 't> SendAudio<'c, 'p, 't> { pub fn new<C, V>(chat: C, audio: V) -> Self where C: ToChatRef, V: Into<InputFile>, { Self { chat_id: chat.to_chat_ref(), audio: audio.into(), caption: None, parse_mode: None, duration: None, performer: None, title: None, thumb: None, reply_to_message_id: None, reply_markup: None, disable_notification: false, } } pub fn thumb<V>(&mut self, thumb: V) -> &mut Self where V: Into<InputFileUpload>, { self.thumb = Some(thumb.into().into()); self } pub fn caption<T>(&mut self, caption: T) -> &mut Self where T: Into<Cow<'c, str>>, { self.caption = Some(caption.into()); self } pub fn parse_mode(&mut self, parse_mode: ParseMode) -> &mut Self { self.parse_mode 
= Some(parse_mode); self } pub fn duration(&mut self, duration: Integer) -> &mut Self { self.duration = Some(duration); self } pub fn performer<T>(&mut self, performer: T) -> &mut Self where T: Into<Cow<'p, str>>, { self.performer = Some(performer.into()); self } pub fn title<T>(&mut self, title: T) -> &mut Self where T: Into<Cow<'t, str>>, { self.title = Some(title.into()); self } pub fn reply_to<R>(&mut self, to: R) -> &mut Self where R: ToMessageId, { self.reply_to_message_id = Some(to.to_message_id()); self } pub fn reply_markup<R>(&mut self, reply_markup: R) -> &mut Self where R: Into<ReplyMarkup>, { self.reply_markup = Some(reply_markup.into()); self } pub fn disable_notification(&mut self) -> &mut Self { self.disable_notification = true; self } } /// Can reply with an audio pub trait CanReplySendAudio { fn audio_reply<'c, 'p, 't, T>(&self, audio: T) -> SendAudio<'c, 'p, 't> where T: Into<InputFile>; } impl<M> CanReplySendAudio for M where M: ToMessageId + ToSourceChat, { fn audio_reply<'c, 'p, 't, T>(&self, audio: T) -> SendAudio<'c, 'p, 't> where T: Into<InputFile>, { let mut req = SendAudio::new(self.to_source_chat(), audio); req.reply_to(self); req } } /// Send an audio pub trait CanSendAudio { fn audio<'c, 'p, 't, T>(&self, audio: T) -> SendAudio<'c, 'p, 't> where T: Into<InputFile>; } impl<M> CanSendAudio for M where M: ToChatRef, { fn audio<'c, 'p, 't, T>(&self, audio: T) -> SendAudio<'c, 'p, 't> where T: Into<InputFile>, { SendAudio::new(self.to_chat_ref(), audio) } }
24.751412
75
0.546451
1d339b03af6717d9eb68e4908f3e6e4e7333bbf8
66,295
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // FIXME: cover these topics: // path, reader, writer, stream, raii (close not needed), // stdio, print!, println!, file access, process spawning, // error handling /*! I/O, including files, networking, timers, and processes `std::io` provides Rust's basic I/O types, for reading and writing to files, TCP, UDP, and other types of sockets and pipes, manipulating the file system, spawning processes and signal handling. # Examples Some examples of obvious things you might want to do * Read lines from stdin ```rust use std::io; for line in io::stdin().lines() { print!("{}", line.unwrap()); } ``` * Read a complete file ```rust use std::io::File; let contents = File::open(&Path::new("message.txt")).read_to_end(); ``` * Write a line to a file ```rust # #![allow(unused_must_use)] use std::io::File; let mut file = File::create(&Path::new("message.txt")); file.write(b"hello, file!\n"); # drop(file); # ::std::io::fs::unlink(&Path::new("message.txt")); ``` * Iterate over the lines of a file ```rust,no_run use std::io::BufferedReader; use std::io::File; let path = Path::new("message.txt"); let mut file = BufferedReader::new(File::open(&path)); for line in file.lines() { print!("{}", line.unwrap()); } ``` * Pull the lines of a file into a vector of strings ```rust,no_run use std::io::BufferedReader; use std::io::File; let path = Path::new("message.txt"); let mut file = BufferedReader::new(File::open(&path)); let lines: Vec<String> = file.lines().map(|x| x.unwrap()).collect(); ``` * Make a simple TCP client connection 
and request ```rust # #![allow(unused_must_use)] use std::io::TcpStream; # // connection doesn't fail if a server is running on 8080 # // locally, we still want to be type checking this code, so lets # // just stop it running (#11576) # if false { let mut socket = TcpStream::connect("127.0.0.1", 8080).unwrap(); socket.write(b"GET / HTTP/1.0\n\n"); let response = socket.read_to_end(); # } ``` * Make a simple TCP server ```rust # fn main() { } # fn foo() { # #![allow(dead_code)] use std::io::{TcpListener, TcpStream}; use std::io::{Acceptor, Listener}; let listener = TcpListener::bind("127.0.0.1", 80); // bind the listener to the specified address let mut acceptor = listener.listen(); fn handle_client(mut stream: TcpStream) { // ... # &mut stream; // silence unused mutability/variable warning } // accept connections and process them, spawning a new tasks for each one for stream in acceptor.incoming() { match stream { Err(e) => { /* connection failed */ } Ok(stream) => spawn(proc() { // connection succeeded handle_client(stream) }) } } // close the socket server drop(acceptor); # } ``` # Error Handling I/O is an area where nearly every operation can result in unexpected errors. Errors should be painfully visible when they happen, and handling them should be easy to work with. It should be convenient to handle specific I/O errors, and it should also be convenient to not deal with I/O errors. Rust's I/O employs a combination of techniques to reduce boilerplate while still providing feedback about errors. The basic strategy: * All I/O operations return `IoResult<T>` which is equivalent to `Result<T, IoError>`. The `Result` type is defined in the `std::result` module. * If the `Result` type goes unused, then the compiler will by default emit a warning about the unused result. This is because `Result` has the `#[must_use]` attribute. * Common traits are implemented for `IoResult`, e.g. 
`impl<R: Reader> Reader for IoResult<R>`, so that error values do not have to be 'unwrapped' before use. These features combine in the API to allow for expressions like `File::create(&Path::new("diary.txt")).write(b"Met a girl.\n")` without having to worry about whether "diary.txt" exists or whether the write succeeds. As written, if either `new` or `write_line` encounters an error then the result of the entire expression will be an error. If you wanted to handle the error though you might write: ```rust # #![allow(unused_must_use)] use std::io::File; match File::create(&Path::new("diary.txt")).write(b"Met a girl.\n") { Ok(()) => (), // succeeded Err(e) => println!("failed to write to my diary: {}", e), } # ::std::io::fs::unlink(&Path::new("diary.txt")); ``` So what actually happens if `create` encounters an error? It's important to know that what `new` returns is not a `File` but an `IoResult<File>`. If the file does not open, then `new` will simply return `Err(..)`. Because there is an implementation of `Writer` (the trait required ultimately required for types to implement `write_line`) there is no need to inspect or unwrap the `IoResult<File>` and we simply call `write_line` on it. If `new` returned an `Err(..)` then the followup call to `write_line` will also return an error. ## `try!` Explicit pattern matching on `IoResult`s can get quite verbose, especially when performing many I/O operations. Some examples (like those above) are alleviated with extra methods implemented on `IoResult`, but others have more complex interdependencies among each I/O operation. The `try!` macro from `std::macros` is provided as a method of early-return inside `Result`-returning functions. It expands to an early-return on `Err` and otherwise unwraps the contained `Ok` value. 
If you wanted to read several `u32`s from a file and return their product: ```rust use std::io::{File, IoResult}; fn file_product(p: &Path) -> IoResult<u32> { let mut f = File::open(p); let x1 = try!(f.read_le_u32()); let x2 = try!(f.read_le_u32()); Ok(x1 * x2) } match file_product(&Path::new("numbers.bin")) { Ok(x) => println!("{}", x), Err(e) => println!("Failed to read numbers!") } ``` With `try!` in `file_product`, each `read_le_u32` need not be directly concerned with error handling; instead its caller is responsible for responding to errors that may occur while attempting to read the numbers. */ #![experimental] #![deny(unused_must_use)] use char::Char; use collections::Collection; use fmt; use int; use iter::Iterator; use libc; use mem::transmute; use ops::{BitOr, BitAnd, Sub, Not}; use option::{Option, Some, None}; use os; use owned::Box; use result::{Ok, Err, Result}; use rt::rtio; use slice::{Vector, MutableVector, ImmutableVector}; use str::{Str, StrSlice, StrAllocating}; use str; use string::String; use uint; use vec::Vec; // Reexports pub use self::stdio::stdin; pub use self::stdio::stdout; pub use self::stdio::stderr; pub use self::stdio::print; pub use self::stdio::println; pub use self::fs::File; pub use self::timer::Timer; pub use self::net::ip::IpAddr; pub use self::net::tcp::TcpListener; pub use self::net::tcp::TcpStream; pub use self::net::udp::UdpStream; pub use self::pipe::PipeStream; pub use self::process::{Process, Command}; pub use self::tempfile::TempDir; pub use self::mem::{MemReader, BufReader, MemWriter, BufWriter}; pub use self::buffered::{BufferedReader, BufferedWriter, BufferedStream, LineBufferedWriter}; pub use self::comm_adapters::{ChanReader, ChanWriter}; // this comes first to get the iotest! 
macro pub mod test; mod buffered; mod comm_adapters; mod mem; mod result; mod tempfile; pub mod extensions; pub mod fs; pub mod net; pub mod pipe; pub mod process; pub mod signal; pub mod stdio; pub mod timer; pub mod util; /// The default buffer size for various I/O operations // libuv recommends 64k buffers to maximize throughput // https://groups.google.com/forum/#!topic/libuv/oQO1HJAIDdA static DEFAULT_BUF_SIZE: uint = 1024 * 64; /// A convenient typedef of the return value of any I/O action. pub type IoResult<T> = Result<T, IoError>; /// The type passed to I/O condition handlers to indicate error /// /// # FIXME /// /// Is something like this sufficient? It's kind of archaic #[deriving(PartialEq, Clone)] pub struct IoError { /// An enumeration which can be matched against for determining the flavor /// of error. pub kind: IoErrorKind, /// A human-readable description about the error pub desc: &'static str, /// Detailed information about this error, not always available pub detail: Option<String> } impl IoError { /// Convert an `errno` value into an `IoError`. /// /// If `detail` is `true`, the `detail` field of the `IoError` /// struct is filled with an allocated string describing the error /// in more detail, retrieved from the operating system. 
pub fn from_errno(errno: uint, detail: bool) -> IoError { #[cfg(windows)] fn get_err(errno: i32) -> (IoErrorKind, &'static str) { match errno { libc::EOF => (EndOfFile, "end of file"), libc::ERROR_NO_DATA => (BrokenPipe, "the pipe is being closed"), libc::ERROR_FILE_NOT_FOUND => (FileNotFound, "file not found"), libc::ERROR_INVALID_NAME => (InvalidInput, "invalid file name"), libc::WSAECONNREFUSED => (ConnectionRefused, "connection refused"), libc::WSAECONNRESET => (ConnectionReset, "connection reset"), libc::ERROR_ACCESS_DENIED | libc::WSAEACCES => (PermissionDenied, "permission denied"), libc::WSAEWOULDBLOCK => { (ResourceUnavailable, "resource temporarily unavailable") } libc::WSAENOTCONN => (NotConnected, "not connected"), libc::WSAECONNABORTED => (ConnectionAborted, "connection aborted"), libc::WSAEADDRNOTAVAIL => (ConnectionRefused, "address not available"), libc::WSAEADDRINUSE => (ConnectionRefused, "address in use"), libc::ERROR_BROKEN_PIPE => (EndOfFile, "the pipe has ended"), libc::ERROR_OPERATION_ABORTED => (TimedOut, "operation timed out"), libc::WSAEINVAL => (InvalidInput, "invalid argument"), libc::ERROR_CALL_NOT_IMPLEMENTED => (IoUnavailable, "function not implemented"), libc::ERROR_INVALID_HANDLE => (MismatchedFileTypeForOperation, "invalid handle provided to function"), libc::ERROR_NOTHING_TO_TERMINATE => (InvalidInput, "no process to kill"), // libuv maps this error code to EISDIR. we do too. if it is found // to be incorrect, we can add in some more machinery to only // return this message when ERROR_INVALID_FUNCTION after certain // win32 calls. libc::ERROR_INVALID_FUNCTION => (InvalidInput, "illegal operation on a directory"), _ => (OtherIoError, "unknown error") } } #[cfg(not(windows))] fn get_err(errno: i32) -> (IoErrorKind, &'static str) { // FIXME: this should probably be a bit more descriptive... 
match errno { libc::EOF => (EndOfFile, "end of file"), libc::ECONNREFUSED => (ConnectionRefused, "connection refused"), libc::ECONNRESET => (ConnectionReset, "connection reset"), libc::EPERM | libc::EACCES => (PermissionDenied, "permission denied"), libc::EPIPE => (BrokenPipe, "broken pipe"), libc::ENOTCONN => (NotConnected, "not connected"), libc::ECONNABORTED => (ConnectionAborted, "connection aborted"), libc::EADDRNOTAVAIL => (ConnectionRefused, "address not available"), libc::EADDRINUSE => (ConnectionRefused, "address in use"), libc::ENOENT => (FileNotFound, "no such file or directory"), libc::EISDIR => (InvalidInput, "illegal operation on a directory"), libc::ENOSYS => (IoUnavailable, "function not implemented"), libc::EINVAL => (InvalidInput, "invalid argument"), libc::ENOTTY => (MismatchedFileTypeForOperation, "file descriptor is not a TTY"), libc::ETIMEDOUT => (TimedOut, "operation timed out"), libc::ECANCELED => (TimedOut, "operation aborted"), // These two constants can have the same value on some systems, // but different values on others, so we can't use a match // clause x if x == libc::EAGAIN || x == libc::EWOULDBLOCK => (ResourceUnavailable, "resource temporarily unavailable"), _ => (OtherIoError, "unknown error") } } let (kind, desc) = get_err(errno as i32); IoError { kind: kind, desc: desc, detail: if detail && kind == OtherIoError { Some(os::error_string(errno).as_slice().chars().map(|c| c.to_lowercase()).collect()) } else { None }, } } /// Retrieve the last error to occur as a (detailed) IoError. /// /// This uses the OS `errno`, and so there should not be any task /// descheduling or migration (other than that performed by the /// operating system) between the call(s) for which errors are /// being checked and the call of this function. 
pub fn last_error() -> IoError { IoError::from_errno(os::errno() as uint, true) } fn from_rtio_error(err: rtio::IoError) -> IoError { let rtio::IoError { code, extra, detail } = err; let mut ioerr = IoError::from_errno(code, false); ioerr.detail = detail; ioerr.kind = match ioerr.kind { TimedOut if extra > 0 => ShortWrite(extra), k => k, }; return ioerr; } } impl fmt::Show for IoError { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { match *self { IoError { kind: OtherIoError, desc: "unknown error", detail: Some(ref detail) } => write!(fmt, "{}", detail), IoError { detail: None, desc, .. } => write!(fmt, "{}", desc), IoError { detail: Some(ref detail), desc, .. } => write!(fmt, "{} ({})", desc, detail) } } } /// A list specifying general categories of I/O error. #[deriving(PartialEq, Clone, Show)] pub enum IoErrorKind { /// Any I/O error not part of this list. OtherIoError, /// The operation could not complete because end of file was reached. EndOfFile, /// The file was not found. FileNotFound, /// The file permissions disallowed access to this file. PermissionDenied, /// A network connection failed for some reason not specified in this list. ConnectionFailed, /// The network operation failed because the network connection was closed. Closed, /// The connection was refused by the remote server. ConnectionRefused, /// The connection was reset by the remote server. ConnectionReset, /// The connection was aborted (terminated) by the remote server. ConnectionAborted, /// The network operation failed because it was not connected yet. NotConnected, /// The operation failed because a pipe was closed. BrokenPipe, /// A file already existed with that name. PathAlreadyExists, /// No file exists at that location. PathDoesntExist, /// The path did not specify the type of file that this operation required. For example, /// attempting to copy a directory with the `fs::copy()` operation will fail with this error. 
MismatchedFileTypeForOperation, /// The operation temporarily failed (for example, because a signal was received), and retrying /// may succeed. ResourceUnavailable, /// No I/O functionality is available for this task. IoUnavailable, /// A parameter was incorrect in a way that caused an I/O error not part of this list. InvalidInput, /// The I/O operation's timeout expired, causing it to be canceled. TimedOut, /// This write operation failed to write all of its data. /// /// Normally the write() method on a Writer guarantees that all of its data /// has been written, but some operations may be terminated after only /// partially writing some data. An example of this is a timed out write /// which successfully wrote a known number of bytes, but bailed out after /// doing so. /// /// The payload contained as part of this variant is the number of bytes /// which are known to have been successfully written. ShortWrite(uint), /// The Reader returned 0 bytes from `read()` too many times. NoProgress, } /// A trait that lets you add a `detail` to an IoError easily trait UpdateIoError<T> { /// Returns an IoError with updated description and detail fn update_err(self, desc: &'static str, detail: |&IoError| -> String) -> Self; /// Returns an IoError with updated detail fn update_detail(self, detail: |&IoError| -> String) -> Self; /// Returns an IoError with update description fn update_desc(self, desc: &'static str) -> Self; } impl<T> UpdateIoError<T> for IoResult<T> { fn update_err(self, desc: &'static str, detail: |&IoError| -> String) -> IoResult<T> { self.map_err(|mut e| { let detail = detail(&e); e.desc = desc; e.detail = Some(detail); e }) } fn update_detail(self, detail: |&IoError| -> String) -> IoResult<T> { self.map_err(|mut e| { e.detail = Some(detail(&e)); e }) } fn update_desc(self, desc: &'static str) -> IoResult<T> { self.map_err(|mut e| { e.desc = desc; e }) } } static NO_PROGRESS_LIMIT: uint = 1000; /// A trait for objects which are byte-oriented streams. 
Readers are defined by /// one method, `read`. This function will block until data is available, /// filling in the provided buffer with any data read. /// /// Readers are intended to be composable with one another. Many objects /// throughout the I/O and related libraries take and provide types which /// implement the `Reader` trait. pub trait Reader { // Only method which need to get implemented for this trait /// Read bytes, up to the length of `buf` and place them in `buf`. /// Returns the number of bytes read. The number of bytes read may /// be less than the number requested, even 0. Returns `Err` on EOF. /// /// # Error /// /// If an error occurs during this I/O operation, then it is returned as /// `Err(IoError)`. Note that end-of-file is considered an error, and can be /// inspected for in the error's `kind` field. Also note that reading 0 /// bytes is not considered an error in all circumstances /// /// # Implementation Note /// /// When implementing this method on a new Reader, you are strongly encouraged /// not to return 0 if you can avoid it. fn read(&mut self, buf: &mut [u8]) -> IoResult<uint>; // Convenient helper methods based on the above methods /// Reads at least `min` bytes and places them in `buf`. /// Returns the number of bytes read. /// /// This will continue to call `read` until at least `min` bytes have been /// read. If `read` returns 0 too many times, `NoProgress` will be /// returned. /// /// # Error /// /// If an error occurs at any point, that error is returned, and no further /// bytes are read. 
fn read_at_least(&mut self, min: uint, buf: &mut [u8]) -> IoResult<uint> { if min > buf.len() { return Err(IoError { detail: Some("the buffer is too short".to_string()), ..standard_error(InvalidInput) }); } let mut read = 0; while read < min { let mut zeroes = 0; loop { match self.read(buf.mut_slice_from(read)) { Ok(0) => { zeroes += 1; if zeroes >= NO_PROGRESS_LIMIT { return Err(standard_error(NoProgress)); } } Ok(n) => { read += n; break; } err@Err(_) => return err } } } Ok(read) } /// Reads a single byte. Returns `Err` on EOF. fn read_byte(&mut self) -> IoResult<u8> { let mut buf = [0]; try!(self.read_at_least(1, buf)); Ok(buf[0]) } /// Reads up to `len` bytes and appends them to a vector. /// Returns the number of bytes read. The number of bytes read may be /// less than the number requested, even 0. Returns Err on EOF. /// /// # Error /// /// If an error occurs during this I/O operation, then it is returned /// as `Err(IoError)`. See `read()` for more details. fn push(&mut self, len: uint, buf: &mut Vec<u8>) -> IoResult<uint> { let start_len = buf.len(); buf.reserve_additional(len); let n = { let s = unsafe { slice_vec_capacity(buf, start_len, start_len + len) }; try!(self.read(s)) }; unsafe { buf.set_len(start_len + n) }; Ok(n) } /// Reads at least `min` bytes, but no more than `len`, and appends them to /// a vector. /// Returns the number of bytes read. /// /// This will continue to call `read` until at least `min` bytes have been /// read. If `read` returns 0 too many times, `NoProgress` will be /// returned. /// /// # Error /// /// If an error occurs at any point, that error is returned, and no further /// bytes are read. 
fn push_at_least(&mut self, min: uint, len: uint, buf: &mut Vec<u8>) -> IoResult<uint> { if min > len { return Err(IoError { detail: Some("the buffer is too short".to_string()), ..standard_error(InvalidInput) }); } let start_len = buf.len(); buf.reserve_additional(len); // we can't just use self.read_at_least(min, slice) because we need to push // successful reads onto the vector before any returned errors. let mut read = 0; while read < min { read += { let s = unsafe { slice_vec_capacity(buf, start_len + read, start_len + len) }; try!(self.read_at_least(1, s)) }; unsafe { buf.set_len(start_len + read) }; } Ok(read) } /// Reads exactly `len` bytes and gives you back a new vector of length /// `len` /// /// # Error /// /// Fails with the same conditions as `read`. Additionally returns error /// on EOF. Note that if an error is returned, then some number of bytes may /// have already been consumed from the underlying reader, and they are lost /// (not returned as part of the error). If this is unacceptable, then it is /// recommended to use the `push_at_least` or `read` methods. fn read_exact(&mut self, len: uint) -> IoResult<Vec<u8>> { let mut buf = Vec::with_capacity(len); match self.push_at_least(len, len, &mut buf) { Ok(_) => Ok(buf), Err(e) => Err(e), } } /// Reads all remaining bytes from the stream. /// /// # Error /// /// Returns any non-EOF error immediately. Previously read bytes are /// discarded when an error is returned. /// /// When EOF is encountered, all bytes read up to that point are returned. fn read_to_end(&mut self) -> IoResult<Vec<u8>> { let mut buf = Vec::with_capacity(DEFAULT_BUF_SIZE); loop { match self.push_at_least(1, DEFAULT_BUF_SIZE, &mut buf) { Ok(_) => {} Err(ref e) if e.kind == EndOfFile => break, Err(e) => return Err(e) } } return Ok(buf); } /// Reads all of the remaining bytes of this stream, interpreting them as a /// UTF-8 encoded stream. The corresponding string is returned. 
/// /// # Error /// /// This function returns all of the same errors as `read_to_end` with an /// additional error if the reader's contents are not a valid sequence of /// UTF-8 bytes. fn read_to_str(&mut self) -> IoResult<String> { self.read_to_end().and_then(|s| { match str::from_utf8(s.as_slice()) { Some(s) => Ok(s.to_string()), None => Err(standard_error(InvalidInput)), } }) } /// Create an iterator that reads a single byte on /// each iteration, until EOF. /// /// # Error /// /// Any error other than `EndOfFile` that is produced by the underlying Reader /// is returned by the iterator and should be handled by the caller. fn bytes<'r>(&'r mut self) -> extensions::Bytes<'r, Self> { extensions::Bytes::new(self) } // Byte conversion helpers /// Reads `n` little-endian unsigned integer bytes. /// /// `n` must be between 1 and 8, inclusive. fn read_le_uint_n(&mut self, nbytes: uint) -> IoResult<u64> { assert!(nbytes > 0 && nbytes <= 8); let mut val = 0u64; let mut pos = 0; let mut i = nbytes; while i > 0 { val += (try!(self.read_u8()) as u64) << pos; pos += 8; i -= 1; } Ok(val) } /// Reads `n` little-endian signed integer bytes. /// /// `n` must be between 1 and 8, inclusive. fn read_le_int_n(&mut self, nbytes: uint) -> IoResult<i64> { self.read_le_uint_n(nbytes).map(|i| extend_sign(i, nbytes)) } /// Reads `n` big-endian unsigned integer bytes. /// /// `n` must be between 1 and 8, inclusive. fn read_be_uint_n(&mut self, nbytes: uint) -> IoResult<u64> { assert!(nbytes > 0 && nbytes <= 8); let mut val = 0u64; let mut i = nbytes; while i > 0 { i -= 1; val += (try!(self.read_u8()) as u64) << i * 8; } Ok(val) } /// Reads `n` big-endian signed integer bytes. /// /// `n` must be between 1 and 8, inclusive. fn read_be_int_n(&mut self, nbytes: uint) -> IoResult<i64> { self.read_be_uint_n(nbytes).map(|i| extend_sign(i, nbytes)) } /// Reads a little-endian unsigned integer. /// /// The number of bytes returned is system-dependent. 
fn read_le_uint(&mut self) -> IoResult<uint> { self.read_le_uint_n(uint::BYTES).map(|i| i as uint) } /// Reads a little-endian integer. /// /// The number of bytes returned is system-dependent. fn read_le_int(&mut self) -> IoResult<int> { self.read_le_int_n(int::BYTES).map(|i| i as int) } /// Reads a big-endian unsigned integer. /// /// The number of bytes returned is system-dependent. fn read_be_uint(&mut self) -> IoResult<uint> { self.read_be_uint_n(uint::BYTES).map(|i| i as uint) } /// Reads a big-endian integer. /// /// The number of bytes returned is system-dependent. fn read_be_int(&mut self) -> IoResult<int> { self.read_be_int_n(int::BYTES).map(|i| i as int) } /// Reads a big-endian `u64`. /// /// `u64`s are 8 bytes long. fn read_be_u64(&mut self) -> IoResult<u64> { self.read_be_uint_n(8) } /// Reads a big-endian `u32`. /// /// `u32`s are 4 bytes long. fn read_be_u32(&mut self) -> IoResult<u32> { self.read_be_uint_n(4).map(|i| i as u32) } /// Reads a big-endian `u16`. /// /// `u16`s are 2 bytes long. fn read_be_u16(&mut self) -> IoResult<u16> { self.read_be_uint_n(2).map(|i| i as u16) } /// Reads a big-endian `i64`. /// /// `i64`s are 8 bytes long. fn read_be_i64(&mut self) -> IoResult<i64> { self.read_be_int_n(8) } /// Reads a big-endian `i32`. /// /// `i32`s are 4 bytes long. fn read_be_i32(&mut self) -> IoResult<i32> { self.read_be_int_n(4).map(|i| i as i32) } /// Reads a big-endian `i16`. /// /// `i16`s are 2 bytes long. fn read_be_i16(&mut self) -> IoResult<i16> { self.read_be_int_n(2).map(|i| i as i16) } /// Reads a big-endian `f64`. /// /// `f64`s are 8 byte, IEEE754 double-precision floating point numbers. fn read_be_f64(&mut self) -> IoResult<f64> { self.read_be_u64().map(|i| unsafe { transmute::<u64, f64>(i) }) } /// Reads a big-endian `f32`. /// /// `f32`s are 4 byte, IEEE754 single-precision floating point numbers. 
fn read_be_f32(&mut self) -> IoResult<f32> { self.read_be_u32().map(|i| unsafe { transmute::<u32, f32>(i) }) } /// Reads a little-endian `u64`. /// /// `u64`s are 8 bytes long. fn read_le_u64(&mut self) -> IoResult<u64> { self.read_le_uint_n(8) } /// Reads a little-endian `u32`. /// /// `u32`s are 4 bytes long. fn read_le_u32(&mut self) -> IoResult<u32> { self.read_le_uint_n(4).map(|i| i as u32) } /// Reads a little-endian `u16`. /// /// `u16`s are 2 bytes long. fn read_le_u16(&mut self) -> IoResult<u16> { self.read_le_uint_n(2).map(|i| i as u16) } /// Reads a little-endian `i64`. /// /// `i64`s are 8 bytes long. fn read_le_i64(&mut self) -> IoResult<i64> { self.read_le_int_n(8) } /// Reads a little-endian `i32`. /// /// `i32`s are 4 bytes long. fn read_le_i32(&mut self) -> IoResult<i32> { self.read_le_int_n(4).map(|i| i as i32) } /// Reads a little-endian `i16`. /// /// `i16`s are 2 bytes long. fn read_le_i16(&mut self) -> IoResult<i16> { self.read_le_int_n(2).map(|i| i as i16) } /// Reads a little-endian `f64`. /// /// `f64`s are 8 byte, IEEE754 double-precision floating point numbers. fn read_le_f64(&mut self) -> IoResult<f64> { self.read_le_u64().map(|i| unsafe { transmute::<u64, f64>(i) }) } /// Reads a little-endian `f32`. /// /// `f32`s are 4 byte, IEEE754 single-precision floating point numbers. fn read_le_f32(&mut self) -> IoResult<f32> { self.read_le_u32().map(|i| unsafe { transmute::<u32, f32>(i) }) } /// Read a u8. /// /// `u8`s are 1 byte. fn read_u8(&mut self) -> IoResult<u8> { self.read_byte() } /// Read an i8. /// /// `i8`s are 1 byte. fn read_i8(&mut self) -> IoResult<i8> { self.read_byte().map(|i| i as i8) } /// Creates a wrapper around a mutable reference to the reader. /// /// This is useful to allow applying adaptors while still /// retaining ownership of the original value. 
fn by_ref<'a>(&'a mut self) -> RefReader<'a, Self> { RefReader { inner: self } } } impl Reader for Box<Reader> { fn read(&mut self, buf: &mut [u8]) -> IoResult<uint> { self.read(buf) } } impl<'a> Reader for &'a mut Reader { fn read(&mut self, buf: &mut [u8]) -> IoResult<uint> { self.read(buf) } } /// Returns a slice of `v` between `start` and `end`. /// /// Similar to `slice()` except this function only bounds the slice on the /// capacity of `v`, not the length. /// /// # Failure /// /// Fails when `start` or `end` point outside the capacity of `v`, or when /// `start` > `end`. // Private function here because we aren't sure if we want to expose this as // API yet. If so, it should be a method on Vec. unsafe fn slice_vec_capacity<'a, T>(v: &'a mut Vec<T>, start: uint, end: uint) -> &'a mut [T] { use raw::Slice; use ptr::RawPtr; assert!(start <= end); assert!(end <= v.capacity()); transmute(Slice { data: v.as_ptr().offset(start as int), len: end - start }) } /// A `RefReader` is a struct implementing `Reader` which contains a reference /// to another reader. This is often useful when composing streams. 
/// /// # Example /// /// ``` /// # fn main() {} /// # fn process_input<R: Reader>(r: R) {} /// # fn foo() { /// use std::io; /// use std::io::util::LimitReader; /// /// let mut stream = io::stdin(); /// /// // Only allow the function to process at most one kilobyte of input /// { /// let stream = LimitReader::new(stream.by_ref(), 1024); /// process_input(stream); /// } /// /// // 'stream' is still available for use here /// /// # } /// ``` pub struct RefReader<'a, R> { /// The underlying reader which this is referencing inner: &'a mut R } impl<'a, R: Reader> Reader for RefReader<'a, R> { fn read(&mut self, buf: &mut [u8]) -> IoResult<uint> { self.inner.read(buf) } } impl<'a, R: Buffer> Buffer for RefReader<'a, R> { fn fill_buf<'a>(&'a mut self) -> IoResult<&'a [u8]> { self.inner.fill_buf() } fn consume(&mut self, amt: uint) { self.inner.consume(amt) } } fn extend_sign(val: u64, nbytes: uint) -> i64 { let shift = (8 - nbytes) * 8; (val << shift) as i64 >> shift } /// A trait for objects which are byte-oriented streams. Writers are defined by /// one method, `write`. This function will block until the provided buffer of /// bytes has been entirely written, and it will return any failures which occur. /// /// Another commonly overridden method is the `flush` method for writers such as /// buffered writers. /// /// Writers are intended to be composable with one another. Many objects /// throughout the I/O and related libraries take and provide types which /// implement the `Writer` trait. pub trait Writer { /// Write the entirety of a given buffer /// /// # Errors /// /// If an error happens during the I/O operation, the error is returned as /// `Err`. Note that it is considered an error if the entire buffer could /// not be written, and if an error is returned then it is unknown how much /// data (if any) was actually written. 
fn write(&mut self, buf: &[u8]) -> IoResult<()>; /// Flush this output stream, ensuring that all intermediately buffered /// contents reach their destination. /// /// This is by default a no-op and implementers of the `Writer` trait should /// decide whether their stream needs to be buffered or not. fn flush(&mut self) -> IoResult<()> { Ok(()) } /// Writes a formatted string into this writer, returning any error /// encountered. /// /// This method is primarily used to interface with the `format_args!` /// macro, but it is rare that this should explicitly be called. The /// `write!` macro should be favored to invoke this method instead. /// /// # Errors /// /// This function will return any I/O error reported while formatting. fn write_fmt(&mut self, fmt: &fmt::Arguments) -> IoResult<()> { // Create a shim which translates a Writer to a FormatWriter and saves // off I/O errors. instead of discarding them struct Adaptor<'a, T> { inner: &'a mut T, error: IoResult<()>, } impl<'a, T: Writer> fmt::FormatWriter for Adaptor<'a, T> { fn write(&mut self, bytes: &[u8]) -> fmt::Result { match self.inner.write(bytes) { Ok(()) => Ok(()), Err(e) => { self.error = Err(e); Err(fmt::WriteError) } } } } let mut output = Adaptor { inner: self, error: Ok(()) }; match fmt::write(&mut output, fmt) { Ok(()) => Ok(()), Err(..) => output.error } } /// Write a rust string into this sink. /// /// The bytes written will be the UTF-8 encoded version of the input string. /// If other encodings are desired, it is recommended to compose this stream /// with another performing the conversion, or to use `write` with a /// converted byte-array instead. #[inline] fn write_str(&mut self, s: &str) -> IoResult<()> { self.write(s.as_bytes()) } /// Writes a string into this sink, and then writes a literal newline (`\n`) /// byte afterwards. 
Note that the writing of the newline is *not* atomic in /// the sense that the call to `write` is invoked twice (once with the /// string and once with a newline character). /// /// If other encodings or line ending flavors are desired, it is recommended /// that the `write` method is used specifically instead. #[inline] fn write_line(&mut self, s: &str) -> IoResult<()> { self.write_str(s).and_then(|()| self.write(['\n' as u8])) } /// Write a single char, encoded as UTF-8. #[inline] fn write_char(&mut self, c: char) -> IoResult<()> { let mut buf = [0u8, ..4]; let n = c.encode_utf8(buf.as_mut_slice()); self.write(buf.slice_to(n)) } /// Write the result of passing n through `int::to_str_bytes`. #[inline] fn write_int(&mut self, n: int) -> IoResult<()> { write!(self, "{:d}", n) } /// Write the result of passing n through `uint::to_str_bytes`. #[inline] fn write_uint(&mut self, n: uint) -> IoResult<()> { write!(self, "{:u}", n) } /// Write a little-endian uint (number of bytes depends on system). #[inline] fn write_le_uint(&mut self, n: uint) -> IoResult<()> { extensions::u64_to_le_bytes(n as u64, uint::BYTES, |v| self.write(v)) } /// Write a little-endian int (number of bytes depends on system). #[inline] fn write_le_int(&mut self, n: int) -> IoResult<()> { extensions::u64_to_le_bytes(n as u64, int::BYTES, |v| self.write(v)) } /// Write a big-endian uint (number of bytes depends on system). #[inline] fn write_be_uint(&mut self, n: uint) -> IoResult<()> { extensions::u64_to_be_bytes(n as u64, uint::BYTES, |v| self.write(v)) } /// Write a big-endian int (number of bytes depends on system). #[inline] fn write_be_int(&mut self, n: int) -> IoResult<()> { extensions::u64_to_be_bytes(n as u64, int::BYTES, |v| self.write(v)) } /// Write a big-endian u64 (8 bytes). #[inline] fn write_be_u64(&mut self, n: u64) -> IoResult<()> { extensions::u64_to_be_bytes(n, 8u, |v| self.write(v)) } /// Write a big-endian u32 (4 bytes). 
#[inline] fn write_be_u32(&mut self, n: u32) -> IoResult<()> { extensions::u64_to_be_bytes(n as u64, 4u, |v| self.write(v)) } /// Write a big-endian u16 (2 bytes). #[inline] fn write_be_u16(&mut self, n: u16) -> IoResult<()> { extensions::u64_to_be_bytes(n as u64, 2u, |v| self.write(v)) } /// Write a big-endian i64 (8 bytes). #[inline] fn write_be_i64(&mut self, n: i64) -> IoResult<()> { extensions::u64_to_be_bytes(n as u64, 8u, |v| self.write(v)) } /// Write a big-endian i32 (4 bytes). #[inline] fn write_be_i32(&mut self, n: i32) -> IoResult<()> { extensions::u64_to_be_bytes(n as u64, 4u, |v| self.write(v)) } /// Write a big-endian i16 (2 bytes). #[inline] fn write_be_i16(&mut self, n: i16) -> IoResult<()> { extensions::u64_to_be_bytes(n as u64, 2u, |v| self.write(v)) } /// Write a big-endian IEEE754 double-precision floating-point (8 bytes). #[inline] fn write_be_f64(&mut self, f: f64) -> IoResult<()> { unsafe { self.write_be_u64(transmute(f)) } } /// Write a big-endian IEEE754 single-precision floating-point (4 bytes). #[inline] fn write_be_f32(&mut self, f: f32) -> IoResult<()> { unsafe { self.write_be_u32(transmute(f)) } } /// Write a little-endian u64 (8 bytes). #[inline] fn write_le_u64(&mut self, n: u64) -> IoResult<()> { extensions::u64_to_le_bytes(n, 8u, |v| self.write(v)) } /// Write a little-endian u32 (4 bytes). #[inline] fn write_le_u32(&mut self, n: u32) -> IoResult<()> { extensions::u64_to_le_bytes(n as u64, 4u, |v| self.write(v)) } /// Write a little-endian u16 (2 bytes). #[inline] fn write_le_u16(&mut self, n: u16) -> IoResult<()> { extensions::u64_to_le_bytes(n as u64, 2u, |v| self.write(v)) } /// Write a little-endian i64 (8 bytes). #[inline] fn write_le_i64(&mut self, n: i64) -> IoResult<()> { extensions::u64_to_le_bytes(n as u64, 8u, |v| self.write(v)) } /// Write a little-endian i32 (4 bytes). 
#[inline] fn write_le_i32(&mut self, n: i32) -> IoResult<()> { extensions::u64_to_le_bytes(n as u64, 4u, |v| self.write(v)) } /// Write a little-endian i16 (2 bytes). #[inline] fn write_le_i16(&mut self, n: i16) -> IoResult<()> { extensions::u64_to_le_bytes(n as u64, 2u, |v| self.write(v)) } /// Write a little-endian IEEE754 double-precision floating-point /// (8 bytes). #[inline] fn write_le_f64(&mut self, f: f64) -> IoResult<()> { unsafe { self.write_le_u64(transmute(f)) } } /// Write a little-endian IEEE754 single-precision floating-point /// (4 bytes). #[inline] fn write_le_f32(&mut self, f: f32) -> IoResult<()> { unsafe { self.write_le_u32(transmute(f)) } } /// Write a u8 (1 byte). #[inline] fn write_u8(&mut self, n: u8) -> IoResult<()> { self.write([n]) } /// Write an i8 (1 byte). #[inline] fn write_i8(&mut self, n: i8) -> IoResult<()> { self.write([n as u8]) } /// Creates a wrapper around a mutable reference to the writer. /// /// This is useful to allow applying wrappers while still /// retaining ownership of the original value. #[inline] fn by_ref<'a>(&'a mut self) -> RefWriter<'a, Self> { RefWriter { inner: self } } } impl Writer for Box<Writer> { #[inline] fn write(&mut self, buf: &[u8]) -> IoResult<()> { self.write(buf) } #[inline] fn flush(&mut self) -> IoResult<()> { self.flush() } } impl<'a> Writer for &'a mut Writer { #[inline] fn write(&mut self, buf: &[u8]) -> IoResult<()> { self.write(buf) } #[inline] fn flush(&mut self) -> IoResult<()> { self.flush() } } /// A `RefWriter` is a struct implementing `Writer` which contains a reference /// to another writer. This is often useful when composing streams. /// /// # Example /// /// ``` /// # fn main() {} /// # fn process_input<R: Reader>(r: R) {} /// # fn foo () { /// use std::io::util::TeeReader; /// use std::io::{stdin, MemWriter}; /// /// let mut output = MemWriter::new(); /// /// { /// // Don't give ownership of 'output' to the 'tee'. 
Instead we keep a /// // handle to it in the outer scope /// let mut tee = TeeReader::new(stdin(), output.by_ref()); /// process_input(tee); /// } /// /// println!("input processed: {}", output.unwrap()); /// # } /// ``` pub struct RefWriter<'a, W> { /// The underlying writer which this is referencing inner: &'a mut W } impl<'a, W: Writer> Writer for RefWriter<'a, W> { #[inline] fn write(&mut self, buf: &[u8]) -> IoResult<()> { self.inner.write(buf) } #[inline] fn flush(&mut self) -> IoResult<()> { self.inner.flush() } } /// A Stream is a readable and a writable object. Data written is typically /// received by the object which reads receive data from. pub trait Stream: Reader + Writer { } impl<T: Reader + Writer> Stream for T {} /// An iterator that reads a line on each iteration, /// until `.read_line()` encounters `EndOfFile`. /// /// # Notes about the Iteration Protocol /// /// The `Lines` may yield `None` and thus terminate /// an iteration, but continue to yield elements if iteration /// is attempted again. /// /// # Error /// /// Any error other than `EndOfFile` that is produced by the underlying Reader /// is returned by the iterator and should be handled by the caller. pub struct Lines<'r, T> { buffer: &'r mut T, } impl<'r, T: Buffer> Iterator<IoResult<String>> for Lines<'r, T> { fn next(&mut self) -> Option<IoResult<String>> { match self.buffer.read_line() { Ok(x) => Some(Ok(x)), Err(IoError { kind: EndOfFile, ..}) => None, Err(y) => Some(Err(y)) } } } /// An iterator that reads a utf8-encoded character on each iteration, /// until `.read_char()` encounters `EndOfFile`. /// /// # Notes about the Iteration Protocol /// /// The `Chars` may yield `None` and thus terminate /// an iteration, but continue to yield elements if iteration /// is attempted again. /// /// # Error /// /// Any error other than `EndOfFile` that is produced by the underlying Reader /// is returned by the iterator and should be handled by the caller. 
pub struct Chars<'r, T> { buffer: &'r mut T } impl<'r, T: Buffer> Iterator<IoResult<char>> for Chars<'r, T> { fn next(&mut self) -> Option<IoResult<char>> { match self.buffer.read_char() { Ok(x) => Some(Ok(x)), Err(IoError { kind: EndOfFile, ..}) => None, Err(y) => Some(Err(y)) } } } /// A Buffer is a type of reader which has some form of internal buffering to /// allow certain kinds of reading operations to be more optimized than others. /// This type extends the `Reader` trait with a few methods that are not /// possible to reasonably implement with purely a read interface. pub trait Buffer: Reader { /// Fills the internal buffer of this object, returning the buffer contents. /// Note that none of the contents will be "read" in the sense that later /// calling `read` may return the same contents. /// /// The `consume` function must be called with the number of bytes that are /// consumed from this buffer returned to ensure that the bytes are never /// returned twice. /// /// # Error /// /// This function will return an I/O error if the underlying reader was /// read, but returned an error. Note that it is not an error to return a /// 0-length buffer. fn fill_buf<'a>(&'a mut self) -> IoResult<&'a [u8]>; /// Tells this buffer that `amt` bytes have been consumed from the buffer, /// so they should no longer be returned in calls to `read`. fn consume(&mut self, amt: uint); /// Reads the next line of input, interpreted as a sequence of UTF-8 /// encoded unicode codepoints. If a newline is encountered, then the /// newline is contained in the returned string. 
/// /// # Example /// /// ```rust /// use std::io; /// /// let mut reader = io::stdin(); /// let input = reader.read_line().ok().unwrap_or("nothing".to_string()); /// ``` /// /// # Error /// /// This function has the same error semantics as `read_until`: /// /// * All non-EOF errors will be returned immediately /// * If an error is returned previously consumed bytes are lost /// * EOF is only returned if no bytes have been read /// * Reach EOF may mean that the delimiter is not present in the return /// value /// /// Additionally, this function can fail if the line of input read is not a /// valid UTF-8 sequence of bytes. fn read_line(&mut self) -> IoResult<String> { self.read_until('\n' as u8).and_then(|line| match str::from_utf8(line.as_slice()) { Some(s) => Ok(s.to_string()), None => Err(standard_error(InvalidInput)), } ) } /// Create an iterator that reads a line on each iteration until EOF. /// /// # Error /// /// Any error other than `EndOfFile` that is produced by the underlying Reader /// is returned by the iterator and should be handled by the caller. fn lines<'r>(&'r mut self) -> Lines<'r, Self> { Lines { buffer: self } } /// Reads a sequence of bytes leading up to a specified delimiter. Once the /// specified byte is encountered, reading ceases and the bytes up to and /// including the delimiter are returned. /// /// # Error /// /// If any I/O error is encountered other than EOF, the error is immediately /// returned. Note that this may discard bytes which have already been read, /// and those bytes will *not* be returned. It is recommended to use other /// methods if this case is worrying. /// /// If EOF is encountered, then this function will return EOF if 0 bytes /// have been read, otherwise the pending byte buffer is returned. This /// is the reason that the byte buffer returned may not always contain the /// delimiter. 
fn read_until(&mut self, byte: u8) -> IoResult<Vec<u8>> { let mut res = Vec::new(); let mut used; loop { { let available = match self.fill_buf() { Ok(n) => n, Err(ref e) if res.len() > 0 && e.kind == EndOfFile => { used = 0; break } Err(e) => return Err(e) }; match available.iter().position(|&b| b == byte) { Some(i) => { res.push_all(available.slice_to(i + 1)); used = i + 1; break } None => { res.push_all(available); used = available.len(); } } } self.consume(used); } self.consume(used); Ok(res) } /// Reads the next utf8-encoded character from the underlying stream. /// /// # Error /// /// If an I/O error occurs, or EOF, then this function will return `Err`. /// This function will also return error if the stream does not contain a /// valid utf-8 encoded codepoint as the next few bytes in the stream. fn read_char(&mut self) -> IoResult<char> { let first_byte = try!(self.read_byte()); let width = str::utf8_char_width(first_byte); if width == 1 { return Ok(first_byte as char) } if width == 0 { return Err(standard_error(InvalidInput)) } // not utf8 let mut buf = [first_byte, 0, 0, 0]; { let mut start = 1; while start < width { match try!(self.read(buf.mut_slice(start, width))) { n if n == width - start => break, n if n < width - start => { start += n; } _ => return Err(standard_error(InvalidInput)), } } } match str::from_utf8(buf.slice_to(width)) { Some(s) => Ok(s.char_at(0)), None => Err(standard_error(InvalidInput)) } } /// Create an iterator that reads a utf8-encoded character on each iteration /// until EOF. /// /// # Error /// /// Any error other than `EndOfFile` that is produced by the underlying Reader /// is returned by the iterator and should be handled by the caller. fn chars<'r>(&'r mut self) -> Chars<'r, Self> { Chars { buffer: self } } } /// When seeking, the resulting cursor is offset from a base by the offset given /// to the `seek` function. The base used is specified by this enumeration. 
pub enum SeekStyle { /// Seek from the beginning of the stream SeekSet, /// Seek from the end of the stream SeekEnd, /// Seek from the current position SeekCur, } /// An object implementing `Seek` internally has some form of cursor which can /// be moved within a stream of bytes. The stream typically has a fixed size, /// allowing seeking relative to either end. pub trait Seek { /// Return position of file cursor in the stream fn tell(&self) -> IoResult<u64>; /// Seek to an offset in a stream /// /// A successful seek clears the EOF indicator. Seeking beyond EOF is /// allowed, but seeking before position 0 is not allowed. /// /// # Errors /// /// * Seeking to a negative offset is considered an error /// * Seeking past the end of the stream does not modify the underlying /// stream, but the next write may cause the previous data to be filled in /// with a bit pattern. fn seek(&mut self, pos: i64, style: SeekStyle) -> IoResult<()>; } /// A listener is a value that can consume itself to start listening for /// connections. /// /// Doing so produces some sort of Acceptor. pub trait Listener<T, A: Acceptor<T>> { /// Spin up the listener and start queuing incoming connections /// /// # Error /// /// Returns `Err` if this listener could not be bound to listen for /// connections. In all cases, this listener is consumed. fn listen(self) -> IoResult<A>; } /// An acceptor is a value that presents incoming connections pub trait Acceptor<T> { /// Wait for and accept an incoming connection /// /// # Error /// /// Returns `Err` if an I/O error is encountered. fn accept(&mut self) -> IoResult<T>; /// Create an iterator over incoming connection attempts. /// /// Note that I/O errors will be yielded by the iterator itself. fn incoming<'r>(&'r mut self) -> IncomingConnections<'r, Self> { IncomingConnections { inc: self } } } /// An infinite iterator over incoming connection attempts. /// Calling `next` will block the task until a connection is attempted. 
/// /// Since connection attempts can continue forever, this iterator always returns /// `Some`. The `Some` contains the `IoResult` representing whether the /// connection attempt was successful. A successful connection will be wrapped /// in `Ok`. A failed connection is represented as an `Err`. pub struct IncomingConnections<'a, A> { inc: &'a mut A, } impl<'a, T, A: Acceptor<T>> Iterator<IoResult<T>> for IncomingConnections<'a, A> { fn next(&mut self) -> Option<IoResult<T>> { Some(self.inc.accept()) } } /// Creates a standard error for a commonly used flavor of error. The `detail` /// field of the returned error will always be `None`. /// /// # Example /// /// ``` /// use std::io; /// /// let eof = io::standard_error(io::EndOfFile); /// let einval = io::standard_error(io::InvalidInput); /// ``` pub fn standard_error(kind: IoErrorKind) -> IoError { let desc = match kind { EndOfFile => "end of file", IoUnavailable => "I/O is unavailable", InvalidInput => "invalid input", OtherIoError => "unknown I/O error", FileNotFound => "file not found", PermissionDenied => "permission denied", ConnectionFailed => "connection failed", Closed => "stream is closed", ConnectionRefused => "connection refused", ConnectionReset => "connection reset", ConnectionAborted => "connection aborted", NotConnected => "not connected", BrokenPipe => "broken pipe", PathAlreadyExists => "file already exists", PathDoesntExist => "no such file", MismatchedFileTypeForOperation => "mismatched file type", ResourceUnavailable => "resource unavailable", TimedOut => "operation timed out", ShortWrite(..) => "short write", NoProgress => "no progress", }; IoError { kind: kind, desc: desc, detail: None, } } /// A mode specifies how a file should be opened or created. These modes are /// passed to `File::open_mode` and are used to control where the file is /// positioned when it is initially opened. pub enum FileMode { /// Opens a file positioned at the beginning. Open, /// Opens a file positioned at EOF. 
Append, /// Opens a file, truncating it if it already exists. Truncate, } /// Access permissions with which the file should be opened. `File`s /// opened with `Read` will return an error if written to. pub enum FileAccess { /// Read-only access, requests to write will result in an error Read, /// Write-only access, requests to read will result in an error Write, /// Read-write access, no requests are denied by default ReadWrite, } /// Different kinds of files which can be identified by a call to stat #[deriving(PartialEq, Show, Hash)] pub enum FileType { /// This is a normal file, corresponding to `S_IFREG` TypeFile, /// This file is a directory, corresponding to `S_IFDIR` TypeDirectory, /// This file is a named pipe, corresponding to `S_IFIFO` TypeNamedPipe, /// This file is a block device, corresponding to `S_IFBLK` TypeBlockSpecial, /// This file is a symbolic link to another file, corresponding to `S_IFLNK` TypeSymlink, /// The type of this file is not recognized as one of the other categories TypeUnknown, } /// A structure used to describe metadata information about a file. This /// structure is created through the `stat` method on a `Path`. /// /// # Example /// /// ``` /// # fn main() {} /// # fn foo() { /// let info = match Path::new("foo.txt").stat() { /// Ok(stat) => stat, /// Err(e) => fail!("couldn't read foo.txt: {}", e), /// }; /// /// println!("byte size: {}", info.size); /// # } /// ``` #[deriving(Hash)] pub struct FileStat { /// The size of the file, in bytes pub size: u64, /// The kind of file this path points to (directory, file, pipe, etc.) pub kind: FileType, /// The file permissions currently on the file pub perm: FilePermission, // FIXME(#10301): These time fields are pretty useless without an actual // time representation, what are the milliseconds relative // to? 
/// The time that the file was created at, in platform-dependent /// milliseconds pub created: u64, /// The time that this file was last modified, in platform-dependent /// milliseconds pub modified: u64, /// The time that this file was last accessed, in platform-dependent /// milliseconds pub accessed: u64, /// Information returned by stat() which is not guaranteed to be /// platform-independent. This information may be useful on some platforms, /// but it may have different meanings or no meaning at all on other /// platforms. /// /// Usage of this field is discouraged, but if access is desired then the /// fields are located here. #[unstable] pub unstable: UnstableFileStat, } /// This structure represents all of the possible information which can be /// returned from a `stat` syscall which is not contained in the `FileStat` /// structure. This information is not necessarily platform independent, and may /// have different meanings or no meaning at all on some platforms. #[unstable] #[deriving(Hash)] pub struct UnstableFileStat { /// The ID of the device containing the file. pub device: u64, /// The file serial number. pub inode: u64, /// The device ID. pub rdev: u64, /// The number of hard links to this file. pub nlink: u64, /// The user ID of the file. pub uid: u64, /// The group ID of the file. pub gid: u64, /// The optimal block size for I/O. pub blksize: u64, /// The blocks allocated for this file. pub blocks: u64, /// User-defined flags for the file. pub flags: u64, /// The file generation number. 
pub gen: u64, } bitflags!( #[doc="A set of permissions for a file or directory is represented by a set of flags which are or'd together."] #[deriving(Hash)] #[deriving(Show)] flags FilePermission: u32 { static UserRead = 0o400, static UserWrite = 0o200, static UserExecute = 0o100, static GroupRead = 0o040, static GroupWrite = 0o020, static GroupExecute = 0o010, static OtherRead = 0o004, static OtherWrite = 0o002, static OtherExecute = 0o001, static UserRWX = UserRead.bits | UserWrite.bits | UserExecute.bits, static GroupRWX = GroupRead.bits | GroupWrite.bits | GroupExecute.bits, static OtherRWX = OtherRead.bits | OtherWrite.bits | OtherExecute.bits, #[doc="Permissions for user owned files, equivalent to 0644 on unix-like systems."] static UserFile = UserRead.bits | UserWrite.bits | GroupRead.bits | OtherRead.bits, #[doc="Permissions for user owned directories, equivalent to 0755 on unix-like systems."] static UserDir = UserRWX.bits | GroupRead.bits | GroupExecute.bits | OtherRead.bits | OtherExecute.bits, #[doc="Permissions for user owned executables, equivalent to 0755 on unix-like systems."] static UserExec = UserDir.bits, #[doc="All possible permissions enabled."] static AllPermissions = UserRWX.bits | GroupRWX.bits | OtherRWX.bits } ) #[cfg(test)] mod tests { use super::{IoResult, Reader, MemReader, NoProgress, InvalidInput}; use prelude::*; use uint; #[deriving(Clone, PartialEq, Show)] enum BadReaderBehavior { GoodBehavior(uint), BadBehavior(uint) } struct BadReader<T> { r: T, behavior: Vec<BadReaderBehavior>, } impl<T: Reader> BadReader<T> { fn new(r: T, behavior: Vec<BadReaderBehavior>) -> BadReader<T> { BadReader { behavior: behavior, r: r } } } impl<T: Reader> Reader for BadReader<T> { fn read(&mut self, buf: &mut [u8]) -> IoResult<uint> { let BadReader { ref mut behavior, ref mut r } = *self; loop { if behavior.is_empty() { // fall back on good return r.read(buf); } match behavior.as_mut_slice()[0] { GoodBehavior(0) => (), GoodBehavior(ref mut x) => { *x 
-= 1; return r.read(buf); } BadBehavior(0) => (), BadBehavior(ref mut x) => { *x -= 1; return Ok(0); } }; behavior.shift(); } } } #[test] fn test_read_at_least() { let mut r = BadReader::new(MemReader::new(Vec::from_slice(b"hello, world!")), Vec::from_slice([GoodBehavior(uint::MAX)])); let mut buf = [0u8, ..5]; assert!(r.read_at_least(1, buf).unwrap() >= 1); assert!(r.read_exact(5).unwrap().len() == 5); // read_exact uses read_at_least assert!(r.read_at_least(0, buf).is_ok()); let mut r = BadReader::new(MemReader::new(Vec::from_slice(b"hello, world!")), Vec::from_slice([BadBehavior(50), GoodBehavior(uint::MAX)])); assert!(r.read_at_least(1, buf).unwrap() >= 1); let mut r = BadReader::new(MemReader::new(Vec::from_slice(b"hello, world!")), Vec::from_slice([BadBehavior(1), GoodBehavior(1), BadBehavior(50), GoodBehavior(uint::MAX)])); assert!(r.read_at_least(1, buf).unwrap() >= 1); assert!(r.read_at_least(1, buf).unwrap() >= 1); let mut r = BadReader::new(MemReader::new(Vec::from_slice(b"hello, world!")), Vec::from_slice([BadBehavior(uint::MAX)])); assert_eq!(r.read_at_least(1, buf).unwrap_err().kind, NoProgress); let mut r = MemReader::new(Vec::from_slice(b"hello, world!")); assert_eq!(r.read_at_least(5, buf).unwrap(), 5); assert_eq!(r.read_at_least(6, buf).unwrap_err().kind, InvalidInput); } #[test] fn test_push_at_least() { let mut r = BadReader::new(MemReader::new(Vec::from_slice(b"hello, world!")), Vec::from_slice([GoodBehavior(uint::MAX)])); let mut buf = Vec::new(); assert!(r.push_at_least(1, 5, &mut buf).unwrap() >= 1); assert!(r.push_at_least(0, 5, &mut buf).is_ok()); let mut r = BadReader::new(MemReader::new(Vec::from_slice(b"hello, world!")), Vec::from_slice([BadBehavior(50), GoodBehavior(uint::MAX)])); assert!(r.push_at_least(1, 5, &mut buf).unwrap() >= 1); let mut r = BadReader::new(MemReader::new(Vec::from_slice(b"hello, world!")), Vec::from_slice([BadBehavior(1), GoodBehavior(1), BadBehavior(50), GoodBehavior(uint::MAX)])); assert!(r.push_at_least(1, 5, 
&mut buf).unwrap() >= 1); assert!(r.push_at_least(1, 5, &mut buf).unwrap() >= 1); let mut r = BadReader::new(MemReader::new(Vec::from_slice(b"hello, world!")), Vec::from_slice([BadBehavior(uint::MAX)])); assert_eq!(r.push_at_least(1, 5, &mut buf).unwrap_err().kind, NoProgress); let mut r = MemReader::new(Vec::from_slice(b"hello, world!")); assert_eq!(r.push_at_least(5, 1, &mut buf).unwrap_err().kind, InvalidInput); } }
34.314182
100
0.593514
79783600a81518cc75fbe0638a0a6920cc73d529
306
use crate::spec::Target; use crate::spec::sbf_base; pub fn target() -> Target { Target { llvm_target: "sbf".to_string(), pointer_width: 64, arch: "sbf".to_string(), data_layout: "e-m:e-p:64:64-i64:64-n32:64-S128".to_string(), options: sbf_base::opts(), } }
23.538462
68
0.575163
6aa77dc52ab890cce1b0d1436ffe53d1a17d6920
8,350
use crate::config::SshDeviceConfiguration; use crate::device::make_remote_app; use crate::errors::*; use crate::host::HostPlatform; use crate::platform::regular_platform::RegularPlatform; use crate::project::Project; use crate::utils::path_to_str; use crate::Build; use crate::BuildBundle; use crate::Device; use crate::DeviceCompatibility; use crate::Runnable; use std::fmt; use std::fmt::Formatter; use std::fmt::{Debug, Display}; use std::path::Path; use std::path::PathBuf; use std::process::Command; pub struct SshDevice { pub id: String, pub conf: SshDeviceConfiguration, } impl SshDevice { fn install_app( &self, project: &Project, build: &Build, runnable: &Runnable, ) -> Result<(BuildBundle, BuildBundle)> { debug!("make_remote_app {}", runnable.id); let build_bundle = make_remote_app(project, build, runnable)?; trace!("make_remote_app {} done", runnable.id); let remote_bundle = self.to_remote_bundle(&build_bundle)?; trace!("Create remote dir: {:?}", remote_bundle.bundle_dir); let _ = self .ssh_command()? 
.arg("mkdir") .arg("-p") .arg(&remote_bundle.bundle_dir) .status(); info!("Install {} to {}", runnable.id, self.id); self.sync(&build_bundle.bundle_dir, &remote_bundle.bundle_dir)?; self.sync(&build_bundle.lib_dir, &remote_bundle.lib_dir)?; Ok((build_bundle, remote_bundle)) } fn ssh_command(&self) -> Result<Command> { let mut command = Command::new("ssh"); if let Some(port) = self.conf.port { command.arg("-p").arg(&format!("{}", port)); } if atty::is(atty::Stream::Stdout) { command.arg("-t").arg("-o").arg("LogLevel=QUIET"); } command.arg(format!("{}@{}", self.conf.username, self.conf.hostname)); Ok(command) } fn sync_rsync(&self, rsync: Option<String>) -> Result<String> { match rsync { Some(rsync) => { let rsync_path = "/tmp/rsync"; let mut command = Command::new("scp"); command.arg("-q"); if let Some(port) = self.conf.port { command.arg("-P").arg(&format!("{}", port)); } command.arg(format!("{}", rsync)); command.arg(format!( "{}@{}:{}", self.conf.username, self.conf.hostname, rsync_path )); debug!("Running {:?}", command); if !command.status()?.success() { bail!("Error copying rsync binary ({:?})", command) } Ok(rsync_path.to_string()) } None => Ok("/usr/bin/rsync".to_string()), } } fn sync<FP: AsRef<Path>, TP: AsRef<Path>>(&self, from_path: FP, to_path: TP) -> Result<()> { let rsync = self.sync_rsync(self.conf.install_adhoc_rsync_local_path.clone()); let rsync = match rsync { Ok(rsync_path) => rsync_path, Err(error) => bail!("Problem with rsync on the target: {:?}", error), }; let mut command = Command::new("rsync"); command.arg(&format!("--rsync-path={}", rsync)); command.arg("-a").arg("-v"); if let Some(port) = self.conf.port { command.arg("-e").arg(&*format!("ssh -p {}", port)); }; if !log_enabled!(::log::Level::Debug) { command.stdout(::std::process::Stdio::null()); command.stderr(::std::process::Stdio::null()); } command .arg(&format!("{}/", path_to_str(&from_path.as_ref())?)) .arg(&format!( "{}@{}:{}/", self.conf.username, self.conf.hostname, 
path_to_str(&to_path.as_ref())? )); debug!("Running {:?}", command); if !command.status().with_context(||format!("failed to run '{:?}'", command))?.success() { bail!("Error syncing ssh directory ({:?})", command) } else { Ok(()) } } fn to_remote_bundle(&self, build_bundle: &BuildBundle) -> Result<BuildBundle> { let remote_prefix = PathBuf::from(self.conf.path.clone().unwrap_or("/tmp".into())).join("dinghy"); build_bundle.replace_prefix_with(remote_prefix) } } impl DeviceCompatibility for SshDevice { fn is_compatible_with_regular_platform(&self, platform: &RegularPlatform) -> bool { self.conf .platform .as_ref() .map_or(false, |it| *it == platform.id) } fn is_compatible_with_host_platform(&self, platform: &HostPlatform) -> bool { self.conf .platform .as_ref() .map_or(true, |it| *it == platform.id) } } impl Device for SshDevice { fn clean_app(&self, build_bundle: &BuildBundle) -> Result<()> { let status = self .ssh_command()? .arg(&format!( "rm -rf {}", path_to_str(&build_bundle.bundle_exe)? 
)) .status()?; if !status.success() { bail!("test fail.") } Ok(()) } fn debug_app( &self, _project: &Project, _build: &Build, _args: &[&str], _envs: &[&str], ) -> Result<BuildBundle> { unimplemented!() } fn id(&self) -> &str { &self.id } fn name(&self) -> &str { &self.id } fn run_app( &self, project: &Project, build: &Build, args: &[&str], envs: &[&str], ) -> Result<Vec<BuildBundle>> { let mut build_bundles = vec![]; let remote_shell_vars_as_context = |a: &str| -> Option<std::borrow::Cow<str>> { self.conf.remote_shell_vars.get(a).map(|s| s.into()) }; let args: Vec<String> = args .iter() .map(|&a| { shellexpand::full_with_context_no_errors( a, || remote_shell_vars_as_context("HOME").map(|s| PathBuf::from(&*s)), remote_shell_vars_as_context, ) }) .map(|a| ::shell_escape::escape(a).to_string()) .collect(); for runnable in &build.runnables { info!("Install {:?}", runnable.id); let (build_bundle, remote_bundle) = self.install_app(&project, &build, &runnable)?; debug!("Installed {:?}", runnable.id); let command = format!( "cd '{}' ; {} RUST_BACKTRACE=1 DINGHY=1 LD_LIBRARY_PATH=\"{}:$LD_LIBRARY_PATH\" {} {} {}", path_to_str(&remote_bundle.bundle_dir)?, envs.join(" "), path_to_str(&remote_bundle.lib_dir)?, path_to_str(&remote_bundle.bundle_exe)?, if build.build_args.compile_mode == ::cargo::core::compiler::CompileMode::Bench { "--bench" } else { "" }, args.join(" ") ); trace!("Ssh command: {}", command); info!( "Run {} on {} ({:?})", runnable.id, self.id, build.build_args.compile_mode ); let status = self.ssh_command()?.arg(&command).status()?; if !status.success() { bail!("Test failed 🐛") } build_bundles.push(build_bundle); } Ok(build_bundles) } fn start_remote_lldb(&self) -> Result<String> { unimplemented!() } } impl Debug for SshDevice { fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { Ok(fmt.write_str(format!("Ssh {{ \"id\": \"{}\", \"hostname\": \"{}\", \"username\": \"{}\", \"port\": \"{}\" }}", self.id, self.conf.hostname, self.conf.username, 
self.conf.port.as_ref().map_or("none".to_string(), |it| it.to_string())).as_str())?) } } impl Display for SshDevice { fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { write!(fmt, "{}", self.conf.hostname) } }
33.943089
130
0.502635
d94c3c89c1bd0a115f107e95d3373eea6f3fe3c7
11,593
// Copyright (c) The Libra Core Contributors // SPDX-License-Identifier: Apache-2.0 //! An implementation of x25519 elliptic curve key pairs required for [Diffie-Hellman key exchange](https://en.wikipedia.org/wiki/Diffie%E2%80%93Hellman_key_exchange) //! in the Libra project. //! //! This is an API for [Elliptic Curves for Security - RFC 7748](https://tools.ietf.org/html/rfc7748) //! and it only deals with long-term key generation and handling. //! //! Warning: This API will soon be updated in the [`nextgen`] module. //! //! # Examples //! //! ``` //! use crypto::x25519::{ //! derive_keypair_from_seed, generate_keypair, generate_keypair_from_rng, //! generate_keypair_hybrid, //! }; //! use rand::{rngs::StdRng, SeedableRng}; //! //! // Derive an X25519 from seed using the extract-then-expand HKDF method from RFC 5869. //! let salt = &b"some salt"[..]; //! // In production, ensure seed has at least 256 bits of entropy. //! let seed = [5u8; 32]; // seed is denoted as IKM in HKDF RFC 5869. //! let info = &b"some app info"[..]; //! //! let (private_key1, public_key1) = derive_keypair_from_seed(Some(salt), &seed, Some(info)); //! let (private_key2, public_key2) = derive_keypair_from_seed(Some(salt), &seed, Some(info)); //! assert_eq!(public_key1, public_key2); //! //! // Generate a random X25519 key pair. //! let (private_key, public_key) = generate_keypair(); //! //! // Generate an X25519 key pair from an RNG (in this example a SeedableRng). //! let seed = [1u8; 32]; //! let mut rng: StdRng = SeedableRng::from_seed(seed); //! let (private_key, public_key) = generate_keypair_from_rng(&mut rng); //! //! // Generate an X25519 key pair from an RNG and a user-provided seed. //! let salt = &b"some salt"[..]; //! // In production, ensure seed has at least 256 bits of entropy. //! let seed = [5u8; 32]; // seed is denoted as IKM in HKDF RFC 5869. //! let info = &b"some app info"[..]; //! 
let (private_key1, public_key1) = generate_keypair_hybrid(Some(salt), &seed, Some(info)); //! let (private_key2, public_key2) = generate_keypair_hybrid(Some(salt), &seed, Some(info)); //! assert_ne!(public_key1, public_key2); //! ``` use crate::{hkdf::Hkdf, utils::*}; use core::fmt; use crypto_derive::{SilentDebug, SilentDisplay}; use derive_deref::Deref; use failure::prelude::*; use proptest::{ arbitrary::any, prelude::{Arbitrary, BoxedStrategy}, strategy::*, }; use rand::{ rngs::{EntropyRng, StdRng}, CryptoRng, RngCore, SeedableRng, }; use serde::{de, export, ser}; use sha2::Sha256; use std::fmt::{Debug, Display}; use x25519_dalek; /// An x25519 private key. #[derive(Deref, SilentDisplay, SilentDebug)] pub struct X25519PrivateKey { value: x25519_dalek::StaticSecret, } /// An x25519 public key. #[derive(Copy, Clone, Deref)] pub struct X25519PublicKey { value: x25519_dalek::PublicKey, } #[deprecated( since = "1.0.0", note = "This will be superseded by the new cryptography API" )] impl Clone for X25519PrivateKey { fn clone(&self) -> Self { let bytes = self.to_bytes(); X25519PrivateKey { value: x25519_dalek::StaticSecret::from(bytes), } } } impl X25519PrivateKey { /// Length of the private key in bytes. pub const LENGTH: usize = 32; } impl X25519PublicKey { /// Length of the public key in bytes. pub const LENGTH: usize = 32; /// Obtain a public key from a slice. pub fn from_slice(data: &[u8]) -> Result<Self> { assert_eq!( data.len(), X25519PublicKey::LENGTH, "X25519 Public key wrong length error; expected {} but received {}", X25519PublicKey::LENGTH, data.len() ); let mut fixed_size_data: [u8; X25519PublicKey::LENGTH] = Default::default(); fixed_size_data.copy_from_slice(data); let key = x25519_dalek::PublicKey::from(fixed_size_data); Ok(X25519PublicKey { value: key }) } /// Convert a public key into a slice. 
pub fn to_slice(&self) -> [u8; Self::LENGTH] { *self.value.as_bytes() } fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{}", hex::encode(&self.to_slice())) } } impl PartialEq<X25519PublicKey> for X25519PublicKey { fn eq(&self, other: &X25519PublicKey) -> bool { self.as_bytes() == other.as_bytes() } } impl Eq for X25519PublicKey {} impl Display for X25519PublicKey { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { X25519PublicKey::fmt(self, f) } } impl Debug for X25519PublicKey { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { X25519PublicKey::fmt(self, f) } } impl ser::Serialize for X25519PrivateKey { fn serialize<S>(&self, serializer: S) -> export::Result<S::Ok, S::Error> where S: ser::Serializer, { serializer.serialize_bytes(&self.to_bytes()) } } impl ser::Serialize for X25519PublicKey { fn serialize<S>(&self, serializer: S) -> export::Result<S::Ok, S::Error> where S: ser::Serializer, { serializer.serialize_bytes(self.as_bytes()) } } impl From<&X25519PrivateKey> for X25519PublicKey { fn from(private_key: &X25519PrivateKey) -> Self { let public_key = (&private_key.value).into(); Self { value: public_key } } } struct X25519PrivateKeyVisitor; struct X25519PublicKeyVisitor; impl<'de> de::Visitor<'de> for X25519PrivateKeyVisitor { type Value = X25519PrivateKey; fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { formatter.write_str("X25519_dalek private key in bytes") } fn visit_bytes<E>(self, value: &[u8]) -> export::Result<X25519PrivateKey, E> where E: de::Error, { let mut fixed_size_data: [u8; X25519PrivateKey::LENGTH] = Default::default(); fixed_size_data.copy_from_slice(value); let key = x25519_dalek::StaticSecret::from(fixed_size_data); Ok(X25519PrivateKey { value: key }) } } impl<'de> de::Visitor<'de> for X25519PublicKeyVisitor { type Value = X25519PublicKey; fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { formatter.write_str("X25519_dalek 
public key in bytes") } fn visit_bytes<E>(self, value: &[u8]) -> export::Result<X25519PublicKey, E> where E: de::Error, { let mut fixed_size_data: [u8; X25519PublicKey::LENGTH] = Default::default(); fixed_size_data.copy_from_slice(value); let key = x25519_dalek::PublicKey::from(fixed_size_data); Ok(X25519PublicKey { value: key }) } } impl<'de> de::Deserialize<'de> for X25519PrivateKey { fn deserialize<D>(deserializer: D) -> export::Result<Self, D::Error> where D: de::Deserializer<'de>, { deserializer.deserialize_bytes(X25519PrivateKeyVisitor {}) } } impl<'de> de::Deserialize<'de> for X25519PublicKey { fn deserialize<D>(deserializer: D) -> export::Result<Self, D::Error> where D: de::Deserializer<'de>, { deserializer.deserialize_bytes(X25519PublicKeyVisitor {}) } } impl Arbitrary for X25519PublicKey { type Parameters = (); fn arbitrary_with(_args: Self::Parameters) -> Self::Strategy { public_key_strategy().boxed() } type Strategy = BoxedStrategy<Self>; } fn public_key_strategy() -> impl Strategy<Value = X25519PublicKey> { any::<[u8; X25519PublicKey::LENGTH]>() .prop_map(|seed| { let mut rng: StdRng = SeedableRng::from_seed(seed); let (_, public_key) = generate_keypair_from_rng(&mut rng); public_key }) .no_shrink() } /// Generates a consistent keypair `(X25519PrivateKey, X25519PublicKey)` for unit tests. pub fn generate_keypair_for_testing<R>(rng: &mut R) -> (X25519PrivateKey, X25519PublicKey) where R: ::rand::SeedableRng + ::rand::RngCore + ::rand::CryptoRng, { generate_keypair_from_rng(rng) } /// Generates a random key-pair `(X25519PrivateKey, X25519PublicKey)`. 
pub fn generate_keypair() -> (X25519PrivateKey, X25519PublicKey) { let mut rng = EntropyRng::new(); generate_keypair_from_rng(&mut rng) } /// Derives a keypair `(X25519PrivateKey, X25519PublicKey)` from /// a) salt (optional) - denoted as 'salt' in RFC 5869 /// b) seed - denoted as 'IKM' in RFC 5869 /// c) application info (optional) - denoted as 'info' in RFC 5869 /// /// using the HKDF key derivation protocol, as defined in RFC 5869. /// This implementation uses the full extract-then-expand HKDF steps /// based on the SHA-256 hash function. /// /// **Warning**: This function will soon be updated to return a KeyPair struct. pub fn derive_keypair_from_seed( salt: Option<&[u8]>, seed: &[u8], app_info: Option<&[u8]>, ) -> (X25519PrivateKey, X25519PublicKey) { let derived_bytes = Hkdf::<Sha256>::extract_then_expand(salt, seed, app_info, X25519PrivateKey::LENGTH); let mut key_bytes = [0u8; X25519PrivateKey::LENGTH]; key_bytes.copy_from_slice(derived_bytes.unwrap().as_slice()); let secret: x25519_dalek::StaticSecret = x25519_dalek::StaticSecret::from(key_bytes); let public: x25519_dalek::PublicKey = (&secret).into(); ( X25519PrivateKey { value: secret }, X25519PublicKey { value: public }, ) } /// Generates a keypair `(X25519PrivateKey, X25519PublicKey)` based on an RNG. pub fn generate_keypair_from_rng<R>(rng: &mut R) -> (X25519PrivateKey, X25519PublicKey) where R: RngCore + CryptoRng, { let secret: x25519_dalek::StaticSecret = x25519_dalek::StaticSecret::new(rng); let public: x25519_dalek::PublicKey = (&secret).into(); ( X25519PrivateKey { value: secret }, X25519PublicKey { value: public }, ) } /// Generates a random keypair `(X25519PrivateKey, X25519PublicKey)` and returns string /// representations tuple: /// 1. human readable public key. /// 2. hex encoded serialized public key. /// 3. hex encoded serialized private key. 
pub fn generate_and_encode_keypair() -> (String, String, String) { let (private_key, public_key) = generate_keypair(); let pub_key_human = hex::encode(public_key.to_slice()); let public_key_serialized_str = encode_to_string(&public_key); let private_key_serialized_str = encode_to_string(&private_key); ( pub_key_human, public_key_serialized_str, private_key_serialized_str, ) } /// Generates a random keypair `(PrivateKey, PublicKey)` by combining the output of `EntropyRng` /// with a user-provided seed. This concatenated seed is used as the seed to HKDF (RFC 5869). /// /// Similarly to `derive_keypair_from_seed` the user provides the following inputs: /// a) salt (optional) - denoted as 'salt' in RFC 5869 /// b) seed - denoted as 'IKM' in RFC 5869 /// c) application info (optional) - denoted as 'info' in RFC 5869 /// /// Note that this method is not deterministic, but the (random + static seed) key /// generation makes it safer against low entropy pools and weak RNGs. /// /// **Warning**: This function will soon be updated to return a [`KeyPair`] struct. pub fn generate_keypair_hybrid( salt: Option<&[u8]>, seed: &[u8], app_info: Option<&[u8]>, ) -> (X25519PrivateKey, X25519PublicKey) { let mut rng = EntropyRng::new(); let mut seed_from_rng = [0u8; ed25519_dalek::SECRET_KEY_LENGTH]; rng.fill_bytes(&mut seed_from_rng); let mut final_seed = seed.to_vec(); final_seed.extend_from_slice(&seed_from_rng); derive_keypair_from_seed(salt, &final_seed, app_info) }
33.602899
166
0.665919
ed1d890a66b4b8e76631b1de9d876ce29aed54c3
5,561
mod order_0; mod order_1; use std::io::{self, Write}; use byteorder::{LittleEndian, WriteBytesExt}; use super::Order; // Base `b`. const BASE: usize = 256; // Lower bound `L`. const LOWER_BOUND: u32 = 0x800000; #[allow(dead_code)] pub fn rans_encode(order: Order, data: &[u8]) -> io::Result<Vec<u8>> { let compressed_blob = match order { Order::Zero => { let (normalized_frequencies, compressed_data) = order_0::encode(data)?; let mut compressed_blob = Vec::new(); order_0::write_frequencies(&mut compressed_blob, &normalized_frequencies)?; compressed_blob.extend(&compressed_data); compressed_blob } Order::One => { let (normalized_contexts, compressed_data) = order_1::encode(data)?; let mut compressed_blob = Vec::new(); order_1::write_contexts(&mut compressed_blob, &normalized_contexts)?; compressed_blob.extend(&compressed_data); compressed_blob } }; let mut writer = Vec::new(); write_header( &mut writer, order, compressed_blob.len() as u32, data.len() as u32, )?; writer.write_all(&compressed_blob)?; Ok(writer) } fn write_header<W>( writer: &mut W, order: Order, compressed_len: u32, data_len: u32, ) -> io::Result<()> where W: Write, { writer.write_u8(u8::from(order))?; writer.write_u32::<LittleEndian>(compressed_len)?; writer.write_u32::<LittleEndian>(data_len)?; Ok(()) } fn normalize_frequencies(frequencies: &[u32]) -> Vec<u32> { const SCALE: u32 = 4095; let mut sum = 0; let mut max = 0; let mut max_index = 0; for (i, &f) in frequencies.iter().enumerate() { if f >= max { max = f; max_index = i; } sum += f; } if sum == 0 { return vec![0; frequencies.len()]; } let mut normalized_sum = 0; let mut normalized_frequencies = vec![0; frequencies.len()]; for (i, &f) in frequencies.iter().enumerate() { let normalized_frequency = f * SCALE / sum; normalized_frequencies[i] = normalized_frequency; normalized_sum += normalized_frequency; } // Because the calculation of `normalized_frequency` uses integer division (truncation), it's // possible that the sum of all the normalized 
frequencies is smaller than the scale value. In // this case, the difference is added to the last max value. if normalized_sum < SCALE { normalized_frequencies[max_index] += SCALE - normalized_sum; } normalized_frequencies } fn build_cumulative_frequencies(frequencies: &[u32]) -> Vec<u32> { let mut cumulative_frequencies = vec![0; frequencies.len()]; for i in 0..frequencies.len() - 1 { cumulative_frequencies[i + 1] = cumulative_frequencies[i] + frequencies[i]; } cumulative_frequencies } fn normalize<W>(writer: &mut W, mut x: u32, freq_i: u32) -> io::Result<u32> where W: Write, { while x >= (LOWER_BOUND >> 4) * freq_i { let b = (x & 0xff) as u8; writer.write_u8(b)?; x >>= 8; } Ok(x) } fn update(x: u32, freq_i: u32, cfreq_i: u32) -> u32 { (x / freq_i) * 0x1000 + cfreq_i + (x % freq_i) } #[cfg(test)] mod tests { use super::*; #[test] fn test_rans_encode_with_order_0() -> io::Result<()> { let data = b"noodles"; let actual = rans_encode(Order::Zero, data)?; let expected = [ 0x00, 0x25, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x64, 0x82, 0x49, 0x65, 0x00, 0x82, 0x49, 0x6c, 0x82, 0x49, 0x6e, 0x82, 0x49, 0x6f, 0x00, 0x84, 0x92, 0x73, 0x82, 0x49, 0x00, 0xe2, 0x06, 0x83, 0x18, 0x74, 0x7b, 0x41, 0x0c, 0x2b, 0xa9, 0x41, 0x0c, 0x25, 0x31, 0x80, 0x03, ]; assert_eq!(actual, expected); Ok(()) } #[test] fn test_rans_encode_with_order_1() -> io::Result<()> { let data = b"noodles"; let actual = rans_encode(Order::One, data)?; let expected = [ 0x01, 0x3b, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x00, 0x64, 0x83, 0xff, 0x6e, 0x83, 0xff, 0x6f, 0x00, 0x88, 0x01, 0x00, 0x64, 0x6c, 0x8f, 0xff, 0x00, 0x65, 0x00, 0x73, 0x8f, 0xff, 0x00, 0x6c, 0x65, 0x8f, 0xff, 0x00, 0x6e, 0x6f, 0x8f, 0xff, 0x00, 0x6f, 0x00, 0x64, 0x87, 0xff, 0x6f, 0x88, 0x00, 0x00, 0x00, 0x07, 0x84, 0x00, 0x02, 0x00, 0xe8, 0xff, 0x00, 0x00, 0xe8, 0xff, 0x00, 0x10, 0xe0, 0x00, 0x02, ]; assert_eq!(actual, expected); Ok(()) } #[test] fn test_write_header() -> io::Result<()> { let mut writer = Vec::new(); write_header(&mut 
writer, Order::One, 14930352, 9227465)?; let expected = [ 0x01, // order 0xb0, 0xd1, 0xe3, 0x00, // compressed length 0xc9, 0xcc, 0x8c, 0x00, // data length ]; assert_eq!(writer, expected); Ok(()) } #[test] fn test_normalize_frequencies() { let frequencies = [1, 2, 3, 0]; assert_eq!(normalize_frequencies(&frequencies), [682, 1365, 2048, 0]); let frequencies = [0, 0, 0, 0]; assert_eq!(normalize_frequencies(&frequencies), [0, 0, 0, 0]); } #[test] fn test_build_cumulative_frequencies() { let frequencies = [682, 1365, 2048, 0]; assert_eq!( build_cumulative_frequencies(&frequencies), [0, 682, 2047, 4095] ); } }
26.735577
98
0.577954
1428e8964e40358ceaae01c0db041bb1e01a2bd8
3,170
use crate::routers::api::user::WRONG_PASS_ATTEMPT_THRESHOLD; use crate::session::UserInfo; use crate::State; use actix_session::Session; use actix_web::error::{ErrorInternalServerError, ErrorUnauthorized}; use actix_web::web::Data; use actix_web::{HttpResponse, Result}; use entity::sea_orm::{EntityTrait, PaginatorTrait, QueryFilter, QueryOrder}; use sea_query::{Expr, Order}; use tachyon_template::{view::UserTemplate, AsyncRenderOnce}; #[derive(serde::Serialize, serde::Deserialize)] pub struct UserRequest { page_no: Option<usize>, page_size: Option<usize>, search_string: Option<String>, } fn convert_user_info<I>(user: I) -> Vec<tachyon_template::view::UserItem> where I: Iterator<Item = entity::user::Model>, { user.filter_map(|user| { user.fingerprint() .map(|f| { tachyon_template::view::UserItem::new( user.id, user.email, user.name, user.wrong_pass_attempt >= WRONG_PASS_ATTEMPT_THRESHOLD, f, ) }) .ok() }) .collect() } pub async fn handler( request: actix_web::web::Query<UserRequest>, data: Data<State>, session: Session, ) -> Result<HttpResponse> { match session.get::<UserInfo>("user")? 
{ None => Err(ErrorUnauthorized("login info not found")), Some(user) => { let mut page = entity::user::Entity::find(); if let Some(keywords) = &request.search_string { log::debug!("search string: {}", keywords); let expr = Expr::cust_with_values( "user_search_vector @@ plainto_tsquery(?)", vec![keywords.to_string()], ); page = page.filter(expr); } let page_size = request.page_size.unwrap_or(10); let paginator = page .order_by(entity::user::Column::Id, Order::Asc) .paginate(&data.sql_db, page_size); let items = paginator .fetch_page(request.page_no.unwrap_or(0)) .await .map_err(ErrorInternalServerError)?; let num_pages = paginator .num_pages() .await .map_err(ErrorInternalServerError)?; let prev_page = match request.page_no { None | Some(0) => None, Some(no) => Some(no - 1), }; let next_page = match request.page_no.unwrap_or(0) { n if n + 1 >= num_pages => None, n => Some(n + 1), }; log::debug!("select {} items", items.len()); let converted = convert_user_info(items.into_iter()); UserTemplate::new( user.perms.user_management, "User | Project Tachyon", user.email, converted, page_size, prev_page, next_page, request.search_string.clone(), ) .render_response() .await } } }
33.723404
76
0.530915
9c316af8528839b555558ead12af8c77f5dfbb9c
28,564
//! Handling of Call Frame Information (stack frame info). //! //! The root type exposed by this crate is [`CfiCache`], which offers a high-level API to extract //! CFI from object files and serialize a format that the Breakpad processor can understand. //! //! # Background //! //! Call Frame Information (CFI) is used by the [processor] to improve the quality of stacktraces //! during stackwalking. When the executable was compiled with frame pointer omission, the call //! stack does not contain sufficient information to resolve frames on its own. CFI contains //! programs that can calculate the base address of a frame based on register values of the current //! frame. //! //! Without CFI, the stackwalker needs to scan the stack memory for values that look like valid base //! addresses. This fequently yields false-positives. //! //! [processor]: ../processor/index.html //! [`CfiCache`]: struct.CfiCache.html use std::collections::HashMap; use std::io::{self, Write}; use std::ops::Range; use failure::{Fail, ResultExt}; use symbolic_common::{derive_failure, Arch, ByteView, UnknownArchError}; use symbolic_debuginfo::breakpad::{BreakpadObject, BreakpadStackRecord}; use symbolic_debuginfo::dwarf::gimli::{ BaseAddresses, CfaRule, CieOrFde, DebugFrame, EhFrame, Error, FrameDescriptionEntry, Reader, Register, RegisterRule, UninitializedUnwindContext, UnwindSection, }; use symbolic_debuginfo::dwarf::Dwarf; use symbolic_debuginfo::pdb::pdb::{self, FallibleIterator, FrameData, Rva, StringTable}; use symbolic_debuginfo::pdb::PdbObject; use symbolic_debuginfo::pe::{PeObject, RuntimeFunction, UnwindOperation}; use symbolic_debuginfo::{Object, ObjectLike}; /// The latest version of the file format. pub const CFICACHE_LATEST_VERSION: u32 = 1; /// Used to detect empty runtime function entries in PEs. const EMPTY_FUNCTION: RuntimeFunction = RuntimeFunction { begin_address: 0, end_address: 0, unwind_info_address: 0, }; /// Possible error kinds of `CfiError`. 
#[derive(Debug, Fail, Copy, Clone)] pub enum CfiErrorKind { /// Required debug sections are missing in the `Object` file. #[fail(display = "missing cfi debug sections")] MissingDebugInfo, /// The debug information in the `Object` file is not supported. #[fail(display = "unsupported debug format")] UnsupportedDebugFormat, /// The debug information in the `Object` file is invalid. #[fail(display = "bad debug information")] BadDebugInfo, /// The `Object`s architecture is not supported by symbolic. #[fail(display = "unsupported architecture")] UnsupportedArch, /// CFI for an invalid address outside the mapped range was encountered. #[fail(display = "invalid cfi address")] InvalidAddress, /// Generic error when writing CFI information, likely IO. #[fail(display = "failed to write cfi")] WriteError, /// Invalid magic bytes in the cfi cache header. #[fail(display = "bad cfi cache magic")] BadFileMagic, } derive_failure!( CfiError, CfiErrorKind, doc = "An error returned by [`AsciiCfiWriter`](struct.AsciiCfiWriter.html)." ); impl From<UnknownArchError> for CfiError { fn from(_: UnknownArchError) -> CfiError { CfiErrorKind::UnsupportedArch.into() } } /// Temporary helper trait to set the address size on any unwind section. trait UnwindSectionExt<R>: UnwindSection<R> where R: Reader, { fn set_address_size(&mut self, address_size: u8); } impl<R: Reader> UnwindSectionExt<R> for EhFrame<R> { fn set_address_size(&mut self, address_size: u8) { self.set_address_size(address_size) } } impl<R: Reader> UnwindSectionExt<R> for DebugFrame<R> { fn set_address_size(&mut self, address_size: u8) { self.set_address_size(address_size) } } /// Context information for unwinding. 
struct UnwindInfo<U> { arch: Arch, load_address: u64, section: U, bases: BaseAddresses, } impl<U> UnwindInfo<U> { pub fn new<O, R>(object: &O, addr: u64, mut section: U) -> Self where O: ObjectLike, R: Reader, U: UnwindSectionExt<R>, { let arch = object.arch(); let load_address = object.load_address(); // CFI information can have relative offsets to the virtual address of thir respective debug // section (either `.eh_frame` or `.debug_frame`). We need to supply this offset to the // entries iterator before starting to interpret instructions. The other base addresses are // not needed for CFI. let bases = BaseAddresses::default().set_eh_frame(addr); // Based on the architecture, pointers inside eh_frame and debug_frame have different sizes. // Configure the section to read them appropriately. if let Some(pointer_size) = arch.pointer_size() { section.set_address_size(pointer_size as u8); } UnwindInfo { arch, load_address, section, bases, } } } /// A service that converts call frame information (CFI) from an object file to Breakpad ASCII /// format and writes it to the given writer. 
/// /// The default way to use this writer is to create a writer, pass it to the `AsciiCfiWriter` and /// then process an object: /// /// ```rust,no_run /// use symbolic_common::ByteView; /// use symbolic_debuginfo::Object; /// use symbolic_minidump::cfi::AsciiCfiWriter; /// /// # fn main() -> Result<(), failure::Error> { /// let view = ByteView::open("/path/to/object")?; /// let object = Object::parse(&view)?; /// /// let mut writer = Vec::new(); /// AsciiCfiWriter::new(&mut writer).process(&object)?; /// # Ok(()) /// # } /// ``` /// /// For writers that implement `Default`, there is a convenience method that creates an instance and /// returns it right away: /// /// ```rust,no_run /// use symbolic_common::ByteView; /// use symbolic_debuginfo::Object; /// use symbolic_minidump::cfi::AsciiCfiWriter; /// /// # fn main() -> Result<(), failure::Error> { /// let view = ByteView::open("/path/to/object")?; /// let object = Object::parse(&view)?; /// /// let buffer = AsciiCfiWriter::<Vec<u8>>::transform(&object)?; /// # Ok(()) /// # } /// ``` pub struct AsciiCfiWriter<W: Write> { inner: W, } impl<W: Write> AsciiCfiWriter<W> { /// Creates a new `AsciiCfiWriter` that outputs to a writer. pub fn new(inner: W) -> Self { AsciiCfiWriter { inner } } /// Extracts CFI from the given object file. pub fn process(&mut self, object: &Object<'_>) -> Result<(), CfiError> { match object { Object::Breakpad(o) => self.process_breakpad(o), Object::MachO(o) => self.process_dwarf(o), Object::Elf(o) => self.process_dwarf(o), Object::Pdb(o) => self.process_pdb(o), Object::Pe(o) => self.process_pe(o), Object::SourceBundle(_) => Ok(()), } } /// Returns the wrapped writer from this instance. pub fn into_inner(self) -> W { self.inner } fn process_breakpad(&mut self, object: &BreakpadObject<'_>) -> Result<(), CfiError> { for record in object.stack_records() { match record.context(CfiErrorKind::BadDebugInfo)? 
{ BreakpadStackRecord::Cfi(r) => writeln!(self.inner, "STACK CFI {}", r.text), BreakpadStackRecord::Win(r) => writeln!(self.inner, "STACK WIN {}", r.text), } .context(CfiErrorKind::WriteError)? } Ok(()) } fn process_dwarf<'o, O>(&mut self, object: &O) -> Result<(), CfiError> where O: ObjectLike + Dwarf<'o>, { let endian = object.endianity(); // First load information from the DWARF debug_frame section. It does not contain any // references to other DWARF sections. if let Some(section) = object.section("debug_frame") { let frame = DebugFrame::new(&section.data, endian); let info = UnwindInfo::new(object, section.address, frame); self.read_cfi(&info)?; } // Indepdendently, Linux C++ exception handling information can also provide unwind info. if let Some(section) = object.section("eh_frame") { let frame = EhFrame::new(&section.data, endian); let info = UnwindInfo::new(object, section.address, frame); self.read_cfi(&info)?; } // Ignore if no information was found at all. Ok(()) } fn read_cfi<U, R>(&mut self, info: &UnwindInfo<U>) -> Result<(), CfiError> where R: Reader + Eq, U: UnwindSection<R>, { // Initialize an unwind context once and reuse it for the entire section. let mut ctx = UninitializedUnwindContext::new(); let mut entries = info.section.entries(&info.bases); while let Some(entry) = entries.next().context(CfiErrorKind::BadDebugInfo)? { // We skip all Common Information Entries and only process Frame Description Items here. // The iterator yields partial FDEs which need their associated CIE passed in via a // callback. This function is provided by the UnwindSection (frame), which then parses // the CIE and returns it for the FDE. if let CieOrFde::Fde(partial_fde) = entry { if let Ok(fde) = partial_fde.parse(U::cie_from_offset) { self.process_fde(info, &mut ctx, &fde)? 
} } } Ok(()) } fn process_fde<R, U>( &mut self, info: &UnwindInfo<U>, ctx: &mut UninitializedUnwindContext<R>, fde: &FrameDescriptionEntry<R>, ) -> Result<(), CfiError> where R: Reader + Eq, U: UnwindSection<R>, { // Retrieves the register that specifies the return address. We need to assign a special // format to this register for Breakpad. let ra = fde.cie().return_address_register(); // Interpret all DWARF instructions of this Frame Description Entry. This gives us an unwind // table that contains rules for retrieving registers at every instruction address. These // rules can directly be transcribed to breakpad STACK CFI records. let mut table = fde .rows(&info.section, &info.bases, ctx) .context(CfiErrorKind::BadDebugInfo)?; // Collect all rows first, as we need to know the final end address in order to write the // CFI INIT record describing the extent of the whole unwind table. let mut rows = Vec::new(); loop { match table.next_row() { Ok(None) => break, Ok(Some(row)) => rows.push(row.clone()), Err(Error::UnknownCallFrameInstruction(_)) => { continue; } Err(e) => { return Err(e.context(CfiErrorKind::BadDebugInfo).into()); } } } if let Some(first_row) = rows.first() { // Calculate the start address and total range covered by the CFI INIT record and its // subsequent CFI records. This information will be written into the CFI INIT record. let start = first_row.start_address(); let length = rows.last().unwrap().end_address() - start; // Verify that the CFI entry is in range of the mapped module. Zero values are a special // case and seem to indicate that the entry is no longer valid. All other cases are // considered erroneous CFI. if start < info.load_address { return match start { 0 => Ok(()), _ => Err(CfiErrorKind::InvalidAddress.into()), }; } // Every register rule in the table will be cached so that it can be compared with // subsequent occurrences. Only registers with changed rules will be written. 
let mut rule_cache = HashMap::new(); let mut cfa_cache = None; // Write records for every entry in the unwind table. for row in &rows { let mut written = false; let mut line = Vec::new(); // Depending on whether this is the first row or any subsequent row, print a INIT or // normal STACK CFI record. if row.start_address() == start { let start_addr = start - info.load_address; write!(line, "STACK CFI INIT {:x} {:x}", start_addr, length) .context(CfiErrorKind::WriteError)?; } else { let start_addr = row.start_address() - info.load_address; write!(line, "STACK CFI {:x}", start_addr).context(CfiErrorKind::WriteError)?; } // Write the mandatory CFA rule for this row, followed by optional register rules. // The actual formatting of the rules depends on their rule type. if cfa_cache != Some(row.cfa()) { cfa_cache = Some(row.cfa()); written |= Self::write_cfa_rule(&mut line, info.arch, row.cfa())?; } // Print only registers that have changed rules to their previous occurrence to // reduce the number of rules per row. Then, cache the new occurrence for the next // row. 
for &(register, ref rule) in row.registers() { if !rule_cache.get(&register).map_or(false, |c| c == &rule) { rule_cache.insert(register, rule); written |= Self::write_register_rule(&mut line, info.arch, register, rule, ra)?; } } if written { self.inner .write_all(&line) .and_then(|_| writeln!(self.inner)) .context(CfiErrorKind::WriteError)?; } } } Ok(()) } fn write_cfa_rule<R: Reader, T: Write>( mut target: T, arch: Arch, rule: &CfaRule<R>, ) -> Result<bool, CfiError> { let formatted = match rule { CfaRule::RegisterAndOffset { register, offset } => { match arch.register_name(register.0) { Some(register) => format!("{} {} +", register, *offset), None => return Ok(false), } } CfaRule::Expression(_) => return Ok(false), }; write!(target, " .cfa: {}", formatted).context(CfiErrorKind::WriteError)?; Ok(true) } fn write_register_rule<R: Reader, T: Write>( mut target: T, arch: Arch, register: Register, rule: &RegisterRule<R>, ra: Register, ) -> Result<bool, CfiError> { let formatted = match rule { RegisterRule::Undefined => return Ok(false), RegisterRule::SameValue => match arch.register_name(register.0) { Some(reg) => reg.into(), None => return Ok(false), }, RegisterRule::Offset(offset) => format!(".cfa {} + ^", offset), RegisterRule::ValOffset(offset) => format!(".cfa {} +", offset), RegisterRule::Register(register) => match arch.register_name(register.0) { Some(reg) => reg.into(), None => return Ok(false), }, RegisterRule::Expression(_) => return Ok(false), RegisterRule::ValExpression(_) => return Ok(false), RegisterRule::Architectural => return Ok(false), }; // Breakpad requires an explicit name for the return address register. In all other cases, // we use platform specific names for each register as specified by Breakpad. 
let register_name = if register == ra { ".ra" } else { match arch.register_name(register.0) { Some(reg) => reg, None => return Ok(false), } }; write!(target, " {}: {}", register_name, formatted).context(CfiErrorKind::WriteError)?; Ok(true) } fn process_pdb(&mut self, pdb: &PdbObject<'_>) -> Result<(), CfiError> { let mut pdb = pdb.inner().write(); let frame_table = pdb.frame_table().context(CfiErrorKind::BadDebugInfo)?; let address_map = pdb.address_map().context(CfiErrorKind::BadDebugInfo)?; // See `PdbDebugSession::build`. let string_table = match pdb.string_table() { Ok(string_table) => Some(string_table), Err(pdb::Error::StreamNameNotFound) => None, Err(e) => Err(e).context(CfiErrorKind::BadDebugInfo)?, }; let mut frames = frame_table.iter(); let mut last_frame: Option<FrameData> = None; while let Some(frame) = frames.next().context(CfiErrorKind::BadDebugInfo)? { // Frame data information sometimes contains code_size values close to the maximum `u32` // value, such as `0xffffff6e`. Documentation does not describe the meaning of such // values, but clearly they are not actual code sizes. Since these values also always // occur with a `code_start` close to the end of a function's code range, it seems // likely that these belong to the function epilog and code_size has a different meaning // in this case. Until this value is understood, skip these entries. if frame.code_size > i32::max_value() as u32 { continue; } // Only print a stack record if information has changed from the last list. It is // surprisingly common (especially in system library PDBs) for DIA to return a series of // identical IDiaFrameData objects. For kernel32.pdb from Windows XP SP2 on x86, this // check reduces the size of the dumped symbol file by a third. 
if let Some(ref last) = last_frame { if frame.ty == last.ty && frame.code_start == last.code_start && frame.code_size == last.code_size && frame.prolog_size == last.prolog_size { continue; } } // Address ranges need to be translated to the RVA address space. The prolog and the // code portions of the frame have to be treated independently as they may have // independently changed in size, or may even have been split. let prolog_size = u32::from(frame.prolog_size); let prolog_end = frame.code_start + prolog_size; let code_end = frame.code_start + frame.code_size; let mut prolog_ranges = address_map .rva_ranges(frame.code_start..prolog_end) .collect::<Vec<_>>(); let mut code_ranges = address_map .rva_ranges(prolog_end..code_end) .collect::<Vec<_>>(); // Check if the prolog and code bytes remain contiguous and only output a single record. // This is only done for compactness of the symbol file. Since the majority of PDBs // other than the Kernel do not have translated address spaces, this will be true for // most records. let is_contiguous = prolog_ranges.len() == 1 && code_ranges.len() == 1 && prolog_ranges[0].end == code_ranges[0].start; if is_contiguous { self.write_pdb_stackinfo( string_table.as_ref(), &frame, prolog_ranges[0].start, code_ranges[0].end, prolog_ranges[0].end - prolog_ranges[0].start, )?; } else { // Output the prolog first, and then code frames in RVA order. 
prolog_ranges.sort_unstable_by_key(|range| range.start); code_ranges.sort_unstable_by_key(|range| range.start); for Range { start, end } in prolog_ranges { self.write_pdb_stackinfo( string_table.as_ref(), &frame, start, end, end - start, )?; } for Range { start, end } in code_ranges { self.write_pdb_stackinfo(string_table.as_ref(), &frame, start, end, 0)?; } } last_frame = Some(frame); } Ok(()) } fn write_pdb_stackinfo( &mut self, string_table: Option<&StringTable<'_>>, frame: &FrameData, start: Rva, end: Rva, prolog_size: u32, ) -> Result<(), CfiError> { let code_size = end - start; let program_or_bp = frame.program.is_some() && string_table.is_some() || frame.uses_base_pointer; write!( self.inner, "STACK WIN {:x} {:x} {:x} {:x} {:x} {:x} {:x} {:x} {:x} {} ", frame.ty as u8, start.0, code_size, prolog_size, 0, // epilog_size frame.params_size, frame.saved_regs_size, frame.locals_size, frame.max_stack_size.unwrap_or(0), if program_or_bp { 1 } else { 0 }, ) .context(CfiErrorKind::WriteError)?; match frame.program { Some(ref prog_ref) => { let string_table = match string_table { Some(string_table) => string_table, None => return Ok(writeln!(self.inner).context(CfiErrorKind::WriteError)?), }; let program_string = prog_ref .to_string_lossy(&string_table) .context(CfiErrorKind::BadDebugInfo)?; writeln!(self.inner, "{}", program_string.trim()) .context(CfiErrorKind::WriteError)?; } None => { writeln!(self.inner, "{}", if program_or_bp { 1 } else { 0 }) .context(CfiErrorKind::WriteError)?; } } Ok(()) } fn process_pe(&mut self, pe: &PeObject<'_>) -> Result<(), CfiError> { let sections = pe.sections(); let exception_data = match pe.exception_data() { Some(data) => data, None => return Ok(()), }; for function_result in exception_data { let function = function_result.context(CfiErrorKind::BadDebugInfo)?; // Exception directories can contain zeroed out sections which need to be skipped. // Neither their start/end RVA nor the unwind info RVA is valid. 
if function == EMPTY_FUNCTION { continue; } // The minimal stack size is 8 for RIP let mut stack_size = 8; // Special handling for machine frames let mut machine_frame_offset = 0; let mut next_function = Some(function); while let Some(next) = next_function { let unwind_info = exception_data .get_unwind_info(next, sections) .context(CfiErrorKind::BadDebugInfo)?; for code_result in &unwind_info { let code = code_result.context(CfiErrorKind::BadDebugInfo)?; match code.operation { UnwindOperation::PushNonVolatile(_) => { stack_size += 8; } UnwindOperation::Alloc(size) => { stack_size += size; } UnwindOperation::PushMachineFrame(is_error) => { stack_size += if is_error { 48 } else { 40 }; machine_frame_offset = stack_size; } _ => { // All other codes do not modify RSP } } } next_function = unwind_info.chained_info; } writeln!( self.inner, "STACK CFI INIT {:x} {:x} .cfa: $rsp 8 + .ra: .cfa 8 - ^", function.begin_address, function.end_address - function.begin_address, ) .context(CfiErrorKind::WriteError)?; if machine_frame_offset > 0 { writeln!( self.inner, "STACK CFI {:x} .cfa: $rsp {} + $rsp: .cfa {} - ^ .ra: .cfa {} - ^", function.begin_address, stack_size, stack_size - machine_frame_offset + 24, // old RSP offset stack_size - machine_frame_offset + 48, // entire frame offset ) .context(CfiErrorKind::WriteError)? } else { writeln!( self.inner, "STACK CFI {:x} .cfa: $rsp {} +", function.begin_address, stack_size, ) .context(CfiErrorKind::WriteError)? } } Ok(()) } } impl<W: Write + Default> AsciiCfiWriter<W> { /// Extracts CFI from the given object and pipes it to a new writer instance. pub fn transform(object: &Object<'_>) -> Result<W, CfiError> { let mut writer = Default::default(); AsciiCfiWriter::new(&mut writer).process(object)?; Ok(writer) } } struct CfiCacheV1<'a> { byteview: ByteView<'a>, } impl<'a> CfiCacheV1<'a> { pub fn raw(&self) -> &[u8] { &self.byteview } } enum CfiCacheInner<'a> { V1(CfiCacheV1<'a>), } /// A cache file for call frame information (CFI). 
/// /// The default way to use this cache is to construct it from an `Object` and save it to a file. /// Then, load it from the file and pass it to the minidump processor. /// /// ```rust,no_run /// use std::fs::File; /// use symbolic_common::ByteView; /// use symbolic_debuginfo::Object; /// use symbolic_minidump::cfi::CfiCache; /// /// # fn main() -> Result<(), failure::Error> { /// let view = ByteView::open("/path/to/object")?; /// let object = Object::parse(&view)?; /// let cache = CfiCache::from_object(&object)?; /// cache.write_to(File::create("my.cficache")?)?; /// # Ok(()) /// # } /// ``` /// /// ```rust,no_run /// use symbolic_common::ByteView; /// use symbolic_minidump::cfi::CfiCache; /// /// # fn main() -> Result<(), failure::Error> { /// let view = ByteView::open("my.cficache")?; /// let cache = CfiCache::from_bytes(view)?; /// # Ok(()) /// # } /// ``` /// pub struct CfiCache<'a> { inner: CfiCacheInner<'a>, } impl CfiCache<'static> { /// Construct a CFI cache from an `Object`. pub fn from_object(object: &Object<'_>) -> Result<Self, CfiError> { let buffer = AsciiCfiWriter::transform(object)?; let byteview = ByteView::from_vec(buffer); let inner = CfiCacheInner::V1(CfiCacheV1 { byteview }); Ok(CfiCache { inner }) } } impl<'a> CfiCache<'a> { /// Load a symcache from a `ByteView`. pub fn from_bytes(byteview: ByteView<'a>) -> Result<Self, CfiError> { if byteview.len() == 0 || byteview.starts_with(b"STACK") { let inner = CfiCacheInner::V1(CfiCacheV1 { byteview }); return Ok(CfiCache { inner }); } Err(CfiErrorKind::BadFileMagic.into()) } /// Returns the cache file format version. pub fn version(&self) -> u32 { match self.inner { CfiCacheInner::V1(_) => 1, } } /// Returns whether this cache is up-to-date. pub fn is_latest(&self) -> bool { self.version() == CFICACHE_LATEST_VERSION } /// Returns the raw buffer of the cache file. pub fn as_slice(&self) -> &[u8] { match self.inner { CfiCacheInner::V1(ref v1) => v1.raw(), } } /// Writes the cache to the given writer. 
pub fn write_to<W: Write>(&self, mut writer: W) -> Result<(), io::Error> { io::copy(&mut self.as_slice(), &mut writer)?; Ok(()) } }
37.04799
100
0.565572
1c7be710c27075d73ce0ee0dcdbca7b118ae4ed8
1,161
use anyhow::Result; use async_graphql::{Context, Object}; use sql_builder::prelude::*; use sqlx::{query_as, sqlite::SqlitePool}; #[derive(sqlx::FromRow)] pub struct Entity { pub id: i32, pub name: String, parent_id: Option<i32>, } #[Object] impl Entity { async fn id(&self) -> &i32 { &self.id } async fn name(&self) -> &String { &self.name } async fn parent_id(&self) -> &Option<i32> { &self.parent_id } async fn parent<'a>(&self, ctx: &'a Context<'_>) -> Option<&'a Self> { let cache = ctx .data::<Vec<Entity>>() .expect("Couldn't get entity cache"); cache.iter().find(|e| Some(e.id) == self.parent_id) } } fn base_select() -> SqlBuilder { let fields = ["Z_ENT as id", "Z_NAME as name", "Z_SUPER as parent_id"]; let mut builder = SqlBuilder::select_from("Z_PRIMARYKEY"); builder.fields(&fields); builder } pub async fn entities(pool: &SqlitePool) -> Result<Vec<Entity>> { let select = base_select(); let records = query_as::<_, Entity>(select.sql()?.as_str()) .fetch_all(pool) .await?; Ok(records) }
23.22
75
0.584841
3982f6d6b72a93587dc3c2acaeae0f8dd3bcbdd7
2,915
use actix_web::{test, App}; use guardian_backend::api; use guardian_backend::data::v1::database_pool; use guardian_backend::models::v2::gurl::{Gurl, GurlRequest}; #[actix_rt::test] pub async fn get_gurl_most_recently_added_works() { dotenv::dotenv().ok(); let database_pool = database_pool::get("DATABASE_URL"); let mut guardian_service = test::init_service( App::new() .data(database_pool.clone()) .configure(api::v2::gurl::configure) .configure(api::v2::gurl_most_recently_added::configure), ) .await; let url_test_value: String = "api_v2_gurl_most_recently_added_works".to_string(); let liked_test_value: bool = false; let gurl_request_insert = GurlRequest { id: None, url: Some(url_test_value.to_string()), liked: Some(liked_test_value), }; let request_insert_one = test::TestRequest::post() .uri(api::v2::paths::GURL) .set_json(&gurl_request_insert) .to_request(); let _response_insert_one = test::call_service(&mut guardian_service, request_insert_one).await; let request_insert_two = test::TestRequest::post() .uri(api::v2::paths::GURL) .set_json(&gurl_request_insert) .to_request(); let _response_insert_two = test::call_service(&mut guardian_service, request_insert_two).await; let gurl_request_get = GurlRequest { id: None, url: Some(url_test_value.to_string()), liked: None, }; let request_get = test::TestRequest::get() .uri(api::v2::paths::GURL) .set_json(&gurl_request_get) .to_request(); let response_get = test::call_service(&mut guardian_service, request_get).await; let vector_of_gurls: Vec<Gurl> = test::read_body_json(response_get).await; assert_eq!(vector_of_gurls.len(), 2); let vector_of_ids: Vec<i32> = vector_of_gurls.iter().map(|gurl| gurl.id).collect(); let request_get_most_recently_added_gurl = test::TestRequest::get() .uri(api::v2::paths::GURL_MOST_RECENTLY_ADDED) .to_request(); let response_get_most_recently_added_gurl = test::call_service(&mut guardian_service, request_get_most_recently_added_gurl).await; let most_recently_added_gurl: Option<Gurl> = 
test::read_body_json(response_get_most_recently_added_gurl).await; assert!(most_recently_added_gurl.is_some()); assert_eq!(vector_of_ids[1], most_recently_added_gurl.unwrap().id); for gurl_id in vector_of_ids { let gurl_request_delete = GurlRequest { id: Some(gurl_id), url: None, liked: None, }; let request_delete = test::TestRequest::delete() .uri(api::v2::paths::GURL) .set_json(&gurl_request_delete) .to_request(); let _response_delete = test::call_service(&mut guardian_service, request_delete).await; } }
38.355263
99
0.672041
1ae11938527b0e32d63b28f5bae3064ea17dd0e4
1,080
table! { lists (id) { id -> Nullable<Integer>, user_id -> Integer, name -> Text, description -> Nullable<Text>, } } table! { lists_tasks (id) { id -> Nullable<Integer>, list_id -> Nullable<Integer>, task_id -> Integer, } } table! { tasks (id) { id -> Nullable<Integer>, name -> Text, user_id -> Integer, created_at -> Timestamp, updated_at -> Nullable<Timestamp>, description -> Nullable<Text>, due_date -> Nullable<Timestamp>, reminders -> Nullable<Text>, } } table! { users (id) { id -> Nullable<Integer>, created_at -> Timestamp, updated_at -> Nullable<Timestamp>, username -> Text, email -> Text, password -> Text, } } joinable!(lists -> users (user_id)); joinable!(lists_tasks -> lists (list_id)); joinable!(lists_tasks -> tasks (task_id)); joinable!(tasks -> users (user_id)); allow_tables_to_appear_in_same_query!( lists, lists_tasks, tasks, users, );
20.377358
42
0.549074
3a308be021746360e3faad2861998103c38542f5
1,894
// min-version: 1.44.0 // max-version: 1.54.0 // === LLDB TESTS ================================================================================== // lldb-command:run // lldb-command:print os_string // lldbg-check:[...]os_string = "A∆й中" [...] // lldb-command:print empty_os_string // lldbg-check:[...]empty_os_string = "" [...] // lldb-command:print c_string1 // lldbg-check:[...]c_string1 = "A∆й中" [...] // lldb-command:print path_buf // lldbg-check:[...]path_buf = "/a/b/∂" [...] // lldb-command:print empty_path_buf // lldbg-check:[...]empty_path_buf = "" [...] // lldb-command:print os_str1 // lldbg-check:[...]os_str1 = [...] "A∆й中" // lldb-command:print os_str2 // lldbg-check:[...]os_str2 = [...] "A∆й中" // lldb-command:print empty_os_str // lldbg-check:[...]empty_os_str = [...] "" // lldb-command:print path1 // lldbg-check:[...]path1 = [...] "/a/b/∂" // lldb-command:print empty_path // lldbg-check:[...]empty_path = [...] "" // lldb-command:print c_str1 // lldbg-check:[...]c_str1 = [...] "abcd" // === GDB TESTS ================================================================================== // gdb-command:run // gdb-command:print os_string // gdbg-check:[...]$1 = "A∆й中" // gdb-command:print empty_os_string // gdbg-check:[...]$2 = "" use std::ffi::{CStr, CString, OsStr, OsString}; use std::ops::DerefMut; use std::path::{Path, PathBuf}; fn main() { let mut os_string = OsString::from("A∆й中"); let empty_os_string = OsString::from(""); let c_string1 = CString::new("A∆й中").unwrap(); let path_buf = PathBuf::from("/a/b/∂"); let empty_path_buf = PathBuf::from(""); let os_str1 = OsStr::new("A∆й中"); let os_str2 = os_string.deref_mut(); let empty_os_str = OsStr::new(""); let path1 = Path::new("/a/b/∂"); let empty_path = Path::new(""); let c_str1 = CStr::from_bytes_with_nul(b"abcd\0").unwrap(); print!(""); // #break }
32.101695
100
0.550158
293db1bfa14ebfd22f438bd402eceab7d1c744ef
1,064
use crate::filters::{ IIRFilterStatePacket, IIRLowPassFilter, ModulatedFilter, ModulatedFilterStatePacket, StateVariableTPTFilter, StateVariableTPTFilterStatePacket, }; use cpal::SampleRate; use once_cell::sync::OnceCell; pub mod adsr; pub mod filters; pub mod lfo; // TODO: rather janky solution to avoid cyclic dependency with ravetable bin crate // rework a better option in the future. pub static SAMPLE_RATE: OnceCell<SampleRate> = OnceCell::new(); pub fn get_sample_rate() -> f32 { SAMPLE_RATE.get().unwrap().0 as f32 } pub fn set_effects_sample_rate(sample_rate: SampleRate) { SAMPLE_RATE.set(sample_rate).unwrap(); } pub enum Effect { ModulatedFilter(ModulatedFilter), IIRFilter(IIRLowPassFilter), // TODO: IIRFilter should be more than lowpass StateVariablePTPFilter(StateVariableTPTFilter), } #[derive(Clone, Copy, PartialEq, Debug)] pub enum EffectStatePacket { ModulatedFilter(ModulatedFilterStatePacket), IIRFilter(IIRFilterStatePacket), StateVariablePTPFilter(StateVariableTPTFilterStatePacket), }
29.555556
88
0.771617
e6c5a7341e8cb31e9e82c9c768f7a5eba8428714
1,345
use futures::{Future, Poll}; use svc; pub fn layer<L>(per_make: L) -> Layer<L> { Layer(per_make) } #[derive(Clone, Debug)] pub struct Layer<L>(L); #[derive(Clone, Debug)] pub struct PerMake<L, M> { inner: M, layer: L, } pub struct MakeFuture<L, F> { inner: F, layer: L, } impl<M, L: Clone> super::Layer<M> for Layer<L> { type Service = PerMake<L, M>; fn layer(&self, inner: M) -> Self::Service { PerMake { inner, layer: self.0.clone(), } } } impl<T, L, M> svc::Service<T> for PerMake<L, M> where L: super::Layer<M::Response> + Clone, M: svc::Service<T>, { type Response = L::Service; type Error = M::Error; type Future = MakeFuture<L, M::Future>; fn poll_ready(&mut self) -> Poll<(), Self::Error> { self.inner.poll_ready() } fn call(&mut self, target: T) -> Self::Future { let inner = self.inner.call(target); MakeFuture { layer: self.layer.clone(), inner, } } } impl<L, F> Future for MakeFuture<L, F> where L: super::Layer<F::Item>, F: Future, { type Item = L::Service; type Error = F::Error; fn poll(&mut self) -> Poll<Self::Item, Self::Error> { let inner = try_ready!(self.inner.poll()); Ok(self.layer.layer(inner).into()) } }
19.779412
57
0.545725
62e1abac74f4b9553e78b18941d47a271afb2cef
19,306
// By default, Windows creates an additional console window for our program. // // // This is silently ignored on non-windows systems. // See https://docs.microsoft.com/en-us/cpp/build/reference/subsystem?view=msvc-160 for details. #![windows_subsystem = "windows"] mod audio; mod custom_event; mod executor; mod locale; mod navigator; mod storage; mod task; mod ui; use crate::custom_event::RuffleEvent; use crate::executor::GlutinAsyncExecutor; use clap::Clap; use isahc::{config::RedirectPolicy, prelude::*, HttpClient}; use ruffle_core::{backend::audio::AudioBackend, config::Letterbox, Player}; use ruffle_render_wgpu::WgpuRenderBackend; use std::path::{Path, PathBuf}; use std::sync::Arc; use std::time::Instant; use tinyfiledialogs::open_file_dialog; use url::Url; use ruffle_core::tag_utils::SwfMovie; use ruffle_render_wgpu::clap::{GraphicsBackend, PowerPreference}; use std::io::Read; use std::rc::Rc; use winit::dpi::{LogicalSize, PhysicalPosition}; use winit::event::{ ElementState, KeyboardInput, MouseButton, MouseScrollDelta, VirtualKeyCode, WindowEvent, }; use winit::event_loop::{ControlFlow, EventLoop}; use winit::window::{Fullscreen, Icon, WindowBuilder}; #[derive(Clap, Debug)] #[clap( name = "Ruffle", author, version = include_str!(concat!(env!("OUT_DIR"), "/version-info.txt")), )] struct Opt { /// Path to a flash movie (swf) to play #[clap(name = "FILE", parse(from_os_str))] input_path: Option<PathBuf>, /// A "flashvars" parameter to provide to the movie. /// This can be repeated multiple times, for example -Pkey=value -Pfoo=bar #[clap(short = 'P', number_of_values = 1)] parameters: Vec<String>, /// Type of graphics backend to use. Not all options may be supported by your current system. /// Default will attempt to pick the most supported graphics backend. #[clap( long, short, case_insensitive = true, default_value = "default", arg_enum )] graphics: GraphicsBackend, /// Power preference for the graphics device used. 
High power usage tends to prefer dedicated GPUs, /// whereas a low power usage tends prefer integrated GPUs. #[clap(long, short, case_insensitive = true, default_value = "high", arg_enum)] power: PowerPreference, /// Location to store a wgpu trace output #[clap(long, parse(from_os_str))] #[cfg(feature = "render_trace")] trace_path: Option<PathBuf>, /// (Optional) Proxy to use when loading movies via URL #[clap(long, case_insensitive = true)] proxy: Option<Url>, /// (Optional) Replace all embedded http URLs with https #[clap(long, case_insensitive = true, takes_value = false)] upgrade_to_https: bool, #[clap(long, case_insensitive = true, takes_value = false)] timedemo: bool, } #[cfg(feature = "render_trace")] fn trace_path(opt: &Opt) -> Option<&Path> { if let Some(path) = &opt.trace_path { let _ = std::fs::create_dir_all(path); Some(path) } else { None } } #[cfg(not(feature = "render_trace"))] fn trace_path(_opt: &Opt) -> Option<&Path> { None } fn main() { // When linked with the windows subsystem windows won't automatically attach // to the console of the parent process, so we do it explicitly. This fails // silently if the parent has no console. #[cfg(windows)] unsafe { use winapi::um::wincon::{AttachConsole, ATTACH_PARENT_PROCESS}; AttachConsole(ATTACH_PARENT_PROCESS); } env_logger::init(); let opt = Opt::parse(); let ret = if opt.timedemo { run_timedemo(opt) } else { run_player(opt) }; if let Err(e) = ret { eprintln!("Fatal error:\n{}", e); std::process::exit(-1); } // Without explicitly detaching the console cmd won't redraw it's prompt. 
#[cfg(windows)] unsafe { winapi::um::wincon::FreeConsole(); } } fn load_movie_from_path( movie_url: Url, proxy: Option<&Url>, ) -> Result<SwfMovie, Box<dyn std::error::Error>> { if movie_url.scheme() == "file" { if let Ok(path) = movie_url.to_file_path() { return SwfMovie::from_path(path); } } let proxy = proxy.and_then(|url| url.as_str().parse().ok()); let builder = HttpClient::builder() .proxy(proxy) .redirect_policy(RedirectPolicy::Follow); let client = builder.build()?; let res = client.get(movie_url.to_string())?; let mut buffer: Vec<u8> = Vec::new(); res.into_body().read_to_end(&mut buffer)?; SwfMovie::from_data(&buffer, Some(movie_url.to_string())) } fn set_movie_parameters(movie: &mut SwfMovie, parameters: &[String]) { for parameter in parameters { let mut split = parameter.splitn(2, '='); if let (Some(key), Some(value)) = (split.next(), split.next()) { movie.parameters_mut().insert(key, value.to_string(), true); } else { movie .parameters_mut() .insert(&parameter, "".to_string(), true); } } } fn run_player(opt: Opt) -> Result<(), Box<dyn std::error::Error>> { let movie_url = match &opt.input_path { Some(path) => { if path.exists() { let absolute_path = path.canonicalize().unwrap_or_else(|_| path.to_owned()); Url::from_file_path(absolute_path) .map_err(|_| "Path must be absolute and cannot be a URL")? } else { Url::parse(path.to_str().unwrap_or_default()) .map_err(|_| "Input path is not a file and could not be parsed as a URL.")? } } None => { let result = open_file_dialog("Load a Flash File", "", Some((&["*.swf"], ".swf"))); let selected = match result { Some(file_path) => PathBuf::from(file_path), None => return Ok(()), }; let absolute_path = selected .canonicalize() .unwrap_or_else(|_| selected.to_owned()); Url::from_file_path(absolute_path) .map_err(|_| "Path must be absolute and cannot be a URL")? 
} }; let mut movie = load_movie_from_path(movie_url.to_owned(), opt.proxy.as_ref())?; set_movie_parameters(&mut movie, &opt.parameters); let movie_size = LogicalSize::new(movie.width(), movie.height()); let icon_bytes = include_bytes!("../assets/favicon-32.rgba"); let icon = Icon::from_rgba(icon_bytes.to_vec(), 32, 32)?; let event_loop: EventLoop<RuffleEvent> = EventLoop::with_user_event(); let window_title = movie_url .path_segments() .and_then(|segments| segments.last()) .unwrap_or_else(|| movie_url.as_str()); let window = Rc::new( WindowBuilder::new() .with_title(format!("Ruffle - {}", window_title)) .with_window_icon(Some(icon)) .with_max_inner_size(LogicalSize::new(i16::MAX, i16::MAX)) .build(&event_loop)?, ); window.set_inner_size(movie_size); let viewport_size = window.inner_size(); let renderer = Box::new(WgpuRenderBackend::for_window( window.as_ref(), (viewport_size.width, viewport_size.height), opt.graphics.into(), opt.power.into(), trace_path(&opt), )?); let audio: Box<dyn AudioBackend> = match audio::CpalAudioBackend::new() { Ok(audio) => Box::new(audio), Err(e) => { log::error!("Unable to create audio device: {}", e); Box::new(ruffle_core::backend::audio::NullAudioBackend::new()) } }; let (executor, chan) = GlutinAsyncExecutor::new(event_loop.create_proxy()); let navigator = Box::new(navigator::ExternalNavigatorBackend::new( movie_url.clone(), chan, event_loop.create_proxy(), opt.proxy, opt.upgrade_to_https, )); //TODO: actually implement this backend type let storage = Box::new(storage::DiskStorageBackend::new()); let locale = Box::new(locale::DesktopLocaleBackend::new()); let log = Box::new(ruffle_core::backend::log::NullLogBackend::new()); let ui = Box::new(ui::DesktopUiBackend::new(window.clone())); let player = Player::new(renderer, audio, navigator, storage, locale, log, ui)?; { let mut player = player.lock().unwrap(); player.set_root_movie(Arc::new(movie)); player.set_is_playing(true); // Desktop player will auto-play. 
player.set_letterbox(Letterbox::On); player.set_viewport_dimensions(viewport_size.width, viewport_size.height); } let mut mouse_pos = PhysicalPosition::new(0.0, 0.0); let mut time = Instant::now(); let mut next_frame_time = Instant::now(); let mut minimized = false; let mut fullscreen_down = false; loop { // Poll UI events event_loop.run(move |event, _window_target, control_flow| { // Allow KeyboardInput.modifiers (ModifiersChanged event not functional yet). #[allow(deprecated)] match event { winit::event::Event::LoopDestroyed => { player.lock().unwrap().flush_shared_objects(); return; } // Core loop winit::event::Event::MainEventsCleared => { let new_time = Instant::now(); let dt = new_time.duration_since(time).as_micros(); if dt > 0 { time = new_time; let mut player_lock = player.lock().unwrap(); player_lock.tick(dt as f64 / 1000.0); next_frame_time = new_time + player_lock.time_til_next_frame(); if player_lock.needs_render() { window.request_redraw(); } } } // Render winit::event::Event::RedrawRequested(_) => { // Don't render when minimized to avoid potential swap chain errors in `wgpu`. if !minimized { player.lock().unwrap().render(); } } winit::event::Event::WindowEvent { event, .. } => match event { WindowEvent::Resized(size) => { // TODO: Change this when winit adds a `Window::minimzed` or `WindowEvent::Minimize`. minimized = size.width == 0 && size.height == 0; let mut player_lock = player.lock().unwrap(); player_lock.set_viewport_dimensions(size.width, size.height); player_lock .renderer_mut() .set_viewport_dimensions(size.width, size.height); window.request_redraw(); } WindowEvent::CursorMoved { position, .. } => { let mut player_lock = player.lock().unwrap(); mouse_pos = position; let event = ruffle_core::PlayerEvent::MouseMove { x: position.x, y: position.y, }; player_lock.handle_event(event); if player_lock.needs_render() { window.request_redraw(); } } WindowEvent::MouseInput { button: MouseButton::Left, state: pressed, .. 
} => { let mut player_lock = player.lock().unwrap(); let event = if pressed == ElementState::Pressed { ruffle_core::PlayerEvent::MouseDown { x: mouse_pos.x, y: mouse_pos.y, } } else { ruffle_core::PlayerEvent::MouseUp { x: mouse_pos.x, y: mouse_pos.y, } }; player_lock.handle_event(event); if player_lock.needs_render() { window.request_redraw(); } } WindowEvent::MouseWheel { delta, .. } => { use ruffle_core::events::MouseWheelDelta; let mut player_lock = player.lock().unwrap(); let delta = match delta { MouseScrollDelta::LineDelta(_, dy) => MouseWheelDelta::Lines(dy.into()), MouseScrollDelta::PixelDelta(pos) => MouseWheelDelta::Pixels(pos.y), }; let event = ruffle_core::PlayerEvent::MouseWheel { delta }; player_lock.handle_event(event); if player_lock.needs_render() { window.request_redraw(); } } WindowEvent::CursorLeft { .. } => { let mut player_lock = player.lock().unwrap(); player_lock.handle_event(ruffle_core::PlayerEvent::MouseLeft); if player_lock.needs_render() { window.request_redraw(); } } WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit, WindowEvent::KeyboardInput { input: KeyboardInput { state: ElementState::Pressed, virtual_keycode: Some(VirtualKeyCode::Return), modifiers, // TODO: Use WindowEvent::ModifiersChanged. .. }, .. } if modifiers.alt() => { if !fullscreen_down { window.set_fullscreen(match window.fullscreen() { None => Some(Fullscreen::Borderless(None)), Some(_) => None, }); } fullscreen_down = true; } WindowEvent::KeyboardInput { input: KeyboardInput { state: ElementState::Released, virtual_keycode: Some(VirtualKeyCode::Return), .. }, .. } if fullscreen_down => { fullscreen_down = false; } WindowEvent::KeyboardInput { input: KeyboardInput { state: ElementState::Pressed, virtual_keycode: Some(VirtualKeyCode::Escape), .. }, .. } => { window.set_fullscreen(None); } WindowEvent::KeyboardInput { .. 
} | WindowEvent::ReceivedCharacter(_) => { let mut player_lock = player.lock().unwrap(); if let Some(event) = player_lock .ui_mut() .downcast_mut::<ui::DesktopUiBackend>() .unwrap() .handle_event(event) { player_lock.handle_event(event); if player_lock.needs_render() { window.request_redraw(); } } } _ => (), }, winit::event::Event::UserEvent(RuffleEvent::TaskPoll) => executor .lock() .expect("active executor reference") .poll_all(), _ => (), } // After polling events, sleep the event loop until the next event or the next frame. if *control_flow != ControlFlow::Exit { *control_flow = ControlFlow::WaitUntil(next_frame_time); } }); } } fn run_timedemo(opt: Opt) -> Result<(), Box<dyn std::error::Error>> { let movie_url = match &opt.input_path { Some(path) => { if path.exists() { let absolute_path = path.canonicalize().unwrap_or_else(|_| path.to_owned()); Url::from_file_path(absolute_path) .map_err(|_| "Path must be absolute and cannot be a URL")? } else { Url::parse(path.to_str().unwrap_or_default()) .map_err(|_| "Input path is not a file and could not be parsed as a URL.")? 
} } None => return Err("Input file necessary for timedemo".into()), }; let mut movie = load_movie_from_path(movie_url, opt.proxy.as_ref())?; set_movie_parameters(&mut movie, &opt.parameters); let movie_frames = Some(movie.header().num_frames); let viewport_width = 1920; let viewport_height = 1080; let renderer = Box::new(WgpuRenderBackend::for_offscreen( (viewport_width, viewport_height), opt.graphics.into(), opt.power.into(), trace_path(&opt), )?); let audio: Box<dyn AudioBackend> = Box::new(ruffle_core::backend::audio::NullAudioBackend::new()); let navigator = Box::new(ruffle_core::backend::navigator::NullNavigatorBackend::new()); let storage = Box::new(ruffle_core::backend::storage::MemoryStorageBackend::default()); let locale = Box::new(locale::DesktopLocaleBackend::new()); let log = Box::new(ruffle_core::backend::log::NullLogBackend::new()); let ui = Box::new(ruffle_core::backend::ui::NullUiBackend::new()); let player = Player::new(renderer, audio, navigator, storage, locale, log, ui)?; player.lock().unwrap().set_root_movie(Arc::new(movie)); player.lock().unwrap().set_is_playing(true); player .lock() .unwrap() .set_viewport_dimensions(viewport_width, viewport_height); println!("Running {}...", opt.input_path.unwrap().to_string_lossy(),); let start = Instant::now(); let mut num_frames = 0; const MAX_FRAMES: u32 = 5000; let mut player = player.lock().unwrap(); while num_frames < MAX_FRAMES && player.current_frame() < movie_frames { player.run_frame(); player.render(); num_frames += 1; } let end = Instant::now(); let duration = end.duration_since(start); println!("Ran {} frames in {}s.", num_frames, duration.as_secs_f32()); Ok(()) }
39.080972
109
0.52507
87659bdead9ee2bed8c07894bf78bb4fcd043c69
4,506
#![cfg_attr(not(feature = "std"), no_std)] use frame_support::{decl_module, decl_storage, decl_event, dispatch::DispatchResult, ensure}; use system::ensure_signed; use sp_std::{vec, vec::Vec}; pub trait Trait: system::Trait { type Event: From<Event<Self>> + Into<<Self as system::Trait>::Event>; } decl_storage! { trait Store for Module<T: Trait> as VecMapStorage { Members get(fn members): Vec<T::AccountId>; } } decl_module! { pub struct Module<T: Trait> for enum Call where origin: T::Origin { fn deposit_event() = default; pub fn add_member(origin) -> DispatchResult { let member = ensure_signed(origin)?; ensure!(!Self::is_member(&member), "must not be a member to be added"); <Members<T>>::append(&mut vec![member.clone()])?; Self::deposit_event(RawEvent::MemberAdded(member)); Ok(()) } pub fn remove_member(origin) -> DispatchResult { let member = ensure_signed(origin)?; ensure!(Self::is_member(&member), "must be a member in order to leave"); <Members<T>>::mutate(|v| v.retain(|i| i != &member)); Self::deposit_event(RawEvent::MemberRemoved(member)); Ok(()) } } } impl <T: Trait> Module<T> { pub fn is_member(who: &T::AccountId) -> bool { <Members<T>>::get().contains(who) } } decl_event!( pub enum Event<T> where AccountId = <T as system::Trait>::AccountId, { MemberAdded(AccountId), MemberRemoved(AccountId), } ); #[cfg(test)] mod tests { use super::{Module, Trait, RawEvent}; use sp_core::H256; use frame_support::{impl_outer_origin, impl_outer_event, assert_ok, assert_err, parameter_types, weights::Weight}; use sp_runtime::{ traits::{BlakeTwo256, IdentityLookup}, testing::Header, Perbill, }; impl_outer_origin! { pub enum Origin for TestRuntime {} } #[derive(Clone, Eq, PartialEq)] pub struct TestRuntime; parameter_types! 
{ pub const BlockHashCount: u64 = 250; pub const MaximumBlockWeight: Weight = 1024; pub const MaximumBlockLength: u32 = 2 * 1024; pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); } impl system::Trait for TestRuntime { type Origin = Origin; type Call = (); type Index = u64; type BlockNumber = u64; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup<Self::AccountId>; type Header = Header; type Event = TestEvent; type BlockHashCount = BlockHashCount; type MaximumBlockWeight = MaximumBlockWeight; type MaximumBlockLength = MaximumBlockLength; type AvailableBlockRatio = AvailableBlockRatio; type Version = (); type ModuleToIndex = (); } mod vec_value_event { pub use crate::vec_set::Event; } impl_outer_event! { pub enum TestEvent for TestRuntime { vec_value_event<T>, } } impl Trait for TestRuntime { type Event = TestEvent; } type System = system::Module<TestRuntime>; type VecValueModule = Module<TestRuntime>; pub struct ExtBuilder; impl ExtBuilder { pub fn build() -> sp_io::TestExternalities { let storage = system::GenesisConfig::default() .build_storage::<TestRuntime>() .unwrap(); sp_io::TestExternalities::from(storage) } } #[test] fn add_member_err_works() { ExtBuilder::build().execute_with(||{ assert_ok!(VecValueModule::add_member(Origin::signed(1))); assert_err!(VecValueModule::add_member(Origin::signed(1)), "must not be a member to be added"); }) } #[test] fn add_member_works() { ExtBuilder::build().execute_with(||{ assert_ok!(VecValueModule::add_member(Origin::signed(1))); let expected_event = TestEvent::vec_value_event( RawEvent::MemberAdded(1), ); assert!(System::events().iter().any(|a| a.event == expected_event)); assert_eq!(VecValueModule::members(), vec![1]); }) } #[test] fn remove_member_err_works() { ExtBuilder::build().execute_with(||{ assert_err!( VecValueModule::remove_member(Origin::signed(2)), "must be a member in order to leave" ) }) } #[test] fn remove_member_works() { 
ExtBuilder::build().execute_with(|| { assert_ok!(VecValueModule::add_member(Origin::signed(1))); assert_ok!(VecValueModule::remove_member(Origin::signed(1))); assert_ok!(VecValueModule::add_member(Origin::signed(2))); let expected_event = TestEvent::vec_value_event(RawEvent::MemberRemoved(1),); assert!(System::events().iter().any(|a| a.event == expected_event)); assert_eq!(VecValueModule::members(), vec![2]); }) } }
26.505882
115
0.675766
c1dd01c58adf10245a668a57efd498b11e3679df
30,771
use super::mobiledevice_sys::*; use super::xcode; use crate::device::make_remote_app_with_name; use crate::errors::*; use crate::ios::IosPlatform; use crate::project::Project; use crate::Build; use crate::BuildBundle; use crate::Device; use crate::DeviceCompatibility; use crate::Runnable; use core_foundation::array::CFArray; use core_foundation::base::{CFType, CFTypeRef, ItemRef, TCFType}; use core_foundation::boolean::CFBoolean; use core_foundation::data::CFData; use core_foundation::dictionary::{CFDictionary, CFDictionaryRef}; use core_foundation::number::CFNumber; use core_foundation::string::CFString; use core_foundation_sys::number::kCFBooleanTrue; use libc::*; use std::collections::HashMap; use std::fmt; use std::fmt::Display; use std::fmt::Formatter; use std::fs; use std::mem; use std::path::Path; use std::path::PathBuf; use std::process; use std::ptr; use std::thread; use std::time::Duration; #[derive(Clone, Debug)] pub struct IosDevice { pub id: String, pub name: String, ptr: *const am_device, arch_cpu: &'static str, rustc_triple: String, } #[derive(Clone, Debug)] pub struct IosSimDevice { pub id: String, pub name: String, pub os: String, } unsafe impl Send for IosDevice {} impl IosDevice { pub fn new(ptr: *const am_device) -> Result<IosDevice> { let _session = ensure_session(ptr)?; let name = match device_read_value(ptr, "DeviceName")? { Some(Value::String(s)) => s, x => bail!("DeviceName should have been a string, was {:?}", x), }; let cpu = match device_read_value(ptr, "CPUArchitecture")? { Some(Value::String(ref v)) if v == "arm64" || v == "arm64e" => "aarch64", _ => "armv7", }; let id = if let Value::String(id) = rustify(unsafe { AMDeviceCopyDeviceIdentifier(ptr) })? 
{ id } else { bail!("unexpected id format") }; Ok(IosDevice { ptr: ptr, name: name, id: id, arch_cpu: cpu.into(), rustc_triple: format!("{}-apple-ios", cpu), }) } fn make_app( &self, project: &Project, build: &Build, runnable: &Runnable, ) -> Result<BuildBundle> { let signing = xcode::look_for_signature_settings(&self.id)? .pop() .ok_or_else(|| anyhow!("no signing identity found"))?; let app_id = signing .name .split(" ") .last() .ok_or_else(|| anyhow!("no app id ?"))?; let build_bundle = make_ios_app(project, build, runnable, &app_id)?; super::xcode::sign_app(&build_bundle, &signing)?; Ok(build_bundle) } fn install_app( &self, project: &Project, build: &Build, runnable: &Runnable, ) -> Result<BuildBundle> { let build_bundle = self.make_app(project, build, runnable)?; install_app(self.ptr, &build_bundle.bundle_dir)?; Ok(build_bundle) } } impl Device for IosDevice { fn clean_app(&self, _build_bundle: &BuildBundle) -> Result<()> { unimplemented!() } fn debug_app( &self, project: &Project, build: &Build, args: &[&str], envs: &[&str], ) -> Result<BuildBundle> { let runnable = build .runnables .iter() .next() .ok_or_else(|| anyhow!("No executable compiled"))?; let build_bundle = self.install_app(project, build, runnable)?; let lldb_proxy = self.start_remote_lldb()?; run_remote( self.ptr, &lldb_proxy, &build_bundle.bundle_dir, args, envs, true, )?; Ok(build_bundle) } fn id(&self) -> &str { &self.id } fn name(&self) -> &str { &self.name } fn run_app( &self, project: &Project, build: &Build, args: &[&str], envs: &[&str], ) -> Result<Vec<BuildBundle>> { let mut build_bundles = vec![]; for runnable in &build.runnables { let build_bundle = self.install_app(&project, &build, &runnable)?; let lldb_proxy = self.start_remote_lldb()?; run_remote( self.ptr, &lldb_proxy, &build_bundle.bundle_dir, args, envs, false, )?; build_bundles.push(build_bundle) } Ok(build_bundles) } fn start_remote_lldb(&self) -> Result<String> { let _ = ensure_session(self.ptr); let fd = 
start_remote_debug_server(self.ptr)?; debug!("start local lldb proxy"); let proxy = start_lldb_proxy(fd)?; let url = format!("localhost:{}", proxy); debug!("started lldb proxy {}", url); Ok(url) } } impl IosSimDevice { fn install_app( &self, project: &Project, build: &Build, runnable: &Runnable, ) -> Result<BuildBundle> { let build_bundle = IosSimDevice::make_app(project, build, runnable)?; let _ = process::Command::new("xcrun") .args(&["simctl", "uninstall", &self.id, "Dinghy"]) .status()?; let stat = process::Command::new("xcrun") .args(&[ "simctl", "install", &self.id, build_bundle .bundle_dir .to_str() .ok_or_else(|| anyhow!("conversion to string"))?, ]) .status()?; if stat.success() { Ok(build_bundle) } else { bail!( "Failed to install {} for {}", runnable.exe.display(), self.id ) } } fn make_app(project: &Project, build: &Build, runnable: &Runnable) -> Result<BuildBundle> { make_ios_app(project, build, runnable, "Dinghy") } } impl Device for IosSimDevice { fn clean_app(&self, _build_bundle: &BuildBundle) -> Result<()> { unimplemented!() } fn debug_app( &self, project: &Project, build: &Build, args: &[&str], envs: &[&str], ) -> Result<BuildBundle> { let runnable = build .runnables .iter() .next() .ok_or_else(|| anyhow!("No executable compiled"))?; let build_bundle = self.install_app(project, build, runnable)?; let install_path = String::from_utf8( process::Command::new("xcrun") .args(&["simctl", "get_app_container", &self.id, "Dinghy"]) .output()? 
.stdout, )?; launch_lldb_simulator(&self, &install_path, args, envs, true)?; Ok(build_bundle) } fn id(&self) -> &str { &self.id } fn name(&self) -> &str { &self.name } fn run_app( &self, project: &Project, build: &Build, args: &[&str], envs: &[&str], ) -> Result<Vec<BuildBundle>> { let mut build_bundles = vec![]; for runnable in &build.runnables { let build_bundle = self.install_app(&project, &build, &runnable)?; launch_app(&self, args, envs)?; build_bundles.push(build_bundle); } Ok(build_bundles) } fn start_remote_lldb(&self) -> Result<String> { unimplemented!() } } impl Display for IosDevice { fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { Ok(fmt.write_str( format!( "IosDevice {{ \"id\": \"{}\", \"name\": {}, \"arch_cpu\": {} }}", self.id, self.name, self.arch_cpu ) .as_str(), )?) } } impl Display for IosSimDevice { fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { Ok(fmt.write_str( format!( "IosSimDevice {{ \"id\": \"{}\", \"name\": {}, \"os\": {} }}", self.id, self.name, self.os ) .as_str(), )?) } } impl DeviceCompatibility for IosDevice { fn is_compatible_with_ios_platform(&self, platform: &IosPlatform) -> bool { if platform.sim { return false; } if platform.toolchain.rustc_triple == self.rustc_triple.as_str() { return true; } return false; } } impl DeviceCompatibility for IosSimDevice { fn is_compatible_with_ios_platform(&self, platform: &IosPlatform) -> bool { platform.sim && ( platform.toolchain.rustc_triple == "x86_64-apple-ios" || platform.toolchain.rustc_triple == "aarch64-apple-ios-sim" ) } } #[derive(Clone, Debug)] enum Value { String(String), Data(Vec<u8>), I64(i64), Boolean(bool), } fn mk_result(rv: i32) -> Result<()> { if rv as u32 == 0xe80000e2 { bail!("error: Device is locked. ({:x})", rv) } else if rv as u32 == 0xe80000be { bail!("error: 0xe80000be, kAMDMismatchedApplicationIdentifierEntitlementError: This application's application-identifier entitlement does not match that of the installed application. 
These values must match for an upgrade to be allowed. Help: check that the xcode project you created has \"Dinghy\" as Project Name, and make the prefix (Organisation identifier) something reasonably unique.") } else if rv as u32 == 0xe8000087 { bail!("error: 0xe8000087, Architecture mismatch") } else if rv as u32 == 0xe8008015 { bail!("error: 0xe8008015, A valid provisioning profile for this executable was not found.") } else if rv as u32 == 0xe8008016 { bail!("error: 0xe8008016, The executable was signed with invalid entitlements.") } else if rv as u32 == 0xe8008022 { bail!( "error: 0xe8000022, kAMDInvalidServiceError. (This one is relatively hard to diagnose. Try erasing the Dinghy app from the phone, rebooting the device, the computer, check for ios and xcode updates.)", ) } else if rv as u32 == 0xe800007f { bail!("error: e800007f, The device OS version is too low.") } else if rv as u32 == 0xe8000007 { bail!("error: e8000007: Invalid argument.") } else if rv != 0 { bail!("error: {:x}", rv) } else { Ok(()) } } fn rustify(raw: CFTypeRef) -> Result<Value> { unsafe { let cftype: CFType = TCFType::wrap_under_get_rule(mem::transmute(raw)); if cftype.type_of() == CFString::type_id() { let value: CFString = TCFType::wrap_under_get_rule(mem::transmute(raw)); return Ok(Value::String(value.to_string())); } if cftype.type_of() == CFData::type_id() { let value: CFData = TCFType::wrap_under_get_rule(mem::transmute(raw)); return Ok(Value::Data(value.bytes().to_vec())); } if cftype.type_of() == CFNumber::type_id() { let value: CFNumber = TCFType::wrap_under_get_rule(mem::transmute(raw)); if let Some(i) = value.to_i64() { return Ok(Value::I64(i)); } } if cftype.type_of() == CFBoolean::type_id() { return Ok(Value::Boolean(raw == mem::transmute(kCFBooleanTrue))); } cftype.show(); bail!("unknown value") } } fn device_read_value(dev: *const am_device, key: &str) -> Result<Option<Value>> { unsafe { let key = CFString::new(key); let raw = AMDeviceCopyValue(dev, ptr::null(), 
key.as_concrete_TypeRef()); if raw.is_null() { return Ok(None); } Ok(Some(rustify(raw)?)) } } fn xcode_dev_path() -> Result<PathBuf> { use std::process::Command; let command = Command::new("xcode-select").arg("-print-path").output()?; Ok(String::from_utf8(command.stdout)?.trim().into()) } fn device_support_path(dev: *const am_device) -> Result<PathBuf> { let os_version = device_read_value(dev, "ProductVersion")? .ok_or_else(|| anyhow!("Could not get OS version"))?; if let Value::String(v) = os_version { platform_support_path("iPhoneOS.platform", &v) } else { bail!( "expected ProductVersion to be a String, found {:?}", os_version ) } } fn platform_support_path(platform: &str, os_version: &str) -> Result<PathBuf> { let prefix = xcode_dev_path()? .join("Platforms") .join(platform) .join("DeviceSupport"); debug!( "Looking for device support directory in {:?} for iOS version {:?}", prefix, os_version ); let two_token_version: String = os_version .split(".") .take(2) .collect::<Vec<_>>() .join(".") .into(); for directory in fs::read_dir(&prefix)? { let directory = directory?; let last = directory .file_name() .into_string() .map_err(|e| anyhow!("Could not parse {:?}", e))?; if last.starts_with(&two_token_version) { debug!("Picked {:?}", last); return Ok(prefix.join(directory.path())); } } bail!( "No device support directory for iOS version {} in {:?}. 
Time for an XCode \ update?", two_token_version, prefix ) } extern "C" fn mount_callback(_dict: CFDictionaryRef, _arg: *mut libc::c_void) {} fn mount_developer_image(dev: *const am_device) -> Result<()> { unsafe { let _session = ensure_session(dev); let ds_path = device_support_path(dev)?; let image_path = ds_path.join("DeveloperDiskImage.dmg"); debug!("Developer image path: {:?}", image_path); let sig_image_path = ds_path.join("DeveloperDiskImage.dmg.signature"); let sig = fs::read(sig_image_path)?; let sig = CFData::from_buffer(&sig); let options = [ ( CFString::from_static_string("ImageType"), CFString::from_static_string("Developer").as_CFType(), ), ( CFString::from_static_string("ImageSignature"), sig.as_CFType(), ), ]; let options = CFDictionary::from_CFType_pairs(&options); let r = AMDeviceMountImage( dev, CFString::new(image_path.to_str().unwrap()).as_concrete_TypeRef(), options.as_concrete_TypeRef(), mount_callback, 0, ); debug!("AMDeviceMountImage returns: {:x}", r); if r as u32 == 0xe8000076 { debug!("Error, already mounted, going on"); return Ok(()); } mk_result(r)?; Ok(()) } } fn make_ios_app( project: &Project, build: &Build, runnable: &Runnable, app_id: &str, ) -> Result<BuildBundle> { use crate::project; let build_bundle = make_remote_app_with_name(project, build, runnable, Some("Dinghy.app"))?; project::rec_copy(&runnable.exe, build_bundle.bundle_dir.join("Dinghy"), false)?; let magic = process::Command::new("file") .arg( runnable .exe .to_str() .ok_or_else(|| anyhow!("path conversion to string: {:?}", runnable.exe))?, ) .output()?; let magic = String::from_utf8(magic.stdout)?; let target = magic .split(" ") .last() .ok_or_else(|| anyhow!("empty magic"))?; xcode::add_plist_to_app(&build_bundle, target, app_id)?; Ok(build_bundle) } struct Session(*const am_device); fn ensure_session(dev: *const am_device) -> Result<Session> { unsafe { mk_result(AMDeviceConnect(dev))?; if AMDeviceIsPaired(dev) == 0 { bail!("lost pairing") }; 
mk_result(AMDeviceValidatePairing(dev))?; let rv = AMDeviceStartSession(dev); // kAMDSessionActiveError if rv as u32 == 0xe800001d { Ok(Session(::std::ptr::null())) } else { mk_result(rv)?; Ok(Session(dev)) } } } impl Drop for Session { fn drop(&mut self) { unsafe { if !self.0.is_null() { if let Err(e) = mk_result(AMDeviceStopSession(self.0)) { debug!("Error closing session {:?}", e); } if let Err(e) = mk_result(AMDeviceDisconnect(self.0)) { error!("Error disconnecting {:?}", e); } } } } } pub fn install_app<P: AsRef<Path>>(dev: *const am_device, app: P) -> Result<()> { unsafe { let _session = ensure_session(dev)?; let path = app .as_ref() .to_str() .ok_or_else(|| anyhow!("failure to convert {:?}", app.as_ref()))?; let url = ::core_foundation::url::CFURL::from_file_system_path(CFString::new(path), 0, true); let options = [( CFString::from_static_string("PackageType"), CFString::from_static_string("Developer").as_CFType(), )]; let options = CFDictionary::from_CFType_pairs(&options); mk_result(AMDeviceSecureTransferPath( 0, dev, url.as_concrete_TypeRef(), options.as_concrete_TypeRef(), ptr::null(), ptr::null(), ))?; mk_result(AMDeviceSecureInstallApplication( 0, dev, url.as_concrete_TypeRef(), options.as_concrete_TypeRef(), ptr::null(), ptr::null(), ))?; } Ok(()) } fn start_remote_debug_server(dev: *const am_device) -> Result<c_int> { unsafe { debug!("mount developer image"); mount_developer_image(dev)?; debug!("start debugserver on phone"); let _session = ensure_session(dev)?; let mut handle: *const c_void = std::ptr::null(); mk_result(AMDeviceSecureStartService( dev, CFString::from_static_string("com.apple.debugserver").as_concrete_TypeRef(), ptr::null_mut(), &mut handle, ))?; debug!("debug server running"); let fd = AMDServiceConnectionGetSocket(handle); Ok(fd) } } fn start_lldb_proxy(fd: c_int) -> Result<u16> { use std::io::{Read, Write}; use std::net::{TcpListener, TcpStream}; use std::os::unix::io::FromRawFd; let device = unsafe { TcpStream::from_raw_fd(fd) }; 
let proxy = TcpListener::bind("127.0.0.1:0")?; let addr = proxy.local_addr()?; device.set_nonblocking(true)?; thread::spawn(move || { fn server(proxy: TcpListener, mut device: TcpStream) -> Result<()> { for stream in proxy.incoming() { let mut stream = stream.expect("Failure while accepting connection"); stream.set_nonblocking(true)?; let mut buffer = [0; 16384]; loop { if let Ok(n) = device.read(&mut buffer) { if n == 0 { break; } stream.write_all(&buffer[0..n])?; } else if let Ok(n) = stream.read(&mut buffer) { if n == 0 { break; } device.write_all(&buffer[0..n])?; } else { thread::sleep(Duration::new(0, 100)); } } } Ok(()) } server(proxy, device).unwrap(); }); Ok(addr.port()) } fn launch_lldb_device<P: AsRef<Path>, P2: AsRef<Path>>( dev: *const am_device, proxy: &str, local: P, remote: P2, args: &[&str], envs: &[&str], debugger: bool, ) -> Result<()> { use std::io::Write; use std::process::Command; let _session = ensure_session(dev); let dir = ::tempdir::TempDir::new("mobiledevice-rs-lldb")?; let tmppath = dir.path(); let lldb_script_filename = tmppath.join("lldb-script"); let sysroot = device_support_path(dev)? .to_str() .ok_or_else(|| anyhow!("could not read sysroot"))? .to_owned(); { let python_lldb_support = tmppath.join("helpers.py"); let helper_py = include_str!("helpers.py"); let helper_py = helper_py.replace("ENV_VAR_PLACEHOLDER", &envs.join("\", \"")); fs::File::create(&python_lldb_support)?.write_fmt(format_args!("{}", &helper_py))?; let mut script = fs::File::create(&lldb_script_filename)?; writeln!(script, "platform select remote-ios --sysroot '{}'", sysroot)?; writeln!( script, "target create {}", local .as_ref() .to_str() .ok_or_else(|| anyhow!("untranslatable path"))? 
)?; writeln!(script, "script pass")?; writeln!(script, "command script import {:?}", python_lldb_support)?; writeln!( script, "command script add -f helpers.set_remote_path set_remote_path" )?; writeln!( script, "command script add -f helpers.connect_command connect" )?; writeln!( script, "command script add -s synchronous -f helpers.start start" )?; writeln!(script, "connect connect://{}", proxy)?; writeln!( script, "set_remote_path {}", remote.as_ref().to_str().unwrap() )?; if !debugger { writeln!(script, "start {}", args.join(" "))?; writeln!(script, "quit")?; } } let stat = Command::new("lldb") .arg("-Q") .arg("-s") .arg(lldb_script_filename) .status()?; if stat.success() { Ok(()) } else { bail!("LLDB returned error code {:?}", stat.code()) } } fn launch_app(dev: &IosSimDevice, app_args: &[&str], _envs: &[&str]) -> Result<()> { use std::io::Write; let dir = ::tempdir::TempDir::new("mobiledevice-rs-lldb")?; let tmppath = dir.path(); let mut install_path = String::from_utf8( process::Command::new("xcrun") .args(&["simctl", "get_app_container", &dev.id, "Dinghy"]) .output()? .stdout, )?; install_path.pop(); let stdout = Path::new(&install_path) .join("stdout") .to_string_lossy() .into_owned(); let stdout_param = &format!("--stdout={}", stdout); let mut xcrun_args: Vec<&str> = vec!["simctl", "launch", "-w", stdout_param, &dev.id, "Dinghy"]; xcrun_args.extend(app_args); debug!("Launching app via xcrun using args: {:?}", xcrun_args); let launch_output = process::Command::new("xcrun").args(&xcrun_args).output()?; let launch_output = String::from_utf8_lossy(&launch_output.stdout); // Output from the launch command should be "Dinghy: $PID" which is after the 8th character. let dinghy_pid = launch_output.split_at(8).1; // Attaching to the processes needs to be done in a script, not a commandline parameter or // lldb will say "no simulators found". 
let lldb_script_filename = tmppath.join("lldb-script"); let mut script = fs::File::create(&lldb_script_filename)?; write!(script, "attach {}\n", dinghy_pid)?; write!(script, "continue\n")?; write!(script, "quit\n")?; let output = process::Command::new("lldb") .arg("") .arg("-s") .arg(lldb_script_filename) .output()?; let test_contents = std::fs::read_to_string(stdout)?; println!("{}", test_contents); let output: String = String::from_utf8_lossy(&output.stdout).to_string(); debug!("lldb script: \n{}", output); // The stdout from lldb is something like: // // (lldb) attach 34163 // Process 34163 stopped // * thread #1, stop reason = signal SIGSTOP // frame #0: 0x00000001019cd000 dyld`_dyld_start // dyld`_dyld_start: // -> 0x1019cd000 <+0>: popq %rdi // 0x1019cd001 <+1>: pushq $0x0 // 0x1019cd003 <+3>: movq %rsp, %rbp // 0x1019cd006 <+6>: andq $-0x10, %rsp // Target 0: (Dinghy) stopped. // Executable module set to ..... // Architecture set to: x86_64h-apple-ios-. // (lldb) continue // Process 34163 resuming // Process 34163 exited with status = 101 (0x00000065) // (lldb) quit // // We need the "exit with status" line which is the 3rd from the last let lines: Vec<&str> = output.lines().filter(|line| line.trim().len() > 0).rev().collect(); let exit_status_line = lines.get(1); debug!("exit status line: {:?}", exit_status_line); if let Some(exit_status_line) = exit_status_line { let words: Vec<&str> = exit_status_line.split_whitespace().rev().collect(); if let Some(exit_status) = words.get(1) { let exit_status = exit_status.parse::<u32>()?; if exit_status == 0 { Ok(()) } else { panic!("Non-zero exit code from lldb: {}", exit_status); } } else { panic!( "Failed to parse lldb exit line for an exit status. 
{:?}", words ); } } else { panic!("Failed to get the exit status line from lldb: {:?}", lines); } } fn launch_lldb_simulator( dev: &IosSimDevice, installed: &str, args: &[&str], envs: &[&str], debugger: bool, ) -> Result<()> { use std::io::Write; use std::process::Command; let dir = ::tempdir::TempDir::new("mobiledevice-rs-lldb")?; let tmppath = dir.path(); let lldb_script_filename = tmppath.join("lldb-script"); { let python_lldb_support = tmppath.join("helpers.py"); let helper_py = include_str!("helpers.py"); let helper_py = helper_py.replace("ENV_VAR_PLACEHOLDER", &envs.join("\", \"")); fs::File::create(&python_lldb_support)?.write_fmt(format_args!("{}", &helper_py))?; let mut script = fs::File::create(&lldb_script_filename)?; writeln!(script, "platform select ios-simulator")?; writeln!(script, "target create {}", installed)?; writeln!(script, "script pass")?; writeln!(script, "command script import {:?}", python_lldb_support)?; writeln!( script, "command script add -s synchronous -f helpers.start start" )?; writeln!( script, "command script add -f helpers.connect_command connect" )?; writeln!(script, "connect connect://{}", dev.id)?; if !debugger { writeln!(script, "start {}", args.join(" "))?; writeln!(script, "quit")?; } } let stat = Command::new("xcrun") .arg("lldb") .arg("-Q") .arg("-s") .arg(lldb_script_filename) .status()?; if stat.success() { Ok(()) } else { bail!("LLDB returned error code {:?}", stat.code()) } } pub fn run_remote<P: AsRef<Path>>( dev: *const am_device, lldb_proxy: &str, app_path: P, args: &[&str], envs: &[&str], debugger: bool, ) -> Result<()> { let _session = ensure_session(dev)?; let plist = plist::Value::from_file(app_path.as_ref().join("Info.plist"))?; let bundle_id = plist .as_dictionary() .and_then(|btreemap| btreemap.get("CFBundleIdentifier")) .and_then(|bi| bi.as_string()) .expect("failed to read CFBundleIdentifier"); let options = [( CFString::from_static_string("ReturnAttributes"), CFArray::from_CFTypes(&[ 
CFString::from_static_string("CFBundleIdentifier"), CFString::from_static_string("Path"), ]), )]; let options = CFDictionary::from_CFType_pairs(&options); let apps: CFDictionaryRef = ptr::null(); unsafe { mk_result(AMDeviceLookupApplications( dev, options.as_concrete_TypeRef(), std::mem::transmute(&apps), ))?; } let apps: CFDictionary<CFString, CFDictionary<CFString, CFTypeRef>> = unsafe { TCFType::wrap_under_get_rule(apps) }; let app_info: ItemRef<CFDictionary<CFString, CFTypeRef>> = apps.get(CFString::new(bundle_id).as_concrete_TypeRef()); let remote: String = if let Ok(Value::String(remote)) = rustify(*app_info.get(CFString::from_static_string("Path"))) { remote } else { bail!("Invalid info") }; launch_lldb_device(dev, lldb_proxy, app_path, remote, args, envs, debugger)?; Ok(()) } #[allow(dead_code)] fn properties(dev: *const am_device) -> Result<HashMap<&'static str, Value>> { let properties = [ "ActivationPublicKey", "ActivationState", "ActivationStateAcknowledged", "ActivityURL", "BasebandBootloaderVersion", "BasebandSerialNumber", "BasebandStatus", "BasebandVersion", "BluetoothAddress", "BuildVersion", "CPUArchitecture", "DeviceCertificate", "DeviceClass", "DeviceColor", "DeviceName", "DevicePublicKey", "DieID", "FirmwareVersion", "HardwareModel", "HardwarePlatform", "HostAttached", "IMLockdownEverRegisteredKey", "IntegratedCircuitCardIdentity", "InternationalMobileEquipmentIdentity", "InternationalMobileSubscriberIdentity", "iTunesHasConnected", "MLBSerialNumber", "MobileSubscriberCountryCode", "MobileSubscriberNetworkCode", "ModelNumber", "PartitionType", "PasswordProtected", "PhoneNumber", "ProductionSOC", "ProductType", "ProductVersion", "ProtocolVersion", "ProximitySensorCalibration", "RegionInfo", "SBLockdownEverRegisteredKey", "SerialNumber", "SIMStatus", "SoftwareBehavior", "SoftwareBundleVersion", "SupportedDeviceFamilies", "TelephonyCapability", "TimeIntervalSince1970", "TimeZone", "TimeZoneOffsetFromUTC", "TrustedHostAttached", "UniqueChipID", 
"UniqueDeviceID", "UseActivityURL", "UseRaptorCerts", "Uses24HourClock", "WeDelivered", "WiFiAddress", ]; let mut props = HashMap::new(); for p in properties.iter() { if let Some(v) = device_read_value(dev, p)? { props.insert(*p, v); } } Ok(props) }
31.887047
400
0.551103
48e42cb84ce9d4e14e153caec27dbb5cd85c2af6
22,243
#[doc = r" Value read from the register"] pub struct R { bits: u32, } #[doc = r" Value to write to the register"] pub struct W { bits: u32, } impl super::P0_2 { #[doc = r" Modifies the contents of the register"] #[inline] pub fn modify<F>(&self, f: F) where for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W, { let bits = self.register.get(); let r = R { bits: bits }; let mut w = W { bits: bits }; f(&r, &mut w); self.register.set(w.bits); } #[doc = r" Reads the contents of the register"] #[inline] pub fn read(&self) -> R { R { bits: self.register.get(), } } #[doc = r" Writes to the register"] #[inline] pub fn write<F>(&self, f: F) where F: FnOnce(&mut W) -> &mut W, { let mut w = W::reset_value(); f(&mut w); self.register.set(w.bits); } #[doc = r" Writes the reset value to the register"] #[inline] pub fn reset(&self) { self.write(|w| w) } } #[doc = "Possible values of the field `FUNC`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum FUNCR { #[doc = "General purpose digital input/output\n pin."] P0_2, #[doc = "Transmitter output for UART0."] U0_TXD, #[doc = "Transmitter output for UART3."] U3_TXD, #[doc = r" Reserved"] _Reserved(u8), } impl FUNCR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bits(&self) -> u8 { match *self { FUNCR::P0_2 => 0, FUNCR::U0_TXD => 1, FUNCR::U3_TXD => 2, FUNCR::_Reserved(bits) => bits, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: u8) -> FUNCR { match value { 0 => FUNCR::P0_2, 1 => FUNCR::U0_TXD, 2 => FUNCR::U3_TXD, i => FUNCR::_Reserved(i), } } #[doc = "Checks if the value of the field is `P0_2`"] #[inline] pub fn is_p0_2(&self) -> bool { *self == FUNCR::P0_2 } #[doc = "Checks if the value of the field is `U0_TXD`"] #[inline] pub fn is_u0_txd(&self) -> bool { *self == FUNCR::U0_TXD } #[doc = "Checks if the value of the field is `U3_TXD`"] #[inline] pub fn is_u3_txd(&self) -> bool { *self == FUNCR::U3_TXD } } #[doc = "Possible values of the field `MODE`"] #[derive(Clone, Copy, Debug, PartialEq)] pub 
enum MODER { #[doc = "Inactive (no pull-down/pull-up resistor\n enabled)."] INACTIVE, #[doc = "Pull-down resistor enabled."] PULLDOWN_EN, #[doc = "Pull-up resistor enabled."] PULLUP_EN, #[doc = "Repeater mode."] REPEATER_MODE, } impl MODER { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bits(&self) -> u8 { match *self { MODER::INACTIVE => 0, MODER::PULLDOWN_EN => 1, MODER::PULLUP_EN => 2, MODER::REPEATER_MODE => 3, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: u8) -> MODER { match value { 0 => MODER::INACTIVE, 1 => MODER::PULLDOWN_EN, 2 => MODER::PULLUP_EN, 3 => MODER::REPEATER_MODE, _ => unreachable!(), } } #[doc = "Checks if the value of the field is `INACTIVE`"] #[inline] pub fn is_inactive(&self) -> bool { *self == MODER::INACTIVE } #[doc = "Checks if the value of the field is `PULLDOWN_EN`"] #[inline] pub fn is_pulldown_en(&self) -> bool { *self == MODER::PULLDOWN_EN } #[doc = "Checks if the value of the field is `PULLUP_EN`"] #[inline] pub fn is_pullup_en(&self) -> bool { *self == MODER::PULLUP_EN } #[doc = "Checks if the value of the field is `REPEATER_MODE`"] #[inline] pub fn is_repeater_mode(&self) -> bool { *self == MODER::REPEATER_MODE } } #[doc = "Possible values of the field `HYS`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum HYSR { #[doc = "Disable."] DISABLE, #[doc = "Enable."] ENABLE, } impl HYSR { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { HYSR::DISABLE => false, HYSR::ENABLE => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> HYSR { match value { false => HYSR::DISABLE, true => HYSR::ENABLE, } } #[doc = "Checks if the value of the field is `DISABLE`"] #[inline] pub fn is_disable(&self) -> bool { 
*self == HYSR::DISABLE } #[doc = "Checks if the value of the field is `ENABLE`"] #[inline] pub fn is_enable(&self) -> bool { *self == HYSR::ENABLE } } #[doc = "Possible values of the field `INV`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum INVR { #[doc = "Input not inverted (HIGH on pin reads as 1, LOW on pin\n reads as 0)."] INPUT_NOT_INVERTED, #[doc = "Input inverted (HIGH on pin reads as 0, LOW on pin reads as\n 1)."] INPUT_INVERTED_HIGH, } impl INVR { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { INVR::INPUT_NOT_INVERTED => false, INVR::INPUT_INVERTED_HIGH => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> INVR { match value { false => INVR::INPUT_NOT_INVERTED, true => INVR::INPUT_INVERTED_HIGH, } } #[doc = "Checks if the value of the field is `INPUT_NOT_INVERTED`"] #[inline] pub fn is_input_not_inverted(&self) -> bool { *self == INVR::INPUT_NOT_INVERTED } #[doc = "Checks if the value of the field is `INPUT_INVERTED_HIGH`"] #[inline] pub fn is_input_inverted_high(&self) -> bool { *self == INVR::INPUT_INVERTED_HIGH } } #[doc = "Possible values of the field `SLEW`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum SLEWR { #[doc = "Standard mode, output slew rate control is enabled. More\n outputs can be switched simultaneously."] STANDARD, #[doc = "Fast mode, slew rate control is disabled. 
Refer to the\n appropriate specific device data sheet for details."] FAST, } impl SLEWR { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { SLEWR::STANDARD => false, SLEWR::FAST => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> SLEWR { match value { false => SLEWR::STANDARD, true => SLEWR::FAST, } } #[doc = "Checks if the value of the field is `STANDARD`"] #[inline] pub fn is_standard(&self) -> bool { *self == SLEWR::STANDARD } #[doc = "Checks if the value of the field is `FAST`"] #[inline] pub fn is_fast(&self) -> bool { *self == SLEWR::FAST } } #[doc = "Possible values of the field `OD`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum ODR { #[doc = "Disable."] DISABLE, #[doc = "Open-drain mode enabled. This is not a true open-drain\n mode. 
Input cannot be pulled up above VDD."] ENABLED, } impl ODR { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { ODR::DISABLE => false, ODR::ENABLED => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> ODR { match value { false => ODR::DISABLE, true => ODR::ENABLED, } } #[doc = "Checks if the value of the field is `DISABLE`"] #[inline] pub fn is_disable(&self) -> bool { *self == ODR::DISABLE } #[doc = "Checks if the value of the field is `ENABLED`"] #[inline] pub fn is_enabled(&self) -> bool { *self == ODR::ENABLED } } #[doc = "Values that can be written to the field `FUNC`"] pub enum FUNCW { #[doc = "General purpose digital input/output\n pin."] P0_2, #[doc = "Transmitter output for UART0."] U0_TXD, #[doc = "Transmitter output for UART3."] U3_TXD, } impl FUNCW { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> u8 { match *self { FUNCW::P0_2 => 0, FUNCW::U0_TXD => 1, FUNCW::U3_TXD => 2, } } } #[doc = r" Proxy"] pub struct _FUNCW<'a> { w: &'a mut W, } impl<'a> _FUNCW<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: FUNCW) -> &'a mut W { unsafe { self.bits(variant._bits()) } } #[doc = "General purpose digital input/output pin."] #[inline] pub fn p0_2(self) -> &'a mut W { self.variant(FUNCW::P0_2) } #[doc = "Transmitter output for UART0."] #[inline] pub fn u0_txd(self) -> &'a mut W { self.variant(FUNCW::U0_TXD) } #[doc = "Transmitter output for UART3."] #[inline] pub fn u3_txd(self) -> &'a mut W { self.variant(FUNCW::U3_TXD) } #[doc = r" Writes raw bits to the field"] #[inline] pub unsafe fn bits(self, value: u8) -> &'a mut W { const MASK: u8 = 7; const OFFSET: u8 = 0; self.w.bits &= !((MASK as u32) << OFFSET); 
self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `MODE`"] pub enum MODEW { #[doc = "Inactive (no pull-down/pull-up resistor\n enabled)."] INACTIVE, #[doc = "Pull-down resistor enabled."] PULLDOWN_EN, #[doc = "Pull-up resistor enabled."] PULLUP_EN, #[doc = "Repeater mode."] REPEATER_MODE, } impl MODEW { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> u8 { match *self { MODEW::INACTIVE => 0, MODEW::PULLDOWN_EN => 1, MODEW::PULLUP_EN => 2, MODEW::REPEATER_MODE => 3, } } } #[doc = r" Proxy"] pub struct _MODEW<'a> { w: &'a mut W, } impl<'a> _MODEW<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: MODEW) -> &'a mut W { { self.bits(variant._bits()) } } #[doc = "Inactive (no pull-down/pull-up resistor enabled)."] #[inline] pub fn inactive(self) -> &'a mut W { self.variant(MODEW::INACTIVE) } #[doc = "Pull-down resistor enabled."] #[inline] pub fn pulldown_en(self) -> &'a mut W { self.variant(MODEW::PULLDOWN_EN) } #[doc = "Pull-up resistor enabled."] #[inline] pub fn pullup_en(self) -> &'a mut W { self.variant(MODEW::PULLUP_EN) } #[doc = "Repeater mode."] #[inline] pub fn repeater_mode(self) -> &'a mut W { self.variant(MODEW::REPEATER_MODE) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bits(self, value: u8) -> &'a mut W { const MASK: u8 = 3; const OFFSET: u8 = 3; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `HYS`"] pub enum HYSW { #[doc = "Disable."] DISABLE, #[doc = "Enable."] ENABLE, } impl HYSW { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { HYSW::DISABLE => false, HYSW::ENABLE => true, } } } #[doc = r" Proxy"] pub struct _HYSW<'a> { w: &'a mut W, } impl<'a> _HYSW<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: HYSW) -> &'a mut W { { 
self.bit(variant._bits()) } } #[doc = "Disable."] #[inline] pub fn disable(self) -> &'a mut W { self.variant(HYSW::DISABLE) } #[doc = "Enable."] #[inline] pub fn enable(self) -> &'a mut W { self.variant(HYSW::ENABLE) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 5; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `INV`"] pub enum INVW { #[doc = "Input not inverted (HIGH on pin reads as 1, LOW on pin\n reads as 0)."] INPUT_NOT_INVERTED, #[doc = "Input inverted (HIGH on pin reads as 0, LOW on pin reads as\n 1)."] INPUT_INVERTED_HIGH, } impl INVW { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { INVW::INPUT_NOT_INVERTED => false, INVW::INPUT_INVERTED_HIGH => true, } } } #[doc = r" Proxy"] pub struct _INVW<'a> { w: &'a mut W, } impl<'a> _INVW<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: INVW) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "Input not inverted (HIGH on pin reads as 1, LOW on pin reads as 0)."] #[inline] pub fn input_not_inverted(self) -> &'a mut W { self.variant(INVW::INPUT_NOT_INVERTED) } #[doc = "Input inverted (HIGH on pin reads as 0, LOW on pin reads as 1)."] #[inline] pub fn input_inverted_high(self) -> &'a mut W { self.variant(INVW::INPUT_INVERTED_HIGH) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 6; self.w.bits &= 
!((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `SLEW`"] pub enum SLEWW { #[doc = "Standard mode, output slew rate control is enabled. More\n outputs can be switched simultaneously."] STANDARD, #[doc = "Fast mode, slew rate control is disabled. Refer to the\n appropriate specific device data sheet for details."] FAST, } impl SLEWW { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { SLEWW::STANDARD => false, SLEWW::FAST => true, } } } #[doc = r" Proxy"] pub struct _SLEWW<'a> { w: &'a mut W, } impl<'a> _SLEWW<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: SLEWW) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "Standard mode, output slew rate control is enabled. More outputs can be switched simultaneously."] #[inline] pub fn standard(self) -> &'a mut W { self.variant(SLEWW::STANDARD) } #[doc = "Fast mode, slew rate control is disabled. Refer to the appropriate specific device data sheet for details."] #[inline] pub fn fast(self) -> &'a mut W { self.variant(SLEWW::FAST) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 9; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `OD`"] pub enum ODW { #[doc = "Disable."] DISABLE, #[doc = "Open-drain mode enabled. This is not a true open-drain\n mode. 
Input cannot be pulled up above VDD."] ENABLED, } impl ODW { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { ODW::DISABLE => false, ODW::ENABLED => true, } } } #[doc = r" Proxy"] pub struct _ODW<'a> { w: &'a mut W, } impl<'a> _ODW<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: ODW) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "Disable."] #[inline] pub fn disable(self) -> &'a mut W { self.variant(ODW::DISABLE) } #[doc = "Open-drain mode enabled. This is not a true open-drain mode. Input cannot be pulled up above VDD."] #[inline] pub fn enabled(self) -> &'a mut W { self.variant(ODW::ENABLED) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 10; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } impl R { #[doc = r" Value of the register as raw bits"] #[inline] pub fn bits(&self) -> u32 { self.bits } #[doc = "Bits 0:2 - Selects pin function for pin P0[2]"] #[inline] pub fn func(&self) -> FUNCR { FUNCR::_from({ const MASK: u8 = 7; const OFFSET: u8 = 0; ((self.bits >> OFFSET) & MASK as u32) as u8 }) } #[doc = "Bits 3:4 - Selects function mode (on-chip pull-up/pull-down resistor control)."] #[inline] pub fn mode(&self) -> MODER { MODER::_from({ const MASK: u8 = 3; const OFFSET: u8 = 3; ((self.bits >> OFFSET) & MASK as u32) as u8 }) } #[doc = "Bit 5 - Hysteresis."] #[inline] pub fn hys(&self) -> HYSR { HYSR::_from({ const MASK: bool = true; const OFFSET: u8 = 5; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bit 6 - Invert input"] #[inline] pub fn inv(&self) -> INVR { INVR::_from({ const MASK: bool = true; const OFFSET: u8 = 6; ((self.bits >> OFFSET) & 
MASK as u32) != 0 }) } #[doc = "Bit 9 - Driver slew rate"] #[inline] pub fn slew(&self) -> SLEWR { SLEWR::_from({ const MASK: bool = true; const OFFSET: u8 = 9; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bit 10 - Open-drain mode."] #[inline] pub fn od(&self) -> ODR { ODR::_from({ const MASK: bool = true; const OFFSET: u8 = 10; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } } impl W { #[doc = r" Reset value of the register"] #[inline] pub fn reset_value() -> W { W { bits: 48 } } #[doc = r" Writes raw bits to the register"] #[inline] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.bits = bits; self } #[doc = "Bits 0:2 - Selects pin function for pin P0[2]"] #[inline] pub fn func(&mut self) -> _FUNCW { _FUNCW { w: self } } #[doc = "Bits 3:4 - Selects function mode (on-chip pull-up/pull-down resistor control)."] #[inline] pub fn mode(&mut self) -> _MODEW { _MODEW { w: self } } #[doc = "Bit 5 - Hysteresis."] #[inline] pub fn hys(&mut self) -> _HYSW { _HYSW { w: self } } #[doc = "Bit 6 - Invert input"] #[inline] pub fn inv(&mut self) -> _INVW { _INVW { w: self } } #[doc = "Bit 9 - Driver slew rate"] #[inline] pub fn slew(&mut self) -> _SLEWW { _SLEWW { w: self } } #[doc = "Bit 10 - Open-drain mode."] #[inline] pub fn od(&mut self) -> _ODW { _ODW { w: self } } }
27.908407
162
0.508025
62263771fca106fa237deb03b746376aaf7bf369
34,914
#![doc = "generated by AutoRust 0.1.0"] #![allow(non_camel_case_types)] #![allow(unused_imports)] use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ActiveDirectoryObject { #[serde(rename = "objectId", default, skip_serializing_if = "Option::is_none")] pub object_id: Option<String>, #[serde(rename = "tenantId", default, skip_serializing_if = "Option::is_none")] pub tenant_id: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Actor { #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct CallbackConfig { #[serde(rename = "serviceUri")] pub service_uri: String, #[serde(rename = "customHeaders", default, skip_serializing_if = "Option::is_none")] pub custom_headers: Option<serde_json::Value>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Event { #[serde(flatten)] pub event_info: EventInfo, #[serde(rename = "eventRequestMessage", default, skip_serializing_if = "Option::is_none")] pub event_request_message: Option<EventRequestMessage>, #[serde(rename = "eventResponseMessage", default, skip_serializing_if = "Option::is_none")] pub event_response_message: Option<EventResponseMessage>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct EventContent { #[serde(default, skip_serializing_if = "Option::is_none")] pub id: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub timestamp: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub action: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub target: Option<Target>, #[serde(default, skip_serializing_if = "Option::is_none")] pub request: Option<Request>, #[serde(default, skip_serializing_if = "Option::is_none")] pub actor: Option<Actor>, #[serde(default, skip_serializing_if = "Option::is_none")] pub source: 
Option<Source>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct EventInfo { #[serde(default, skip_serializing_if = "Option::is_none")] pub id: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct EventListResult { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<Event>, #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct EventRequestMessage { #[serde(default, skip_serializing_if = "Option::is_none")] pub content: Option<EventContent>, #[serde(default, skip_serializing_if = "Option::is_none")] pub headers: Option<serde_json::Value>, #[serde(default, skip_serializing_if = "Option::is_none")] pub method: Option<String>, #[serde(rename = "requestUri", default, skip_serializing_if = "Option::is_none")] pub request_uri: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub version: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct EventResponseMessage { #[serde(default, skip_serializing_if = "Option::is_none")] pub content: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub headers: Option<serde_json::Value>, #[serde(rename = "reasonPhrase", default, skip_serializing_if = "Option::is_none")] pub reason_phrase: Option<String>, #[serde(rename = "statusCode", default, skip_serializing_if = "Option::is_none")] pub status_code: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub version: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct GenerateCredentialsParameters { #[serde(rename = "tokenId", default, skip_serializing_if = "Option::is_none")] pub token_id: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub expiry: Option<String>, #[serde(default, skip_serializing_if = 
"Option::is_none")] pub name: Option<generate_credentials_parameters::Name>, } pub mod generate_credentials_parameters { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Name { #[serde(rename = "password1")] Password1, #[serde(rename = "password2")] Password2, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct GenerateCredentialsResult { #[serde(default, skip_serializing_if = "Option::is_none")] pub username: Option<String>, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub passwords: Vec<TokenPassword>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct IpRule { #[serde(default, skip_serializing_if = "Option::is_none")] pub action: Option<ip_rule::Action>, pub value: String, } pub mod ip_rule { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Action { Allow, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ImportImageParameters { pub source: ImportSource, #[serde(rename = "targetTags", default, skip_serializing_if = "Vec::is_empty")] pub target_tags: Vec<String>, #[serde(rename = "untaggedTargetRepositories", default, skip_serializing_if = "Vec::is_empty")] pub untagged_target_repositories: Vec<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub mode: Option<import_image_parameters::Mode>, } pub mod import_image_parameters { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Mode { NoForce, Force, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ImportSource { #[serde(rename = "resourceId", default, skip_serializing_if = "Option::is_none")] pub resource_id: Option<String>, #[serde(rename = "registryUri", default, skip_serializing_if = "Option::is_none")] pub registry_uri: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub credentials: Option<ImportSourceCredentials>, #[serde(rename = "sourceImage")] pub source_image: 
String, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ImportSourceCredentials { #[serde(default, skip_serializing_if = "Option::is_none")] pub username: Option<String>, pub password: String, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct NetworkRuleSet { #[serde(rename = "defaultAction")] pub default_action: network_rule_set::DefaultAction, #[serde(rename = "virtualNetworkRules", default, skip_serializing_if = "Vec::is_empty")] pub virtual_network_rules: Vec<VirtualNetworkRule>, #[serde(rename = "ipRules", default, skip_serializing_if = "Vec::is_empty")] pub ip_rules: Vec<IpRule>, } pub mod network_rule_set { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum DefaultAction { Allow, Deny, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct OperationDefinition { #[serde(default, skip_serializing_if = "Option::is_none")] pub origin: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub display: Option<OperationDisplayDefinition>, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<OperationPropertiesDefinition>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct OperationDisplayDefinition { #[serde(default, skip_serializing_if = "Option::is_none")] pub provider: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub resource: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub operation: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub description: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct OperationListResult { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<OperationDefinition>, #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] 
pub next_link: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct OperationMetricSpecificationDefinition { #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option<String>, #[serde(rename = "displayName", default, skip_serializing_if = "Option::is_none")] pub display_name: Option<String>, #[serde(rename = "displayDescription", default, skip_serializing_if = "Option::is_none")] pub display_description: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub unit: Option<String>, #[serde(rename = "aggregationType", default, skip_serializing_if = "Option::is_none")] pub aggregation_type: Option<String>, #[serde(rename = "internalMetricName", default, skip_serializing_if = "Option::is_none")] pub internal_metric_name: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct OperationPropertiesDefinition { #[serde(rename = "serviceSpecification", default, skip_serializing_if = "Option::is_none")] pub service_specification: Option<OperationServiceSpecificationDefinition>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct OperationServiceSpecificationDefinition { #[serde(rename = "metricSpecifications", default, skip_serializing_if = "Vec::is_empty")] pub metric_specifications: Vec<OperationMetricSpecificationDefinition>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ProxyResource { #[serde(default, skip_serializing_if = "Option::is_none")] pub id: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option<String>, #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")] pub type_: Option<String>, #[serde(rename = "systemData", default, skip_serializing_if = "Option::is_none")] pub system_data: Option<SystemData>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct QuarantinePolicy { #[serde(default, skip_serializing_if = "Option::is_none")] pub 
status: Option<quarantine_policy::Status>, } pub mod quarantine_policy { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Status { #[serde(rename = "enabled")] Enabled, #[serde(rename = "disabled")] Disabled, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RegenerateCredentialParameters { pub name: regenerate_credential_parameters::Name, } pub mod regenerate_credential_parameters { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Name { #[serde(rename = "password")] Password, #[serde(rename = "password2")] Password2, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Registry { #[serde(flatten)] pub resource: Resource, pub sku: Sku, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<RegistryProperties>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RegistryListCredentialsResult { #[serde(default, skip_serializing_if = "Option::is_none")] pub username: Option<String>, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub passwords: Vec<RegistryPassword>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RegistryListResult { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<Registry>, #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RegistryNameCheckRequest { pub name: String, #[serde(rename = "type")] pub type_: registry_name_check_request::Type, } pub mod registry_name_check_request { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Type { #[serde(rename = "Microsoft.ContainerRegistry/registries")] MicrosoftContainerRegistryRegistries, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RegistryNameStatus { #[serde(rename = "nameAvailable", default, 
skip_serializing_if = "Option::is_none")] pub name_available: Option<bool>, #[serde(default, skip_serializing_if = "Option::is_none")] pub reason: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub message: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RegistryPassword { #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option<registry_password::Name>, #[serde(default, skip_serializing_if = "Option::is_none")] pub value: Option<String>, } pub mod registry_password { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Name { #[serde(rename = "password")] Password, #[serde(rename = "password2")] Password2, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RegistryPolicies { #[serde(rename = "quarantinePolicy", default, skip_serializing_if = "Option::is_none")] pub quarantine_policy: Option<QuarantinePolicy>, #[serde(rename = "trustPolicy", default, skip_serializing_if = "Option::is_none")] pub trust_policy: Option<TrustPolicy>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RegistryProperties { #[serde(rename = "loginServer", default, skip_serializing_if = "Option::is_none")] pub login_server: Option<String>, #[serde(rename = "creationDate", default, skip_serializing_if = "Option::is_none")] pub creation_date: Option<String>, #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")] pub provisioning_state: Option<registry_properties::ProvisioningState>, #[serde(default, skip_serializing_if = "Option::is_none")] pub status: Option<Status>, #[serde(rename = "adminUserEnabled", default, skip_serializing_if = "Option::is_none")] pub admin_user_enabled: Option<bool>, #[serde(rename = "storageAccount", default, skip_serializing_if = "Option::is_none")] pub storage_account: Option<StorageAccountProperties>, #[serde(rename = "networkRuleSet", default, skip_serializing_if = 
"Option::is_none")] pub network_rule_set: Option<NetworkRuleSet>, } pub mod registry_properties { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum ProvisioningState { Creating, Updating, Deleting, Succeeded, Failed, Canceled, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RegistryPropertiesUpdateParameters { #[serde(rename = "adminUserEnabled", default, skip_serializing_if = "Option::is_none")] pub admin_user_enabled: Option<bool>, #[serde(rename = "storageAccount", default, skip_serializing_if = "Option::is_none")] pub storage_account: Option<StorageAccountProperties>, #[serde(rename = "networkRuleSet", default, skip_serializing_if = "Option::is_none")] pub network_rule_set: Option<NetworkRuleSet>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RegistryUpdateParameters { #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option<serde_json::Value>, #[serde(default, skip_serializing_if = "Option::is_none")] pub sku: Option<Sku>, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<RegistryPropertiesUpdateParameters>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RegistryUsage { #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub limit: Option<i64>, #[serde(rename = "currentValue", default, skip_serializing_if = "Option::is_none")] pub current_value: Option<i64>, #[serde(default, skip_serializing_if = "Option::is_none")] pub unit: Option<registry_usage::Unit>, } pub mod registry_usage { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Unit { Count, Bytes, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RegistryUsageListResult { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<RegistryUsage>, } #[derive(Clone, Debug, PartialEq, Serialize, 
Deserialize)] pub struct Replication { #[serde(flatten)] pub resource: Resource, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<ReplicationProperties>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ReplicationListResult { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<Replication>, #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ReplicationProperties { #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")] pub provisioning_state: Option<replication_properties::ProvisioningState>, #[serde(default, skip_serializing_if = "Option::is_none")] pub status: Option<Status>, } pub mod replication_properties { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum ProvisioningState { Creating, Updating, Deleting, Succeeded, Failed, Canceled, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ReplicationUpdateParameters { #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option<serde_json::Value>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Request { #[serde(default, skip_serializing_if = "Option::is_none")] pub id: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub addr: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub host: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub method: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub useragent: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Resource { #[serde(default, skip_serializing_if = "Option::is_none")] pub id: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option<String>, 
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")] pub type_: Option<String>, pub location: String, #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option<serde_json::Value>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ScopeMap { #[serde(flatten)] pub proxy_resource: ProxyResource, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<ScopeMapProperties>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ScopeMapListResult { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<ScopeMap>, #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ScopeMapProperties { #[serde(default, skip_serializing_if = "Option::is_none")] pub description: Option<String>, #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")] pub type_: Option<String>, #[serde(rename = "creationDate", default, skip_serializing_if = "Option::is_none")] pub creation_date: Option<String>, #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")] pub provisioning_state: Option<scope_map_properties::ProvisioningState>, pub actions: Vec<String>, } pub mod scope_map_properties { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum ProvisioningState { Creating, Updating, Deleting, Succeeded, Failed, Canceled, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ScopeMapPropertiesUpdateParameters { #[serde(default, skip_serializing_if = "Option::is_none")] pub description: Option<String>, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub actions: Vec<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ScopeMapUpdateParameters { #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: 
Option<ScopeMapPropertiesUpdateParameters>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Sku { pub name: sku::Name, #[serde(default, skip_serializing_if = "Option::is_none")] pub tier: Option<sku::Tier>, } pub mod sku { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Name { Classic, Basic, Standard, Premium, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Tier { Classic, Basic, Standard, Premium, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Source { #[serde(default, skip_serializing_if = "Option::is_none")] pub addr: Option<String>, #[serde(rename = "instanceID", default, skip_serializing_if = "Option::is_none")] pub instance_id: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Status { #[serde(rename = "displayStatus", default, skip_serializing_if = "Option::is_none")] pub display_status: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub message: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub timestamp: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct StorageAccountProperties { pub id: String, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct SystemData { #[serde(rename = "createdBy", default, skip_serializing_if = "Option::is_none")] pub created_by: Option<String>, #[serde(rename = "createdByType", default, skip_serializing_if = "Option::is_none")] pub created_by_type: Option<system_data::CreatedByType>, #[serde(rename = "createdAt", default, skip_serializing_if = "Option::is_none")] pub created_at: Option<String>, #[serde(rename = "lastModifiedBy", default, skip_serializing_if = "Option::is_none")] pub last_modified_by: Option<String>, #[serde(rename = "lastModifiedByType", default, skip_serializing_if = "Option::is_none")] pub last_modified_by_type: Option<system_data::LastModifiedByType>, 
#[serde(rename = "lastModifiedAt", default, skip_serializing_if = "Option::is_none")] pub last_modified_at: Option<String>, } pub mod system_data { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum CreatedByType { User, Application, ManagedIdentity, Key, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum LastModifiedByType { User, Application, ManagedIdentity, Key, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Target { #[serde(rename = "mediaType", default, skip_serializing_if = "Option::is_none")] pub media_type: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub size: Option<i64>, #[serde(default, skip_serializing_if = "Option::is_none")] pub digest: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub length: Option<i64>, #[serde(default, skip_serializing_if = "Option::is_none")] pub repository: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub url: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub tag: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub version: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Token { #[serde(flatten)] pub proxy_resource: ProxyResource, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<TokenProperties>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct TokenCertificate { #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option<token_certificate::Name>, #[serde(default, skip_serializing_if = "Option::is_none")] pub expiry: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub thumbprint: Option<String>, #[serde(rename = "encodedPemCertificate", default, skip_serializing_if = "Option::is_none")] 
pub encoded_pem_certificate: Option<String>, } pub mod token_certificate { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Name { #[serde(rename = "certificate1")] Certificate1, #[serde(rename = "certificate2")] Certificate2, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct TokenCredentialsProperties { #[serde(rename = "activeDirectoryObject", default, skip_serializing_if = "Option::is_none")] pub active_directory_object: Option<ActiveDirectoryObject>, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub certificates: Vec<TokenCertificate>, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub passwords: Vec<TokenPassword>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct TokenListResult { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<Token>, #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct TokenPassword { #[serde(rename = "creationTime", default, skip_serializing_if = "Option::is_none")] pub creation_time: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub expiry: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option<token_password::Name>, #[serde(default, skip_serializing_if = "Option::is_none")] pub value: Option<String>, } pub mod token_password { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Name { #[serde(rename = "password1")] Password1, #[serde(rename = "password2")] Password2, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct TokenProperties { #[serde(rename = "creationDate", default, skip_serializing_if = "Option::is_none")] pub creation_date: Option<String>, #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")] pub provisioning_state: 
Option<token_properties::ProvisioningState>, #[serde(rename = "scopeMapId", default, skip_serializing_if = "Option::is_none")] pub scope_map_id: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub credentials: Option<TokenCredentialsProperties>, #[serde(default, skip_serializing_if = "Option::is_none")] pub status: Option<token_properties::Status>, } pub mod token_properties { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum ProvisioningState { Creating, Updating, Deleting, Succeeded, Failed, Canceled, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Status { #[serde(rename = "enabled")] Enabled, #[serde(rename = "disabled")] Disabled, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct TokenUpdateParameters { #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<TokenUpdateProperties>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct TokenUpdateProperties { #[serde(rename = "scopeMapId", default, skip_serializing_if = "Option::is_none")] pub scope_map_id: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub status: Option<token_update_properties::Status>, #[serde(default, skip_serializing_if = "Option::is_none")] pub credentials: Option<TokenCredentialsProperties>, } pub mod token_update_properties { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Status { #[serde(rename = "enabled")] Enabled, #[serde(rename = "disabled")] Disabled, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct TrustPolicy { #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")] pub type_: Option<trust_policy::Type>, #[serde(default, skip_serializing_if = "Option::is_none")] pub status: Option<trust_policy::Status>, } pub mod trust_policy { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Type { Notary, } 
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Status { #[serde(rename = "enabled")] Enabled, #[serde(rename = "disabled")] Disabled, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct VirtualNetworkRule { #[serde(default, skip_serializing_if = "Option::is_none")] pub action: Option<virtual_network_rule::Action>, pub id: String, } pub mod virtual_network_rule { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Action { Allow, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Webhook { #[serde(flatten)] pub resource: Resource, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<WebhookProperties>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct WebhookCreateParameters { #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option<serde_json::Value>, pub location: String, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<WebhookPropertiesCreateParameters>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct WebhookListResult { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<Webhook>, #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct WebhookProperties { #[serde(default, skip_serializing_if = "Option::is_none")] pub status: Option<webhook_properties::Status>, #[serde(default, skip_serializing_if = "Option::is_none")] pub scope: Option<String>, pub actions: Vec<String>, #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")] pub provisioning_state: Option<webhook_properties::ProvisioningState>, } pub mod webhook_properties { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Status { #[serde(rename = "enabled")] Enabled, #[serde(rename 
= "disabled")] Disabled, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum ProvisioningState { Creating, Updating, Deleting, Succeeded, Failed, Canceled, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct WebhookPropertiesCreateParameters { #[serde(rename = "serviceUri")] pub service_uri: String, #[serde(rename = "customHeaders", default, skip_serializing_if = "Option::is_none")] pub custom_headers: Option<serde_json::Value>, #[serde(default, skip_serializing_if = "Option::is_none")] pub status: Option<webhook_properties_create_parameters::Status>, #[serde(default, skip_serializing_if = "Option::is_none")] pub scope: Option<String>, pub actions: Vec<String>, } pub mod webhook_properties_create_parameters { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Status { #[serde(rename = "enabled")] Enabled, #[serde(rename = "disabled")] Disabled, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct WebhookPropertiesUpdateParameters { #[serde(rename = "serviceUri", default, skip_serializing_if = "Option::is_none")] pub service_uri: Option<String>, #[serde(rename = "customHeaders", default, skip_serializing_if = "Option::is_none")] pub custom_headers: Option<serde_json::Value>, #[serde(default, skip_serializing_if = "Option::is_none")] pub status: Option<webhook_properties_update_parameters::Status>, #[serde(default, skip_serializing_if = "Option::is_none")] pub scope: Option<String>, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub actions: Vec<String>, } pub mod webhook_properties_update_parameters { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Status { #[serde(rename = "enabled")] Enabled, #[serde(rename = "disabled")] Disabled, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct WebhookUpdateParameters { #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option<serde_json::Value>, 
#[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<WebhookPropertiesUpdateParameters>, }
39.675
99
0.697371
75d79a804848aec21fd13e1cd43e3436e567c9f7
26,531
use std::{ collections::{hash_map::Entry, HashMap, VecDeque}, future::Future, hash::{Hash, Hasher}, pin::Pin, time::{Duration, Instant}, }; use futures::{ task::{Context, Poll}, FutureExt, }; use never::Never; use tokio::sync::{ mpsc, oneshot::{channel, Receiver, Sender}, }; use vecrem::VecExt; use crate::{ adaptors::throttle::chan_send::{ChanSend, SendTy}, requests::{HasPayload, Output, Request, Requester}, types::*, }; // Throttling is quite complicated this comment describes the algorithm of // current implementation. NOTE: this only describes CURRENT implementation. // Implementation may change at any time. // // ### Request // // When throttling request is sent, it sends a tuple of `ChatId` (more // accurately, just local `Id`) and `Sender<()>` to the worker. Then the request // waits for notification from worker. When notification is received it sends // underlying request. // // ### Worker // // Worker does the most important job - it checks for limit exceed. // // The worker stores "history" of requests sent in last minute (and to which // chats the were sent) and queue of pending updates. // // The worker does the following algorithm loop: // // 1. If queue is empty wait for the first message in incoming channel (and adds // it to queue). // // 2. Read all present messages from incoming channel and transfer them to // queue. // // 3. Record current time. // // 4. Clear history from records which time < (current - minute) // // 5. Count all requests in which were sent last second, // `allowed = limit.overall_s - count` // // 6. If `allowed == 0` wait a bit and `continue` to the next iteration // // 7. Count how many requests were sent to which chats (i.e.: create // `Map<ChatId, Count>`) (note: the same map, but for last minute also // exists, but it's updated, instead of recreation) // // 8. 
While `allowed >= 0` search for requests which chat hasn't exceed limits // (i.e.: map[chat] < limit), if one is found, decrease `allowed`, notify // request that it can be now executed, increase counts, add record to // history. const MINUTE: Duration = Duration::from_secs(60); const SECOND: Duration = Duration::from_secs(1); // Delay between worker iterations. // // For now it's `second/4`, but that number is chosen pretty randomly, we may // want to change this. const DELAY: Duration = Duration::from_millis(250); /// Telegram request limits. /// /// This struct is used in [`Throttle`]. /// /// Note that you may ask telegram [@BotSupport] to increase limits for your /// particular bot if it has a lot of users (but they may or may not do that). /// /// [@BotSupport]: https://t.me/botsupport #[derive(Copy, Clone, Eq, PartialEq, Hash)] pub struct Limits { /// Allowed messages in one chat per second pub chat_s: u32, /// Allowed messages per second pub overall_s: u32, /// Allowed messages in one chat per minute pub chat_m: u32, } /// Defaults are taken from [telegram documentation][tgdoc]. /// /// [tgdoc]: https://core.telegram.org/bots/faq#my-bot-is-hitting-limits-how-do-i-avoid-this impl Default for Limits { fn default() -> Self { Self { chat_s: 1, overall_s: 30, chat_m: 20, } } } /// Automatic request limits respecting mechanism. /// /// Telegram has strict [limits], which, if exceeded will sooner or later cause /// `RequestError::RetryAfter(_)` errors. These errors can cause users of your /// bot to never receive responds from the bot or receive them in wrong order. /// /// This bot wrapper automatically checks for limits, suspending requests until /// they could be sent without exceeding limits (request order in chats is not /// changed). /// /// It's recommended to use this wrapper before other wrappers (i.e.: /// `SomeWrapper<Throttle<Bot>>`) because if done otherwise inner wrappers may /// cause `Throttle` to miscalculate limits usage. 
/// /// [limits]: https://core.telegram.org/bots/faq#my-bot-is-hitting-limits-how-do-i-avoid-this /// /// ## Examples /// /// ```no_run (throttle fails to spawn task without tokio runtime) /// use teloxide_core::{adaptors::throttle::Limits, requests::RequesterExt, Bot}; /// /// # #[allow(deprecated)] /// let bot = Bot::new("TOKEN").throttle(Limits::default()); /// /// /* send many requests here */ /// ``` /// /// ## Note about send-by-@channelusername /// /// Telegram have limits on sending messages to _the same chat_. To check them /// we store `chat_id`s of several last requests. _However_ there is no good way /// to tell if given `ChatId::Id(x)` corresponds to the same chat as /// `ChatId::ChannelUsername(u)`. /// /// Our current approach is to just give up and check `chat_id_a == chat_id_b`. /// This may give incorrect results. /// /// As such, we encourage not to use `ChatId::ChannelUsername(u)` with this bot /// wrapper. pub struct Throttle<B> { bot: B, // Sender<Never> is used to pass the signal to unlock by closing the channel. queue: mpsc::Sender<(Id, Sender<Never>)>, } async fn worker(limits: Limits, mut queue_rx: mpsc::Receiver<(Id, Sender<Never>)>) { // +- Same idea as in `Throttle::new` let cap = limits.overall_s + (limits.overall_s / 4); // FIXME(waffle): Make an research about data structures for this queue. // Currently this is O(n) removing (n = number of elements // stayed), amortized O(1) push (vec+vecrem). 
let mut queue: Vec<(Id, Sender<Never>)> = Vec::with_capacity(cap as usize); // I wish there was special data structure for history which removed the // need in 2 hashmaps // (waffle) let mut history: VecDeque<(Id, Instant)> = VecDeque::new(); // hchats[chat] = history.iter().filter(|(c, _)| c == chat).count() let mut hchats: HashMap<Id, u32> = HashMap::new(); let mut hchats_s = HashMap::new(); // set to true when `queue_rx` is closed let mut close = false; while !close || !queue.is_empty() { // If there are no pending requests we are just waiting if queue.is_empty() { match queue_rx.recv().await { Some(req) => queue.push(req), None => close = true, } } // update local queue with latest requests loop { // FIXME(waffle): https://github.com/tokio-rs/tokio/issues/3350 match queue_rx.recv().now_or_never() { Some(Some(req)) => queue.push(req), // There are no items in queue None => break, // The queue was closed Some(None) => close = true, } } // _Maybe_ we need to use `spawn_blocking` here, because there is // decent amount of blocking work. However _for now_ I've decided not // to use it here. // // Reasons (not to use `spawn_blocking`): // // 1. The work seems not very CPU-bound, it's not heavy computations, // it's more like light computations. // // 2. `spawn_blocking` is not zero-cost — it spawns a new system thread // + do so other work. This may actually be *worse* then current // "just do everything in this async fn" approach. // // 3. With `rt-threaded` feature, tokio uses [`num_cpus()`] threads // which should be enough to work fine with one a-bit-blocking task. // Crucially current behaviour will be problem mostly with // single-threaded runtimes (and in case you're using one, you // probably don't want to spawn unnecessary threads anyway). // // I think if we'll ever change this behaviour, we need to make it // _configurable_. // // See also [discussion (ru)]. 
// // NOTE: If you are reading this because you have any problems because // of this worker, open an [issue on github] // // [`num_cpus()`]: https://vee.gg/JGwq2 // [discussion (ru)]: https://t.me/rust_async/27891 // [issue on github]: https://github.com/teloxide/teloxide/issues/new // // (waffle) let now = Instant::now(); let min_back = now - MINUTE; let sec_back = now - SECOND; // make history and hchats up-to-date while let Some((_, time)) = history.front() { // history is sorted, we found first up-to-date thing if time >= &min_back { break; } if let Some((chat, _)) = history.pop_front() { let ent = hchats.entry(chat).and_modify(|count| { *count -= 1; }); if let Entry::Occupied(entry) = ent { if *entry.get() == 0 { entry.remove_entry(); } } } } // as truncates which is ok since in case of truncation it would always be >= // limits.overall_s let used = history .iter() .take_while(|(_, time)| time > &sec_back) .count() as u32; let mut allowed = limits.overall_s.saturating_sub(used); if allowed == 0 { hchats_s.clear(); tokio::time::sleep(DELAY).await; continue; } for (chat, _) in history.iter().take_while(|(_, time)| time > &sec_back) { *hchats_s.entry(*chat).or_insert(0) += 1; } let mut queue_rem = queue.removing(); while let Some(entry) = queue_rem.next() { let chat = &entry.value().0; let cond = { hchats_s.get(chat).copied().unwrap_or(0) < limits.chat_s && hchats.get(chat).copied().unwrap_or(0) < limits.chat_m }; if cond { { *hchats_s.entry(*chat).or_insert(0) += 1; *hchats.entry(*chat).or_insert(0) += 1; history.push_back((*chat, Instant::now())); } // This will close the channel unlocking associated request drop(entry.remove()); // We've "sent" 1 request, so now we can send 1 less allowed -= 1; if allowed == 0 { break; } } else { entry.skip(); } } drop(queue_rem); // It's easier to just recompute last second stats, instead of keeping // track of it alongside with minute stats, so we just throw this away. 
hchats_s.clear(); tokio::time::sleep(DELAY).await; } } impl<B> Throttle<B> { /// Creates new [`Throttle`] alongside with worker future. /// /// Note: [`Throttle`] will only send requests if returned worker is /// polled/spawned/awaited. pub fn new(bot: B, limits: Limits) -> (Self, impl Future<Output = ()>) { // A buffer made slightly bigger (112.5%) than overall limit // so we won't lose performance when hitting limits. // // (I hope this makes sense) (waffle) let buffer = limits.overall_s + (limits.overall_s / 8); let (queue_tx, queue_rx) = mpsc::channel(buffer as usize); let worker = worker(limits, queue_rx); let this = Self { bot, queue: queue_tx, }; (this, worker) } /// Creates new [`Throttle`] spawning the worker with `tokio::spawn` /// /// Note: it's recommended to use [`RequesterExt::throttle`] instead. /// /// [`RequesterExt::throttle`]: crate::requests::RequesterExt::throttle pub fn new_spawn(bot: B, limits: Limits) -> Self where // Basically, I hate this bound. // This is yet another problem caused by [rust-lang/#76882]. // And I think it *is* a bug. // // [rust-lang/#76882]: https://github.com/rust-lang/rust/issues/76882 // // Though crucially I can't think of a case with non-static bot. // But anyway, it doesn't change the fact that this bound is redundant. // // (waffle) B: 'static, { let (this, worker) = Self::new(bot, limits); tokio::spawn(worker); this } /// Allows to access inner bot pub fn inner(&self) -> &B { &self.bot } /// Unwraps inner bot pub fn into_inner(self) -> B { self.bot } } macro_rules! f { ($m:ident $this:ident ($($arg:ident : $T:ty),*)) => { ThrottlingRequest( $this.inner().$m($($arg),*), $this.queue.clone(), |p| (&p.payload_ref().chat_id).into(), ) }; } macro_rules! fty { ($T:ident) => { ThrottlingRequest<B::$T> }; } macro_rules! fid { ($m:ident $this:ident ($($arg:ident : $T:ty),*)) => { $this.inner().$m($($arg),*) }; } macro_rules! 
ftyid { ($T:ident) => { B::$T }; } impl<B: Requester> Requester for Throttle<B> where B::SendMessage: Send, B::ForwardMessage: Send, B::SendPhoto: Send, B::SendAudio: Send, B::SendDocument: Send, B::SendVideo: Send, B::SendAnimation: Send, B::SendVoice: Send, B::SendVideoNote: Send, B::SendMediaGroup: Send, B::SendLocation: Send, B::SendVenue: Send, B::SendContact: Send, B::SendPoll: Send, B::SendDice: Send, B::SendSticker: Send, B::SendInvoice: Send, { type Err = B::Err; requester_forward! { send_message, forward_message, send_photo, send_audio, send_document, send_video, send_animation, send_voice, send_video_note, send_media_group, send_location, send_venue, send_contact, send_poll, send_dice, send_sticker, => f, fty } type SendInvoice = ThrottlingRequest<B::SendInvoice>; fn send_invoice<T, D, Pa, P, S, C, Pri>( &self, chat_id: i32, title: T, description: D, payload: Pa, provider_token: P, start_parameter: S, currency: C, prices: Pri, ) -> Self::SendInvoice where T: Into<String>, D: Into<String>, Pa: Into<String>, P: Into<String>, S: Into<String>, C: Into<String>, Pri: IntoIterator<Item = LabeledPrice>, { ThrottlingRequest( self.inner().send_invoice( chat_id, title, description, payload, provider_token, start_parameter, currency, prices, ), self.queue.clone(), |p| Id::Id(p.payload_ref().chat_id as _), ) } requester_forward! 
{ get_me, get_updates, set_webhook, delete_webhook, get_webhook_info, edit_message_live_location, edit_message_live_location_inline, stop_message_live_location, stop_message_live_location_inline, send_chat_action, get_user_profile_photos, get_file, kick_chat_member, unban_chat_member, restrict_chat_member, promote_chat_member, set_chat_administrator_custom_title, set_chat_permissions, export_chat_invite_link, set_chat_photo, delete_chat_photo, set_chat_title, set_chat_description, pin_chat_message, unpin_chat_message, leave_chat, get_chat, get_chat_administrators, get_chat_members_count, get_chat_member, set_chat_sticker_set, delete_chat_sticker_set, answer_callback_query, set_my_commands, get_my_commands, answer_inline_query, edit_message_text, edit_message_text_inline, edit_message_caption, edit_message_caption_inline, edit_message_media, edit_message_media_inline, edit_message_reply_markup, edit_message_reply_markup_inline, stop_poll, delete_message, get_sticker_set, upload_sticker_file, create_new_sticker_set, add_sticker_to_set, set_sticker_position_in_set, delete_sticker_from_set, set_sticker_set_thumb, answer_shipping_query, answer_pre_checkout_query, set_passport_data_errors, send_game, set_game_score, set_game_score_inline, get_game_high_scores => fid, ftyid } } download_forward! { 'w B Throttle<B> { this => this.inner() } } /// Id used in worker. /// /// It is used instead of `ChatId` to make copying cheap even in case of /// usernames. 
(It just hashes username) #[derive(Debug, Copy, Clone, Hash, Eq, PartialEq)] enum Id { Id(i64), Ch(u64), } impl From<&ChatId> for Id { fn from(value: &ChatId) -> Self { match value { ChatId::Id(id) => Id::Id(*id), ChatId::ChannelUsername(username) => { let mut hasher = std::collections::hash_map::DefaultHasher::new(); username.hash(&mut hasher); let hash = hasher.finish(); Id::Ch(hash) } } } } pub struct ThrottlingRequest<R: HasPayload>( R, mpsc::Sender<(Id, Sender<Never>)>, fn(&R::Payload) -> Id, ); impl<R: HasPayload> HasPayload for ThrottlingRequest<R> { type Payload = R::Payload; fn payload_mut(&mut self) -> &mut Self::Payload { self.0.payload_mut() } fn payload_ref(&self) -> &Self::Payload { self.0.payload_ref() } } impl<R> Request for ThrottlingRequest<R> where R: Request + Send, { type Err = R::Err; type Send = ThrottlingSend<R>; type SendRef = ThrottlingSendRef<R>; fn send(self) -> Self::Send { let (tx, rx) = channel(); let id = self.2(self.payload_ref()); let send = self.1.send_t((id, tx)); ThrottlingSend(ThrottlingSendInner::Registering { request: self.0, send, wait: rx, }) } fn send_ref(&self) -> Self::SendRef { let (tx, rx) = channel(); let send = self.1.clone().send_t((self.2(self.payload_ref()), tx)); // As we can't move self.0 (request) out, as we do in `send` we are // forced to call `send_ref()`. This may have overhead and/or lead to // wrong results because `R::send_ref` does the send. // // However `Request` documentation explicitly notes that `send{,_ref}` // should **not** do any kind of work, so it's ok. 
let request = self.0.send_ref(); ThrottlingSendRef(ThrottlingSendRefInner::Registering { request, send, wait: rx, }) } } #[pin_project::pin_project] pub struct ThrottlingSend<R: Request>(#[pin] ThrottlingSendInner<R>); #[pin_project::pin_project(project = SendProj, project_replace = SendRepl)] enum ThrottlingSendInner<R: Request> { Registering { request: R, #[pin] send: ChanSend, wait: Receiver<Never>, }, Pending { request: R, #[pin] wait: Receiver<Never>, }, Sent { #[pin] fut: R::Send, }, Done, } impl<R: Request> Future for ThrottlingSend<R> { type Output = Result<Output<R>, R::Err>; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { let mut this = self.as_mut().project().0; match this.as_mut().project() { SendProj::Registering { request: _, send, wait: _, } => match send.poll(cx) { Poll::Pending => Poll::Pending, Poll::Ready(res) => { if let SendRepl::Registering { request, send: _, wait, } = this.as_mut().project_replace(ThrottlingSendInner::Done) { match res { Ok(()) => this .as_mut() .project_replace(ThrottlingSendInner::Pending { request, wait }), // The worker is unlikely to drop queue before sending all requests, // but just in case it has dropped the queue, we want to just send the // request. Err(_) => this.as_mut().project_replace(ThrottlingSendInner::Sent { fut: request.send(), }), }; } self.poll(cx) } }, SendProj::Pending { request: _, wait } => match wait.poll(cx) { Poll::Pending => Poll::Pending, // Worker pass "message" to unlock us by closing the channel, // and thus we can safely ignore this result as we know it will // always be `Err(_)` (because `Ok(Never)` is uninhibited) // and that's what we want. 
Poll::Ready(_) => { if let SendRepl::Pending { request, wait: _ } = this.as_mut().project_replace(ThrottlingSendInner::Done) { this.as_mut().project_replace(ThrottlingSendInner::Sent { fut: request.send(), }); } self.poll(cx) } }, SendProj::Sent { fut } => { let res = futures::ready!(fut.poll(cx)); this.set(ThrottlingSendInner::Done); Poll::Ready(res) } SendProj::Done => Poll::Pending, } } } #[pin_project::pin_project] pub struct ThrottlingSendRef<R: Request>(#[pin] ThrottlingSendRefInner<R>); #[pin_project::pin_project(project = SendRefProj, project_replace = SendRefRepl)] enum ThrottlingSendRefInner<R: Request> { Registering { request: R::SendRef, #[pin] send: ChanSend, wait: Receiver<Never>, }, Pending { request: R::SendRef, #[pin] wait: Receiver<Never>, }, Sent { #[pin] fut: R::SendRef, }, Done, } impl<R: Request> Future for ThrottlingSendRef<R> { type Output = Result<Output<R>, R::Err>; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { let mut this = self.as_mut().project().0; match this.as_mut().project() { SendRefProj::Registering { request: _, send, wait: _, } => match send.poll(cx) { Poll::Pending => Poll::Pending, Poll::Ready(res) => { if let SendRefRepl::Registering { request, send: _, wait, } = this.as_mut().project_replace(ThrottlingSendRefInner::Done) { match res { Ok(()) => this .as_mut() .project_replace(ThrottlingSendRefInner::Pending { request, wait }), // The worker is unlikely to drop queue before sending all requests, // but just in case it has dropped the queue, we want to just send the // request. 
Err(_) => this .as_mut() .project_replace(ThrottlingSendRefInner::Sent { fut: request }), }; } self.poll(cx) } }, SendRefProj::Pending { request: _, wait } => match wait.poll(cx) { Poll::Pending => Poll::Pending, // Worker pass "message" to unlock us by closing the channel, // and thus we can safely ignore this result as we know it will // always be `Err(_)` (because `Ok(Never)` is uninhibited) // and that's what we want. Poll::Ready(_) => { if let SendRefRepl::Pending { request, wait: _ } = this.as_mut().project_replace(ThrottlingSendRefInner::Done) { this.as_mut() .project_replace(ThrottlingSendRefInner::Sent { fut: request }); } self.poll(cx) } }, SendRefProj::Sent { fut } => { let res = futures::ready!(fut.poll(cx)); this.set(ThrottlingSendRefInner::Done); Poll::Ready(res) } SendRefProj::Done => Poll::Pending, } } } mod chan_send { use std::{future::Future, pin::Pin}; use futures::task::{Context, Poll}; use never::Never; use tokio::sync::{mpsc, mpsc::error::SendError, oneshot::Sender}; use crate::adaptors::throttle::Id; pub(super) trait SendTy { fn send_t(self, val: (Id, Sender<Never>)) -> ChanSend; } #[pin_project::pin_project] pub(super) struct ChanSend(#[pin] Inner); #[cfg(not(feature = "nightly"))] type Inner = Pin<Box<dyn Future<Output = Result<(), SendError<(Id, Sender<Never>)>>> + Send>>; #[cfg(feature = "nightly")] type Inner = impl Future<Output = Result<(), SendError<(Id, Sender<Never>)>>>; impl SendTy for mpsc::Sender<(Id, Sender<Never>)> { // `return`s trick IDEA not to show errors #[allow(clippy::needless_return)] fn send_t(self, val: (Id, Sender<Never>)) -> ChanSend { #[cfg(feature = "nightly")] { fn def( sender: mpsc::Sender<(Id, Sender<Never>)>, val: (Id, Sender<Never>), ) -> Inner { async move { sender.send(val).await } } return ChanSend(def(self, val)); } #[cfg(not(feature = "nightly"))] { let this = self; return ChanSend(Box::pin(async move { this.send(val).await })); } } } impl Future for ChanSend { type Output = Result<(), SendError<(Id, 
Sender<Never>)>>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { self.project().0.poll(cx) } } }
33.456494
100
0.551657
26eaf9bbb11dc10aa4bc0efe73bd4b770235b377
19,715
mod command; mod stream_description; mod wire; use std::{ sync::Arc, time::{Duration, Instant}, }; use derivative::Derivative; use tokio::sync::{mpsc, Mutex}; use self::wire::Message; use super::manager::PoolManager; use crate::{ bson::oid::ObjectId, cmap::{ options::{ConnectionOptions, StreamOptions}, PoolGeneration, }, compression::Compressor, error::{load_balanced_mode_mismatch, Error, ErrorKind, Result}, event::cmap::{ CmapEventHandler, ConnectionCheckedInEvent, ConnectionCheckedOutEvent, ConnectionClosedEvent, ConnectionClosedReason, ConnectionCreatedEvent, ConnectionReadyEvent, }, options::{ServerAddress, TlsOptions}, runtime::AsyncStream, }; pub(crate) use command::{Command, RawCommand, RawCommandResponse}; pub(crate) use stream_description::StreamDescription; pub(crate) use wire::next_request_id; /// User-facing information about a connection to the database. #[derive(Clone, Debug)] pub struct ConnectionInfo { /// A driver-generated identifier that uniquely identifies the connection. pub id: u32, /// The address that the connection is connected to. pub address: ServerAddress, } /// A wrapper around Stream that contains all the CMAP information needed to maintain a connection. #[derive(Derivative)] #[derivative(Debug)] pub(crate) struct Connection { pub(super) id: u32, pub(crate) address: ServerAddress, pub(crate) generation: ConnectionGeneration, /// The cached StreamDescription from the connection's handshake. pub(super) stream_description: Option<StreamDescription>, /// Marks the time when the connection was last checked into the pool. This is used /// to detect if the connection is idle. ready_and_available_time: Option<Instant>, /// PoolManager used to check this connection back in when dropped. /// None when checked into the pool. pub(super) pool_manager: Option<PoolManager>, /// Whether or not a command is currently being run on this connection. 
This is set to `true` /// right before sending bytes to the server and set back to `false` once a full response has /// been read. command_executing: bool, /// Whether or not this connection has experienced a network error while reading or writing. /// Once the connection has received an error, it should not be used again or checked back /// into a pool. error: bool, stream: AsyncStream, /// Compressor that the client will use before sending messages. /// This compressor does not get used to decompress server messages. /// The client will decompress server messages using whichever compressor /// the server indicates in its message. This compressor is the first /// compressor in the client's compressor list that also appears in the /// server's compressor list. pub(super) compressor: Option<Compressor>, /// If the connection is pinned to a cursor or transaction, the channel sender to return this /// connection to the pin holder. pinned_sender: Option<mpsc::Sender<Connection>>, #[derivative(Debug = "ignore")] handler: Option<Arc<dyn CmapEventHandler>>, } impl Connection { async fn new( id: u32, address: ServerAddress, generation: u32, options: Option<ConnectionOptions>, ) -> Result<Self> { let stream_options = StreamOptions { address: address.clone(), connect_timeout: options.as_ref().and_then(|opts| opts.connect_timeout), tls_options: options.as_ref().and_then(|opts| opts.tls_options.clone()), }; let conn = Self { id, generation: ConnectionGeneration::Normal(generation), pool_manager: None, command_executing: false, ready_and_available_time: None, stream: AsyncStream::connect(stream_options).await?, address, handler: options.and_then(|options| options.event_handler), stream_description: None, error: false, pinned_sender: None, compressor: None, }; Ok(conn) } /// Constructs and connects a new connection. 
pub(super) async fn connect(pending_connection: PendingConnection) -> Result<Self> { let generation = match pending_connection.generation { PoolGeneration::Normal(gen) => gen, PoolGeneration::LoadBalanced(_) => 0, /* Placeholder; will be overwritten in * `ConnectionEstablisher:: * establish_connection`. */ }; Self::new( pending_connection.id, pending_connection.address.clone(), generation, pending_connection.options, ) .await } /// Construct and connect a new connection used for monitoring. pub(crate) async fn connect_monitoring( address: ServerAddress, connect_timeout: Option<Duration>, tls_options: Option<TlsOptions>, ) -> Result<Self> { Self::new( 0, address, 0, Some(ConnectionOptions { connect_timeout, tls_options, event_handler: None, }), ) .await } #[cfg(test)] pub(crate) async fn new_testing( id: u32, address: ServerAddress, generation: u32, options: Option<ConnectionOptions>, ) -> Result<Self> { Self::new(id, address, generation, options).await } pub(crate) fn info(&self) -> ConnectionInfo { ConnectionInfo { id: self.id, address: self.address.clone(), } } pub(crate) fn service_id(&self) -> Option<ObjectId> { self.generation.service_id() } pub(crate) fn address(&self) -> &ServerAddress { &self.address } /// Helper to mark the time that the connection was checked into the pool for the purpose of /// detecting when it becomes idle. pub(super) fn mark_as_available(&mut self) { self.pool_manager.take(); self.ready_and_available_time = Some(Instant::now()); } /// Helper to mark that the connection has been checked out of the pool. This ensures that the /// connection is not marked as idle based on the time that it's checked out and that it has a /// reference to the pool. pub(super) fn mark_as_in_use(&mut self, manager: PoolManager) { self.pool_manager = Some(manager); self.ready_and_available_time.take(); } /// Checks if the connection is idle. 
pub(super) fn is_idle(&self, max_idle_time: Option<Duration>) -> bool { self.ready_and_available_time .and_then(|ready_and_available_time| { max_idle_time.map(|max_idle_time| { Instant::now().duration_since(ready_and_available_time) >= max_idle_time }) }) .unwrap_or(false) } /// Checks if the connection is currently executing an operation. pub(super) fn is_executing(&self) -> bool { self.command_executing } /// Checks if the connection experienced a network error and should be closed. pub(super) fn has_errored(&self) -> bool { self.error } /// Helper to create a `ConnectionCheckedOutEvent` for the connection. pub(super) fn checked_out_event(&self) -> ConnectionCheckedOutEvent { ConnectionCheckedOutEvent { address: self.address.clone(), connection_id: self.id, } } /// Helper to create a `ConnectionCheckedInEvent` for the connection. pub(super) fn checked_in_event(&self) -> ConnectionCheckedInEvent { ConnectionCheckedInEvent { address: self.address.clone(), connection_id: self.id, } } /// Helper to create a `ConnectionReadyEvent` for the connection. pub(super) fn ready_event(&self) -> ConnectionReadyEvent { ConnectionReadyEvent { address: self.address.clone(), connection_id: self.id, } } /// Helper to create a `ConnectionClosedEvent` for the connection. pub(super) fn closed_event(&self, reason: ConnectionClosedReason) -> ConnectionClosedEvent { ConnectionClosedEvent { address: self.address.clone(), connection_id: self.id, reason, } } async fn send_message( &mut self, message: Message, to_compress: bool, ) -> Result<RawCommandResponse> { self.command_executing = true; // If the client has agreed on a compressor with the server, and the command // is the right type of command, then compress the message. 
let write_result = match self.compressor { Some(ref compressor) if to_compress => { message .write_compressed_to(&mut self.stream, compressor) .await } _ => message.write_to(&mut self.stream).await, }; self.error = write_result.is_err(); write_result?; let response_message_result = Message::read_from(&mut self.stream).await; self.command_executing = false; self.error = response_message_result.is_err(); RawCommandResponse::new(self.address.clone(), response_message_result?) } /// Executes a `Command` and returns a `CommandResponse` containing the result from the server. /// /// An `Ok(...)` result simply means the server received the command and that the driver /// driver received the response; it does not imply anything about the success of the command /// itself. pub(crate) async fn send_command( &mut self, command: Command, request_id: impl Into<Option<i32>>, ) -> Result<RawCommandResponse> { let to_compress = command.should_compress(); let message = Message::with_command(command, request_id.into())?; self.send_message(message, to_compress).await } /// Executes a `RawCommand` and returns a `CommandResponse` containing the result from the /// server. /// /// An `Ok(...)` result simply means the server received the command and that the driver /// received the response; it does not imply anything about the success of the command /// itself. pub(crate) async fn send_raw_command( &mut self, command: RawCommand, request_id: impl Into<Option<i32>>, ) -> Result<RawCommandResponse> { let to_compress = command.should_compress(); let message = Message::with_raw_command(command, request_id.into()); self.send_message(message, to_compress).await } /// Gets the connection's StreamDescription. pub(crate) fn stream_description(&self) -> Result<&StreamDescription> { self.stream_description.as_ref().ok_or_else(|| { ErrorKind::Internal { message: "Stream checked out but not handshaked".to_string(), } .into() }) } /// Pin the connection, removing it from the normal connection pool. 
pub(crate) fn pin(&mut self) -> Result<PinnedConnectionHandle> { if self.pinned_sender.is_some() { return Err(Error::internal(format!( "cannot pin an already-pinned connection (id = {})", self.id ))); } if self.pool_manager.is_none() { return Err(Error::internal(format!( "cannot pin a checked-in connection (id = {})", self.id ))); } let (tx, rx) = mpsc::channel(1); self.pinned_sender = Some(tx); Ok(PinnedConnectionHandle { id: self.id, receiver: Arc::new(Mutex::new(rx)), }) } /// Whether this connection has a live `PinnedConnectionHandle`. pub(crate) fn is_pinned(&self) -> bool { self.pinned_sender.is_some() } /// Close this connection, emitting a `ConnectionClosedEvent` with the supplied reason. pub(super) fn close_and_drop(mut self, reason: ConnectionClosedReason) { self.close(reason); } /// Close this connection, emitting a `ConnectionClosedEvent` with the supplied reason. fn close(&mut self, reason: ConnectionClosedReason) { self.pool_manager.take(); if let Some(ref handler) = self.handler { handler.handle_connection_closed_event(self.closed_event(reason)); } } /// Nullify the inner state and return it in a new `Connection` for checking back in to /// the pool. fn take(&mut self) -> Connection { Connection { id: self.id, address: self.address.clone(), generation: self.generation.clone(), stream: std::mem::replace(&mut self.stream, AsyncStream::Null), handler: self.handler.take(), stream_description: self.stream_description.take(), command_executing: self.command_executing, error: self.error, pool_manager: None, ready_and_available_time: None, pinned_sender: self.pinned_sender.clone(), compressor: self.compressor.clone(), } } } impl Drop for Connection { fn drop(&mut self) { // If the connection has a pool manager, that means that the connection is // being dropped when it's checked out. If the pool is still alive, it // should check itself back in. 
Otherwise, the connection should close // itself and emit a ConnectionClosed event (because the `close_and_drop` // helper was not called explicitly). // // If the connection does not have a pool manager, then the connection is // being dropped while it's not checked out. This means that the pool called // the `close_and_drop` helper explicitly, so we don't add it back to the // pool or emit any events. if let Some(pool_manager) = self.pool_manager.take() { let mut dropped_connection = self.take(); let result = if let Some(sender) = self.pinned_sender.as_mut() { // Preserve the pool manager and timestamp for pinned connections. dropped_connection.pool_manager = Some(pool_manager.clone()); dropped_connection.ready_and_available_time = self.ready_and_available_time; match sender.try_send(dropped_connection) { Ok(()) => Ok(()), // The connection has been unpinned and should be checked back in. Err(mpsc::error::TrySendError::Closed(mut conn)) => { conn.pinned_sender = None; conn.ready_and_available_time = None; pool_manager.check_in(conn) } // The connection is being returned to the pin holder while another connection // is in the pin buffer; this should never happen. Only possible action is to // check the connection back in. Err(mpsc::error::TrySendError::Full(mut conn)) => { // Panic in debug mode if cfg!(debug_assertions) { panic!( "buffer full when attempting to return a pinned connection (id = \ {})", conn.id ); } // TODO RUST-230 log an error in non-debug mode. conn.pinned_sender = None; conn.ready_and_available_time = None; pool_manager.check_in(conn) } } } else { pool_manager.check_in(dropped_connection) }; if let Err(mut conn) = result { // the check in failed because the pool has been dropped, so we emit the event // here and drop the connection. conn.close(ConnectionClosedReason::PoolClosed); } } } } /// A handle to a pinned connection - the connection itself can be retrieved or returned to the /// normal pool via this handle. 
#[derive(Debug)] pub(crate) struct PinnedConnectionHandle { id: u32, receiver: Arc<Mutex<mpsc::Receiver<Connection>>>, } impl PinnedConnectionHandle { /// Make a new `PinnedConnectionHandle` that refers to the same connection as this one. /// Use with care and only when "lending" a handle in a way that can't be expressed as a /// normal borrow. pub(crate) fn replicate(&self) -> Self { Self { id: self.id, receiver: self.receiver.clone(), } } /// Retrieve the pinned connection, blocking until it's available for use. Will fail if the /// connection has been unpinned. pub(crate) async fn take_connection(&self) -> Result<Connection> { let mut receiver = self.receiver.lock().await; receiver.recv().await.ok_or_else(|| { Error::internal(format!( "cannot take connection after unpin (id={})", self.id )) }) } /// Return the pinned connection to the normal connection pool. pub(crate) async fn unpin_connection(&self) { let mut receiver = self.receiver.lock().await; receiver.close(); // Ensure any connections buffered in the channel are dropped, returning them to the pool. while receiver.recv().await.is_some() {} } } #[derive(Debug, Clone)] pub(crate) enum ConnectionGeneration { Normal(u32), LoadBalanced { generation: u32, service_id: ObjectId, }, } impl ConnectionGeneration { pub(crate) fn service_id(&self) -> Option<ObjectId> { match self { ConnectionGeneration::Normal(_) => None, ConnectionGeneration::LoadBalanced { service_id, .. } => Some(*service_id), } } pub(crate) fn is_stale(&self, current_generation: &PoolGeneration) -> bool { match (self, current_generation) { (ConnectionGeneration::Normal(cgen), PoolGeneration::Normal(pgen)) => cgen != pgen, ( ConnectionGeneration::LoadBalanced { generation: cgen, service_id, }, PoolGeneration::LoadBalanced(gen_map), ) => cgen != gen_map.get(service_id).unwrap_or(&0), _ => load_balanced_mode_mismatch!(false), } } } /// Struct encapsulating the information needed to establish a `Connection`. 
/// /// Creating a `PendingConnection` contributes towards the total connection count of a pool, despite /// not actually making a TCP connection to the pool's endpoint. This models a "pending" Connection /// from the CMAP specification. #[derive(Debug)] pub(super) struct PendingConnection { pub(super) id: u32, pub(super) address: ServerAddress, pub(super) generation: PoolGeneration, pub(super) options: Option<ConnectionOptions>, } impl PendingConnection { /// Helper to create a `ConnectionCreatedEvent` for the connection. pub(super) fn created_event(&self) -> ConnectionCreatedEvent { ConnectionCreatedEvent { address: self.address.clone(), connection_id: self.id, } } }
36.781716
100
0.609739
dd8137f48d3a33feffe887b02bb6d0580e7e9579
11,324
//! Board file for SparkFun Redboard Artemis Nano
//!
//! - <https://www.sparkfun.com/products/15443>

#![no_std]
// Disable this attribute when documenting, as a workaround for
// https://github.com/rust-lang/rust/issues/62184.
#![cfg_attr(not(doc), no_main)]
#![deny(missing_docs)]

use apollo3::chip::Apollo3DefaultPeripherals;
use capsules::virtual_alarm::VirtualMuxAlarm;
use kernel::capabilities;
use kernel::component::Component;
use kernel::dynamic_deferred_call::DynamicDeferredCall;
use kernel::dynamic_deferred_call::DynamicDeferredCallClientState;
use kernel::hil::i2c::I2CMaster;
use kernel::hil::led::LedHigh;
use kernel::hil::time::Counter;
use kernel::platform::{KernelResources, SyscallDriverLookup};
use kernel::scheduler::round_robin::RoundRobinSched;
use kernel::{create_capability, debug, static_init};

pub mod ble;
/// Support routines for debugging I/O.
pub mod io;

// Number of concurrent processes this platform supports.
const NUM_PROCS: usize = 4;
// One upcall slot per process plus one for the kernel itself.
const NUM_UPCALLS_IPC: usize = NUM_PROCS + 1;

// Actual memory for holding the active process structures.
static mut PROCESSES: [Option<&'static dyn kernel::process::Process>; NUM_PROCS] = [None; 4];

// Static reference to chip for panic dumps.
static mut CHIP: Option<&'static apollo3::chip::Apollo3<Apollo3DefaultPeripherals>> = None;
// Static reference to process printer for panic dumps.
static mut PROCESS_PRINTER: Option<&'static kernel::process::ProcessPrinterText> = None;

// How should the kernel respond when a process faults.
const FAULT_RESPONSE: kernel::process::PanicFaultPolicy = kernel::process::PanicFaultPolicy {};

/// Dummy buffer that causes the linker to reserve enough space for the stack.
#[no_mangle]
#[link_section = ".stack_buffer"]
pub static mut STACK_MEMORY: [u8; 0x1000] = [0; 0x1000];

/// A structure representing this platform that holds references to all
/// capsules for this platform.
struct RedboardArtemisNano {
    // Userspace-visible alarm, virtualized on top of the single hardware STimer.
    alarm: &'static capsules::alarm::AlarmDriver<
        'static,
        VirtualMuxAlarm<'static, apollo3::stimer::STimer<'static>>,
    >,
    // Single active-high LED (the board's blue LED).
    led: &'static capsules::led::LedDriver<
        'static,
        LedHigh<'static, apollo3::gpio::GpioPin<'static>>,
        1,
    >,
    gpio: &'static capsules::gpio::GPIO<'static, apollo3::gpio::GpioPin<'static>>,
    console: &'static capsules::console::Console<'static>,
    // I2C master exposed over the Qwiic connector (IOM2).
    i2c_master:
        &'static capsules::i2c_master::I2CMasterDriver<'static, apollo3::iom::Iom<'static>>,
    ble_radio: &'static capsules::ble_advertising_driver::BLE<
        'static,
        apollo3::ble::Ble<'static>,
        VirtualMuxAlarm<'static, apollo3::stimer::STimer<'static>>,
    >,
    scheduler: &'static RoundRobinSched<'static>,
    systick: cortexm4::systick::SysTick,
}

/// Mapping of integer syscalls to objects that implement syscalls.
impl SyscallDriverLookup for RedboardArtemisNano {
    fn with_driver<F, R>(&self, driver_num: usize, f: F) -> R
    where
        F: FnOnce(Option<&dyn kernel::syscall::SyscallDriver>) -> R,
    {
        // Dispatch on the capsule-defined driver number; unknown numbers get None.
        match driver_num {
            capsules::alarm::DRIVER_NUM => f(Some(self.alarm)),
            capsules::led::DRIVER_NUM => f(Some(self.led)),
            capsules::gpio::DRIVER_NUM => f(Some(self.gpio)),
            capsules::console::DRIVER_NUM => f(Some(self.console)),
            capsules::i2c_master::DRIVER_NUM => f(Some(self.i2c_master)),
            capsules::ble_advertising_driver::DRIVER_NUM => f(Some(self.ble_radio)),
            _ => f(None),
        }
    }
}

impl KernelResources<apollo3::chip::Apollo3<Apollo3DefaultPeripherals>> for RedboardArtemisNano {
    type SyscallDriverLookup = Self;
    type SyscallFilter = ();
    type ProcessFault = ();
    type Scheduler = RoundRobinSched<'static>;
    type SchedulerTimer = cortexm4::systick::SysTick;
    type WatchDog = ();
    type ContextSwitchCallback = ();

    fn syscall_driver_lookup(&self) -> &Self::SyscallDriverLookup {
        &self
    }
    fn syscall_filter(&self) -> &Self::SyscallFilter {
        &()
    }
    fn process_fault(&self) -> &Self::ProcessFault {
        &()
    }
    fn scheduler(&self) -> &Self::Scheduler {
        self.scheduler
    }
    fn scheduler_timer(&self) -> &Self::SchedulerTimer {
        &self.systick
    }
    fn watchdog(&self) -> &Self::WatchDog {
        &()
    }
    fn context_switch_callback(&self) -> &Self::ContextSwitchCallback {
        &()
    }
}

/// Main function.
///
/// This is called after RAM initialization is complete.
#[no_mangle]
pub unsafe fn main() {
    apollo3::init();

    let peripherals = static_init!(Apollo3DefaultPeripherals, Apollo3DefaultPeripherals::new());

    // No need to statically allocate mcu/pwr/clk_ctrl because they are only used in main!
    let mcu_ctrl = apollo3::mcuctrl::McuCtrl::new();
    let pwr_ctrl = apollo3::pwrctrl::PwrCtrl::new();
    let clkgen = apollo3::clkgen::ClkGen::new();

    clkgen.set_clock_frequency(apollo3::clkgen::ClockFrequency::Freq48MHz);

    // initialize capabilities
    let process_mgmt_cap = create_capability!(capabilities::ProcessManagementCapability);
    let main_loop_cap = create_capability!(capabilities::MainLoopCapability);
    let memory_allocation_cap = create_capability!(capabilities::MemoryAllocationCapability);

    // Deferred-call plumbing must exist before components that register clients
    // (e.g. the UART mux below) are created.
    let dynamic_deferred_call_clients =
        static_init!([DynamicDeferredCallClientState; 1], Default::default());
    let dynamic_deferred_caller = static_init!(
        DynamicDeferredCall,
        DynamicDeferredCall::new(dynamic_deferred_call_clients)
    );
    DynamicDeferredCall::set_global_instance(dynamic_deferred_caller);

    let board_kernel = static_init!(kernel::Kernel, kernel::Kernel::new(&PROCESSES));

    // Power up components
    pwr_ctrl.enable_uart0();
    pwr_ctrl.enable_iom2();

    // Enable PinCfg
    // NOTE: the double reference (`&&`) matches the signature these pin-config
    // helpers expect for indexed gpio_port entries.
    let _ = &peripherals
        .gpio_port
        .enable_uart(&&peripherals.gpio_port[48], &&peripherals.gpio_port[49]);
    // Enable SDA and SCL for I2C2 (exposed via Qwiic)
    let _ = &peripherals
        .gpio_port
        .enable_i2c(&&peripherals.gpio_port[25], &&peripherals.gpio_port[27]);

    // Configure kernel debug gpios as early as possible
    kernel::debug::assign_gpios(
        Some(&peripherals.gpio_port[19]), // Blue LED
        None,
        None,
    );

    // Create a shared UART channel for the console and for kernel debug.
    let uart_mux = components::console::UartMuxComponent::new(
        &peripherals.uart0,
        115200,
        dynamic_deferred_caller,
    )
    .finalize(());

    // Setup the console.
    let console = components::console::ConsoleComponent::new(
        board_kernel,
        capsules::console::DRIVER_NUM,
        uart_mux,
    )
    .finalize(());
    // Create the debugger object that handles calls to `debug!()`.
    components::debug_writer::DebugWriterComponent::new(uart_mux).finalize(());

    // LEDs
    let led = components::led::LedsComponent::new().finalize(components::led_component_helper!(
        LedHigh<'static, apollo3::gpio::GpioPin>,
        LedHigh::new(&peripherals.gpio_port[19]),
    ));

    // GPIOs
    // These are also ADC channels, but let's expose them as GPIOs
    // NOTE(review): index 4 (A4) is not mapped here — presumably reserved or
    // unavailable on this board; confirm against the board pinout.
    let gpio = components::gpio::GpioComponent::new(
        board_kernel,
        capsules::gpio::DRIVER_NUM,
        components::gpio_component_helper!(
            apollo3::gpio::GpioPin,
            0 => &&peripherals.gpio_port[13], // A0
            1 => &&peripherals.gpio_port[33], // A1
            2 => &&peripherals.gpio_port[11], // A2
            3 => &&peripherals.gpio_port[29], // A3
            5 => &&peripherals.gpio_port[31] // A5
        ),
    )
    .finalize(components::gpio_component_buf!(apollo3::gpio::GpioPin));

    // Create a shared virtualisation mux layer on top of a single hardware
    // alarm.
    let _ = peripherals.stimer.start();
    let mux_alarm = components::alarm::AlarmMuxComponent::new(&peripherals.stimer).finalize(
        components::alarm_mux_component_helper!(apollo3::stimer::STimer),
    );
    let alarm = components::alarm::AlarmDriverComponent::new(
        board_kernel,
        capsules::alarm::DRIVER_NUM,
        mux_alarm,
    )
    .finalize(components::alarm_component_helper!(apollo3::stimer::STimer));

    // Create a process printer for panic.
    let process_printer =
        components::process_printer::ProcessPrinterTextComponent::new().finalize(());
    PROCESS_PRINTER = Some(process_printer);

    // Init the I2C device attached via Qwiic
    let i2c_master = static_init!(
        capsules::i2c_master::I2CMasterDriver<'static, apollo3::iom::Iom<'static>>,
        capsules::i2c_master::I2CMasterDriver::new(
            &peripherals.iom2,
            &mut capsules::i2c_master::BUF,
            board_kernel.create_grant(capsules::i2c_master::DRIVER_NUM, &memory_allocation_cap)
        )
    );

    let _ = &peripherals.iom2.set_master_client(i2c_master);
    let _ = &peripherals.iom2.enable();

    // Setup BLE
    // Ordering follows the vendor bring-up sequence: power/clocks first, then
    // reset, power-up, and controller initialization.
    mcu_ctrl.enable_ble();
    clkgen.enable_ble();
    pwr_ctrl.enable_ble();
    let _ = &peripherals.ble.setup_clocks();
    mcu_ctrl.reset_ble();
    let _ = &peripherals.ble.power_up();
    let _ = &peripherals.ble.ble_initialise();

    let ble_radio = ble::BLEComponent::new(
        board_kernel,
        capsules::ble_advertising_driver::DRIVER_NUM,
        &peripherals.ble,
        mux_alarm,
    )
    .finalize(());

    mcu_ctrl.print_chip_revision();

    debug!("Initialization complete. Entering main loop");

    /// These symbols are defined in the linker script.
    extern "C" {
        /// Beginning of the ROM region containing app images.
        static _sapps: u8;
        /// End of the ROM region containing app images.
        static _eapps: u8;
        /// Beginning of the RAM region for app memory.
        static mut _sappmem: u8;
        /// End of the RAM region for app memory.
        static _eappmem: u8;
    }

    let scheduler = components::sched::round_robin::RoundRobinComponent::new(&PROCESSES)
        .finalize(components::rr_component_helper!(NUM_PROCS));
    // SysTick calibrated against the 48 MHz core clock configured above.
    let systick = cortexm4::systick::SysTick::new_with_calibration(48_000_000);

    let artemis_nano = static_init!(
        RedboardArtemisNano,
        RedboardArtemisNano {
            alarm,
            console,
            gpio,
            led,
            i2c_master,
            ble_radio,
            scheduler,
            systick,
        }
    );

    let chip = static_init!(
        apollo3::chip::Apollo3<Apollo3DefaultPeripherals>,
        apollo3::chip::Apollo3::new(peripherals)
    );
    CHIP = Some(chip);

    kernel::process::load_processes(
        board_kernel,
        chip,
        core::slice::from_raw_parts(
            &_sapps as *const u8,
            &_eapps as *const u8 as usize - &_sapps as *const u8 as usize,
        ),
        core::slice::from_raw_parts_mut(
            &mut _sappmem as *mut u8,
            &_eappmem as *const u8 as usize - &_sappmem as *const u8 as usize,
        ),
        &mut PROCESSES,
        &FAULT_RESPONSE,
        &process_mgmt_cap,
    )
    .unwrap_or_else(|err| {
        debug!("Error loading processes!");
        debug!("{:?}", err);
    });

    // Never returns: hand control to the kernel's main scheduling loop.
    board_kernel.kernel_loop(
        artemis_nano,
        chip,
        None::<&kernel::ipc::IPC<NUM_PROCS, NUM_UPCALLS_IPC>>,
        &main_loop_cap,
    );
}
34.419453
100
0.657188
5dcee9bdf328182b19820b0cfa0861e2c192cdab
3,401
#![warn(clippy::pedantic)]
#![feature(once_cell)]

use oxipng::{optimize_from_memory, Options};
use std::{
    error::Error,
    fs::{self, read_dir, OpenOptions},
    io::{self, Read, Write},
    lazy::SyncLazy,
    path::{Path, PathBuf},
    process::{self, Command},
};

/// Directory scanned for input images.
static IN_DIR: SyncLazy<PathBuf> = SyncLazy::new(|| PathBuf::from("./data"));
/// Directory where optimized, content-addressed output files are written.
static OUT_DIR: SyncLazy<PathBuf> = SyncLazy::new(|| PathBuf::from("./data-out"));

type Res<T> = Result<T, Box<dyn Error>>;

/// Optimize every file in `IN_DIR` and write the results to `OUT_DIR`, named
/// by the BLAKE3 hash of their optimized contents.
///
/// Individual file failures are collected and reported at the end rather than
/// aborting the whole run.
///
/// # Errors
/// Fails only on setup problems: missing input directory or an unreadable
/// directory listing.
fn main() -> Res<()> {
    if !IN_DIR.exists() {
        return Err(Box::new(io::Error::new(
            io::ErrorKind::NotFound,
            "input directory not found",
        )));
    }
    if !OUT_DIR.exists() {
        fs::create_dir_all(&*OUT_DIR)?;
    }

    let read = read_dir(&*IN_DIR)?;
    let mut items = Vec::new();
    // Entries that fail to read are skipped (best effort), matching the
    // per-file error tolerance below.
    for entry in read.flatten() {
        let path = entry.path();
        let file_type = match path
            .extension()
            .and_then(std::ffi::OsStr::to_str)
            .unwrap_or_default()
        {
            "png" => FileType::Png,
            "jpeg" | "jpg" => FileType::Jpeg,
            _ => FileType::Other,
        };
        items.push(Item { path, file_type });
    }

    let mut failures = Vec::new();
    for item in &items {
        if let Err(e) = run_item(item) {
            failures.push(format!("{:?}: {}", item.path, e));
        }
    }
    for failure in failures {
        println!("{}", failure);
    }
    Ok(())
}

/// Process a single input file and write its (possibly optimized) bytes to
/// `OUT_DIR` as `<blake3-hex>.<original-extension>`.
///
/// # Errors
/// Fails if optimization fails or if the output file already exists
/// (`create_new` refuses to overwrite an existing file).
fn run_item(item: &Item) -> Res<()> {
    println!("Doing {:?}", item.path);
    let buf = match item.file_type {
        FileType::Png => run_png(&item.path),
        FileType::Jpeg => run_jpeg(&item.path),
        FileType::Other => run_other(&item.path),
    }?;
    let hash = blake3::hash(&buf).to_hex();
    let ext = item
        .path
        .extension()
        .and_then(std::ffi::OsStr::to_str)
        .unwrap_or_default();
    let mut out_path = OUT_DIR.clone();
    out_path.push(format!("{}.{}", hash, ext));
    let mut f = OpenOptions::new()
        .create_new(true)
        .write(true)
        .open(out_path)?;
    f.write_all(&buf)?;
    Ok(())
}

/// Optimize a PNG in-memory with oxipng's default options.
fn run_png(path: &Path) -> Res<Vec<u8>> {
    let mut buf = Vec::new();
    let mut data = OpenOptions::new().read(true).open(&path)?;
    data.read_to_end(&mut buf)?;
    let res = optimize_from_memory(&buf, &Options::default())?;
    Ok(res)
}

/// Optimize a JPEG by piping it through an external `jpegoptim` process.
///
/// # Errors
/// Fails if `jpegoptim` cannot be spawned or exits with a non-zero status.
/// (Bug fix: previously a failed run's empty/truncated stdout was silently
/// returned and written out as the "optimized" image.)
fn run_jpeg(path: &Path) -> Res<Vec<u8>> {
    let mut buf = Vec::new();
    let mut data = OpenOptions::new().read(true).open(&path)?;
    data.read_to_end(&mut buf)?;
    let mut cmd = Command::new("jpegoptim")
        .arg("--stdin")
        .arg("--stdout")
        .stdin(process::Stdio::piped())
        .stdout(process::Stdio::piped())
        .stderr(process::Stdio::null())
        .spawn()?;
    // NOTE(review): writing all of stdin before draining stdout assumes
    // jpegoptim buffers its input; extremely large files could in principle
    // deadlock on full pipes — confirm if inputs grow.
    cmd.stdin.as_mut().unwrap().write_all(&buf)?;
    let res = cmd.wait_with_output()?;
    if !res.status.success() {
        return Err(Box::new(io::Error::new(
            io::ErrorKind::Other,
            format!("jpegoptim exited with status {}", res.status),
        )));
    }
    Ok(res.stdout)
}

/// Pass-through for file types we don't know how to optimize: the bytes are
/// copied unchanged (but still content-addressed by the caller).
fn run_other(path: &Path) -> Res<Vec<u8>> {
    let mut buf = Vec::new();
    let mut data = OpenOptions::new().read(true).open(&path)?;
    data.read_to_end(&mut buf)?;
    Ok(buf)
}

/// A discovered input file and its detected type.
#[derive(Debug, Clone)]
struct Item {
    path: PathBuf,
    file_type: FileType,
}

/// Optimization strategy, selected from the file extension.
#[derive(Debug, Copy, Clone)]
enum FileType {
    Png,
    Jpeg,
    Other,
}
22.97973
82
0.513672
1c160575decbab91898a6c17a9e2c5e3cfe2c0f0
5,030
// Copyright 2019 First Rust Competition Developers.
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use super::{
    ds::{DriverStation, RobotState},
    notifier::Alarm,
    observe, RobotBase,
};
use std::time;
use wpilib_sys::usage;

/// Implements a specific type of robot program framework, for
/// `start_iterative` and `start_timed`.
///
/// The init methods are called whenever the appropriate mode is entered.
///
/// The periodic functions are called for the appropriate mode on an interval.
pub trait IterativeRobot {
    /// Robot constructor.
    ///
    /// Create your resources here and return an instance of your robot.
    fn new(ds: &DriverStation) -> Self;

    // Mode-entry hooks: each is called once when the corresponding mode begins.
    // The defaults just print a reminder so unimplemented hooks are noticeable.
    fn disabled_init(&mut self) {
        println!("Default disabled_init method... Override me!");
    }
    fn autonomous_init(&mut self) {
        println!("Default autonomous_init method... Override me!");
    }
    fn teleop_init(&mut self) {
        println!("Default teleop_init method... Override me!");
    }
    fn test_init(&mut self) {
        println!("Default test_init method... Override me!");
    }

    /// Periodic code for all modes should go here.
    fn robot_periodic(&mut self) {}
    // Per-mode periodic hooks; the default implementations are no-ops.
    fn disabled_periodic(&mut self) {}
    fn autonomous_periodic(&mut self) {}
    fn teleop_periodic(&mut self) {}
    fn test_periodic(&mut self) {}
}

// One iteration of the robot loop: fire the init hook on a mode transition,
// then the mode-specific periodic hook, then the always-run robot_periodic.
fn loop_func<T: IterativeRobot>(
    robot: &mut T,
    last_mode: Option<RobotState>,
    cur_mode: RobotState,
) {
    // Check for state transitions
    if last_mode != Some(cur_mode) {
        match cur_mode {
            RobotState::Autonomous => robot.autonomous_init(),
            RobotState::Teleop => robot.teleop_init(),
            RobotState::Test => robot.test_init(),
            _ => robot.disabled_init(),
        }
    }

    // Call the appropriate periodic function
    // (the observe::* calls report the current mode to the driver station).
    match cur_mode {
        RobotState::Autonomous => {
            observe::autonomous();
            robot.autonomous_periodic()
        }
        RobotState::Teleop => {
            observe::teleop();
            robot.teleop_periodic()
        }
        RobotState::Test => {
            observe::test();
            robot.test_periodic()
        }
        _ => {
            observe::disabled();
            robot.disabled_periodic()
        }
    }

    robot.robot_periodic()
}

/// Start the main robot loop for an IterativeRobot.
/// The periodic methods are called each time a new packet
/// received from the driver station.
///
/// It is recommended to use `start_timed` instead,
/// which guarantees a more regular period of execution.
pub fn start_iterative<T: IterativeRobot>() -> ! {
    let base = RobotBase::new().expect("Could not initialize HAL");
    let ds = base.make_ds();

    println!("\n********** Robot program starting **********\n");

    let mut robot = T::new(&ds);
    let mut last_mode: Option<RobotState> = None;

    usage::report(
        usage::resource_types::Framework,
        usage::instances::kFramework_Iterative,
    );

    // Tell the DS that the robot is ready to be enabled
    observe::start();

    // Diverges: runs one loop_func iteration per driver-station packet.
    loop {
        ds.wait_for_data();
        let cur_mode = ds.robot_state();
        loop_func(&mut robot, last_mode, cur_mode);
        last_mode = Some(cur_mode);
    }
}

/// Start the main robot loop for an IterativeRobot.
/// The periodic methods are called every 20 milliseconds.
///
/// If you wish to have your main loop run at a different rate,
/// use `start_timed_with_period`.
pub fn start_timed<T: IterativeRobot>() { start_timed_with_period::<T>(time::Duration::from_millis(20)) } /// Start the main robot loop for an IterativeRobot. /// The periodic methods are called on a regular interval specified by `period`. pub fn start_timed_with_period<T: IterativeRobot>(period: time::Duration) { let base = RobotBase::new().expect("Could not initialize HAL"); let ds = base.make_ds(); println!("\n********** Robot program starting **********\n"); let mut robot = T::new(&ds); let mut last_mode: Option<RobotState> = None; let notifier = Alarm::new().expect("Failed to initialize FPGA notifier"); let period = period.as_micros() as u64; usage::report( usage::resource_types::Framework, usage::instances::kFramework_Timed, ); // Tell the DS that the robot is ready to be enabled observe::start(); let mut expiration_time = RobotBase::fpga_time().expect("Failed to read current FPGA time") + period; let _ = notifier.update(expiration_time); while notifier.wait().unwrap() != 0 { expiration_time += period; let _ = notifier.update(expiration_time); let cur_mode = ds.robot_state(); loop_func(&mut robot, last_mode, cur_mode); last_mode = Some(cur_mode); } }
30.858896
83
0.636978
d64a871b9e8f5aa86e1b7be7cb1bd0fb028965c2
10,888
#![allow(non_snake_case, non_upper_case_globals)]
#![allow(non_camel_case_types)]
//! Sigma-delta analog-to-digital converter
//!
//! Used by: stm32f301, stm32f373, stm32f3x8

#[cfg(not(feature = "nosync"))]
pub use crate::stm32f3::peripherals::sdadc::Instance;
pub use crate::stm32f3::peripherals::sdadc::{RegisterBlock, ResetValues};
pub use crate::stm32f3::peripherals::sdadc::{
    CLRISR, CONF0R, CONF1R, CONF2R, CONFCHR1, CONFCHR2, CR1, CR2, ISR, JCHGR, JDATA12R, JDATA13R,
    JDATAR, RDATA12R, RDATA13R, RDATAR,
};

/// Access functions for the SDADC1 peripheral instance
pub mod SDADC1 {
    use super::ResetValues;

    #[cfg(not(feature = "nosync"))]
    use super::Instance;

    // Base address of the SDADC1 register block in the peripheral memory map.
    #[cfg(not(feature = "nosync"))]
    const INSTANCE: Instance = Instance {
        addr: 0x40016000,
        _marker: ::core::marker::PhantomData,
    };

    /// Reset values for each field in SDADC1
    pub const reset: ResetValues = ResetValues {
        CR1: 0x00000000,
        CR2: 0x00000000,
        ISR: 0x00000000,
        CLRISR: 0x00000000,
        JCHGR: 0x00000001,
        CONF0R: 0x00000000,
        CONF1R: 0x00000000,
        CONF2R: 0x00000000,
        CONFCHR1: 0x00000000,
        CONFCHR2: 0x00000000,
        JDATAR: 0x00000000,
        RDATAR: 0x00000000,
        JDATA12R: 0x00000000,
        RDATA12R: 0x00000000,
        JDATA13R: 0x00000000,
        RDATA13R: 0x00000000,
    };

    // Ownership flag backing take()/release()/steal() below.
    #[cfg(not(feature = "nosync"))]
    #[allow(renamed_and_removed_lints)]
    #[allow(private_no_mangle_statics)]
    #[no_mangle]
    static mut SDADC1_TAKEN: bool = false;

    /// Safe access to SDADC1
    ///
    /// This function returns `Some(Instance)` if this instance is not
    /// currently taken, and `None` if it is. This ensures that if you
    /// do get `Some(Instance)`, you are ensured unique access to
    /// the peripheral and there cannot be data races (unless other
    /// code uses `unsafe`, of course). You can then pass the
    /// `Instance` around to other functions as required. When you're
    /// done with it, you can call `release(instance)` to return it.
    ///
    /// `Instance` itself dereferences to a `RegisterBlock`, which
    /// provides access to the peripheral's registers.
    #[cfg(not(feature = "nosync"))]
    #[inline]
    pub fn take() -> Option<Instance> {
        // Interrupt-free critical section makes the check-and-set atomic.
        external_cortex_m::interrupt::free(|_| unsafe {
            if SDADC1_TAKEN {
                None
            } else {
                SDADC1_TAKEN = true;
                Some(INSTANCE)
            }
        })
    }

    /// Release exclusive access to SDADC1
    ///
    /// This function allows you to return an `Instance` so that it
    /// is available to `take()` again. This function will panic if
    /// you return a different `Instance` or if this instance is not
    /// already taken.
    #[cfg(not(feature = "nosync"))]
    #[inline]
    pub fn release(inst: Instance) {
        external_cortex_m::interrupt::free(|_| unsafe {
            if SDADC1_TAKEN && inst.addr == INSTANCE.addr {
                SDADC1_TAKEN = false;
            } else {
                panic!("Released a peripheral which was not taken");
            }
        });
    }

    /// Unsafely steal SDADC1
    ///
    /// This function is similar to take() but forcibly takes the
    /// Instance, marking it as taken regardless of its previous
    /// state.
    #[cfg(not(feature = "nosync"))]
    #[inline]
    pub unsafe fn steal() -> Instance {
        SDADC1_TAKEN = true;
        INSTANCE
    }
}

/// Raw pointer to SDADC1
///
/// Dereferencing this is unsafe because you are not ensured unique
/// access to the peripheral, so you may encounter data races with
/// other users of this peripheral. It is up to you to ensure you
/// will not cause data races.
///
/// This constant is provided for ease of use in unsafe code: you can
/// simply call for example `write_reg!(gpio, GPIOA, ODR, 1);`.
pub const SDADC1: *const RegisterBlock = 0x40016000 as *const _;

/// Access functions for the SDADC2 peripheral instance
pub mod SDADC2 {
    use super::ResetValues;

    #[cfg(not(feature = "nosync"))]
    use super::Instance;

    // Base address of the SDADC2 register block in the peripheral memory map.
    #[cfg(not(feature = "nosync"))]
    const INSTANCE: Instance = Instance {
        addr: 0x40016400,
        _marker: ::core::marker::PhantomData,
    };

    /// Reset values for each field in SDADC2
    pub const reset: ResetValues = ResetValues {
        CR1: 0x00000000,
        CR2: 0x00000000,
        ISR: 0x00000000,
        CLRISR: 0x00000000,
        JCHGR: 0x00000001,
        CONF0R: 0x00000000,
        CONF1R: 0x00000000,
        CONF2R: 0x00000000,
        CONFCHR1: 0x00000000,
        CONFCHR2: 0x00000000,
        JDATAR: 0x00000000,
        RDATAR: 0x00000000,
        JDATA12R: 0x00000000,
        RDATA12R: 0x00000000,
        JDATA13R: 0x00000000,
        RDATA13R: 0x00000000,
    };

    // Ownership flag backing take()/release()/steal() below.
    #[cfg(not(feature = "nosync"))]
    #[allow(renamed_and_removed_lints)]
    #[allow(private_no_mangle_statics)]
    #[no_mangle]
    static mut SDADC2_TAKEN: bool = false;

    /// Safe access to SDADC2
    ///
    /// This function returns `Some(Instance)` if this instance is not
    /// currently taken, and `None` if it is. This ensures that if you
    /// do get `Some(Instance)`, you are ensured unique access to
    /// the peripheral and there cannot be data races (unless other
    /// code uses `unsafe`, of course). You can then pass the
    /// `Instance` around to other functions as required. When you're
    /// done with it, you can call `release(instance)` to return it.
    ///
    /// `Instance` itself dereferences to a `RegisterBlock`, which
    /// provides access to the peripheral's registers.
    #[cfg(not(feature = "nosync"))]
    #[inline]
    pub fn take() -> Option<Instance> {
        // Interrupt-free critical section makes the check-and-set atomic.
        external_cortex_m::interrupt::free(|_| unsafe {
            if SDADC2_TAKEN {
                None
            } else {
                SDADC2_TAKEN = true;
                Some(INSTANCE)
            }
        })
    }

    /// Release exclusive access to SDADC2
    ///
    /// This function allows you to return an `Instance` so that it
    /// is available to `take()` again. This function will panic if
    /// you return a different `Instance` or if this instance is not
    /// already taken.
    #[cfg(not(feature = "nosync"))]
    #[inline]
    pub fn release(inst: Instance) {
        external_cortex_m::interrupt::free(|_| unsafe {
            if SDADC2_TAKEN && inst.addr == INSTANCE.addr {
                SDADC2_TAKEN = false;
            } else {
                panic!("Released a peripheral which was not taken");
            }
        });
    }

    /// Unsafely steal SDADC2
    ///
    /// This function is similar to take() but forcibly takes the
    /// Instance, marking it as taken regardless of its previous
    /// state.
    #[cfg(not(feature = "nosync"))]
    #[inline]
    pub unsafe fn steal() -> Instance {
        SDADC2_TAKEN = true;
        INSTANCE
    }
}

/// Raw pointer to SDADC2
///
/// Dereferencing this is unsafe because you are not ensured unique
/// access to the peripheral, so you may encounter data races with
/// other users of this peripheral. It is up to you to ensure you
/// will not cause data races.
///
/// This constant is provided for ease of use in unsafe code: you can
/// simply call for example `write_reg!(gpio, GPIOA, ODR, 1);`.
pub const SDADC2: *const RegisterBlock = 0x40016400 as *const _;

/// Access functions for the SDADC3 peripheral instance
pub mod SDADC3 {
    use super::ResetValues;

    #[cfg(not(feature = "nosync"))]
    use super::Instance;

    // Base address of the SDADC3 register block in the peripheral memory map.
    #[cfg(not(feature = "nosync"))]
    const INSTANCE: Instance = Instance {
        addr: 0x40016800,
        _marker: ::core::marker::PhantomData,
    };

    /// Reset values for each field in SDADC3
    pub const reset: ResetValues = ResetValues {
        CR1: 0x00000000,
        CR2: 0x00000000,
        ISR: 0x00000000,
        CLRISR: 0x00000000,
        JCHGR: 0x00000001,
        CONF0R: 0x00000000,
        CONF1R: 0x00000000,
        CONF2R: 0x00000000,
        CONFCHR1: 0x00000000,
        CONFCHR2: 0x00000000,
        JDATAR: 0x00000000,
        RDATAR: 0x00000000,
        JDATA12R: 0x00000000,
        RDATA12R: 0x00000000,
        JDATA13R: 0x00000000,
        RDATA13R: 0x00000000,
    };

    // Ownership flag backing take()/release()/steal() below.
    #[cfg(not(feature = "nosync"))]
    #[allow(renamed_and_removed_lints)]
    #[allow(private_no_mangle_statics)]
    #[no_mangle]
    static mut SDADC3_TAKEN: bool = false;

    /// Safe access to SDADC3
    ///
    /// This function returns `Some(Instance)` if this instance is not
    /// currently taken, and `None` if it is. This ensures that if you
    /// do get `Some(Instance)`, you are ensured unique access to
    /// the peripheral and there cannot be data races (unless other
    /// code uses `unsafe`, of course). You can then pass the
    /// `Instance` around to other functions as required. When you're
    /// done with it, you can call `release(instance)` to return it.
    ///
    /// `Instance` itself dereferences to a `RegisterBlock`, which
    /// provides access to the peripheral's registers.
    #[cfg(not(feature = "nosync"))]
    #[inline]
    pub fn take() -> Option<Instance> {
        // Interrupt-free critical section makes the check-and-set atomic.
        external_cortex_m::interrupt::free(|_| unsafe {
            if SDADC3_TAKEN {
                None
            } else {
                SDADC3_TAKEN = true;
                Some(INSTANCE)
            }
        })
    }

    /// Release exclusive access to SDADC3
    ///
    /// This function allows you to return an `Instance` so that it
    /// is available to `take()` again. This function will panic if
    /// you return a different `Instance` or if this instance is not
    /// already taken.
    #[cfg(not(feature = "nosync"))]
    #[inline]
    pub fn release(inst: Instance) {
        external_cortex_m::interrupt::free(|_| unsafe {
            if SDADC3_TAKEN && inst.addr == INSTANCE.addr {
                SDADC3_TAKEN = false;
            } else {
                panic!("Released a peripheral which was not taken");
            }
        });
    }

    /// Unsafely steal SDADC3
    ///
    /// This function is similar to take() but forcibly takes the
    /// Instance, marking it as taken regardless of its previous
    /// state.
    #[cfg(not(feature = "nosync"))]
    #[inline]
    pub unsafe fn steal() -> Instance {
        SDADC3_TAKEN = true;
        INSTANCE
    }
}

/// Raw pointer to SDADC3
///
/// Dereferencing this is unsafe because you are not ensured unique
/// access to the peripheral, so you may encounter data races with
/// other users of this peripheral. It is up to you to ensure you
/// will not cause data races.
///
/// This constant is provided for ease of use in unsafe code: you can
/// simply call for example `write_reg!(gpio, GPIOA, ODR, 1);`.
pub const SDADC3: *const RegisterBlock = 0x40016800 as *const _;
32.795181
97
0.612785
87053aa09b4d64f1da1e65685aefd19b9c821ae6
1,528
//! # Wagyu CLI //! //! A command-line tool to generate cryptocurrency wallets. use wagyu::cli::bitcoin::BitcoinCLI; use wagyu::cli::ethereum::EthereumCLI; use wagyu::cli::monero::MoneroCLI; use wagyu::cli::zcash::ZcashCLI; use wagyu::cli::tron::TronCLI; use wagyu::cli::{CLIError, CLI}; use clap::{App, AppSettings}; #[cfg_attr(tarpaulin, skip)] fn main() -> Result<(), CLIError> { let arguments = App::new("wagyu") .version("v0.6.3") .about("Generate a wallet for Bitcoin, Ethereum, Monero, and Zcash") .author("Aleo <[email protected]>") .settings(&[ AppSettings::ColoredHelp, AppSettings::DisableHelpSubcommand, AppSettings::DisableVersion, AppSettings::SubcommandRequiredElseHelp, ]) .subcommands(vec![ BitcoinCLI::new(), EthereumCLI::new(), MoneroCLI::new(), ZcashCLI::new(), TronCLI::new(), ]) .set_term_width(0) .get_matches(); match arguments.subcommand() { ("bitcoin", Some(arguments)) => BitcoinCLI::print(BitcoinCLI::parse(arguments)?), ("ethereum", Some(arguments)) => EthereumCLI::print(EthereumCLI::parse(arguments)?), ("monero", Some(arguments)) => MoneroCLI::print(MoneroCLI::parse(arguments)?), ("zcash", Some(arguments)) => ZcashCLI::print(ZcashCLI::parse(arguments)?), ("tron", Some(arguments)) => TronCLI::print(TronCLI::parse(arguments)?), _ => unreachable!(), } }
33.955556
92
0.600131
7a8b97f5b36b1ea616f007d345de9ff80ada45fe
4,140
//! `StructOpt` data use std::path::PathBuf; use structopt::clap::AppSettings; use structopt::clap::Shell; use structopt::StructOpt; use crate::configuration::ConfigCommand; use crate::the_way::filter::Filters; #[derive(Debug, StructOpt)] #[structopt( name = "the-way", rename_all = "kebab-case", global_settings = & [AppSettings::DeriveDisplayOrder] )] /// Record, retrieve, search, and categorize code snippets pub enum TheWayCLI { /// Add a new code snippet New, /// Add a new shell snippet Cmd { /// shell snippet code code: Option<String>, }, /// Fuzzy search to find a snippet and copy, edit or delete it Search { #[structopt(flatten)] filters: Filters, // Print to stdout instead of copying (with Enter) #[structopt(long)] stdout: bool, // Use exact search instead of fuzzy #[structopt(long, short)] exact: bool, }, /// Sync snippets to a Gist /// /// Controlled by $THE_WAY_GITHUB_TOKEN env variable. /// Set this to an access token with the "gist" scope obtained from https://github.com/settings/tokens/new Sync, /// Lists (optionally filtered) snippets List { #[structopt(flatten)] filters: Filters, }, /// Imports code snippets from JSON. /// /// Looks for description, language, and code fields. Import { /// filename, reads from stdin if not given #[structopt(parse(from_os_str))] file: Option<PathBuf>, #[structopt(long, short)] /// URL to a Gist, if provided will import snippets from given Gist /// /// Multiple files will be converted to separate snippets. /// Snippet description is created based on Gist description and file name with the format /// "<gist_description> - <gist_id> - <file_name>". /// Each snippet will be tagged with "gist" and its Gist ID. /// Works for both secret and public gists. gist_url: Option<String>, }, /// Saves (optionally filtered) snippets to JSON. 
Export { /// filename, writes to stdout if not given #[structopt(parse(from_os_str))] file: Option<PathBuf>, #[structopt(flatten)] filters: Filters, }, /// Clears all data Clear { /// Don't ask for confirmation #[structopt(long, short)] force: bool, }, /// Generate shell completions Complete { #[structopt(possible_values = & Shell::variants())] shell: Shell, }, /// Manage syntax highlighting themes Themes { #[structopt(subcommand)] cmd: ThemeCommand, }, /// Manage the-way data locations. /// /// Controlled by $THE_WAY_CONFIG env variable, /// use this to have independent snippet sources for different projects. #[structopt(alias = "configure")] Config { #[structopt(subcommand)] cmd: ConfigCommand, }, /// Change snippet Edit { /// Index of snippet to change index: usize, }, /// Delete snippet #[structopt(alias = "delete")] Del { /// Index of snippet to delete index: usize, /// Don't ask for confirmation #[structopt(long, short)] force: bool, }, /// Copy snippet to clipboard #[structopt(alias = "copy")] Cp { /// Index of snippet to copy index: usize, /// Print to stdout instead of copying #[structopt(long)] stdout: bool, }, /// View snippet View { /// Index of snippet to show index: usize, }, } #[derive(StructOpt, Debug)] pub enum ThemeCommand { /// Set your preferred syntax highlighting theme Set { theme: Option<String> }, /// Add a theme from a Sublime Text ".tmTheme" file. Add { #[structopt(parse(from_os_str))] file: PathBuf, }, /// Add highlight support for a language using a ".sublime-syntax" file. Language { #[structopt(parse(from_os_str))] file: PathBuf, }, /// Prints the current theme name Get, }
28.356164
110
0.592271
3a549bb98ac066edac92949266d8cb284fb7f337
272
extern crate cmake;

use cmake::Config;
use std::env;

/// Build script: compile the native `ui` project with CMake and tell Cargo
/// where to find the resulting library. Skipped entirely for Android targets.
fn main() {
    let target = env::var("TARGET").unwrap();
    // Android builds do not use the CMake-built `ui` library.
    if target.contains("android") {
        return;
    }
    let build_dir = Config::new("ui").build();
    println!("cargo:rustc-link-search=native={}", build_dir.display());
}
22.666667
69
0.588235
2ff7522e6c45f946508d5493c19e1ad87bfa39b9
2,598
use std::env; use std::fs::{File}; use std::path::{Path}; use std::io::Write; use csv::{ReaderBuilder, Trim}; use rstar::{RTree}; pub mod geomag_record; use geomag_record::GeomagRecord; /// At build time, process geomagnetic grid data generated by /// [WMM2015 Explorer](https://github.com/tstellanova/wmm2015_explorer/) /// and stored in `/data/geomag.csv` /// into an RTree that can be used to quickly lookup an /// estimate of the local magnetic field at a particular geolocation /// /// - See [gufm1]( https://pdfs.semanticscholar.org/0175/7d8d373355c0a2ae5c189ea2c95ca7bc0a25.pdf) /// - See [NOAA calculators](https://www.ngdc.noaa.gov/geomag/calculators/magcalc.shtml) /// impl GeomagRecord { pub fn new_from_csv(input: &csv::StringRecord) -> Self { //csv consists of: lat_deg, lon_deg, mag_x, mag_y, mag_z GeomagRecord { lat_deg: input.get(0).unwrap().parse().unwrap(), lon_deg: input.get(1).unwrap().parse().unwrap(), mag_x: input.get(2).unwrap().parse().unwrap(), mag_y: input.get(3).unwrap().parse().unwrap(), mag_z: input.get(4).unwrap().parse().unwrap(), } } } fn do_parse_geomag(fin: &mut File, fout: &mut File) { let mut rdr = ReaderBuilder::new() .comment(Some(b'#')) .trim(Trim::All) .from_reader(fin); let mut rec_list = vec!(); while let Some(result) = rdr.records().next() { let record = result.unwrap(); let geo_rec = GeomagRecord::new_from_csv(&record); rec_list.push(geo_rec); } println!("rec list len: {}", rec_list.len()); let rtree = RTree::bulk_load(rec_list); println!("rtree size: {}", rtree.size()); let serialized = serde_json::to_string_pretty(&rtree).unwrap(); //println!("serialized = {}", serialized); fout.write_all(serialized.as_bytes()).unwrap(); } pub fn main() { let out_dir = env::var("OUT_DIR").unwrap(); let dest_path = Path::new(&out_dir).join("earth_mag.json"); println!("dest_path: {:?}", dest_path); let mut fout = File::create(&dest_path).unwrap(); let src_dir = env::current_dir().unwrap(); let data_file_path = src_dir.join("./data/geomag.csv"); 
println!("data_file_path: {:?}", data_file_path); let mut fin: File = File::open(data_file_path).unwrap(); do_parse_geomag(&mut fin, &mut fout); //TODO verify this works if we add new files OR updated files println!("cargo:rerun-if-changed=data/"); } #[cfg(test)] mod tests { #[test] fn test_mag_from_declination() { assert_eq!(0,1); } }
29.862069
98
0.633564
ed521805d236fb71f6619dd61fa2e0e715e097eb
11,758
use super::*; use crate::index::IdxSize; use crate::trusted_len::TrustedLen; // used by agg_quantile #[allow(clippy::too_many_arguments)] pub fn rolling_quantile_by_iter<T, O>( values: &[T], bitmap: &Bitmap, quantile: f64, interpolation: QuantileInterpolOptions, offsets: O, ) -> ArrayRef where O: Iterator<Item = (IdxSize, IdxSize)> + TrustedLen, T: std::iter::Sum<T> + NativeType + Copy + std::cmp::PartialOrd + num::ToPrimitive + NumCast + Add<Output = T> + Sub<Output = T> + Div<Output = T> + Mul<Output = T> + IsFloat + AddAssign + Zero, { if values.is_empty() { let out: Vec<T> = vec![]; return Arc::new(PrimitiveArray::from_data( T::PRIMITIVE.into(), out.into(), None, )); } let len = values.len(); // Safety // we are in bounds let mut sorted_window = unsafe { SortedBufNulls::new(values, bitmap, 0, 1) }; let mut validity = MutableBitmap::with_capacity(len); validity.extend_constant(len, true); let out = offsets .enumerate() .map(|(idx, (start, len))| { let end = start + len; if start == end { validity.set(idx, false); T::default() } else { // safety // we are in bounds unsafe { sorted_window.update(start as usize, end as usize) }; let null_count = sorted_window.null_count; let window = sorted_window.window(); match compute_quantile(window, null_count, quantile, interpolation, 1) { Some(val) => val, None => { validity.set(idx, false); T::default() } } } }) .collect_trusted::<Vec<T>>(); Arc::new(PrimitiveArray::from_data( T::PRIMITIVE.into(), out.into(), Some(validity.into()), )) } #[allow(clippy::too_many_arguments)] fn rolling_apply_quantile<T, Fo, Fa>( values: &[T], bitmap: &Bitmap, quantile: f64, interpolation: QuantileInterpolOptions, window_size: usize, min_periods: usize, det_offsets_fn: Fo, aggregator: Fa, ) -> ArrayRef where Fo: Fn(Idx, WindowSize, Len) -> (Start, End) + Copy, // &[Option<T>] -> window values // usize -> null_count // f764 -> quantile // QuantileInterpolOptions -> Interpolation option // usize -> min_periods Fa: Fn(&[Option<T>], usize, f64, 
QuantileInterpolOptions, usize) -> Option<T>, T: Default + NativeType + IsFloat + PartialOrd, { let len = values.len(); let (start, end) = det_offsets_fn(0, window_size, len); // Safety // we are in bounds let mut sorted_window = unsafe { SortedBufNulls::new(values, bitmap, start, end) }; let mut validity = match create_validity(min_periods, len as usize, window_size, det_offsets_fn) { Some(v) => v, None => { let mut validity = MutableBitmap::with_capacity(len); validity.extend_constant(len, true); validity } }; let out = (0..len) .map(|idx| { let (start, end) = det_offsets_fn(idx, window_size, len); // safety // we are in bounds unsafe { sorted_window.update(start, end) }; let null_count = sorted_window.null_count; let window = sorted_window.window(); match aggregator(window, null_count, quantile, interpolation, min_periods) { Some(val) => val, None => { validity.set(idx, false); T::default() } } }) .collect_trusted::<Vec<T>>(); Arc::new(PrimitiveArray::from_data( T::PRIMITIVE.into(), out.into(), Some(validity.into()), )) } fn compute_quantile<T>( values: &[Option<T>], null_count: usize, quantile: f64, interpolation: QuantileInterpolOptions, min_periods: usize, ) -> Option<T> where T: NativeType + std::iter::Sum<T> + Zero + AddAssign + std::cmp::PartialOrd + num::ToPrimitive + NumCast + Default + Add<Output = T> + Sub<Output = T> + Div<Output = T> + Mul<Output = T> + IsFloat, { if (values.len() - null_count) < min_periods { return None; } // slice off nulls let values = &values[null_count..]; let length = values.len(); let mut idx = match interpolation { QuantileInterpolOptions::Nearest => ((length as f64) * quantile) as usize, QuantileInterpolOptions::Lower | QuantileInterpolOptions::Midpoint | QuantileInterpolOptions::Linear => ((length as f64 - 1.0) * quantile).floor() as usize, QuantileInterpolOptions::Higher => ((length as f64 - 1.0) * quantile).ceil() as usize, }; idx = std::cmp::min(idx, length - 1); // we can unwrap because we sliced of the nulls match 
interpolation { QuantileInterpolOptions::Midpoint => { let top_idx = ((length as f64 - 1.0) * quantile).ceil() as usize; Some( (values[idx].unwrap() + values[top_idx].unwrap()) / T::from::<f64>(2.0f64).unwrap(), ) } QuantileInterpolOptions::Linear => { let float_idx = (length as f64 - 1.0) * quantile; let top_idx = f64::ceil(float_idx) as usize; if top_idx == idx { Some(values[idx].unwrap()) } else { let proportion = T::from(float_idx - idx as f64).unwrap(); Some( proportion * (values[top_idx].unwrap() - values[idx].unwrap()) + values[idx].unwrap(), ) } } _ => Some(values[idx].unwrap()), } } pub fn rolling_median<T>( arr: &PrimitiveArray<T>, window_size: usize, min_periods: usize, center: bool, weights: Option<&[f64]>, ) -> ArrayRef where T: NativeType + std::iter::Sum + Zero + AddAssign + Copy + std::cmp::PartialOrd + num::ToPrimitive + NumCast + Default + Add<Output = T> + Sub<Output = T> + Div<Output = T> + Mul<Output = T> + IsFloat, { rolling_quantile( arr, 0.5, QuantileInterpolOptions::Linear, window_size, min_periods, center, weights, ) } pub fn rolling_quantile<T>( arr: &PrimitiveArray<T>, quantile: f64, interpolation: QuantileInterpolOptions, window_size: usize, min_periods: usize, center: bool, weights: Option<&[f64]>, ) -> ArrayRef where T: NativeType + std::iter::Sum + Zero + AddAssign + Copy + std::cmp::PartialOrd + num::ToPrimitive + NumCast + Default + Add<Output = T> + Sub<Output = T> + Div<Output = T> + Mul<Output = T> + IsFloat, { if weights.is_some() { panic!("weights not yet supported on array with null values") } if center { rolling_apply_quantile( arr.values().as_slice(), arr.validity().as_ref().unwrap(), quantile, interpolation, window_size, min_periods, det_offsets_center, compute_quantile, ) } else { rolling_apply_quantile( arr.values().as_slice(), arr.validity().as_ref().unwrap(), quantile, interpolation, window_size, min_periods, det_offsets, compute_quantile, ) } } #[cfg(test)] mod test { use super::*; use 
crate::kernels::rolling::nulls::{rolling_max, rolling_min}; use arrow::buffer::Buffer; use arrow::datatypes::DataType; #[test] fn test_rolling_median_nulls() { let buf = Buffer::from(vec![1.0, 2.0, 3.0, 4.0]); let arr = &PrimitiveArray::from_data( DataType::Float64, buf, Some(Bitmap::from(&[true, false, true, true])), ); let out = rolling_quantile(arr, 0.5, QuantileInterpolOptions::Linear, 2, 2, false, None); let out = out.as_any().downcast_ref::<PrimitiveArray<f64>>().unwrap(); let out = out.into_iter().map(|v| v.copied()).collect::<Vec<_>>(); assert_eq!(out, &[None, None, None, Some(3.5)]); let out = rolling_quantile(arr, 0.5, QuantileInterpolOptions::Linear, 2, 1, false, None); let out = out.as_any().downcast_ref::<PrimitiveArray<f64>>().unwrap(); let out = out.into_iter().map(|v| v.copied()).collect::<Vec<_>>(); assert_eq!(out, &[Some(1.0), Some(1.0), Some(3.0), Some(3.5)]); let out = rolling_quantile(arr, 0.5, QuantileInterpolOptions::Linear, 4, 1, false, None); let out = out.as_any().downcast_ref::<PrimitiveArray<f64>>().unwrap(); let out = out.into_iter().map(|v| v.copied()).collect::<Vec<_>>(); assert_eq!(out, &[Some(1.0), Some(1.0), Some(2.0), Some(3.0)]); let out = rolling_quantile(arr, 0.5, QuantileInterpolOptions::Linear, 4, 1, true, None); let out = out.as_any().downcast_ref::<PrimitiveArray<f64>>().unwrap(); let out = out.into_iter().map(|v| v.copied()).collect::<Vec<_>>(); assert_eq!(out, &[Some(1.0), Some(2.0), Some(3.0), Some(3.5)]); let out = rolling_quantile(arr, 0.5, QuantileInterpolOptions::Linear, 4, 4, true, None); let out = out.as_any().downcast_ref::<PrimitiveArray<f64>>().unwrap(); let out = out.into_iter().map(|v| v.copied()).collect::<Vec<_>>(); assert_eq!(out, &[None, None, None, None]); } #[test] fn test_rolling_quantile_nulls_limits() { // compare quantiles to corresponding min/max/median values let buf = Buffer::<f64>::from(vec![1.0, 2.0, 3.0, 4.0, 5.0]); let values = &PrimitiveArray::from_data( DataType::Float64, buf, 
Some(Bitmap::from(&[true, false, false, true, true])), ); let interpol_options = vec![ QuantileInterpolOptions::Lower, QuantileInterpolOptions::Higher, QuantileInterpolOptions::Nearest, QuantileInterpolOptions::Midpoint, QuantileInterpolOptions::Linear, ]; for interpol in interpol_options { let out1 = rolling_min(values, 2, 1, false, None); let out1 = out1.as_any().downcast_ref::<PrimitiveArray<f64>>().unwrap(); let out1 = out1.into_iter().map(|v| v.copied()).collect::<Vec<_>>(); let out2 = rolling_quantile(values, 0.0, interpol, 2, 1, false, None); let out2 = out2.as_any().downcast_ref::<PrimitiveArray<f64>>().unwrap(); let out2 = out2.into_iter().map(|v| v.copied()).collect::<Vec<_>>(); assert_eq!(out1, out2); let out1 = rolling_max(values, 2, 1, false, None); let out1 = out1.as_any().downcast_ref::<PrimitiveArray<f64>>().unwrap(); let out1 = out1.into_iter().map(|v| v.copied()).collect::<Vec<_>>(); let out2 = rolling_quantile(values, 1.0, interpol, 2, 1, false, None); let out2 = out2.as_any().downcast_ref::<PrimitiveArray<f64>>().unwrap(); let out2 = out2.into_iter().map(|v| v.copied()).collect::<Vec<_>>(); assert_eq!(out1, out2); } } }
31.438503
100
0.539718
23625d8f8feef2fcaba72514d2021faa2b9e1200
5,663
// Generated from definition io.k8s.api.apps.v1.DeploymentList /// DeploymentList is a list of Deployments. #[derive(Clone, Debug, Default, PartialEq)] pub struct DeploymentList { /// Items is the list of Deployments. pub items: Vec<crate::v1_15::api::apps::v1::Deployment>, /// Standard list metadata. pub metadata: Option<crate::v1_15::apimachinery::pkg::apis::meta::v1::ListMeta>, } impl crate::Resource for DeploymentList { fn api_version() -> &'static str { "apps/v1" } fn group() -> &'static str { "apps" } fn kind() -> &'static str { "DeploymentList" } fn version() -> &'static str { "v1" } } impl crate::Metadata for DeploymentList { type Ty = crate::v1_15::apimachinery::pkg::apis::meta::v1::ListMeta; fn metadata(&self) -> Option<&<Self as crate::Metadata>::Ty> { self.metadata.as_ref() } } impl<'de> serde::Deserialize<'de> for DeploymentList { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de> { #[allow(non_camel_case_types)] enum Field { Key_api_version, Key_kind, Key_items, Key_metadata, Other, } impl<'de> serde::Deserialize<'de> for Field { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de> { struct Visitor; impl<'de> serde::de::Visitor<'de> for Visitor { type Value = Field; fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "field identifier") } fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: serde::de::Error { Ok(match v { "apiVersion" => Field::Key_api_version, "kind" => Field::Key_kind, "items" => Field::Key_items, "metadata" => Field::Key_metadata, _ => Field::Other, }) } } deserializer.deserialize_identifier(Visitor) } } struct Visitor; impl<'de> serde::de::Visitor<'de> for Visitor { type Value = DeploymentList; fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "struct DeploymentList") } fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error> where A: 
serde::de::MapAccess<'de> { let mut value_items: Option<Vec<crate::v1_15::api::apps::v1::Deployment>> = None; let mut value_metadata: Option<crate::v1_15::apimachinery::pkg::apis::meta::v1::ListMeta> = None; while let Some(key) = serde::de::MapAccess::next_key::<Field>(&mut map)? { match key { Field::Key_api_version => { let value_api_version: String = serde::de::MapAccess::next_value(&mut map)?; if value_api_version != <Self::Value as crate::Resource>::api_version() { return Err(serde::de::Error::invalid_value(serde::de::Unexpected::Str(&value_api_version), &<Self::Value as crate::Resource>::api_version())); } }, Field::Key_kind => { let value_kind: String = serde::de::MapAccess::next_value(&mut map)?; if value_kind != <Self::Value as crate::Resource>::kind() { return Err(serde::de::Error::invalid_value(serde::de::Unexpected::Str(&value_kind), &<Self::Value as crate::Resource>::kind())); } }, Field::Key_items => value_items = Some(serde::de::MapAccess::next_value(&mut map)?), Field::Key_metadata => value_metadata = serde::de::MapAccess::next_value(&mut map)?, Field::Other => { let _: serde::de::IgnoredAny = serde::de::MapAccess::next_value(&mut map)?; }, } } Ok(DeploymentList { items: value_items.ok_or_else(|| serde::de::Error::missing_field("items"))?, metadata: value_metadata, }) } } deserializer.deserialize_struct( "DeploymentList", &[ "apiVersion", "kind", "items", "metadata", ], Visitor, ) } } impl serde::Serialize for DeploymentList { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer { let mut state = serializer.serialize_struct( "DeploymentList", 3 + self.metadata.as_ref().map_or(0, |_| 1), )?; serde::ser::SerializeStruct::serialize_field(&mut state, "apiVersion", <Self as crate::Resource>::api_version())?; serde::ser::SerializeStruct::serialize_field(&mut state, "kind", <Self as crate::Resource>::kind())?; serde::ser::SerializeStruct::serialize_field(&mut state, "items", &self.items)?; if let Some(value) = 
&self.metadata { serde::ser::SerializeStruct::serialize_field(&mut state, "metadata", value)?; } serde::ser::SerializeStruct::end(state) } }
39.055172
174
0.509977
48b1ad68aa4a5cf544f91ec636baa196ea805a6b
4,046
use nu_errors::{CoerceInto, ShellError}; use nu_plugin::{serve_plugin, Plugin}; use nu_protocol::{ CallInfo, Primitive, ReturnSuccess, ReturnValue, Signature, UntaggedValue, Value, }; use nu_source::TaggedItem; #[derive(Debug)] struct Average { total: Option<Value>, count: u64, } impl Average { fn new() -> Average { Average { total: None, count: 0, } } fn average(&mut self, value: Value) -> Result<(), ShellError> { match &value.value { UntaggedValue::Primitive(Primitive::Nothing) => Ok(()), UntaggedValue::Primitive(Primitive::Int(i)) => match &self.total { Some(Value { value: UntaggedValue::Primitive(Primitive::Int(j)), tag, }) => { self.total = Some(UntaggedValue::int(i + j).into_value(tag)); self.count += 1; Ok(()) } None => { self.total = Some(value.clone()); self.count += 1; Ok(()) } _ => Err(ShellError::labeled_error( "Could calculate average of non-integer or unrelated types", "source", value.tag, )), }, UntaggedValue::Primitive(Primitive::Bytes(b)) => match &self.total { Some(Value { value: UntaggedValue::Primitive(Primitive::Bytes(j)), tag, }) => { self.total = Some(UntaggedValue::bytes(b + j).into_value(tag)); self.count += 1; Ok(()) } None => { self.total = Some(value); self.count += 1; Ok(()) } _ => Err(ShellError::labeled_error( "Could calculate average of non-integer or unrelated types", "source", value.tag, )), }, x => Err(ShellError::labeled_error( format!("Unrecognized type in stream: {:?}", x), "source", value.tag, )), } } } impl Plugin for Average { fn config(&mut self) -> Result<Signature, ShellError> { Ok(Signature::build("average") .desc("Compute the average of a column of numerical values.") .filter()) } fn begin_filter(&mut self, _: CallInfo) -> Result<Vec<ReturnValue>, ShellError> { Ok(vec![]) } fn filter(&mut self, input: Value) -> Result<Vec<ReturnValue>, ShellError> { self.average(input)?; Ok(vec![]) } fn end_filter(&mut self) -> Result<Vec<ReturnValue>, ShellError> { match self.total { None => Ok(vec![]), Some(ref inner) => match 
&inner.value { UntaggedValue::Primitive(Primitive::Int(i)) => { let total: u64 = i .tagged(inner.tag.clone()) .coerce_into("converting for average")?; let avg = total as f64 / self.count as f64; let primitive_value: UntaggedValue = Primitive::from(avg).into(); let value = primitive_value.into_value(inner.tag.clone()); Ok(vec![ReturnSuccess::value(value)]) } UntaggedValue::Primitive(Primitive::Bytes(bytes)) => { let avg = *bytes as f64 / self.count as f64; let primitive_value: UntaggedValue = Primitive::from(avg).into(); let tagged_value = primitive_value.into_value(inner.tag.clone()); Ok(vec![ReturnSuccess::value(tagged_value)]) } _ => Ok(vec![]), }, } } } fn main() { serve_plugin(&mut Average::new()); }
34.288136
85
0.463668
6afd0c957c5c7adbc1aee79ec1d4debe4a53edbd
15,840
extern crate petgraph; use std::fs::File; use std::io::prelude::*; use petgraph::graph::{edge_index, node_index}; #[cfg(feature = "matrix_graph")] use petgraph::matrix_graph::MatrixGraph; use petgraph::prelude::*; use petgraph::EdgeType; use petgraph::algo::{is_isomorphic, is_isomorphic_matching, is_isomorphic_subgraph}; /// Petersen A and B are isomorphic /// /// http://www.dharwadker.org/tevet/isomorphism/ const PETERSEN_A: &str = " 0 1 0 0 1 0 1 0 0 0 1 0 1 0 0 0 0 1 0 0 0 1 0 1 0 0 0 0 1 0 0 0 1 0 1 0 0 0 0 1 1 0 0 1 0 1 0 0 0 0 0 0 0 0 1 0 0 1 1 0 1 0 0 0 0 0 0 0 1 1 0 1 0 0 0 1 0 0 0 1 0 0 1 0 0 1 1 0 0 0 0 0 0 1 0 0 1 1 0 0 "; const PETERSEN_B: &str = " 0 0 0 1 0 1 0 0 0 1 0 0 0 1 1 0 1 0 0 0 0 0 0 0 0 0 1 1 0 1 1 1 0 0 0 0 0 1 0 0 0 1 0 0 0 0 0 0 1 1 1 0 0 0 0 0 1 0 1 0 0 1 1 0 0 1 0 0 0 0 0 0 1 1 0 0 0 0 1 0 0 0 0 0 1 1 0 1 0 0 1 0 1 0 1 0 0 0 0 0 "; /// An almost full set, isomorphic const FULL_A: &str = " 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 "; const FULL_B: &str = " 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 "; /// Praust A and B are not isomorphic const PRAUST_A: &str = " 0 1 1 1 1 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 1 0 1 1 0 1 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 1 0 1 0 0 1 0 0 0 1 0 0 0 0 0 0 0 0 0 1 1 1 0 0 0 0 1 0 0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 1 1 1 0 0 0 0 1 0 0 0 0 0 0 0 0 1 0 0 1 0 1 1 0 0 0 0 0 1 0 0 0 0 0 0 0 0 1 0 1 1 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 1 1 1 1 0 0 0 0 0 0 0 0 1 0 0 0 0 1 0 0 0 0 0 0 0 0 1 1 1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 0 0 1 0 1 1 0 0 0 0 0 1 0 0 0 0 1 0 0 0 0 0 1 1 0 1 0 0 0 0 0 0 1 0 0 0 0 1 0 0 0 0 1 1 1 0 0 0 0 0 0 0 0 1 0 0 0 0 1 0 0 0 0 0 0 0 0 1 1 1 0 1 0 0 0 0 0 0 0 1 0 0 0 0 0 0 1 0 1 1 1 0 0 0 0 0 0 0 0 0 1 0 0 
0 0 0 1 1 0 1 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 1 1 1 0 0 0 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 1 0 0 0 1 1 1 0 0 0 0 0 0 0 0 0 1 0 0 1 0 0 0 1 0 1 1 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 1 1 1 0 1 0 0 0 0 0 0 0 0 0 0 0 1 0 0 1 0 1 1 1 0 "; const PRAUST_B: &str = " 0 1 1 1 1 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 1 0 1 1 0 1 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 1 0 1 0 0 1 0 0 0 1 0 0 0 0 0 0 0 0 0 1 1 1 0 0 0 0 1 0 0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 1 1 1 0 0 0 0 1 0 0 0 0 0 0 0 0 1 0 0 1 0 1 1 0 0 0 0 0 0 0 0 0 0 0 1 0 0 1 0 1 1 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 1 1 1 1 0 0 0 0 0 0 0 0 0 0 1 0 0 1 0 0 0 0 0 0 0 0 1 1 1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 0 0 1 0 1 1 0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 1 1 0 1 0 0 0 0 0 0 1 0 0 0 0 1 0 0 0 0 1 1 1 0 0 0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 1 1 0 0 1 0 1 0 0 0 0 0 0 0 0 0 1 0 0 1 0 0 1 1 0 1 0 0 0 0 0 0 0 1 0 0 0 0 0 1 0 0 1 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 1 0 1 1 0 1 0 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 1 0 1 0 1 1 0 0 0 0 0 0 0 0 1 0 0 0 0 1 0 1 0 1 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 0 1 0 1 1 0 0 1 0 0 0 0 0 1 0 0 0 0 0 0 1 0 1 0 0 1 1 0 "; const G1U: &str = " 0 1 1 0 1 1 0 1 0 0 1 1 0 0 0 0 0 0 0 0 1 0 0 0 0 "; const G2U: &str = " 0 1 0 1 0 1 0 0 1 1 0 0 0 0 0 1 1 0 0 0 0 1 0 0 0 "; const G4U: &str = " 0 1 1 0 1 1 0 0 1 0 1 0 0 0 0 0 1 0 0 0 1 0 0 0 0 "; const G1D: &str = " 0 1 1 0 1 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 "; const G4D: &str = " 0 1 1 0 1 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 "; // G8 1,2 are not iso const G8_1: &str = " 0 1 1 0 0 1 1 1 1 0 1 0 1 0 1 1 1 1 0 1 0 0 1 1 0 0 1 0 1 1 1 1 0 1 0 1 0 1 1 1 1 0 0 1 1 0 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 0 "; const G8_2: &str = " 0 1 0 1 0 1 1 1 1 0 1 0 1 0 1 1 0 1 0 1 0 1 1 1 1 0 1 0 1 0 1 1 0 1 0 1 0 1 1 1 1 0 1 0 1 0 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 0 "; // G3 1,2 are not iso const G3_1: &str = " 0 1 0 1 0 1 0 1 0 "; const G3_2: &str = " 0 1 1 1 0 1 1 1 0 "; // Non-isomorphic due to selfloop difference const S1: &str = " 1 1 1 1 0 1 1 0 0 "; const S2: &str = " 1 1 1 0 1 1 1 0 0 "; /// Parse a text 
adjacency matrix format into a directed graph fn parse_graph<Ty: EdgeType>(s: &str) -> Graph<(), (), Ty> { let mut gr = Graph::with_capacity(0, 0); let s = s.trim(); let lines = s.lines().filter(|l| !l.is_empty()); for (row, line) in lines.enumerate() { for (col, word) in line.split(' ').filter(|s| !s.is_empty()).enumerate() { let has_edge = word.parse::<i32>().unwrap(); assert!(has_edge == 0 || has_edge == 1); if has_edge == 0 { continue; } while col >= gr.node_count() || row >= gr.node_count() { gr.add_node(()); } gr.update_edge(node_index(row), node_index(col), ()); } } gr } fn str_to_graph(s: &str) -> Graph<(), (), Undirected> { parse_graph(s) } fn str_to_digraph(s: &str) -> Graph<(), (), Directed> { parse_graph(s) } /// Parse a file in adjacency matrix format into a directed graph fn graph_from_file(path: &str) -> Graph<(), (), Directed> { let mut f = File::open(path).expect("file not found"); let mut contents = String::new(); f.read_to_string(&mut contents) .expect("failed to read from file"); parse_graph(&contents) } /* fn graph_to_ad_matrix<N, E, Ty: EdgeType>(g: &Graph<N,E,Ty>) { let n = g.node_count(); for i in (0..n) { for j in (0..n) { let ix = NodeIndex::new(i); let jx = NodeIndex::new(j); let out = match g.find_edge(ix, jx) { None => "0", Some(_) => "1", }; print!("{} ", out); } println!(""); } } */ #[test] fn petersen_iso() { // The correct isomorphism is // 0 => 0, 1 => 3, 2 => 1, 3 => 4, 5 => 2, 6 => 5, 7 => 7, 8 => 6, 9 => 8, 4 => 9 let peta = str_to_digraph(PETERSEN_A); let petb = str_to_digraph(PETERSEN_B); /* println!("{:?}", peta); graph_to_ad_matrix(&peta); println!(""); graph_to_ad_matrix(&petb); */ assert!(petgraph::algo::is_isomorphic(&peta, &petb)); } #[test] fn petersen_undir_iso() { // The correct isomorphism is // 0 => 0, 1 => 3, 2 => 1, 3 => 4, 5 => 2, 6 => 5, 7 => 7, 8 => 6, 9 => 8, 4 => 9 let peta = str_to_digraph(PETERSEN_A); let petb = str_to_digraph(PETERSEN_B); assert!(petgraph::algo::is_isomorphic(&peta, &petb)); } #[test] fn 
full_iso() { let a = str_to_graph(FULL_A); let b = str_to_graph(FULL_B); assert!(petgraph::algo::is_isomorphic(&a, &b)); } #[test] fn praust_dir_no_iso() { let a = str_to_digraph(PRAUST_A); let b = str_to_digraph(PRAUST_B); assert!(!petgraph::algo::is_isomorphic(&a, &b)); } #[test] fn praust_undir_no_iso() { let a = str_to_graph(PRAUST_A); let b = str_to_graph(PRAUST_B); assert!(!petgraph::algo::is_isomorphic(&a, &b)); } #[test] fn coxeter_di_iso() { // The correct isomorphism is let a = str_to_digraph(COXETER_A); let b = str_to_digraph(COXETER_B); assert!(petgraph::algo::is_isomorphic(&a, &b)); } #[test] fn coxeter_undi_iso() { // The correct isomorphism is let a = str_to_graph(COXETER_A); let b = str_to_graph(COXETER_B); assert!(petgraph::algo::is_isomorphic(&a, &b)); } #[test] fn g14_dir_not_iso() { let a = str_to_digraph(G1D); let b = str_to_digraph(G4D); assert!(!petgraph::algo::is_isomorphic(&a, &b)); } #[test] fn g14_undir_not_iso() { let a = str_to_digraph(G1U); let b = str_to_digraph(G4U); assert!(!petgraph::algo::is_isomorphic(&a, &b)); } #[test] fn g12_undir_iso() { let a = str_to_digraph(G1U); let b = str_to_digraph(G2U); assert!(petgraph::algo::is_isomorphic(&a, &b)); } #[test] fn g3_not_iso() { let a = str_to_digraph(G3_1); let b = str_to_digraph(G3_2); assert!(!petgraph::algo::is_isomorphic(&a, &b)); } #[test] fn g8_not_iso() { let a = str_to_digraph(G8_1); let b = str_to_digraph(G8_2); assert_eq!(a.edge_count(), b.edge_count()); assert_eq!(a.node_count(), b.node_count()); assert!(!petgraph::algo::is_isomorphic(&a, &b)); } #[test] fn s12_not_iso() { let a = str_to_digraph(S1); let b = str_to_digraph(S2); assert_eq!(a.edge_count(), b.edge_count()); assert_eq!(a.node_count(), b.node_count()); assert!(!petgraph::algo::is_isomorphic(&a, &b)); } #[test] fn iso1() { let mut g0 = Graph::<_, ()>::new(); let mut g1 = Graph::<_, ()>::new(); assert!(petgraph::algo::is_isomorphic(&g0, &g1)); // very simple cases let a0 = g0.add_node(0); let a1 = g1.add_node(0); 
assert!(petgraph::algo::is_isomorphic(&g0, &g1)); let b0 = g0.add_node(1); let b1 = g1.add_node(1); assert!(petgraph::algo::is_isomorphic(&g0, &g1)); let _ = g0.add_node(2); assert!(!petgraph::algo::is_isomorphic(&g0, &g1)); let _ = g1.add_node(2); assert!(petgraph::algo::is_isomorphic(&g0, &g1)); g0.add_edge(a0, b0, ()); assert!(!petgraph::algo::is_isomorphic(&g0, &g1)); g1.add_edge(a1, b1, ()); assert!(petgraph::algo::is_isomorphic(&g0, &g1)); } #[test] fn iso2() { let mut g0 = Graph::<_, ()>::new(); let mut g1 = Graph::<_, ()>::new(); let a0 = g0.add_node(0); let a1 = g1.add_node(0); let b0 = g0.add_node(1); let b1 = g1.add_node(1); let c0 = g0.add_node(2); let c1 = g1.add_node(2); g0.add_edge(a0, b0, ()); g1.add_edge(c1, b1, ()); assert!(petgraph::algo::is_isomorphic(&g0, &g1)); // a -> b // a -> c // vs. // c -> b // c -> a g0.add_edge(a0, c0, ()); g1.add_edge(c1, a1, ()); assert!(petgraph::algo::is_isomorphic(&g0, &g1)); // add // b -> c // vs // b -> a let _ = g0.add_edge(b0, c0, ()); let _ = g1.add_edge(b1, a1, ()); assert!(petgraph::algo::is_isomorphic(&g0, &g1)); let d0 = g0.add_node(3); let d1 = g1.add_node(3); let e0 = g0.add_node(4); let e1 = g1.add_node(4); assert!(petgraph::algo::is_isomorphic(&g0, &g1)); // add // b -> e -> d // vs // b -> d -> e g0.add_edge(b0, e0, ()); g0.add_edge(e0, d0, ()); g1.add_edge(b1, d1, ()); g1.add_edge(d1, e1, ()); assert!(petgraph::algo::is_isomorphic(&g0, &g1)); } #[test] fn iso_matching() { let g0 = Graph::<(), _>::from_edges(&[(0, 0, 1), (0, 1, 2), (0, 2, 3), (1, 2, 4)]); let mut g1 = g0.clone(); g1[edge_index(0)] = 0; assert!(!is_isomorphic_matching( &g0, &g1, |x, y| x == y, |x, y| x == y )); let mut g2 = g0.clone(); g2[edge_index(1)] = 0; assert!(!is_isomorphic_matching( &g0, &g2, |x, y| x == y, |x, y| x == y )); } #[test] fn iso_100n_100e() { let g0 = graph_from_file("tests/res/graph_100n_100e.txt"); let g1 = graph_from_file("tests/res/graph_100n_100e_iso.txt"); assert!(petgraph::algo::is_isomorphic(&g0, &g1)); } 
#[test] fn iso_large() { let g0 = graph_from_file("tests/res/graph_1000n_1000e.txt"); let g1 = graph_from_file("tests/res/graph_1000n_1000e_iso.txt"); assert!(petgraph::algo::is_isomorphic(&g0, &g1)); } // isomorphism isn't correct for multigraphs. // Keep this testcase to document how #[should_panic] #[test] fn iso_multigraph_failure() { let g0 = Graph::<(), ()>::from_edges(&[(0, 0), (0, 0), (0, 1), (1, 1), (1, 1), (1, 0)]); let g1 = Graph::<(), ()>::from_edges(&[(0, 0), (0, 1), (0, 1), (1, 1), (1, 0), (1, 0)]); assert!(!is_isomorphic(&g0, &g1)); } #[test] #[cfg(feature = "matrix_graph")] fn iso_graph_matrixgraph() { let g0 = Graph::<(), ()>::from_edges(&[(0, 1), (1, 2), (2, 0)]); let g1 = MatrixGraph::<(), ()>::from_edges(&[(0, 1), (1, 2), (2, 0)]); assert!(is_isomorphic(&g0, &g1)); } #[test] fn iso_subgraph() { let g0 = Graph::<(), ()>::from_edges(&[(0, 1), (1, 2), (2, 0)]); let g1 = Graph::<(), ()>::from_edges(&[(0, 1), (1, 2), (2, 0), (2, 3)]); assert!(!is_isomorphic(&g0, &g1)); assert!(is_isomorphic_subgraph(&g0, &g1)); } /// Isomorphic pair const COXETER_A: &str = " 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 1 0 1 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 1 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 1 0 "; const COXETER_B: &str = " 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 1 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 1 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 1 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 1 
0 0 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 1 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 1 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 1 0 ";
27.692308
92
0.529798
8a978283c278a58694e148c251c381e5bc19489e
131
use auto_enums::enum_derive; #[enum_derive(rayon::IndexedParallelIterator)] enum Enum<A, B> { A(A), B(B), } fn main() {}
13.1
46
0.633588
7171c593413915a963b9f5d29c84ea6fd19c2ae5
7,919
// Copyright (c) Aptos // SPDX-License-Identifier: Apache-2.0 //! This module defines all kinds of structures in the Sparse Merkle Tree maintained in scratch pad. //! There are four kinds of nodes: //! - A `SubTree::Empty` represents an empty subtree with zero leaf. Its root hash is assumed to be //! the default hash. //! //! - A `SubTree::NonEmpty` represents a subtree with one or more leaves, it carries its root hash. //! //! From a `SubTree::NonEmpty` one may or may not get an reference to its root node, depending on //! how this subtree structure was created and if the root node has been dropped (when its persisted //! to DB and given up by any possible cache). A non empty subtree can refer to one of two types of //! nodes as its root: //! //! - An `InternalNode` is a node that has two children. It is same as the internal node in a //! standard Merkle tree. //! //! - A `LeafNode` represents a single account. Similar to what is in storage, a leaf node has a //! key which is the hash of the account address as well as a value hash which is the hash of the //! corresponding account content. The difference is that a `LeafNode` does not always have the //! value, in the case when the leaf was loaded into memory as part of a non-inclusion proof. 
use aptos_crypto::{ hash::{CryptoHash, SPARSE_MERKLE_PLACEHOLDER_HASH}, HashValue, }; use aptos_types::proof::{SparseMerkleInternalNode, SparseMerkleLeafNode}; use std::sync::{Arc, Weak}; #[derive(Clone, Debug)] pub(crate) struct InternalNode<V> { pub left: SubTree<V>, pub right: SubTree<V>, } impl<V: CryptoHash> InternalNode<V> { pub fn calc_hash(&self) -> HashValue { SparseMerkleInternalNode::new(self.left.hash(), self.right.hash()).hash() } } #[derive(Clone, Debug)] pub(crate) struct LeafNode<V> { pub key: HashValue, pub value: LeafValue<V>, } impl<V> LeafNode<V> { pub fn new(key: HashValue, value: LeafValue<V>) -> Self { Self { key, value } } pub fn clone_with_weak_value(&self) -> Self { Self { key: self.key, value: self.value.weak(), } } } impl<V: CryptoHash> LeafNode<V> { pub fn calc_hash(&self) -> HashValue { SparseMerkleLeafNode::new(self.key, self.value.hash).hash() } } impl<V> From<&SparseMerkleLeafNode> for LeafNode<V> where V: CryptoHash, { fn from(leaf_node: &SparseMerkleLeafNode) -> Self { Self { key: leaf_node.key(), value: LeafValue::new_with_value_hash(leaf_node.value_hash()), } } } #[derive(Debug)] pub(crate) enum NodeInner<V> { Internal(InternalNode<V>), Leaf(LeafNode<V>), } #[derive(Debug)] pub(crate) struct Node<V> { generation: u64, inner: NodeInner<V>, } impl<V: CryptoHash> Node<V> { pub fn calc_hash(&self) -> HashValue { match &self.inner { NodeInner::Internal(internal_node) => internal_node.calc_hash(), NodeInner::Leaf(leaf_node) => leaf_node.calc_hash(), } } } impl<V> Node<V> { pub fn new_leaf(key: HashValue, value: LeafValue<V>, generation: u64) -> Self { Self { generation, inner: NodeInner::Leaf(LeafNode::new(key, value)), } } pub fn new_leaf_from_node(node: LeafNode<V>, generation: u64) -> Self { Self { generation, inner: NodeInner::Leaf(node), } } #[cfg(test)] pub fn new_internal(left: SubTree<V>, right: SubTree<V>, generation: u64) -> Self { Self { generation, inner: NodeInner::Internal(InternalNode { left, right }), } } pub fn 
new_internal_from_node(node: InternalNode<V>, generation: u64) -> Self { Self { generation, inner: NodeInner::Internal(node), } } pub fn inner(&self) -> &NodeInner<V> { &self.inner } } #[derive(Debug)] pub enum Ref<R> { Shared(Arc<R>), Weak(Weak<R>), } impl<R> Ref<R> { pub fn new_unknown() -> Self { Self::Weak(Weak::new()) } pub fn new_shared(referee: R) -> Self { Self::Shared(Arc::new(referee)) } pub fn weak(&self) -> Self { Self::Weak(match self { Self::Shared(arc) => Arc::downgrade(arc), Self::Weak(weak) => weak.clone(), }) } pub fn get_if_in_mem(&self) -> Option<Arc<R>> { match self { Self::Shared(arc) => Some(arc.clone()), Self::Weak(weak) => weak.upgrade(), } } } impl<R> Clone for Ref<R> { fn clone(&self) -> Self { match self { Self::Shared(arc) => Self::Shared(arc.clone()), Self::Weak(weak) => Self::Weak(weak.clone()), } } } pub(crate) type NodeHandle<V> = Ref<Node<V>>; #[derive(Clone, Debug)] pub(crate) enum SubTree<V> { Empty, NonEmpty { hash: HashValue, root: NodeHandle<V>, }, } impl<V: CryptoHash> SubTree<V> { pub fn new_empty() -> Self { Self::Empty } pub fn new_unknown(hash: HashValue) -> Self { Self::NonEmpty { hash, root: NodeHandle::new_unknown(), } } pub fn new_leaf_with_value(key: HashValue, value: V, generation: u64) -> Self { Self::new_leaf_impl(key, LeafValue::new_with_value(value), generation) } pub fn new_leaf_with_value_hash( key: HashValue, value_hash: HashValue, generation: u64, ) -> Self { Self::new_leaf_impl(key, LeafValue::new_with_value_hash(value_hash), generation) } fn new_leaf_impl(key: HashValue, value: LeafValue<V>, generation: u64) -> Self { let leaf = Node::new_leaf(key, value, generation); Self::NonEmpty { hash: leaf.calc_hash(), root: NodeHandle::new_shared(leaf), } } #[cfg(test)] pub fn new_internal(left: Self, right: Self, generation: u64) -> Self { let internal = Node::new_internal(left, right, generation); Self::NonEmpty { hash: internal.calc_hash(), root: NodeHandle::new_shared(internal), } } pub fn hash(&self) -> 
HashValue { match self { Self::Empty => *SPARSE_MERKLE_PLACEHOLDER_HASH, Self::NonEmpty { hash, .. } => *hash, } } pub fn weak(&self) -> Self { match self { Self::Empty => Self::Empty, Self::NonEmpty { hash, root } => Self::NonEmpty { hash: *hash, root: root.weak(), }, } } pub fn get_node_if_in_mem(&self, min_generation: u64) -> Option<Arc<Node<V>>> { match self { Self::Empty => None, Self::NonEmpty { root, .. } => root.get_if_in_mem().and_then(|n| { if n.generation >= min_generation { Some(n) } else { None } }), } } #[cfg(test)] pub fn is_unknown(&self) -> bool { matches!( self, Self::NonEmpty { root: NodeHandle::Weak(_), .. } ) } #[cfg(test)] pub fn is_empty(&self) -> bool { matches!(self, SubTree::Empty) } } #[derive(Clone, Debug)] pub struct LeafValue<V> { pub hash: HashValue, pub data: Ref<V>, } impl<V> LeafValue<V> { pub fn new_with_value(value: V) -> Self where V: CryptoHash, { Self { hash: value.hash(), data: Ref::new_shared(value), } } pub fn new_with_value_hash(value_hash: HashValue) -> Self { Self { hash: value_hash, data: Ref::new_unknown(), } } pub fn weak(&self) -> Self { Self { hash: self.hash, data: self.data.weak(), } } }
25.794788
100
0.56194
3a8cba72fa8090a548216fc80eba6e84f4cf8828
245
// https://www.codewars.com/kata/perimeter-of-squares-in-a-rectangle/train/rust fn perimeter(n: u64) -> u64 { 4 * ({ let mut t = (n + 2, 1u64, 1u64); while t.0 > 0 { t = (t.0 - 1, t.2, t.1 + t.2); } t.1 } - 1) }
24.5
79
0.485714
f9677312237bd9e7587b31c79f4c4e4c1afd407f
80,072
use std::fs::File; use byteorder::*; use std::io::{Read, Seek, SeekFrom}; /* Enum needed for various functions that support runtime parsing of ELF data*/ #[derive(Clone, Debug)] pub enum ExecHeader { ThirtyTwo(ExecHeader32), SixtyFour(ExecHeader64) } #[derive(Clone, Debug)] pub struct ExecHeader32 { pub e_ident: [u8; EXEC::EI_IDENT as usize], pub e_type: u16, pub e_machine: u16, pub e_version: u32, pub e_entry: u32, pub e_phoff: u32, pub e_shoff: u32, pub e_flags: u32, pub e_ehsize: u16, pub e_phentsize: u16, pub e_phnum: u16, pub e_shentsize: u16, pub e_shnum: u16, pub e_shstrndx: u16, } #[derive(Clone, Debug)] pub struct ExecHeader64 { pub e_ident: [u8; EXEC::EI_IDENT as usize], pub e_type: u16, pub e_machine: u16, pub e_version: u32, pub e_entry: u64, pub e_phoff: u64, pub e_shoff: u64, pub e_flags: u32, pub e_ehsize: u16, pub e_phentsize: u16, pub e_phnum: u16, pub e_shentsize: u16, pub e_shnum: u16, pub e_shstrndx: u16, } impl ExecHeader32{ pub fn new(ident_array: [u8; 16], etype: u16, emach: u16, evers: u32, eentry: u32, phoff: u32, shoff: u32, flags: u32, ehsize: u16, pehsize: u16, phnum: u16, shent: u16, shnum: u16, shstrndx: u16,) -> ExecHeader32{ ExecHeader32{ e_ident: ident_array, e_type: etype, e_machine : emach, e_version : evers, e_entry : eentry, e_phoff : phoff, e_shoff : shoff, e_flags : flags, e_ehsize : ehsize, e_phentsize : pehsize, e_phnum : phnum, e_shentsize : shent, e_shnum : shnum, e_shstrndx : shstrndx, } } pub fn parse_exec_header<R, B: ByteOrder>(file_ptr: &mut R) -> Result<ExecHeader32, std::io::Error> where R: Read,{ let mut ident_array = [0; EXEC::EI_IDENT]; file_ptr.read_exact(&mut ident_array)?; let etype = file_ptr.read_u16::<B>()?; let emach = file_ptr.read_u16::<B>()?; let evers = file_ptr.read_u32::<B>()?; let eentry = file_ptr.read_u32::<B>()?; let phoff = file_ptr.read_u32::<B>()?; let shoff = file_ptr.read_u32::<B>()?; let flags = file_ptr.read_u32::<B>()?; let ehsize = file_ptr.read_u16::<B>()?; let pehsize = 
file_ptr.read_u16::<B>()?; let phnum = file_ptr.read_u16::<B>()?; let shent = file_ptr.read_u16::<B>()?; let shnum = file_ptr.read_u16::<B>()?; let shstrndx = file_ptr.read_u16::<B>()?; Ok(ExecHeader32{ e_ident: ident_array, e_type: etype, e_machine : emach, e_version : evers, e_entry : eentry, e_phoff : phoff, e_shoff : shoff, e_flags : flags, e_ehsize : ehsize, e_phentsize : pehsize, e_phnum : phnum, e_shentsize : shent, e_shnum : shnum, e_shstrndx : shstrndx, }) } /*TODO -- this can be improved; just write directly to file pointer rather than intermediate vec */ pub fn write_header<B: ByteOrder>(&self, file_ptr: &mut File)-> Result<(),std::io::Error>{ for &val in &self.e_ident{ file_ptr.write_u8(val)?; } file_ptr.write_u16::<B>(self.e_type)?; file_ptr.write_u16::<B>(self.e_machine)?; file_ptr.write_u32::<B>(self.e_version)?; file_ptr.write_u32::<B>(self.e_entry)?; file_ptr.write_u32::<B>(self.e_phoff)?; file_ptr.write_u32::<B>(self.e_shoff)?; file_ptr.write_u32::<B>(self.e_flags)?; file_ptr.write_u16::<B>(self.e_ehsize)?; file_ptr.write_u16::<B>(self.e_phentsize)?; file_ptr.write_u16::<B>(self.e_phnum)?; file_ptr.write_u16::<B>(self.e_shentsize)?; file_ptr.write_u16::<B>(self.e_shnum)?; file_ptr.write_u16::<B>(self.e_shstrndx)?; Ok(()) } /* * Note: The calling function is responsible for parsing a safe-to-downcast * value from u64 to either u8, u16, or u32 */ pub fn update_exec_header(&mut self, field: String, val: u32, offset: Option<usize>)-> Result<(),std::io::Error>{ match field.as_ref() { "e_ident" => { if let Some(offset) = offset { self.e_ident[offset]=val as u8 } else { return Err(std::io::Error::new(std::io::ErrorKind::Other, "Invalid exec header change option")) } }, "e_type" => self.e_type = val as u16, "e_machine" => self.e_machine = val as u16, "e_version" => self.e_version =val, "e_entry" => self.e_entry = val, "e_phoff" => self.e_phoff = val, "e_shoff" => self.e_shoff =val, "e_flags" => self.e_flags=val, "e_ehsize" => self.e_ehsize=val as u16, 
"e_phentsize" => self.e_phentsize=val as u16, "e_phnum" => self.e_phnum=val as u16, "e_shentsize" => self.e_shentsize=val as u16, "e_shnum" => self.e_shnum=val as u16, "e_shstrndx" => self.e_shstrndx=val as u16, _ => return Err(std::io::Error::new(std::io::ErrorKind::Other, "Invalid exec header change option")) }; Ok(()) } } impl ExecHeader64{ pub fn new(ident_array: [u8; EXEC::EI_IDENT], etype: u16, emach: u16, evers: u32, eentry: u64, phoff: u64, shoff: u64, flags: u32, ehsize: u16, pehsize: u16, phnum: u16, shent: u16, shnum: u16, shstrndx: u16,) -> ExecHeader64 { ExecHeader64{ e_ident: ident_array, e_type: etype, e_machine : emach, e_version : evers, e_entry : eentry, e_phoff : phoff, e_shoff : shoff, e_flags : flags, e_ehsize : ehsize, e_phentsize : pehsize, e_phnum : phnum, e_shentsize : shent, e_shnum : shnum, e_shstrndx : shstrndx, } } pub fn parse_exec_header<R,B: ByteOrder>(file_ptr: &mut R,) -> Result<ExecHeader64, std::io::Error> where R: Read,{ let mut ident_array = [0; EXEC::EI_IDENT]; file_ptr.read_exact(&mut ident_array)?; let etype = file_ptr.read_u16::<B>()?; let emach = file_ptr.read_u16::<B>()?; let evers = file_ptr.read_u32::<B>()?; let eentry = file_ptr.read_u64::<B>()?; let phoff = file_ptr.read_u64::<B>()?; let shoff = file_ptr.read_u64::<B>()?; let flags = file_ptr.read_u32::<B>()?; let ehsize = file_ptr.read_u16::<B>()?; let pehsize = file_ptr.read_u16::<B>()?; let phnum = file_ptr.read_u16::<B>()?; let shent = file_ptr.read_u16::<B>()?; let shnum = file_ptr.read_u16::<B>()?; let shstrndx = file_ptr.read_u16::<B>()?; Ok(ExecHeader64{ e_ident: ident_array, e_type: etype, e_machine : emach, e_version : evers, e_entry : eentry, e_phoff : phoff, e_shoff : shoff, e_flags : flags, e_ehsize : ehsize, e_phentsize : pehsize, e_phnum : phnum, e_shentsize : shent, e_shnum : shnum, e_shstrndx : shstrndx, }) } /* TODO: IMPROVE THIS, dont need the intermediate writer vec, just write directly to file pointer*/ pub fn write_header<B: ByteOrder>(&self, 
file_ptr: &mut File)->Result<(),std::io::Error>{ file_ptr.seek( SeekFrom::Start(0))?; for &val in &self.e_ident{ file_ptr.write_u8(val)?; } file_ptr.write_u16::<B>(self.e_type)?; file_ptr.write_u16::<B>(self.e_machine)?; file_ptr.write_u32::<B>(self.e_version)?; file_ptr.write_u64::<B>(self.e_entry)?; file_ptr.write_u64::<B>(self.e_phoff)?; file_ptr.write_u64::<B>(self.e_shoff)?; file_ptr.write_u32::<B>(self.e_flags)?; file_ptr.write_u16::<B>(self.e_ehsize)?; file_ptr.write_u16::<B>(self.e_phentsize)?; file_ptr.write_u16::<B>(self.e_phnum)?; file_ptr.write_u16::<B>(self.e_shentsize)?; file_ptr.write_u16::<B>(self.e_shnum)?; file_ptr.write_u16::<B>(self.e_shstrndx)?; Ok(()) } /* * Note: The calling function is responsible for parsing a safe-to-downcast * value from u64 to either u8, u16, or u32 */ pub fn update_exec_header(&mut self, field: String, val: u64, offset: Option<usize>)->Result<(),std::io::Error> { match field.as_ref() { "e_ident" => { if let Some(offset) = offset { self.e_ident[offset]=val as u8 } else{ return Err(std::io::Error::new(std::io::ErrorKind::Other, "Invalid exec header change option")) } }, "e_type" => self.e_type = val as u16, "e_machine" => self.e_machine = val as u16, "e_version" => self.e_version =val as u32, "e_entry" => self.e_entry = val, "e_phoff" => self.e_phoff = val, "e_shoff" => self.e_shoff =val, "e_flags" => self.e_flags=val as u32, "e_ehsize" => self.e_ehsize=val as u16, "e_phentsize" => self.e_phentsize=val as u16, "e_phnum" => self.e_phnum=val as u16, "e_shentsize" => self.e_shentsize=val as u16, "e_shnum" => self.e_shnum=val as u16, "e_shstrndx" => self.e_shstrndx=val as u16, _ => return Err(std::io::Error::new(std::io::ErrorKind::Other, "Invalid exec header change option")) }; Ok(()) } } #[allow(non_snake_case)] #[derive(Clone, Debug)] pub struct ExecutiveHeader { pub data: EXEC::EI_DATA, pub class: EXEC::EI_CLASS, pub EH: ExecHeader, } impl ExecutiveHeader { pub fn new<R>(file_ptr: &mut R) -> 
Result<ExecutiveHeader,std::io::Error> where R: Read + Seek, { file_ptr.seek(SeekFrom::Start(0))?; let mut ident_array = [0; EXEC::EI_IDENT]; file_ptr.read_exact(&mut ident_array)?; file_ptr.seek(SeekFrom::Start(0))?; let class: EXEC::EI_CLASS = match_class( ident_array[EXEC::_EI_CLASS]); let data: EXEC::EI_DATA = match_data(ident_array[EXEC::_EI_DATA]); let eh_t = match class { EXEC::EI_CLASS::ELFCLASSNONE => { return Err(std::io::Error::new(std::io::ErrorKind::Other, "Elf class not supported")) }, EXEC::EI_CLASS::ELFCLASS32=> { match data { EXEC::EI_DATA::ELFDATANONE => { return Err(std::io::Error::new(std::io::ErrorKind::Other, "Elf data not supported")) }, EXEC::EI_DATA::ELFDATA2LSB => { let exec: ExecHeader32= ExecHeader32::parse_exec_header::<R, LittleEndian>(file_ptr)?; ExecHeader::ThirtyTwo(exec) }, EXEC::EI_DATA::ELFDATA2MSB => { let exec: ExecHeader32 = ExecHeader32::parse_exec_header::<R, BigEndian>(file_ptr)?; ExecHeader::ThirtyTwo(exec) }, EXEC::EI_DATA::ELFDATAOTHER(_)=> { return Err(std::io::Error::new(std::io::ErrorKind::Other, "Elf data not supported")) } } }, EXEC::EI_CLASS::ELFCLASS64=>{ match data { EXEC::EI_DATA::ELFDATANONE => { return Err(std::io::Error::new(std::io::ErrorKind::Other, "Elf data not supported")) }, EXEC::EI_DATA::ELFDATA2LSB => { let exec: ExecHeader64= ExecHeader64::parse_exec_header::<R, LittleEndian>(file_ptr)?; ExecHeader::SixtyFour(exec) }, EXEC::EI_DATA::ELFDATA2MSB => { let exec: ExecHeader64 = ExecHeader64::parse_exec_header::<R, BigEndian>(file_ptr)?; ExecHeader::SixtyFour(exec) }, EXEC::EI_DATA::ELFDATAOTHER(_)=> { return Err(std::io::Error::new(std::io::ErrorKind::Other, "Elf data not supported")) } } }, EXEC::EI_CLASS::ELFCLASSOTHER(_) =>{ return Err(std::io::Error::new(std::io::ErrorKind::Other, "Elf class not supported")) }, }; Ok(ExecutiveHeader { class: class, data: data, EH: eh_t, }) } /* pub fn write_header(&self, file_ptr: &mut File ) -> Result<(),std::io::Error>{ file_ptr.seek(SeekFrom::Start(0))?; 
self.write_exec_header(file_ptr)?; Ok(()) }*/ pub fn write_exec_header(&self, file_ptr: &mut File) -> Result<(),std::io::Error>{ match &self.EH { ExecHeader::ThirtyTwo(exec32) => { match self.data { EXEC::EI_DATA::ELFDATA2LSB => { exec32.write_header::<LittleEndian>(file_ptr) }, EXEC::EI_DATA::ELFDATA2MSB => { exec32.write_header::<BigEndian>(file_ptr) } _ => { return Err(std::io::Error::new(std::io::ErrorKind::Other, "Elf class not supported")) } } }, ExecHeader::SixtyFour(exec64)=>{ match self.data { EXEC::EI_DATA::ELFDATA2LSB => { exec64.write_header::<LittleEndian>(file_ptr) }, EXEC::EI_DATA::ELFDATA2MSB => { exec64.write_header::<BigEndian>(file_ptr) } _ => { return Err(std::io::Error::new(std::io::ErrorKind::Other, "Elf class not supported")) } } }, } } pub fn update_sht_offset(&mut self, by_size: u64){ match &mut self.EH { ExecHeader::ThirtyTwo(exec32) => { exec32.e_shoff += by_size as u32 }, ExecHeader::SixtyFour(exec64)=>{ exec64.e_shoff += by_size }, } } pub fn sht_offset(&self)-> SHTOffset { match &self.EH { ExecHeader::ThirtyTwo(exec32) => { SHTOffset::ThirtyTwo(exec32.e_shoff) }, ExecHeader::SixtyFour(exec64)=>{ SHTOffset::SixtyFour(exec64.e_shoff) }, } } pub fn pht_offset(&self)-> PHTOffset { match &self.EH { ExecHeader::ThirtyTwo(exec32) => { PHTOffset::ThirtyTwo(exec32.e_phoff) }, ExecHeader::SixtyFour(exec64)=>{ PHTOffset::SixtyFour(exec64.e_phoff) }, } } pub fn entry(&self)-> Entry { match &self.EH { ExecHeader::ThirtyTwo(exec32) => { Entry::ThirtyTwo(exec32.e_entry) }, ExecHeader::SixtyFour(exec64)=>{ Entry::SixtyFour(exec64.e_entry) }, } } pub fn ph_entry_num(&self)-> u16 { match &self.EH { ExecHeader::ThirtyTwo(exec32) => { exec32.e_phnum }, ExecHeader::SixtyFour(exec64)=>{ exec64.e_phnum }, } } pub fn sh_entry_num(&self)-> u16 { match &self.EH { ExecHeader::ThirtyTwo(exec32) => { exec32.e_shnum }, ExecHeader::SixtyFour(exec64)=>{ exec64.e_shnum }, } } pub fn sh_entry_size(&self)-> u16 { match &self.EH { ExecHeader::ThirtyTwo(exec32) => { 
exec32.e_shentsize }, ExecHeader::SixtyFour(exec64)=>{ exec64.e_shentsize }, } } pub fn ph_entry_size(&self)-> u16 { match &self.EH { ExecHeader::ThirtyTwo(exec32) => { exec32.e_phentsize }, ExecHeader::SixtyFour(exec64)=>{ exec64.e_phentsize }, } } pub fn shstrndx(&self)-> u16 { match &self.EH { ExecHeader::ThirtyTwo(exec32) => { exec32.e_shstrndx }, ExecHeader::SixtyFour(exec64)=>{ exec64.e_shstrndx }, } } //This function should only ever be called after obtaining 'safe' values for val i.e. //should be parsed to return a u8 or u16 appropriately so that we already know //up or downcasting is safe pub fn update_exec_header(&mut self, field: String, val: u64, offset: Option<usize> ) -> Result<(),std::io::Error>{ match &mut self.EH { ExecHeader::ThirtyTwo(exec32) => { exec32.update_exec_header(field, val as u32, offset)?; }, ExecHeader::SixtyFour(exec64) => { exec64.update_exec_header(field, val, offset)?; }, }; Ok(()) } } #[derive(Clone, Debug)] pub enum Entry{ ThirtyTwo(u32), SixtyFour(u64), } #[derive(Clone, Debug)] pub enum SHTOffset{ ThirtyTwo(u32), SixtyFour(u64), } #[derive(Clone, Debug)] pub enum PHTOffset{ ThirtyTwo(u32), SixtyFour(u64), } pub fn match_data(data: u8) -> EXEC::EI_DATA { match data { 0 => EXEC::EI_DATA::ELFDATANONE, 1 => EXEC::EI_DATA::ELFDATA2LSB, 2 => EXEC::EI_DATA::ELFDATA2MSB, d => EXEC::EI_DATA::ELFDATAOTHER(d), } } pub fn match_class(class: u8) -> EXEC::EI_CLASS { match class { 0 => EXEC::EI_CLASS::ELFCLASSNONE, 1 => EXEC::EI_CLASS::ELFCLASS32, 2 => EXEC::EI_CLASS::ELFCLASS64, c => EXEC::EI_CLASS::ELFCLASSOTHER(c), } } pub fn match_osabi(osabi: u8) -> EXEC::EI_OSABI { match osabi { 0 => EXEC::EI_OSABI::ELFOSABI_NONE, 1 => EXEC::EI_OSABI::ELFOSABI_HPUX, 2 => EXEC::EI_OSABI::ELFOSABI_NETBSD, 3 => EXEC::EI_OSABI::ELFOSABI_GNU, 6 => EXEC::EI_OSABI::ELFOSABI_SOLARIS, 7 => EXEC::EI_OSABI::ELFOSABI_AIX, 8 => EXEC::EI_OSABI::ELFOSABI_IRIX, 9 => EXEC::EI_OSABI::ELFOSABI_FREEBSD, 10 => EXEC::EI_OSABI::ELFOSABI_TRU64, 11 => 
EXEC::EI_OSABI::ELFOSABI_MODESTO, 12 => EXEC::EI_OSABI::ELFOSABI_OPENBSD, 64 => EXEC::EI_OSABI::ELFOSABI_ARM_AEABI, 97 => EXEC::EI_OSABI::ELFOSABI_ARM, 255 => EXEC::EI_OSABI::ELFOSABI_STANDALONE, osabi => EXEC::EI_OSABI::ELFOSABI_OTHER(osabi), } } pub fn match_data_as_str(data: String)-> Result<u8, std::io::Error>{ match data.as_str(){ "ELFDATANONE" =>Ok(0), //should this be supported? "ELFDATA2LSB" =>Ok(1), "ELFDATA2MSB" =>Ok(2), //ELFCLASSOTHER, //not supported _ => return Err(std::io::Error::new(std::io::ErrorKind::Other, "Elf data not supported")) } } pub fn match_class_as_str(class: String)-> Result<u8, std::io::Error>{ match class.as_str(){ "ELFCLASSNONE" =>Ok(0), //should this be supported? "ELFCLASS32" =>Ok(1), "ELFCLASS64" =>Ok(2), //ELFCLASSOTHER, //not supported _ => return Err(std::io::Error::new(std::io::ErrorKind::Other, "Elf class not supported")) } } /* comments from elf.h*/ pub fn match_osabi_as_str(osabi: String)-> Result<u8, std::io::Error>{ match osabi.as_str(){ "ELFOSABI_NONE" =>Ok(0), /* UNIX System V ABI */ "ELFOSABI_SYSV" => Ok(0), /* Alias for ELFOSABI_NONE */ "ELFOSABI_HPUX" =>Ok(1), /* HP-UX */ "ELFOSABI_NETBSD" =>Ok(2), /* NetBSD. */ "ELFOSABI_GNU" =>Ok(3), /* Object uses GNU ELF extensions. */ "ELFOSABI_LINUX" => Ok(3), /* Compatibility alias for ELFOSABI_GNU */ "ELFOSABI_SOLARIS" =>Ok(6), /* Sun Solaris. */ "ELFOSABI_AIX" =>Ok(7), /* IBM AIX. */ "ELFOSABI_IRIX" =>Ok(8), /* SGI Irix. */ "ELFOSABI_FREEBSD" =>Ok(9), /* FreeBSD. */ "ELFOSABI_TRU64" =>Ok(10), /* Compaq TRU64 UNIX. */ "ELFOSABI_MODESTO" =>Ok(11), /* Novell Modesto. */ "ELFOSABI_OPENBSD" =>Ok( 12), /* OpenBSD. */ "ELFOSABI_ARM_AEABI" =>Ok(64), /* ARM EABI */ "ELFOSABI_ARM" =>Ok(97), /* ARM */ //ELFOSABI_OTHER(u8), //TODO should support 'other'? 
"ELFOSABI_STANDALONE" =>Ok(255), /* Standalone (embedded) application */ _ => return Err(std::io::Error::new(std::io::ErrorKind::Other, "Elf osabi not supported")) } } pub fn match_version(e_vers: u32)-> Result<EXEC::EI_VERS, std::io::Error> { match e_vers{ 0 => Ok(EXEC::EI_VERS::EV_NONE), 1 => Ok(EXEC::EI_VERS::EV_CURRENT), _ => return Err(std::io::Error::new(std::io::ErrorKind::Other, "Elf version not supported")) } } pub fn match_version_as_str(e_vers: String)-> Result<u32, std::io::Error> { match e_vers.as_str(){ "EV_NONE"=> Ok(0), "EV_CURRENT"=>Ok(1), _ => return Err(std::io::Error::new(std::io::ErrorKind::Other, "Elf version not supported")) } } pub fn match_type(etype: u16)-> Result<EXEC::EI_TYPE, std::io::Error> { match etype { 0 => Ok(EXEC::EI_TYPE::ET_NONE), 1 => Ok(EXEC::EI_TYPE::ET_REL), 2=> Ok(EXEC::EI_TYPE::ET_EXEC), 3=> Ok(EXEC::EI_TYPE::ET_DYN), 4=> Ok(EXEC::EI_TYPE::ET_CORE), 0xfe00 => Ok(EXEC::EI_TYPE::ET_LOOS), 0xfeff=> Ok(EXEC::EI_TYPE::ET_HIOS), 0xff00=> Ok(EXEC::EI_TYPE::ET_LOPROC), 0xffff=> Ok(EXEC::EI_TYPE::ET_HIPROC), _ => return Err(std::io::Error::new(std::io::ErrorKind::Other, "Elf type not supported")) } } pub fn match_type_as_str(etype: String)->Result<u16, std::io::Error>{ match etype.as_str() { "ET_NONE"=> Ok(0), "ET_REL"=> Ok(1), "ET_EXEC"=> Ok(2), "ET_DYN"=> Ok(3), "ET_CORE"=> Ok(4), "ET_LOOS"=> Ok(0xfe00), "ET_HIOS"=> Ok(0xfeff), "ET_LOPROC"=> Ok(0xff00), "ET_HIPROC"=> Ok(0xffff), _ => return Err(std::io::Error::new(std::io::ErrorKind::Other, "Elf type not supported")) } } pub fn match_mach_as_str(mach: String) -> Result<u16, std::io::Error> { match mach.as_str(){ "EM_NONE" => Ok(0), "EM_M32" => Ok(1), /* AT&T WE 32100 */ "EM_SPARC" => Ok(2), /*SPARC*/ "EM_386" => Ok(3), /*Intel 80386*/ "EM_68K" => Ok(4), /*Motorola 68000*/ "EM_88K" => Ok(5), /* Motorola 88000*/ // RESERVED = 6, /* Reserved for future use*/ "EM_860" => Ok(7), /*Intel 80860*/ "EM_MIPS" => Ok(8), /* MIPS I Architecture */ "EM_S370" => Ok(9), /* IBM System/370 
Processor */ "EM_MIPS_RS3_LE" => Ok(10), /* MIPS RS3000 Little-endian */ //RESERVED 11-14 // /*Reserved for future use */ "EM_PARISC" => Ok(15), /*Hewlett-Packard PA-RISC */ //RESERVED 16 // /*Reserved for future use */ "EM_VPP500" => Ok(17), /*Fujitsu VPP500 */ "EM_SPARC32PLUS" => Ok(18), /*Enhanced instruction set SPARC */ "EM_960" => Ok(19), /* Intel 80960 */ "EM_PPC" => Ok(20), /* PowerPC */ "EM_PPC64" => Ok(21), /* 64-bit PowerPC */ //RESERVED 22-35 // /*Reserved for future use */ "EM_V800" => Ok(36), /* NEC V800 */ "EM_FR20" => Ok(37), /* Fujitsu FR20 */ "EM_RH32" => Ok(38), /*TRW RH-32 */ "EM_RCE" => Ok(39), /*Motorola RCE */ "EM_ARM" => Ok(40), /*Advanced RISC Machines ARM */ "EM_ALPHA" => Ok(41), /* Digital Alpha */ "EM_SH" => Ok(42), /*Hitachi SH */ "EM_SPARCV9" => Ok(43), /* SPARC Version 9 */ "EM_TRICORE" => Ok(44), /* Siemens Tricore embedded processor */ "EM_ARC" => Ok(45), /*Argonaut RISC Core, Argonaut Technologies Inc. */ "EM_H8_300" => Ok(46), /* Hitachi H8/300 */ "EM_H8_300H" => Ok(47), /* Hitachi H8/300H */ "EM_H8S" => Ok(48), /*Hitachi H8S */ "EM_H8_500" => Ok(49), /*Hitachi H8/500 */ "EM_IA_64" => Ok(50), /*Intel IA-64 processor architecture */ "EM_MIPS_X" => Ok(51), /*Stanford MIPS-X */ "EM_COLDFIRE" => Ok(52), /* Motorola ColdFire */ "EM_68HC12" => Ok(53), /*Motorola M68HC12 */ "EM_MMA" => Ok(54), /*Fujitsu MMA Multimedia Accelerator */ "EM_PCP" => Ok(55), /*Siemens PCP */ "EM_NCPU" => Ok(56), /*Sony nCPU embedded RISC processor */ "EM_NDR1" => Ok(57), /*Denso NDR1 microprocessor */ "EM_STARCORE" => Ok(58), /* Motorola Star*Core processor */ "EM_ME16" => Ok(59), /*Toyota ME16 processor */ "EM_ST100" => Ok(60), /*STMicroelectronics ST100 processor */ "EM_TINYJ" => Ok(61), /*Advanced Logic Corp. 
TinyJ embedded processor family */ //Reserved 62-65 /*Reserved for future use */ "EM_FX66" => Ok(66), /*Siemens FX66 microcontroller */ "EM_ST9PLUS" => Ok(67), /* STMicroelectronics ST9+ 8/16 bit microcontroller */ "EM_ST7" => Ok(68), /*STMicroelectronics ST7 8-bit microcontroller */ "EM_68HC16" => Ok(69), /* Motorola MC68HC16 Microcontroller */ "EM_68HC11" => Ok(70), /* Motorola MC68HC11 Microcontroller */ "EM_68HC08" => Ok(71), /* Motorola MC68HC08 Microcontroller */ "EM_68HC05" => Ok(72), /* Motorola MC68HC05 Microcontroller */ "EM_SVX" => Ok(73), /*Silicon Graphics SVx */ "EM_ST19" => Ok(74), /* STMicroelectronics ST19 8-bit microcontroller */ "EM_VAX" => Ok(75), /* Digital VAX */ "EM_CRIS" => Ok(76), /* Axis Communications 32-bit embedded processor */ "EM_JAVELIN" => Ok(77), /*Infineon Technologies 32-bit embedded processor */ "EM_FIREPATH" => Ok(78), /* Element 14 64-bit DSP Processor */ "EM_ZSP" => Ok(79), /*LSI Logic 16-bit DSP Processor */ "EM_MMIX" => Ok(80), /* Donald Knuth's educational 64-bit processor */ "EM_HUANY" => Ok(81), /* Harvard University machine-independent object files */ "EM_PRISM" => Ok(82), /* SiTera Prism */ "EM_AVR" => Ok(83), /* Atmel AVR 8-bit microcontroller */ "EM_FR30" => Ok(84), /* Fujitsu FR30 */ "EM_D10V" => Ok(85), /* Mitsubishi D10V */ "EM_D30V" => Ok(86), /* Mitsubishi D30V */ "EM_V850" => Ok(87), /* NEC v850 */ "EM_M32R" => Ok(88), /* Mitsubishi M32R */ "EM_MN10300" => Ok(89), /* Matsushita MN10300 */ "EM_MN10200" => Ok(90), /* Matsushita MN10200 */ "EM_PJ" => Ok(91), /* picoJava */ "EM_OPENRISC" => Ok(92), /* OpenRISC 32-bit embedded processor */ "EM_ARC_COMPACT" => Ok(93), /* ARC International ARCompact */ "EM_XTENSA" => Ok(94), /* Tensilica Xtensa Architecture */ "EM_VIDEOCORE" => Ok(95), /* Alphamosaic VideoCore */ "EM_TMM_GPP" => Ok(96), /* Thompson Multimedia General Purpose Proc */ "EM_NS32K" => Ok(97), /* National Semi. 
32000 */ "EM_TPC" => Ok(98), /* Tenor Network TPC */ "EM_SNP1K" => Ok(99), /* Trebia SNP 1000 */ "EM_ST200" => Ok(100), /* STMicroelectronics ST200 */ "EM_IP2K" => Ok(101), /* Ubicom IP2xxx */ "EM_MAX" => Ok(102), /* MAX processor */ "EM_CR" => Ok(103), /* National Semi. CompactRISC */ "EM_F2MC16" => Ok(104), /* Fujitsu F2MC16 */ "EM_MSP430" => Ok(105), /* Texas Instruments msp430 */ "EM_BLACKFIN" => Ok(106), /* Analog Devices Blackfin DSP */ "EM_SE_C33" => Ok(107), /* Seiko Epson S1C33 family */ "EM_SEP" => Ok(108), /* Sharp embedded microprocessor */ "EM_ARCA" => Ok(109), /* Arca RISC */ "EM_UNICORE" => Ok(110), /* PKU-Unity & MPRC Peking Uni. mc series */ "EM_EXCESS" => Ok(111), /* eXcess configurable cpu */ "EM_DXP" => Ok(112), /* Icera Semi. Deep Execution Processor */ "EM_ALTERA_NIOS2" => Ok(113), /* Altera Nios II */ "EM_CRX" => Ok(114), /* National Semi. CompactRISC CRX */ "EM_XGATE" => Ok(115), /* Motorola XGATE */ "EM_C166" => Ok(116), /* Infineon C16x/XC16x */ "EM_M16C" => Ok(117), /* Renesas M16C */ "EM_DSPIC30F" => Ok(118), /* Microchip Technology dsPIC30F */ "EM_CE" => Ok(119), /* Freescale Communication Engine RISC */ "EM_M32C" => Ok(120), /* Renesas M32C */ /* reserved 121-130 */ "EM_TSK3000" => Ok(131), /* Altium TSK3000 */ "EM_RS08" => Ok(132), /* Freescale RS08 */ "EM_SHARC" => Ok(133), /* Analog Devices SHARC family */ "EM_ECOG2" => Ok(134), /* Cyan Technology eCOG2 */ "EM_SCORE7" => Ok(135), /* Sunplus S+core7 RISC */ "EM_DSP24" => Ok(136), /* New Japan Radio (NJR) 24-bit DSP */ "EM_VIDEOCORE3" => Ok(137), /* Broadcom VideoCore III */ "EM_LATTICEMICO32" => Ok(138), /* RISC for Lattice FPGA */ "EM_SE_C17" => Ok(139), /* Seiko Epson C17 */ "EM_TI_C6000" => Ok(140), /* Texas Instruments TMS320C6000 DSP */ "EM_TI_C2000" => Ok(141), /* Texas Instruments TMS320C2000 DSP */ "EM_TI_C5500" => Ok(142), /* Texas Instruments TMS320C55x DSP */ "EM_TI_ARP32" => Ok(143), /* Texas Instruments App. 
Specific RISC */ "EM_TI_PRU" => Ok(144), /* Texas Instruments Prog. Realtime Unit */ /* reserved 145-159 */ "EM_MMDSP_PLUS" => Ok(160), /* STMicroelectronics 64bit VLIW DSP */ "EM_CYPRESS_M8C" => Ok(161), /* Cypress M8C */ "EM_R32C" => Ok(162), /* Renesas R32C */ "EM_TRIMEDIA" => Ok(163), /* NXP Semi. TriMedia */ "EM_QDSP6" => Ok(164), /* QUALCOMM DSP6 */ "EM_8051" => Ok(165), /* Intel 8051 and variants */ "EM_STXP7X" => Ok(166), /* STMicroelectronics STxP7x */ "EM_NDS32" => Ok(167), /* Andes Tech. compact code emb. RISC */ "EM_ECOG1X" => Ok(168), /* Cyan Technology eCOG1X */ "EM_MAXQ30" => Ok(169), /* Dallas Semi. MAXQ30 mc */ "EM_XIMO16" => Ok(170), /* New Japan Radio (NJR) 16-bit DSP */ "EM_MANIK" => Ok(171), /* M2000 Reconfigurable RISC */ "EM_CRAYNV2" => Ok(172), /* Cray NV2 vector architecture */ "EM_RX" => Ok(173), /* Renesas RX */ "EM_METAG" => Ok(174), /* Imagination Tech. META */ "EM_MCST_ELBRUS" => Ok(175), /* MCST Elbrus */ "EM_ECOG16" => Ok(176), /* Cyan Technology eCOG16 */ "EM_CR16" => Ok(177), /* National Semi. CompactRISC CR16 */ "EM_ETPU" => Ok(178), /* Freescale Extended Time Processing Unit */ "EM_SLE9X" => Ok(179), /* Infineon Tech. SLE9X */ "EM_L10M" => Ok(180), /* Intel L10M */ "EM_K10M" => Ok(181), /* Intel K10M */ /* reserved 182 */ "EM_AARCH64" => Ok(183), /* ARM AARCH64 */ /* reserved 184 */ "EM_AVR32" => Ok(185), /* Amtel 32-bit microprocessor */ "EM_STM8" => Ok(186), /* STMicroelectronics STM8 */ "EM_TILE64" => Ok(187), /* Tileta TILE64 */ "EM_TILEPRO" => Ok(188), /* Tilera TILEPro */ "EM_MICROBLAZE" => Ok(189), /* Xilinx MicroBlaze */ "EM_CUDA" => Ok(190), /* NVIDIA CUDA */ "EM_TILEGX" => Ok(191), /* Tilera TILE-Gx */ "EM_CLOUDSHIELD" => Ok(192), /* CloudShield */ "EM_COREA_1ST" => Ok(193), /* KIPO-KAIST Core-A 1st gen. */ "EM_COREA_2ND" => Ok(194), /* KIPO-KAIST Core-A 2nd gen. 
*/ "EM_ARC_COMPACT2" => Ok(195), /* Synopsys ARCompact V2 */ "EM_OPEN8" => Ok(196), /* Open8 RISC */ "EM_RL78" => Ok(197), /* Renesas RL78 */ "EM_VIDEOCORE5" => Ok(198), /* Broadcom VideoCore V */ "EM_78KOR" => Ok(199), /* Renesas 78KOR */ "EM_56800EX" => Ok(200), /* Freescale 56800EX DSC */ "EM_BA1" => Ok(201), /* Beyond BA1 */ "EM_BA2" => Ok(202), /* Beyond BA2 */ "EM_XCORE" => Ok(203), /* XMOS xCORE */ "EM_MCHP_PIC" => Ok(204), /* Microchip 8-bit PIC(r) */ /* reserved 205-209 */ "EM_KM32" => Ok(210), /* KM211 KM32 */ "EM_KMX32" => Ok(211), /* KM211 KMX32 */ "EM_EMX16" => Ok(212), /* KM211 KMX16 */ "EM_EMX8" => Ok(213), /* KM211 KMX8 */ "EM_KVARC" => Ok(214), /* KM211 KVARC */ "EM_CDP" => Ok(215), /* Paneve CDP */ "EM_COGE" => Ok(216), /* Cognitive Smart Memory Processor */ "EM_COOL" => Ok(217), /* Bluechip CoolEngine */ "EM_NORC" => Ok(218), /* Nanoradio Optimized RISC */ "EM_CSR_KALIMBA" => Ok(219), /* CSR Kalimba */ "EM_Z80" => Ok(220), /* Zilog Z80 */ "EM_VISIUM" => Ok(221), /* Controls and Data Services VISIUMcore */ "EM_FT32" => Ok(222), /* FTDI Chip FT32 */ "EM_MOXIE" => Ok(223), /* Moxie processor */ "EM_AMDGPU" => Ok(224), /* AMD GPU */ /* reserved 225-242 */ "EM_RISCV" => Ok(243), /* RISC-V */ "EM_BPF" => Ok(247), /* Linux BPF -- in-kernel virtual machine */ "EM_CSKY" => Ok(252), /* C-SKY */ "EM_NUM" => Ok(253), /* Old spellings/synonyms. */ "EM_ARC_A5" => Ok(93), /* If it is necessary to assign new unofficial EM_* values, please pick large random numbers (0x8523, 0xa7f2, etc.) to minimize the chances of collision with official or non-GNU unofficial values. 
*/
//EM_ALPHA = 0x9026,
        _ => return Err(std::io::Error::new(std::io::ErrorKind::Other, "Elf mach not supported"))
    }
}

//TODO check that all machine options are represented
/* comments from elf.h */

/// Maps a raw ELF `e_machine` value to the corresponding [`EXEC::EI_MACH`] variant.
///
/// The vendor/architecture meaning of each numeric value is documented once, on
/// the [`EXEC::EI_MACH`] declaration itself; this function is its inverse
/// (`EI_MACH::as_u16`).
///
/// # Errors
/// Returns an `std::io::Error` of kind `Other` for any value that is reserved
/// or not modelled here (including legacy synonyms such as `EM_ARC_A5` and the
/// unofficial `EM_ALPHA = 0x9026`).
pub fn match_mach(mach: u16) -> Result<EXEC::EI_MACH, std::io::Error> {
    use self::EXEC::EI_MACH::*;
    Ok(match mach {
        0 => EM_NONE,
        1 => EM_M32,
        2 => EM_SPARC,
        3 => EM_386,
        4 => EM_68K,
        5 => EM_88K,
        // reserved 6
        7 => EM_860,
        8 => EM_MIPS,
        9 => EM_S370,
        10 => EM_MIPS_RS3_LE,
        // reserved 11-14
        15 => EM_PARISC,
        // reserved 16
        17 => EM_VPP500,
        18 => EM_SPARC32PLUS,
        19 => EM_960,
        20 => EM_PPC,
        21 => EM_PPC64,
        // reserved 22-35
        36 => EM_V800,
        37 => EM_FR20,
        38 => EM_RH32,
        39 => EM_RCE,
        40 => EM_ARM,
        41 => EM_ALPHA,
        42 => EM_SH,
        43 => EM_SPARCV9,
        44 => EM_TRICORE,
        45 => EM_ARC,
        46 => EM_H8_300,
        47 => EM_H8_300H,
        48 => EM_H8S,
        49 => EM_H8_500,
        50 => EM_IA_64,
        51 => EM_MIPS_X,
        52 => EM_COLDFIRE,
        53 => EM_68HC12,
        54 => EM_MMA,
        55 => EM_PCP,
        56 => EM_NCPU,
        57 => EM_NDR1,
        58 => EM_STARCORE,
        59 => EM_ME16,
        60 => EM_ST100,
        61 => EM_TINYJ,
        // reserved 62-65
        66 => EM_FX66,
        67 => EM_ST9PLUS,
        68 => EM_ST7,
        69 => EM_68HC16,
        70 => EM_68HC11,
        71 => EM_68HC08,
        72 => EM_68HC05,
        73 => EM_SVX,
        74 => EM_ST19,
        75 => EM_VAX,
        76 => EM_CRIS,
        77 => EM_JAVELIN,
        78 => EM_FIREPATH,
        79 => EM_ZSP,
        80 => EM_MMIX,
        81 => EM_HUANY,
        82 => EM_PRISM,
        83 => EM_AVR,
        84 => EM_FR30,
        85 => EM_D10V,
        86 => EM_D30V,
        87 => EM_V850,
        88 => EM_M32R,
        89 => EM_MN10300,
        90 => EM_MN10200,
        91 => EM_PJ,
        92 => EM_OPENRISC,
        93 => EM_ARC_COMPACT,
        94 => EM_XTENSA,
        95 => EM_VIDEOCORE,
        96 => EM_TMM_GPP,
        97 => EM_NS32K,
        98 => EM_TPC,
        99 => EM_SNP1K,
        100 => EM_ST200,
        101 => EM_IP2K,
        102 => EM_MAX,
        103 => EM_CR,
        104 => EM_F2MC16,
        105 => EM_MSP430,
        106 => EM_BLACKFIN,
        107 => EM_SE_C33,
        108 => EM_SEP,
        109 => EM_ARCA,
        110 => EM_UNICORE,
        111 => EM_EXCESS,
        112 => EM_DXP,
        113 => EM_ALTERA_NIOS2,
        114 => EM_CRX,
        115 => EM_XGATE,
        116 => EM_C166,
        117 => EM_M16C,
        118 => EM_DSPIC30F,
        119 => EM_CE,
        120 => EM_M32C,
        // reserved 121-130
        131 => EM_TSK3000,
        132 => EM_RS08,
        133 => EM_SHARC,
        134 => EM_ECOG2,
        135 => EM_SCORE7,
        136 => EM_DSP24,
        137 => EM_VIDEOCORE3,
        138 => EM_LATTICEMICO32,
        139 => EM_SE_C17,
        140 => EM_TI_C6000,
        141 => EM_TI_C2000,
        142 => EM_TI_C5500,
        143 => EM_TI_ARP32,
        144 => EM_TI_PRU,
        // reserved 145-159
        160 => EM_MMDSP_PLUS,
        161 => EM_CYPRESS_M8C,
        162 => EM_R32C,
        163 => EM_TRIMEDIA,
        164 => EM_QDSP6,
        165 => EM_8051,
        166 => EM_STXP7X,
        167 => EM_NDS32,
        168 => EM_ECOG1X,
        169 => EM_MAXQ30,
        170 => EM_XIMO16,
        171 => EM_MANIK,
        172 => EM_CRAYNV2,
        173 => EM_RX,
        174 => EM_METAG,
        175 => EM_MCST_ELBRUS,
        176 => EM_ECOG16,
        177 => EM_CR16,
        178 => EM_ETPU,
        179 => EM_SLE9X,
        180 => EM_L10M,
        181 => EM_K10M,
        // reserved 182
        183 => EM_AARCH64,
        // reserved 184
        185 => EM_AVR32,
        186 => EM_STM8,
        187 => EM_TILE64,
        188 => EM_TILEPRO,
        189 => EM_MICROBLAZE,
        190 => EM_CUDA,
        191 => EM_TILEGX,
        192 => EM_CLOUDSHIELD,
        193 => EM_COREA_1ST,
        194 => EM_COREA_2ND,
        195 => EM_ARC_COMPACT2,
        196 => EM_OPEN8,
        197 => EM_RL78,
        198 => EM_VIDEOCORE5,
        199 => EM_78KOR,
        200 => EM_56800EX,
        201 => EM_BA1,
        202 => EM_BA2,
        203 => EM_XCORE,
        204 => EM_MCHP_PIC,
        // reserved 205-209
        210 => EM_KM32,
        211 => EM_KMX32,
        212 => EM_EMX16,
        213 => EM_EMX8,
        214 => EM_KVARC,
        215 => EM_CDP,
        216 => EM_COGE,
        217 => EM_COOL,
        218 => EM_NORC,
        219 => EM_CSR_KALIMBA,
        220 => EM_Z80,
        221 => EM_VISIUM,
        222 => EM_FT32,
        223 => EM_MOXIE,
        224 => EM_AMDGPU,
        // reserved 225-242
        243 => EM_RISCV,
        // reserved 244-246
        247 => EM_BPF,
        // reserved 248-251
        252 => EM_CSKY,
        253 => EM_NUM,
        // EM_ARC_A5 = EM_ARC_COMPACT (legacy spelling) and the unofficial
        // EM_ALPHA = 0x9026 are deliberately not round-tripped here.
        _ => return Err(std::io::Error::new(std::io::ErrorKind::Other, "Elf mach not supported")),
    })
}

/// Constants and enums mirroring the ELF identification and header fields
/// (`e_ident` byte offsets, `e_type`, `e_machine`, class, data encoding,
/// OS/ABI and version), as defined in elf.h.
#[allow(non_camel_case_types, non_snake_case)]
pub(super) mod EXEC {
    // Byte offsets of the fields inside the e_ident identification array.
    pub const _EI_MAG0: usize = 0;
    pub const _EI_MAG1: usize = 1;
    pub const _EI_MAG2: usize = 2;
    pub const _EI_MAG3: usize = 3;
    pub const _EI_CLASS: usize = 4;
    pub const _EI_DATA: usize = 5;
    pub const _EI_VERSION: usize = 6;
    pub const _EI_OSABI: usize = 7;
    pub const _EI_ABIVERSION: usize = 8;
    pub const _EI_PAD: usize = 9;
    pub const _EI_NIDENT: usize = 16;
    // Total size of the e_ident array in bytes.
    pub const EI_IDENT: usize = 16;

    /// ELF object file type (`e_type`).
    #[repr(u16)]
    #[allow(non_camel_case_types)]
    #[derive(Clone, Copy, Debug, PartialEq, Eq)]
    pub enum EI_TYPE {
        ET_NONE = 0,
        ET_REL = 1,
        ET_EXEC = 2,
        ET_DYN = 3,
        ET_CORE = 4,
        ET_LOOS = 0xfe00,
        ET_HIOS = 0xfeff,
        ET_LOPROC = 0xff00,
        ET_HIPROC = 0xffff,
    }

    impl EI_TYPE {
        /// Raw `e_type` value. The enum is field-less and `#[repr(u16)]`,
        /// so this is just a discriminant cast — no per-variant table needed.
        pub fn as_u16(&self) -> u16 {
            *self as u16
        }
    }

    /// ELF target machine (`e_machine`). Discriminants are the official
    /// values from elf.h; reserved ranges are noted inline.
    #[repr(u16)]
    #[allow(non_camel_case_types)]
    #[derive(Clone, Copy, Debug, PartialEq, Eq)]
    pub enum EI_MACH {
        EM_NONE = 0,
        EM_M32 = 1,           // AT&T WE 32100
        EM_SPARC = 2,         // SPARC
        EM_386 = 3,           // Intel 80386
        EM_68K = 4,           // Motorola 68000
        EM_88K = 5,           // Motorola 88000
        // reserved 6
        EM_860 = 7,           // Intel 80860
        EM_MIPS = 8,          // MIPS I Architecture
        EM_S370 = 9,          // IBM System/370 Processor
        EM_MIPS_RS3_LE = 10,  // MIPS RS3000 Little-endian
        // reserved 11-14
        EM_PARISC = 15,       // Hewlett-Packard PA-RISC
        // reserved 16
        EM_VPP500 = 17,       // Fujitsu VPP500
        EM_SPARC32PLUS = 18,  // Enhanced instruction set SPARC
        EM_960 = 19,          // Intel 80960
        EM_PPC = 20,          // PowerPC
        EM_PPC64 = 21,        // 64-bit PowerPC
        // reserved 22-35
        EM_V800 = 36,         // NEC V800
        EM_FR20 = 37,         // Fujitsu FR20
        EM_RH32 = 38,         // TRW RH-32
        EM_RCE = 39,          // Motorola RCE
        EM_ARM = 40,          // Advanced RISC Machines ARM
        EM_ALPHA = 41,        // Digital Alpha
        EM_SH = 42,           // Hitachi SH
        EM_SPARCV9 = 43,      // SPARC Version 9
        EM_TRICORE = 44,      // Siemens Tricore embedded processor
        EM_ARC = 45,          // Argonaut RISC Core, Argonaut Technologies Inc.
        EM_H8_300 = 46,       // Hitachi H8/300
        EM_H8_300H = 47,      // Hitachi H8/300H
        EM_H8S = 48,          // Hitachi H8S
        EM_H8_500 = 49,       // Hitachi H8/500
        EM_IA_64 = 50,        // Intel IA-64 processor architecture
        EM_MIPS_X = 51,       // Stanford MIPS-X
        EM_COLDFIRE = 52,     // Motorola ColdFire
        EM_68HC12 = 53,       // Motorola M68HC12
        EM_MMA = 54,          // Fujitsu MMA Multimedia Accelerator
        EM_PCP = 55,          // Siemens PCP
        EM_NCPU = 56,         // Sony nCPU embedded RISC processor
        EM_NDR1 = 57,         // Denso NDR1 microprocessor
        EM_STARCORE = 58,     // Motorola Star*Core processor
        EM_ME16 = 59,         // Toyota ME16 processor
        EM_ST100 = 60,        // STMicroelectronics ST100 processor
        EM_TINYJ = 61,        // Advanced Logic Corp. TinyJ embedded processor family
        // reserved 62-65
        EM_FX66 = 66,         // Siemens FX66 microcontroller
        EM_ST9PLUS = 67,      // STMicroelectronics ST9+ 8/16 bit microcontroller
        EM_ST7 = 68,          // STMicroelectronics ST7 8-bit microcontroller
        EM_68HC16 = 69,       // Motorola MC68HC16 Microcontroller
        EM_68HC11 = 70,       // Motorola MC68HC11 Microcontroller
        EM_68HC08 = 71,       // Motorola MC68HC08 Microcontroller
        EM_68HC05 = 72,       // Motorola MC68HC05 Microcontroller
        EM_SVX = 73,          // Silicon Graphics SVx
        EM_ST19 = 74,         // STMicroelectronics ST19 8-bit microcontroller
        EM_VAX = 75,          // Digital VAX
        EM_CRIS = 76,         // Axis Communications 32-bit embedded processor
        EM_JAVELIN = 77,      // Infineon Technologies 32-bit embedded processor
        EM_FIREPATH = 78,     // Element 14 64-bit DSP Processor
        EM_ZSP = 79,          // LSI Logic 16-bit DSP Processor
        EM_MMIX = 80,         // Donald Knuth's educational 64-bit processor
        EM_HUANY = 81,        // Harvard University machine-independent object files
        EM_PRISM = 82,        // SiTera Prism
        EM_AVR = 83,          // Atmel AVR 8-bit microcontroller
        EM_FR30 = 84,         // Fujitsu FR30
        EM_D10V = 85,         // Mitsubishi D10V
        EM_D30V = 86,         // Mitsubishi D30V
        EM_V850 = 87,         // NEC v850
        EM_M32R = 88,         // Mitsubishi M32R
        EM_MN10300 = 89,      // Matsushita MN10300
        EM_MN10200 = 90,      // Matsushita MN10200
        EM_PJ = 91,           // picoJava
        EM_OPENRISC = 92,     // OpenRISC 32-bit embedded processor
        EM_ARC_COMPACT = 93,  // ARC International ARCompact
        EM_XTENSA = 94,       // Tensilica Xtensa Architecture
        EM_VIDEOCORE = 95,    // Alphamosaic VideoCore
        EM_TMM_GPP = 96,      // Thompson Multimedia General Purpose Proc
        EM_NS32K = 97,        // National Semi. 32000
        EM_TPC = 98,          // Tenor Network TPC
        EM_SNP1K = 99,        // Trebia SNP 1000
        EM_ST200 = 100,       // STMicroelectronics ST200
        EM_IP2K = 101,        // Ubicom IP2xxx
        EM_MAX = 102,         // MAX processor
        EM_CR = 103,          // National Semi. CompactRISC
        EM_F2MC16 = 104,      // Fujitsu F2MC16
        EM_MSP430 = 105,      // Texas Instruments msp430
        EM_BLACKFIN = 106,    // Analog Devices Blackfin DSP
        EM_SE_C33 = 107,      // Seiko Epson S1C33 family
        EM_SEP = 108,         // Sharp embedded microprocessor
        EM_ARCA = 109,        // Arca RISC
        EM_UNICORE = 110,     // PKU-Unity & MPRC Peking Uni. mc series
        EM_EXCESS = 111,      // eXcess configurable cpu
        EM_DXP = 112,         // Icera Semi. Deep Execution Processor
        EM_ALTERA_NIOS2 = 113, // Altera Nios II
        EM_CRX = 114,         // National Semi. CompactRISC CRX
        EM_XGATE = 115,       // Motorola XGATE
        EM_C166 = 116,        // Infineon C16x/XC16x
        EM_M16C = 117,        // Renesas M16C
        EM_DSPIC30F = 118,    // Microchip Technology dsPIC30F
        EM_CE = 119,          // Freescale Communication Engine RISC
        EM_M32C = 120,        // Renesas M32C
        // reserved 121-130
        EM_TSK3000 = 131,     // Altium TSK3000
        EM_RS08 = 132,        // Freescale RS08
        EM_SHARC = 133,       // Analog Devices SHARC family
        EM_ECOG2 = 134,       // Cyan Technology eCOG2
        EM_SCORE7 = 135,      // Sunplus S+core7 RISC
        EM_DSP24 = 136,       // New Japan Radio (NJR) 24-bit DSP
        EM_VIDEOCORE3 = 137,  // Broadcom VideoCore III
        EM_LATTICEMICO32 = 138, // RISC for Lattice FPGA
        EM_SE_C17 = 139,      // Seiko Epson C17
        EM_TI_C6000 = 140,    // Texas Instruments TMS320C6000 DSP
        EM_TI_C2000 = 141,    // Texas Instruments TMS320C2000 DSP
        EM_TI_C5500 = 142,    // Texas Instruments TMS320C55x DSP
        EM_TI_ARP32 = 143,    // Texas Instruments App. Specific RISC
        EM_TI_PRU = 144,      // Texas Instruments Prog. Realtime Unit
        // reserved 145-159
        EM_MMDSP_PLUS = 160,  // STMicroelectronics 64bit VLIW DSP
        EM_CYPRESS_M8C = 161, // Cypress M8C
        EM_R32C = 162,        // Renesas R32C
        EM_TRIMEDIA = 163,    // NXP Semi. TriMedia
        EM_QDSP6 = 164,       // QUALCOMM DSP6
        EM_8051 = 165,        // Intel 8051 and variants
        EM_STXP7X = 166,      // STMicroelectronics STxP7x
        EM_NDS32 = 167,       // Andes Tech. compact code emb. RISC
        EM_ECOG1X = 168,      // Cyan Technology eCOG1X
        EM_MAXQ30 = 169,      // Dallas Semi. MAXQ30 mc
        EM_XIMO16 = 170,      // New Japan Radio (NJR) 16-bit DSP
        EM_MANIK = 171,       // M2000 Reconfigurable RISC
        EM_CRAYNV2 = 172,     // Cray NV2 vector architecture
        EM_RX = 173,          // Renesas RX
        EM_METAG = 174,       // Imagination Tech. META
        EM_MCST_ELBRUS = 175, // MCST Elbrus
        EM_ECOG16 = 176,      // Cyan Technology eCOG16
        EM_CR16 = 177,        // National Semi. CompactRISC CR16
        EM_ETPU = 178,        // Freescale Extended Time Processing Unit
        EM_SLE9X = 179,       // Infineon Tech. SLE9X
        EM_L10M = 180,        // Intel L10M
        EM_K10M = 181,        // Intel K10M
        // reserved 182
        EM_AARCH64 = 183,     // ARM AARCH64
        // reserved 184
        EM_AVR32 = 185,       // Amtel 32-bit microprocessor
        EM_STM8 = 186,        // STMicroelectronics STM8
        EM_TILE64 = 187,      // Tileta TILE64
        EM_TILEPRO = 188,     // Tilera TILEPro
        EM_MICROBLAZE = 189,  // Xilinx MicroBlaze
        EM_CUDA = 190,        // NVIDIA CUDA
        EM_TILEGX = 191,      // Tilera TILE-Gx
        EM_CLOUDSHIELD = 192, // CloudShield
        EM_COREA_1ST = 193,   // KIPO-KAIST Core-A 1st gen.
        EM_COREA_2ND = 194,   // KIPO-KAIST Core-A 2nd gen.
        EM_ARC_COMPACT2 = 195, // Synopsys ARCompact V2
        EM_OPEN8 = 196,       // Open8 RISC
        EM_RL78 = 197,        // Renesas RL78
        EM_VIDEOCORE5 = 198,  // Broadcom VideoCore V
        EM_78KOR = 199,       // Renesas 78KOR
        EM_56800EX = 200,     // Freescale 56800EX DSC
        EM_BA1 = 201,         // Beyond BA1
        EM_BA2 = 202,         // Beyond BA2
        EM_XCORE = 203,       // XMOS xCORE
        EM_MCHP_PIC = 204,    // Microchip 8-bit PIC(r)
        // reserved 205-209
        EM_KM32 = 210,        // KM211 KM32
        EM_KMX32 = 211,       // KM211 KMX32
        EM_EMX16 = 212,       // KM211 KMX16
        EM_EMX8 = 213,        // KM211 KMX8
        EM_KVARC = 214,       // KM211 KVARC
        EM_CDP = 215,         // Paneve CDP
        EM_COGE = 216,        // Cognitive Smart Memory Processor
        EM_COOL = 217,        // Bluechip CoolEngine
        EM_NORC = 218,        // Nanoradio Optimized RISC
        EM_CSR_KALIMBA = 219, // CSR Kalimba
        EM_Z80 = 220,         // Zilog Z80
        EM_VISIUM = 221,      // Controls and Data Services VISIUMcore
        EM_FT32 = 222,        // FTDI Chip FT32
        EM_MOXIE = 223,       // Moxie processor
        EM_AMDGPU = 224,      // AMD GPU
        // reserved 225-242
        EM_RISCV = 243,       // RISC-V
        EM_BPF = 247,         // Linux BPF -- in-kernel virtual machine
        EM_CSKY = 252,        // C-SKY
        EM_NUM = 253,
        // Old spellings/synonyms, not representable as duplicate discriminants:
        // EM_ARC_A5 = EM_ARC_COMPACT (93), EM_ALPHA = 0x9026 (unofficial).
        // If it is necessary to assign new unofficial EM_* values, pick large
        // random numbers (0x8523, 0xa7f2, etc.) to minimize the chance of
        // collision with official or non-GNU unofficial values.
    }

    impl EI_MACH {
        /// Raw `e_machine` value. The enum is field-less and `#[repr(u16)]`,
        /// so the discriminant cast replaces the former per-variant match.
        pub fn as_u16(&self) -> u16 {
            *self as u16
        }
    }

    /// ELF file class (`e_ident[EI_CLASS]`): 32-bit, 64-bit, or other.
    #[derive(Clone, Copy, Debug)]
    pub enum EI_CLASS {
        ELFCLASSNONE,
        ELFCLASS32,
        ELFCLASS64,
        // Any class byte other than 0/1/2 is preserved verbatim.
        ELFCLASSOTHER(u8),
    }

    impl EI_CLASS {
        /// Raw class byte; the `OTHER` variant yields its stored value.
        pub fn as_u8(&self) -> u8 {
            match self {
                EI_CLASS::ELFCLASSNONE => 0,
                EI_CLASS::ELFCLASS32 => 1,
                EI_CLASS::ELFCLASS64 => 2,
                EI_CLASS::ELFCLASSOTHER(d) => *d,
            }
        }
    }

    /// ELF data encoding (`e_ident[EI_DATA]`): little- or big-endian.
    #[repr(u8)]
    #[derive(Clone, Copy, Debug)]
    pub enum EI_DATA {
        ELFDATANONE = 0,
        ELFDATA2LSB,
        ELFDATA2MSB,
        //TODO are other values than 3 valid here? Need to check ELF Specs for arm and x86
        ELFDATAOTHER(u8),
    }

    impl EI_DATA {
        /// Raw data-encoding byte; the `OTHER` variant yields its stored value.
        pub fn as_u8(&self) -> u8 {
            match self {
                EI_DATA::ELFDATANONE => 0,
                EI_DATA::ELFDATA2LSB => 1,
                EI_DATA::ELFDATA2MSB => 2,
                EI_DATA::ELFDATAOTHER(d) => *d,
            }
        }
    }

    //TODO figure out different system so that aliasing is allowed?
    /// OS/ABI identification (`e_ident[EI_OSABI]`). Aliases from elf.h
    /// (ELFOSABI_SYSV = ELFOSABI_NONE, ELFOSABI_LINUX = ELFOSABI_GNU) are kept
    /// commented out because duplicate discriminants are not allowed.
    #[repr(u8)]
    #[allow(non_camel_case_types)]
    pub enum EI_OSABI {
        ELFOSABI_NONE = 0,  /* UNIX System V ABI */
        //ELFOSABI_SYSV =   /* Alias ELFOSABI_NONE, */
        ELFOSABI_HPUX = 1,  /* HP-UX */
        ELFOSABI_NETBSD = 2, /* NetBSD. */
        ELFOSABI_GNU = 3,   /* Object uses GNU ELF extensions. */
        //ELFOSABI_LINUX = , /* Compatibility alias ELFOSABI_GNU */
        ELFOSABI_SOLARIS = 6, /* Sun Solaris. */
        ELFOSABI_AIX = 7,   /* IBM AIX. */
        ELFOSABI_IRIX = 8,  /* SGI Irix. */
        ELFOSABI_FREEBSD = 9, /* FreeBSD. */
        ELFOSABI_TRU64 = 10, /* Compaq TRU64 UNIX. */
        ELFOSABI_MODESTO = 11, /* Novell Modesto. */
        ELFOSABI_OPENBSD = 12, /* OpenBSD. */
        ELFOSABI_ARM_AEABI = 64, /* ARM EABI */
        ELFOSABI_ARM = 97,  /* ARM */
        // Any other ABI byte is preserved verbatim.
        ELFOSABI_OTHER(u8),
        ELFOSABI_STANDALONE = 255, /* Standalone (embedded) application */
    }

    #[allow(non_camel_case_types)]
    impl EI_OSABI {
        /// Raw OS/ABI byte; the `OTHER` variant yields its stored value.
        pub fn as_u8(&self) -> u8 {
            match self {
                EI_OSABI::ELFOSABI_NONE => 0,
                EI_OSABI::ELFOSABI_HPUX => 1,
                EI_OSABI::ELFOSABI_NETBSD => 2,
                EI_OSABI::ELFOSABI_GNU => 3,
                EI_OSABI::ELFOSABI_SOLARIS => 6,
                EI_OSABI::ELFOSABI_AIX => 7,
                EI_OSABI::ELFOSABI_IRIX => 8,
                EI_OSABI::ELFOSABI_FREEBSD => 9,
                EI_OSABI::ELFOSABI_TRU64 => 10,
                EI_OSABI::ELFOSABI_MODESTO => 11,
                EI_OSABI::ELFOSABI_OPENBSD => 12,
                EI_OSABI::ELFOSABI_ARM_AEABI => 64,
                EI_OSABI::ELFOSABI_ARM => 97,
                EI_OSABI::ELFOSABI_OTHER(d) => *d,
                EI_OSABI::ELFOSABI_STANDALONE => 255,
            }
        }
    }

    /// ELF version (`e_ident[EI_VERSION]` / `e_version`).
    pub enum EI_VERS {
        EV_NONE = 0,
        EV_CURRENT = 1,
    }
}
49.003672
109
0.511352
1c91889377165e3cc64570dcef59d81b08a898fb
5,352
//! Device drivers. use alloc::{sync::Arc, vec::Vec}; use core::convert::From; use spin::{RwLock, RwLockReadGuard}; use zcore_drivers::scheme::{ BlockScheme, DisplayScheme, InputScheme, IrqScheme, NetScheme, Scheme, UartScheme, }; use zcore_drivers::{Device, DeviceError}; /// Re-exported modules from crate [`zcore_drivers`]. pub use zcore_drivers::{prelude, scheme}; /// A wrapper of a device array with the same [`Scheme`]. pub struct DeviceList<T: Scheme + ?Sized>(RwLock<Vec<Arc<T>>>); impl<T: Scheme + ?Sized> DeviceList<T> { fn add(&self, dev: Arc<T>) { self.0.write().push(dev); } /// Convert self into a vector. pub fn as_vec(&self) -> RwLockReadGuard<'_, Vec<Arc<T>>> { self.0.read() } /// Returns the device at given position, or `None` if out of bounds. pub fn try_get(&self, idx: usize) -> Option<Arc<T>> { self.0.read().get(idx).cloned() } /// Returns the device with the given name, or `None` if not found. pub fn find(&self, name: &str) -> Option<Arc<T>> { self.0.read().iter().find(|d| d.name() == name).cloned() } /// Returns the first device of this device array, or `None` if it is empty. pub fn first(&self) -> Option<Arc<T>> { self.try_get(0) } /// Returns the first device of this device array. /// /// # Panic /// /// Panics if the array is empty. 
pub fn first_unwrap(&self) -> Arc<T> { self.first() .unwrap_or_else(|| panic!("device not initialized: {}", core::any::type_name::<T>())) } } impl<T: Scheme + ?Sized> Default for DeviceList<T> { fn default() -> Self { Self(RwLock::new(Vec::new())) } } #[derive(Default)] struct AllDeviceList { block: DeviceList<dyn BlockScheme>, display: DeviceList<dyn DisplayScheme>, input: DeviceList<dyn InputScheme>, irq: DeviceList<dyn IrqScheme>, net: DeviceList<dyn NetScheme>, uart: DeviceList<dyn UartScheme>, } impl AllDeviceList { pub fn add_device(&self, dev: Device) { match dev { Device::Block(d) => self.block.add(d), Device::Display(d) => self.display.add(d), Device::Input(d) => self.input.add(d), Device::Irq(d) => self.irq.add(d), Device::Net(d) => self.net.add(d), Device::Uart(d) => self.uart.add(d), } } } lazy_static! { static ref DEVICES: AllDeviceList = AllDeviceList::default(); } pub(crate) fn add_device(dev: Device) { DEVICES.add_device(dev) } /// Returns all devices which implement the [`BlockScheme`]. pub fn all_block() -> &'static DeviceList<dyn BlockScheme> { &DEVICES.block } /// Returns all devices which implement the [`DisplayScheme`]. pub fn all_display() -> &'static DeviceList<dyn DisplayScheme> { &DEVICES.display } /// Returns all devices which implement the [`InputScheme`]. pub fn all_input() -> &'static DeviceList<dyn InputScheme> { &DEVICES.input } /// Returns all devices which implement the [`IrqScheme`]. pub fn all_irq() -> &'static DeviceList<dyn IrqScheme> { &DEVICES.irq } /// Returns all devices which implement the [`NetScheme`]. pub fn all_net() -> &'static DeviceList<dyn NetScheme> { &DEVICES.net } /// Returns all devices which implement the [`UartScheme`]. 
pub fn all_uart() -> &'static DeviceList<dyn UartScheme> { &DEVICES.uart } impl From<DeviceError> for crate::HalError { fn from(err: DeviceError) -> Self { warn!("{:?}", err); Self } } #[cfg(not(feature = "libos"))] mod virtio_drivers_ffi { use crate::{PhysAddr, VirtAddr, KCONFIG, KHANDLER, PAGE_SIZE}; #[no_mangle] extern "C" fn virtio_dma_alloc(pages: usize) -> PhysAddr { let paddr = KHANDLER.frame_alloc_contiguous(pages, 0).unwrap(); trace!("alloc DMA: paddr={:#x}, pages={}", paddr, pages); paddr } #[no_mangle] extern "C" fn virtio_dma_dealloc(paddr: PhysAddr, pages: usize) -> i32 { for i in 0..pages { KHANDLER.frame_dealloc(paddr + i * PAGE_SIZE); } trace!("dealloc DMA: paddr={:#x}, pages={}", paddr, pages); 0 } #[no_mangle] extern "C" fn virtio_phys_to_virt(paddr: PhysAddr) -> VirtAddr { paddr + KCONFIG.phys_to_virt_offset } #[no_mangle] extern "C" fn virtio_virt_to_phys(vaddr: VirtAddr) -> PhysAddr { vaddr - KCONFIG.phys_to_virt_offset } } #[cfg(not(feature = "libos"))] mod drivers_ffi { use crate::{PhysAddr, VirtAddr, KCONFIG, KHANDLER, PAGE_SIZE}; #[no_mangle] extern "C" fn drivers_dma_alloc(pages: usize) -> PhysAddr { let paddr = KHANDLER.frame_alloc_contiguous(pages, 0).unwrap(); trace!("alloc DMA: paddr={:#x}, pages={}", paddr, pages); paddr } #[no_mangle] extern "C" fn drivers_dma_dealloc(paddr: PhysAddr, pages: usize) -> i32 { for i in 0..pages { KHANDLER.frame_dealloc(paddr + i * PAGE_SIZE); } trace!("dealloc DMA: paddr={:#x}, pages={}", paddr, pages); 0 } #[no_mangle] extern "C" fn drivers_phys_to_virt(paddr: PhysAddr) -> VirtAddr { paddr + KCONFIG.phys_to_virt_offset } #[no_mangle] extern "C" fn drivers_virt_to_phys(vaddr: VirtAddr) -> PhysAddr { vaddr - KCONFIG.phys_to_virt_offset } }
28.168421
97
0.615097
11822e61f563860947afec0c44453558a728db98
21,172
#[doc = "Register `PLL_SIC` reader"] pub struct R(crate::R<PLL_SIC_SPEC>); impl core::ops::Deref for R { type Target = crate::R<PLL_SIC_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl From<crate::R<PLL_SIC_SPEC>> for R { #[inline(always)] fn from(reader: crate::R<PLL_SIC_SPEC>) -> Self { R(reader) } } #[doc = "Register `PLL_SIC` writer"] pub struct W(crate::W<PLL_SIC_SPEC>); impl core::ops::Deref for W { type Target = crate::W<PLL_SIC_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl core::ops::DerefMut for W { #[inline(always)] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl From<crate::W<PLL_SIC_SPEC>> for W { #[inline(always)] fn from(writer: crate::W<PLL_SIC_SPEC>) -> Self { W(writer) } } #[doc = "Field `PLL_EN_USB_CLKS` reader - Enables the USB clock from PLL to USB PHY"] pub struct PLL_EN_USB_CLKS_R(crate::FieldReader<bool, bool>); impl PLL_EN_USB_CLKS_R { #[inline(always)] pub(crate) fn new(bits: bool) -> Self { PLL_EN_USB_CLKS_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for PLL_EN_USB_CLKS_R { type Target = crate::FieldReader<bool, bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `PLL_EN_USB_CLKS` writer - Enables the USB clock from PLL to USB PHY"] pub struct PLL_EN_USB_CLKS_W<'a> { w: &'a mut W, } impl<'a> PLL_EN_USB_CLKS_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 6)) | ((value as u32 & 0x01) << 6); self.w } } #[doc = "Field `PLL_POWER` reader - Power up the USB PLL"] pub struct PLL_POWER_R(crate::FieldReader<bool, bool>); impl PLL_POWER_R { #[inline(always)] pub(crate) fn new(bits: bool) -> Self { 
PLL_POWER_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for PLL_POWER_R { type Target = crate::FieldReader<bool, bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `PLL_POWER` writer - Power up the USB PLL"] pub struct PLL_POWER_W<'a> { w: &'a mut W, } impl<'a> PLL_POWER_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 12)) | ((value as u32 & 0x01) << 12); self.w } } #[doc = "Field `PLL_ENABLE` reader - Enables the clock output from the USB PLL"] pub struct PLL_ENABLE_R(crate::FieldReader<bool, bool>); impl PLL_ENABLE_R { #[inline(always)] pub(crate) fn new(bits: bool) -> Self { PLL_ENABLE_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for PLL_ENABLE_R { type Target = crate::FieldReader<bool, bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `PLL_ENABLE` writer - Enables the clock output from the USB PLL"] pub struct PLL_ENABLE_W<'a> { w: &'a mut W, } impl<'a> PLL_ENABLE_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 13)) | ((value as u32 & 0x01) << 13); self.w } } #[doc = "Reference bias power down select.\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum REFBIAS_PWD_SEL_A { #[doc = "0: Selects PLL_POWER to control the reference bias"] VALUE0 = 0, #[doc = "1: Selects REFBIAS_PWD to control the reference bias"] VALUE1 = 1, } impl 
From<REFBIAS_PWD_SEL_A> for bool { #[inline(always)] fn from(variant: REFBIAS_PWD_SEL_A) -> Self { variant as u8 != 0 } } #[doc = "Field `REFBIAS_PWD_SEL` reader - Reference bias power down select."] pub struct REFBIAS_PWD_SEL_R(crate::FieldReader<bool, REFBIAS_PWD_SEL_A>); impl REFBIAS_PWD_SEL_R { #[inline(always)] pub(crate) fn new(bits: bool) -> Self { REFBIAS_PWD_SEL_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> REFBIAS_PWD_SEL_A { match self.bits { false => REFBIAS_PWD_SEL_A::VALUE0, true => REFBIAS_PWD_SEL_A::VALUE1, } } #[doc = "Checks if the value of the field is `VALUE0`"] #[inline(always)] pub fn is_value0(&self) -> bool { **self == REFBIAS_PWD_SEL_A::VALUE0 } #[doc = "Checks if the value of the field is `VALUE1`"] #[inline(always)] pub fn is_value1(&self) -> bool { **self == REFBIAS_PWD_SEL_A::VALUE1 } } impl core::ops::Deref for REFBIAS_PWD_SEL_R { type Target = crate::FieldReader<bool, REFBIAS_PWD_SEL_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `REFBIAS_PWD_SEL` writer - Reference bias power down select."] pub struct REFBIAS_PWD_SEL_W<'a> { w: &'a mut W, } impl<'a> REFBIAS_PWD_SEL_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: REFBIAS_PWD_SEL_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Selects PLL_POWER to control the reference bias"] #[inline(always)] pub fn value0(self) -> &'a mut W { self.variant(REFBIAS_PWD_SEL_A::VALUE0) } #[doc = "Selects REFBIAS_PWD to control the reference bias"] #[inline(always)] pub fn value1(self) -> &'a mut W { self.variant(REFBIAS_PWD_SEL_A::VALUE1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a 
mut W { self.w.bits = (self.w.bits & !(0x01 << 19)) | ((value as u32 & 0x01) << 19); self.w } } #[doc = "Field `REFBIAS_PWD` reader - Power down the reference bias This bit is only used when REFBIAS_PWD_SEL is set to 1."] pub struct REFBIAS_PWD_R(crate::FieldReader<bool, bool>); impl REFBIAS_PWD_R { #[inline(always)] pub(crate) fn new(bits: bool) -> Self { REFBIAS_PWD_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for REFBIAS_PWD_R { type Target = crate::FieldReader<bool, bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `REFBIAS_PWD` writer - Power down the reference bias This bit is only used when REFBIAS_PWD_SEL is set to 1."] pub struct REFBIAS_PWD_W<'a> { w: &'a mut W, } impl<'a> REFBIAS_PWD_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 20)) | ((value as u32 & 0x01) << 20); self.w } } #[doc = "Field `PLL_REG_ENABLE` reader - This field controls the USB PLL regulator, set to enable the regulator"] pub struct PLL_REG_ENABLE_R(crate::FieldReader<bool, bool>); impl PLL_REG_ENABLE_R { #[inline(always)] pub(crate) fn new(bits: bool) -> Self { PLL_REG_ENABLE_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for PLL_REG_ENABLE_R { type Target = crate::FieldReader<bool, bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `PLL_REG_ENABLE` writer - This field controls the USB PLL regulator, set to enable the regulator"] pub struct PLL_REG_ENABLE_W<'a> { w: &'a mut W, } impl<'a> PLL_REG_ENABLE_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn 
clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 21)) | ((value as u32 & 0x01) << 21); self.w } } #[doc = "This field controls the USB PLL feedback loop divider\n\nValue on reset: 3"] #[derive(Clone, Copy, Debug, PartialEq)] #[repr(u8)] pub enum PLL_DIV_SEL_A { #[doc = "0: Divide by 13"] VALUE0 = 0, #[doc = "1: Divide by 15"] VALUE1 = 1, #[doc = "2: Divide by 16"] VALUE2 = 2, #[doc = "3: Divide by 20"] VALUE3 = 3, #[doc = "4: Divide by 22"] VALUE4 = 4, #[doc = "5: Divide by 25"] VALUE5 = 5, #[doc = "6: Divide by 30"] VALUE6 = 6, #[doc = "7: Divide by 240"] VALUE7 = 7, } impl From<PLL_DIV_SEL_A> for u8 { #[inline(always)] fn from(variant: PLL_DIV_SEL_A) -> Self { variant as _ } } #[doc = "Field `PLL_DIV_SEL` reader - This field controls the USB PLL feedback loop divider"] pub struct PLL_DIV_SEL_R(crate::FieldReader<u8, PLL_DIV_SEL_A>); impl PLL_DIV_SEL_R { #[inline(always)] pub(crate) fn new(bits: u8) -> Self { PLL_DIV_SEL_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> PLL_DIV_SEL_A { match self.bits { 0 => PLL_DIV_SEL_A::VALUE0, 1 => PLL_DIV_SEL_A::VALUE1, 2 => PLL_DIV_SEL_A::VALUE2, 3 => PLL_DIV_SEL_A::VALUE3, 4 => PLL_DIV_SEL_A::VALUE4, 5 => PLL_DIV_SEL_A::VALUE5, 6 => PLL_DIV_SEL_A::VALUE6, 7 => PLL_DIV_SEL_A::VALUE7, _ => unreachable!(), } } #[doc = "Checks if the value of the field is `VALUE0`"] #[inline(always)] pub fn is_value0(&self) -> bool { **self == PLL_DIV_SEL_A::VALUE0 } #[doc = "Checks if the value of the field is `VALUE1`"] #[inline(always)] pub fn is_value1(&self) -> bool { **self == PLL_DIV_SEL_A::VALUE1 } #[doc = "Checks if the value of the field is `VALUE2`"] #[inline(always)] pub fn is_value2(&self) -> bool { **self == PLL_DIV_SEL_A::VALUE2 } #[doc = "Checks if the value of the field is `VALUE3`"] #[inline(always)] pub fn 
is_value3(&self) -> bool { **self == PLL_DIV_SEL_A::VALUE3 } #[doc = "Checks if the value of the field is `VALUE4`"] #[inline(always)] pub fn is_value4(&self) -> bool { **self == PLL_DIV_SEL_A::VALUE4 } #[doc = "Checks if the value of the field is `VALUE5`"] #[inline(always)] pub fn is_value5(&self) -> bool { **self == PLL_DIV_SEL_A::VALUE5 } #[doc = "Checks if the value of the field is `VALUE6`"] #[inline(always)] pub fn is_value6(&self) -> bool { **self == PLL_DIV_SEL_A::VALUE6 } #[doc = "Checks if the value of the field is `VALUE7`"] #[inline(always)] pub fn is_value7(&self) -> bool { **self == PLL_DIV_SEL_A::VALUE7 } } impl core::ops::Deref for PLL_DIV_SEL_R { type Target = crate::FieldReader<u8, PLL_DIV_SEL_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `PLL_DIV_SEL` writer - This field controls the USB PLL feedback loop divider"] pub struct PLL_DIV_SEL_W<'a> { w: &'a mut W, } impl<'a> PLL_DIV_SEL_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: PLL_DIV_SEL_A) -> &'a mut W { self.bits(variant.into()) } #[doc = "Divide by 13"] #[inline(always)] pub fn value0(self) -> &'a mut W { self.variant(PLL_DIV_SEL_A::VALUE0) } #[doc = "Divide by 15"] #[inline(always)] pub fn value1(self) -> &'a mut W { self.variant(PLL_DIV_SEL_A::VALUE1) } #[doc = "Divide by 16"] #[inline(always)] pub fn value2(self) -> &'a mut W { self.variant(PLL_DIV_SEL_A::VALUE2) } #[doc = "Divide by 20"] #[inline(always)] pub fn value3(self) -> &'a mut W { self.variant(PLL_DIV_SEL_A::VALUE3) } #[doc = "Divide by 22"] #[inline(always)] pub fn value4(self) -> &'a mut W { self.variant(PLL_DIV_SEL_A::VALUE4) } #[doc = "Divide by 25"] #[inline(always)] pub fn value5(self) -> &'a mut W { self.variant(PLL_DIV_SEL_A::VALUE5) } #[doc = "Divide by 30"] #[inline(always)] pub fn value6(self) -> &'a mut W { self.variant(PLL_DIV_SEL_A::VALUE6) } #[doc = "Divide by 240"] #[inline(always)] pub fn value7(self) -> &'a mut W { 
self.variant(PLL_DIV_SEL_A::VALUE7) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0x07 << 22)) | ((value as u32 & 0x07) << 22); self.w } } #[doc = "Field `PLL_PREDIV` reader - This is selection between /1 or /2 to expand the range of ref input clock."] pub struct PLL_PREDIV_R(crate::FieldReader<bool, bool>); impl PLL_PREDIV_R { #[inline(always)] pub(crate) fn new(bits: bool) -> Self { PLL_PREDIV_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for PLL_PREDIV_R { type Target = crate::FieldReader<bool, bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `PLL_PREDIV` writer - This is selection between /1 or /2 to expand the range of ref input clock."] pub struct PLL_PREDIV_W<'a> { w: &'a mut W, } impl<'a> PLL_PREDIV_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 30)) | ((value as u32 & 0x01) << 30); self.w } } #[doc = "USB PLL lock status indicator\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum PLL_LOCK_A { #[doc = "0: PLL is not currently locked"] VALUE0 = 0, #[doc = "1: PLL is currently locked"] VALUE1 = 1, } impl From<PLL_LOCK_A> for bool { #[inline(always)] fn from(variant: PLL_LOCK_A) -> Self { variant as u8 != 0 } } #[doc = "Field `PLL_LOCK` reader - USB PLL lock status indicator"] pub struct PLL_LOCK_R(crate::FieldReader<bool, PLL_LOCK_A>); impl PLL_LOCK_R { #[inline(always)] pub(crate) fn new(bits: bool) -> Self { PLL_LOCK_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> PLL_LOCK_A { match self.bits { false => 
PLL_LOCK_A::VALUE0, true => PLL_LOCK_A::VALUE1, } } #[doc = "Checks if the value of the field is `VALUE0`"] #[inline(always)] pub fn is_value0(&self) -> bool { **self == PLL_LOCK_A::VALUE0 } #[doc = "Checks if the value of the field is `VALUE1`"] #[inline(always)] pub fn is_value1(&self) -> bool { **self == PLL_LOCK_A::VALUE1 } } impl core::ops::Deref for PLL_LOCK_R { type Target = crate::FieldReader<bool, PLL_LOCK_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl R { #[doc = "Bit 6 - Enables the USB clock from PLL to USB PHY"] #[inline(always)] pub fn pll_en_usb_clks(&self) -> PLL_EN_USB_CLKS_R { PLL_EN_USB_CLKS_R::new(((self.bits >> 6) & 0x01) != 0) } #[doc = "Bit 12 - Power up the USB PLL"] #[inline(always)] pub fn pll_power(&self) -> PLL_POWER_R { PLL_POWER_R::new(((self.bits >> 12) & 0x01) != 0) } #[doc = "Bit 13 - Enables the clock output from the USB PLL"] #[inline(always)] pub fn pll_enable(&self) -> PLL_ENABLE_R { PLL_ENABLE_R::new(((self.bits >> 13) & 0x01) != 0) } #[doc = "Bit 19 - Reference bias power down select."] #[inline(always)] pub fn refbias_pwd_sel(&self) -> REFBIAS_PWD_SEL_R { REFBIAS_PWD_SEL_R::new(((self.bits >> 19) & 0x01) != 0) } #[doc = "Bit 20 - Power down the reference bias This bit is only used when REFBIAS_PWD_SEL is set to 1."] #[inline(always)] pub fn refbias_pwd(&self) -> REFBIAS_PWD_R { REFBIAS_PWD_R::new(((self.bits >> 20) & 0x01) != 0) } #[doc = "Bit 21 - This field controls the USB PLL regulator, set to enable the regulator"] #[inline(always)] pub fn pll_reg_enable(&self) -> PLL_REG_ENABLE_R { PLL_REG_ENABLE_R::new(((self.bits >> 21) & 0x01) != 0) } #[doc = "Bits 22:24 - This field controls the USB PLL feedback loop divider"] #[inline(always)] pub fn pll_div_sel(&self) -> PLL_DIV_SEL_R { PLL_DIV_SEL_R::new(((self.bits >> 22) & 0x07) as u8) } #[doc = "Bit 30 - This is selection between /1 or /2 to expand the range of ref input clock."] #[inline(always)] pub fn pll_prediv(&self) -> PLL_PREDIV_R { 
PLL_PREDIV_R::new(((self.bits >> 30) & 0x01) != 0) } #[doc = "Bit 31 - USB PLL lock status indicator"] #[inline(always)] pub fn pll_lock(&self) -> PLL_LOCK_R { PLL_LOCK_R::new(((self.bits >> 31) & 0x01) != 0) } } impl W { #[doc = "Bit 6 - Enables the USB clock from PLL to USB PHY"] #[inline(always)] pub fn pll_en_usb_clks(&mut self) -> PLL_EN_USB_CLKS_W { PLL_EN_USB_CLKS_W { w: self } } #[doc = "Bit 12 - Power up the USB PLL"] #[inline(always)] pub fn pll_power(&mut self) -> PLL_POWER_W { PLL_POWER_W { w: self } } #[doc = "Bit 13 - Enables the clock output from the USB PLL"] #[inline(always)] pub fn pll_enable(&mut self) -> PLL_ENABLE_W { PLL_ENABLE_W { w: self } } #[doc = "Bit 19 - Reference bias power down select."] #[inline(always)] pub fn refbias_pwd_sel(&mut self) -> REFBIAS_PWD_SEL_W { REFBIAS_PWD_SEL_W { w: self } } #[doc = "Bit 20 - Power down the reference bias This bit is only used when REFBIAS_PWD_SEL is set to 1."] #[inline(always)] pub fn refbias_pwd(&mut self) -> REFBIAS_PWD_W { REFBIAS_PWD_W { w: self } } #[doc = "Bit 21 - This field controls the USB PLL regulator, set to enable the regulator"] #[inline(always)] pub fn pll_reg_enable(&mut self) -> PLL_REG_ENABLE_W { PLL_REG_ENABLE_W { w: self } } #[doc = "Bits 22:24 - This field controls the USB PLL feedback loop divider"] #[inline(always)] pub fn pll_div_sel(&mut self) -> PLL_DIV_SEL_W { PLL_DIV_SEL_W { w: self } } #[doc = "Bit 30 - This is selection between /1 or /2 to expand the range of ref input clock."] #[inline(always)] pub fn pll_prediv(&mut self) -> PLL_PREDIV_W { PLL_PREDIV_W { w: self } } #[doc = "Writes raw bits to the register."] #[inline(always)] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.0.bits(bits); self } } #[doc = "USB PHY PLL Control/Status Register\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), 
[`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [pll_sic](index.html) module"] pub struct PLL_SIC_SPEC; impl crate::RegisterSpec for PLL_SIC_SPEC { type Ux = u32; } #[doc = "`read()` method returns [pll_sic::R](R) reader structure"] impl crate::Readable for PLL_SIC_SPEC { type Reader = R; } #[doc = "`write(|w| ..)` method takes [pll_sic::W](W) writer structure"] impl crate::Writable for PLL_SIC_SPEC { type Writer = W; } #[doc = "`reset()` method sets PLL_SIC to value 0x00d1_2000"] impl crate::Resettable for PLL_SIC_SPEC { #[inline(always)] fn reset_value() -> Self::Ux { 0x00d1_2000 } }
32.127466
423
0.590261
38f8bc565704ba90151064a01619d1d9e62a8c45
2,220
pub mod packages; use std::sync::Arc; pub use packages::{Name, StateTask, StateTaskMessage, PACKAGE_CREATORS}; use super::{deps::Dependencies, ext_traits::GetWorkerSimStartMsg, prelude::*}; pub use crate::config::Globals; use crate::{ config::ExperimentConfig, datastore::{ batch::change::ArrayChange, error::Result as DatastoreResult, schema::{accessor::FieldSpecMapAccessor, RootFieldSpec, RootFieldSpecCreator}, table::state::ExState, }, simulation::{ comms::package::PackageComms, package::ext_traits::GetWorkerExpStartMsg, Error, Result, }, SimRunConfig, }; #[async_trait] pub trait Package: GetWorkerSimStartMsg + Send + Sync { async fn run(&mut self, state: &mut ExState, context: &Context) -> Result<()>; } pub trait PackageCreator: GetWorkerExpStartMsg + Send + Sync { /// We can't derive a default as that returns Self which implies Sized which in turn means we /// can't create Trait Objects out of PackageCreator fn new(experiment_config: &Arc<ExperimentConfig>) -> Result<Box<dyn PackageCreator>> where Self: Sized; /// Create the package. fn create( &self, config: &Arc<SimRunConfig>, comms: PackageComms, accessor: FieldSpecMapAccessor, ) -> Result<Box<dyn Package>>; /// Get the package names that this package depends on. fn dependencies() -> Dependencies where Self: Sized, { Dependencies::empty() } fn get_state_field_specs( &self, _config: &ExperimentConfig, _globals: &Globals, _field_spec_map_builder: &RootFieldSpecCreator, ) -> Result<Vec<RootFieldSpec>> { Ok(vec![]) } } pub struct StateColumn { inner: Box<dyn IntoArrowChange + Send + Sync>, } impl StateColumn { pub fn get_arrow_change(&self, range: std::ops::Range<usize>) -> DatastoreResult<ArrayChange> { self.inner.get_arrow_change(range) } pub fn new(inner: Box<dyn IntoArrowChange + Send + Sync>) -> StateColumn { StateColumn { inner } } } pub trait IntoArrowChange { fn get_arrow_change(&self, range: std::ops::Range<usize>) -> DatastoreResult<ArrayChange>; }
28.461538
99
0.660811
9003e30357cbd8b24a22ceed894398a829866a26
28,475
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. pub use self::Integer::*; pub use self::Primitive::*; use spec::Target; use std::{cmp, fmt}; use std::ops::{Add, Deref, Sub, Mul, AddAssign, Range, RangeInclusive}; pub mod call; /// Parsed [Data layout](http://llvm.org/docs/LangRef.html#data-layout) /// for a target, which contains everything needed to compute layouts. pub struct TargetDataLayout { pub endian: Endian, pub i1_align: Align, pub i8_align: Align, pub i16_align: Align, pub i32_align: Align, pub i64_align: Align, pub i128_align: Align, pub f32_align: Align, pub f64_align: Align, pub pointer_size: Size, pub pointer_align: Align, pub aggregate_align: Align, /// Alignments for vector types. pub vector_align: Vec<(Size, Align)> } impl Default for TargetDataLayout { /// Creates an instance of `TargetDataLayout`. 
fn default() -> TargetDataLayout { TargetDataLayout { endian: Endian::Big, i1_align: Align::from_bits(8, 8).unwrap(), i8_align: Align::from_bits(8, 8).unwrap(), i16_align: Align::from_bits(16, 16).unwrap(), i32_align: Align::from_bits(32, 32).unwrap(), i64_align: Align::from_bits(32, 64).unwrap(), i128_align: Align::from_bits(32, 64).unwrap(), f32_align: Align::from_bits(32, 32).unwrap(), f64_align: Align::from_bits(64, 64).unwrap(), pointer_size: Size::from_bits(64), pointer_align: Align::from_bits(64, 64).unwrap(), aggregate_align: Align::from_bits(0, 64).unwrap(), vector_align: vec![ (Size::from_bits(64), Align::from_bits(64, 64).unwrap()), (Size::from_bits(128), Align::from_bits(128, 128).unwrap()) ] } } } impl TargetDataLayout { pub fn parse(target: &Target) -> Result<TargetDataLayout, String> { // Parse a bit count from a string. let parse_bits = |s: &str, kind: &str, cause: &str| { s.parse::<u64>().map_err(|err| { format!("invalid {} `{}` for `{}` in \"data-layout\": {}", kind, s, cause, err) }) }; // Parse a size string. let size = |s: &str, cause: &str| { parse_bits(s, "size", cause).map(Size::from_bits) }; // Parse an alignment string. let align = |s: &[&str], cause: &str| { if s.is_empty() { return Err(format!("missing alignment for `{}` in \"data-layout\"", cause)); } let abi = parse_bits(s[0], "alignment", cause)?; let pref = s.get(1).map_or(Ok(abi), |pref| parse_bits(pref, "alignment", cause))?; Align::from_bits(abi, pref).map_err(|err| { format!("invalid alignment for `{}` in \"data-layout\": {}", cause, err) }) }; let mut dl = TargetDataLayout::default(); let mut i128_align_src = 64; for spec in target.data_layout.split("-") { match &spec.split(":").collect::<Vec<_>>()[..] { &["e"] => dl.endian = Endian::Little, &["E"] => dl.endian = Endian::Big, &["a", ref a..] => dl.aggregate_align = align(a, "a")?, &["f32", ref a..] => dl.f32_align = align(a, "f32")?, &["f64", ref a..] => dl.f64_align = align(a, "f64")?, &[p @ "p", s, ref a..] 
| &[p @ "p0", s, ref a..] => { dl.pointer_size = size(s, p)?; dl.pointer_align = align(a, p)?; } &[s, ref a..] if s.starts_with("i") => { let bits = match s[1..].parse::<u64>() { Ok(bits) => bits, Err(_) => { size(&s[1..], "i")?; // For the user error. continue; } }; let a = align(a, s)?; match bits { 1 => dl.i1_align = a, 8 => dl.i8_align = a, 16 => dl.i16_align = a, 32 => dl.i32_align = a, 64 => dl.i64_align = a, _ => {} } if bits >= i128_align_src && bits <= 128 { // Default alignment for i128 is decided by taking the alignment of // largest-sized i{64...128}. i128_align_src = bits; dl.i128_align = a; } } &[s, ref a..] if s.starts_with("v") => { let v_size = size(&s[1..], "v")?; let a = align(a, s)?; if let Some(v) = dl.vector_align.iter_mut().find(|v| v.0 == v_size) { v.1 = a; continue; } // No existing entry, add a new one. dl.vector_align.push((v_size, a)); } _ => {} // Ignore everything else. } } // Perform consistency checks against the Target information. let endian_str = match dl.endian { Endian::Little => "little", Endian::Big => "big" }; if endian_str != target.target_endian { return Err(format!("inconsistent target specification: \"data-layout\" claims \ architecture is {}-endian, while \"target-endian\" is `{}`", endian_str, target.target_endian)); } if dl.pointer_size.bits().to_string() != target.target_pointer_width { return Err(format!("inconsistent target specification: \"data-layout\" claims \ pointers are {}-bit, while \"target-pointer-width\" is `{}`", dl.pointer_size.bits(), target.target_pointer_width)); } Ok(dl) } /// Return exclusive upper bound on object size. /// /// The theoretical maximum object size is defined as the maximum positive `isize` value. /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly /// index every address within an object along with one byte past the end, along with allowing /// `isize` to store the difference between any two pointers into an object. 
/// /// The upper bound on 64-bit currently needs to be lower because LLVM uses a 64-bit integer /// to represent object size in bits. It would need to be 1 << 61 to account for this, but is /// currently conservatively bounded to 1 << 47 as that is enough to cover the current usable /// address space on 64-bit ARMv8 and x86_64. pub fn obj_size_bound(&self) -> u64 { match self.pointer_size.bits() { 16 => 1 << 15, 32 => 1 << 31, 64 => 1 << 47, bits => panic!("obj_size_bound: unknown pointer bit size {}", bits) } } pub fn ptr_sized_integer(&self) -> Integer { match self.pointer_size.bits() { 16 => I16, 32 => I32, 64 => I64, bits => panic!("ptr_sized_integer: unknown pointer bit size {}", bits) } } pub fn vector_align(&self, vec_size: Size) -> Align { for &(size, align) in &self.vector_align { if size == vec_size { return align; } } // Default to natural alignment, which is what LLVM does. // That is, use the size, rounded up to a power of 2. let align = vec_size.bytes().next_power_of_two(); Align::from_bytes(align, align).unwrap() } } pub trait HasDataLayout: Copy { fn data_layout(&self) -> &TargetDataLayout; } impl<'a> HasDataLayout for &'a TargetDataLayout { fn data_layout(&self) -> &TargetDataLayout { self } } /// Endianness of the target, which must match cfg(target-endian). #[derive(Copy, Clone)] pub enum Endian { Little, Big } /// Size of a type in bytes. #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)] pub struct Size { raw: u64 } impl Size { pub const ZERO: Size = Self::from_bytes(0); pub fn from_bits(bits: u64) -> Size { // Avoid potential overflow from `bits + 7`. 
Size::from_bytes(bits / 8 + ((bits % 8) + 7) / 8) } pub const fn from_bytes(bytes: u64) -> Size { Size { raw: bytes } } pub fn bytes(self) -> u64 { self.raw } pub fn bits(self) -> u64 { self.bytes().checked_mul(8).unwrap_or_else(|| { panic!("Size::bits: {} bytes in bits doesn't fit in u64", self.bytes()) }) } pub fn abi_align(self, align: Align) -> Size { let mask = align.abi() - 1; Size::from_bytes((self.bytes() + mask) & !mask) } pub fn is_abi_aligned(self, align: Align) -> bool { let mask = align.abi() - 1; self.bytes() & mask == 0 } pub fn checked_add<C: HasDataLayout>(self, offset: Size, cx: C) -> Option<Size> { let dl = cx.data_layout(); let bytes = self.bytes().checked_add(offset.bytes())?; if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None } } pub fn checked_mul<C: HasDataLayout>(self, count: u64, cx: C) -> Option<Size> { let dl = cx.data_layout(); let bytes = self.bytes().checked_mul(count)?; if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None } } } // Panicking addition, subtraction and multiplication for convenience. // Avoid during layout computation, return `LayoutError` instead. 
impl Add for Size { type Output = Size; fn add(self, other: Size) -> Size { Size::from_bytes(self.bytes().checked_add(other.bytes()).unwrap_or_else(|| { panic!("Size::add: {} + {} doesn't fit in u64", self.bytes(), other.bytes()) })) } } impl Sub for Size { type Output = Size; fn sub(self, other: Size) -> Size { Size::from_bytes(self.bytes().checked_sub(other.bytes()).unwrap_or_else(|| { panic!("Size::sub: {} - {} would result in negative size", self.bytes(), other.bytes()) })) } } impl Mul<Size> for u64 { type Output = Size; fn mul(self, size: Size) -> Size { size * self } } impl Mul<u64> for Size { type Output = Size; fn mul(self, count: u64) -> Size { match self.bytes().checked_mul(count) { Some(bytes) => Size::from_bytes(bytes), None => { panic!("Size::mul: {} * {} doesn't fit in u64", self.bytes(), count) } } } } impl AddAssign for Size { fn add_assign(&mut self, other: Size) { *self = *self + other; } } /// Alignment of a type in bytes, both ABI-mandated and preferred. /// Each field is a power of two, giving the alignment a maximum value /// of 2<sup>(2<sup>8</sup> - 1)</sup>, which is limited by LLVM to a /// maximum capacity of 2<sup>29</sup> or 536870912. #[derive(Copy, Clone, PartialEq, Eq, Ord, PartialOrd, Hash, Debug, RustcEncodable, RustcDecodable)] pub struct Align { abi_pow2: u8, pref_pow2: u8, } impl Align { pub fn from_bits(abi: u64, pref: u64) -> Result<Align, String> { Align::from_bytes(Size::from_bits(abi).bytes(), Size::from_bits(pref).bytes()) } pub fn from_bytes(abi: u64, pref: u64) -> Result<Align, String> { let log2 = |align: u64| { // Treat an alignment of 0 bytes like 1-byte alignment. 
if align == 0 { return Ok(0); } let mut bytes = align; let mut pow: u8 = 0; while (bytes & 1) == 0 { pow += 1; bytes >>= 1; } if bytes != 1 { Err(format!("`{}` is not a power of 2", align)) } else if pow > 29 { Err(format!("`{}` is too large", align)) } else { Ok(pow) } }; Ok(Align { abi_pow2: log2(abi)?, pref_pow2: log2(pref)?, }) } pub fn abi(self) -> u64 { 1 << self.abi_pow2 } pub fn pref(self) -> u64 { 1 << self.pref_pow2 } pub fn abi_bits(self) -> u64 { self.abi() * 8 } pub fn pref_bits(self) -> u64 { self.pref() * 8 } pub fn min(self, other: Align) -> Align { Align { abi_pow2: cmp::min(self.abi_pow2, other.abi_pow2), pref_pow2: cmp::min(self.pref_pow2, other.pref_pow2), } } pub fn max(self, other: Align) -> Align { Align { abi_pow2: cmp::max(self.abi_pow2, other.abi_pow2), pref_pow2: cmp::max(self.pref_pow2, other.pref_pow2), } } } /// Integers, also used for enum discriminants. #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] pub enum Integer { I8, I16, I32, I64, I128, } impl Integer { pub fn size(&self) -> Size { match *self { I8 => Size::from_bytes(1), I16 => Size::from_bytes(2), I32 => Size::from_bytes(4), I64 => Size::from_bytes(8), I128 => Size::from_bytes(16), } } pub fn align<C: HasDataLayout>(&self, cx: C) -> Align { let dl = cx.data_layout(); match *self { I8 => dl.i8_align, I16 => dl.i16_align, I32 => dl.i32_align, I64 => dl.i64_align, I128 => dl.i128_align, } } /// Find the smallest Integer type which can represent the signed value. pub fn fit_signed(x: i128) -> Integer { match x { -0x0000_0000_0000_0080..=0x0000_0000_0000_007f => I8, -0x0000_0000_0000_8000..=0x0000_0000_0000_7fff => I16, -0x0000_0000_8000_0000..=0x0000_0000_7fff_ffff => I32, -0x8000_0000_0000_0000..=0x7fff_ffff_ffff_ffff => I64, _ => I128 } } /// Find the smallest Integer type which can represent the unsigned value. 
pub fn fit_unsigned(x: u128) -> Integer { match x { 0..=0x0000_0000_0000_00ff => I8, 0..=0x0000_0000_0000_ffff => I16, 0..=0x0000_0000_ffff_ffff => I32, 0..=0xffff_ffff_ffff_ffff => I64, _ => I128, } } /// Find the smallest integer with the given alignment. pub fn for_abi_align<C: HasDataLayout>(cx: C, align: Align) -> Option<Integer> { let dl = cx.data_layout(); let wanted = align.abi(); for &candidate in &[I8, I16, I32, I64, I128] { if wanted == candidate.align(dl).abi() && wanted == candidate.size().bytes() { return Some(candidate); } } None } /// Find the largest integer with the given alignment or less. pub fn approximate_abi_align<C: HasDataLayout>(cx: C, align: Align) -> Integer { let dl = cx.data_layout(); let wanted = align.abi(); // FIXME(eddyb) maybe include I128 in the future, when it works everywhere. for &candidate in &[I64, I32, I16] { if wanted >= candidate.align(dl).abi() && wanted >= candidate.size().bytes() { return candidate; } } I8 } } #[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Copy, PartialOrd, Ord)] pub enum FloatTy { F32, F64, } impl fmt::Debug for FloatTy { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Display::fmt(self, f) } } impl fmt::Display for FloatTy { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.ty_to_string()) } } impl FloatTy { pub fn ty_to_string(&self) -> &'static str { match *self { FloatTy::F32 => "f32", FloatTy::F64 => "f64", } } pub fn bit_width(&self) -> usize { match *self { FloatTy::F32 => 32, FloatTy::F64 => 64, } } } /// Fundamental unit of memory access and layout. #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] pub enum Primitive { /// The `bool` is the signedness of the `Integer` type. /// /// One would think we would not care about such details this low down, /// but some ABIs are described in terms of C types and ISAs where the /// integer arithmetic is done on {sign,zero}-extended registers, e.g. 
/// a negative integer passed by zero-extension will appear positive in /// the callee, and most operations on it will produce the wrong values. Int(Integer, bool), Float(FloatTy), Pointer } impl<'a, 'tcx> Primitive { pub fn size<C: HasDataLayout>(self, cx: C) -> Size { let dl = cx.data_layout(); match self { Int(i, _) => i.size(), Float(FloatTy::F32) => Size::from_bits(32), Float(FloatTy::F64) => Size::from_bits(64), Pointer => dl.pointer_size } } pub fn align<C: HasDataLayout>(self, cx: C) -> Align { let dl = cx.data_layout(); match self { Int(i, _) => i.align(dl), Float(FloatTy::F32) => dl.f32_align, Float(FloatTy::F64) => dl.f64_align, Pointer => dl.pointer_align } } pub fn is_float(self) -> bool { match self { Float(_) => true, _ => false } } pub fn is_int(self) -> bool { match self { Int(..) => true, _ => false, } } } /// Information about one scalar component of a Rust type. #[derive(Clone, PartialEq, Eq, Hash, Debug)] pub struct Scalar { pub value: Primitive, /// Inclusive wrap-around range of valid values, that is, if /// min > max, it represents min..=u128::MAX followed by 0..=max. // FIXME(eddyb) always use the shortest range, e.g. by finding // the largest space between two consecutive valid values and // taking everything else as the (shortest) valid range. pub valid_range: RangeInclusive<u128>, } impl Scalar { pub fn is_bool(&self) -> bool { if let Int(I8, _) = self.value { self.valid_range == (0..=1) } else { false } } /// Returns the valid range as a `x..y` range. /// /// If `x` and `y` are equal, the range is full, not empty. pub fn valid_range_exclusive<C: HasDataLayout>(&self, cx: C) -> Range<u128> { // For a (max) value of -1, max will be `-1 as usize`, which overflows. // However, that is fine here (it would still represent the full range), // i.e., if the range is everything. 
let bits = self.value.size(cx).bits(); assert!(bits <= 128); let mask = !0u128 >> (128 - bits); let start = *self.valid_range.start(); let end = *self.valid_range.end(); assert_eq!(start, start & mask); assert_eq!(end, end & mask); start..(end.wrapping_add(1) & mask) } } /// Describes how the fields of a type are located in memory. #[derive(PartialEq, Eq, Hash, Debug)] pub enum FieldPlacement { /// All fields start at no offset. The `usize` is the field count. Union(usize), /// Array/vector-like placement, with all fields of identical types. Array { stride: Size, count: u64 }, /// Struct-like placement, with precomputed offsets. /// /// Fields are guaranteed to not overlap, but note that gaps /// before, between and after all the fields are NOT always /// padding, and as such their contents may not be discarded. /// For example, enum variants leave a gap at the start, /// where the discriminant field in the enum layout goes. Arbitrary { /// Offsets for the first byte of each field, /// ordered to match the source definition order. /// This vector does not go in increasing order. // FIXME(eddyb) use small vector optimization for the common case. offsets: Vec<Size>, /// Maps source order field indices to memory order indices, /// depending how fields were permuted. // FIXME(camlorn) also consider small vector optimization here. memory_index: Vec<u32> } } impl FieldPlacement { pub fn count(&self) -> usize { match *self { FieldPlacement::Union(count) => count, FieldPlacement::Array { count, .. } => { let usize_count = count as usize; assert_eq!(usize_count as u64, count); usize_count } FieldPlacement::Arbitrary { ref offsets, .. } => offsets.len() } } pub fn offset(&self, i: usize) -> Size { match *self { FieldPlacement::Union(_) => Size::ZERO, FieldPlacement::Array { stride, count } => { let i = i as u64; assert!(i < count); stride * i } FieldPlacement::Arbitrary { ref offsets, .. 
} => offsets[i] } } pub fn memory_index(&self, i: usize) -> usize { match *self { FieldPlacement::Union(_) | FieldPlacement::Array { .. } => i, FieldPlacement::Arbitrary { ref memory_index, .. } => { let r = memory_index[i]; assert_eq!(r as usize as u32, r); r as usize } } } /// Get source indices of the fields by increasing offsets. #[inline] pub fn index_by_increasing_offset<'a>(&'a self) -> impl Iterator<Item=usize>+'a { let mut inverse_small = [0u8; 64]; let mut inverse_big = vec![]; let use_small = self.count() <= inverse_small.len(); // We have to write this logic twice in order to keep the array small. if let FieldPlacement::Arbitrary { ref memory_index, .. } = *self { if use_small { for i in 0..self.count() { inverse_small[memory_index[i] as usize] = i as u8; } } else { inverse_big = vec![0; self.count()]; for i in 0..self.count() { inverse_big[memory_index[i] as usize] = i as u32; } } } (0..self.count()).map(move |i| { match *self { FieldPlacement::Union(_) | FieldPlacement::Array { .. } => i, FieldPlacement::Arbitrary { .. } => { if use_small { inverse_small[i] as usize } else { inverse_big[i] as usize } } } }) } } /// Describes how values of the type are passed by target ABIs, /// in terms of categories of C types there are ABI rules for. #[derive(Clone, PartialEq, Eq, Hash, Debug)] pub enum Abi { Uninhabited, Scalar(Scalar), ScalarPair(Scalar, Scalar), Vector { element: Scalar, count: u64 }, Aggregate { /// If true, the size is exact, otherwise it's only a lower bound. sized: bool, } } impl Abi { /// Returns true if the layout corresponds to an unsized type. pub fn is_unsized(&self) -> bool { match *self { Abi::Uninhabited | Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. 
} => false, Abi::Aggregate { sized } => !sized } } /// Returns true if this is a single signed integer scalar pub fn is_signed(&self) -> bool { match *self { Abi::Scalar(ref scal) => match scal.value { Primitive::Int(_, signed) => signed, _ => false, }, _ => false, } } } #[derive(PartialEq, Eq, Hash, Debug)] pub enum Variants { /// Single enum variants, structs/tuples, unions, and all non-ADTs. Single { index: usize }, /// General-case enums: for each case there is a struct, and they all have /// all space reserved for the tag, and their first field starts /// at a non-0 offset, after where the tag would go. Tagged { tag: Scalar, variants: Vec<LayoutDetails>, }, /// Multiple cases distinguished by a niche (values invalid for a type): /// the variant `dataful_variant` contains a niche at an arbitrary /// offset (field 0 of the enum), which for a variant with discriminant /// `d` is set to `(d - niche_variants.start).wrapping_add(niche_start)`. /// /// For example, `Option<(usize, &T)>` is represented such that /// `None` has a null pointer for the second tuple field, and /// `Some` is the identity function (with a non-null reference). NicheFilling { dataful_variant: usize, niche_variants: RangeInclusive<usize>, niche: Scalar, niche_start: u128, variants: Vec<LayoutDetails>, } } #[derive(PartialEq, Eq, Hash, Debug)] pub struct LayoutDetails { pub variants: Variants, pub fields: FieldPlacement, pub abi: Abi, pub align: Align, pub size: Size } impl LayoutDetails { pub fn scalar<C: HasDataLayout>(cx: C, scalar: Scalar) -> Self { let size = scalar.value.size(cx); let align = scalar.value.align(cx); LayoutDetails { variants: Variants::Single { index: 0 }, fields: FieldPlacement::Union(0), abi: Abi::Scalar(scalar), size, align, } } } /// The details of the layout of a type, alongside the type itself. /// Provides various type traversal APIs (e.g. recursing into fields). 
/// /// Note that the details are NOT guaranteed to always be identical /// to those obtained from `layout_of(ty)`, as we need to produce /// layouts for which Rust types do not exist, such as enum variants /// or synthetic fields of enums (i.e. discriminants) and fat pointers. #[derive(Copy, Clone, Debug)] pub struct TyLayout<'a, Ty> { pub ty: Ty, pub details: &'a LayoutDetails } impl<'a, Ty> Deref for TyLayout<'a, Ty> { type Target = &'a LayoutDetails; fn deref(&self) -> &&'a LayoutDetails { &self.details } } pub trait LayoutOf { type Ty; type TyLayout; fn layout_of(self, ty: Self::Ty) -> Self::TyLayout; } pub trait TyLayoutMethods<'a, C: LayoutOf<Ty = Self>>: Sized { fn for_variant(this: TyLayout<'a, Self>, cx: C, variant_index: usize) -> TyLayout<'a, Self>; fn field(this: TyLayout<'a, Self>, cx: C, i: usize) -> C::TyLayout; } impl<'a, Ty> TyLayout<'a, Ty> { pub fn for_variant<C>(self, cx: C, variant_index: usize) -> Self where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> { Ty::for_variant(self, cx, variant_index) } pub fn field<C>(self, cx: C, i: usize) -> C::TyLayout where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> { Ty::field(self, cx, i) } } impl<'a, Ty> TyLayout<'a, Ty> { /// Returns true if the layout corresponds to an unsized type. pub fn is_unsized(&self) -> bool { self.abi.is_unsized() } /// Returns true if the type is a ZST and not unsized. pub fn is_zst(&self) -> bool { match self.abi { Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false, Abi::Uninhabited => self.size.bytes() == 0, Abi::Aggregate { sized } => sized && self.size.bytes() == 0 } } pub fn size_and_align(&self) -> (Size, Align) { (self.size, self.align) } }
32.211538
99
0.538718
e4ce82fa43caaa183546153edbdf63e59c178bc0
613
use std::io::*;
use std::str::FromStr;

/// Reads `a`, `b`, `x` from stdin and prints how many multiples of `x`
/// lie in the inclusive range `[a, b]`, computed as `f(b) - f(a - 1)`.
fn main() {
    let a: i64 = read();
    let b: i64 = read();
    let x: i64 = read();
    println!("{}", f(b, x) - f(a - 1, x));
}

/// Number of multiples of `x` in `[0, v]`, counting 0 itself.
/// Returns 0 when `v` is negative; for `v >= 0` this is `floor(v / x) + 1`.
fn f(v: i64, x: i64) -> i64 {
    if v < 0 {
        0
    } else {
        v / x + 1
    }
}

/// Reads the next whitespace-delimited token from stdin and parses it as `T`.
/// Panics if stdin cannot be read or the token fails to parse.
fn read<T: FromStr>() -> T {
    let stdin = stdin();
    let stdin = stdin.lock();
    let token: String = stdin
        .bytes()
        .map(|c| c.expect("failed to read char") as char)
        .skip_while(|c| c.is_whitespace())
        .take_while(|c| !c.is_whitespace())
        .collect();
    // `T::Err` need not implement `Debug`, so map the error to a panic
    // directly instead of the `.ok().expect(...)` anti-pattern
    // (clippy lint: `ok_expect`). The panic message is unchanged.
    token.parse().unwrap_or_else(|_| panic!("failed to parse token"))
}
21.892857
57
0.50571
8fc6bbdfd24ba0a93431fdd4cdff7fcc813e915e
8,839
mod ec; mod fq; mod fq12; mod fq2; mod fq6; mod fr; #[cfg(test)] mod tests; pub use self::ec::{ G1Affine, G1Compressed, G1Prepared, G1Uncompressed, G2Affine, G2Compressed, G2Prepared, G2Uncompressed, G1, G2, }; pub use self::fq::{Fq, FqRepr}; pub use self::fq12::Fq12; pub use self::fq2::Fq2; pub use self::fq6::Fq6; pub use self::fr::{Fr, FrRepr}; use super::{Engine, PairingCurveAffine}; use ff::{BitIterator, Field, ScalarEngine}; use group::CurveAffine; // The BLS parameter x for BLS12-381 is -0xd201000000010000 const BLS_X: u64 = 0xd201000000010000; const BLS_X_IS_NEGATIVE: bool = true; #[derive(Clone, Debug)] pub struct Bls12; impl ScalarEngine for Bls12 { type Fr = Fr; } impl Engine for Bls12 { type G1 = G1; type G1Affine = G1Affine; type G2 = G2; type G2Affine = G2Affine; type Fq = Fq; type Fqe = Fq2; type Fqk = Fq12; fn miller_loop<'a, I>(i: I) -> Self::Fqk where I: IntoIterator< Item = &'a ( &'a <Self::G1Affine as PairingCurveAffine>::Prepared, &'a <Self::G2Affine as PairingCurveAffine>::Prepared, ), >, { let mut pairs = vec![]; for &(p, q) in i { if !p.is_zero() && !q.is_zero() { pairs.push((p, q.coeffs.iter())); } } // Twisting isomorphism from E to E' fn ell(f: &mut Fq12, coeffs: &(Fq2, Fq2, Fq2), p: &G1Affine) { let mut c0 = coeffs.0; let mut c1 = coeffs.1; c0.c0.mul_assign(&p.y); c0.c1.mul_assign(&p.y); c1.c0.mul_assign(&p.x); c1.c1.mul_assign(&p.x); // Sparse multiplication in Fq12 f.mul_by_014(&coeffs.2, &c1, &c0); } let mut f = Fq12::one(); let mut found_one = false; for i in BitIterator::new(&[BLS_X >> 1]) { if !found_one { found_one = i; continue; } for &mut (p, ref mut coeffs) in &mut pairs { ell(&mut f, coeffs.next().unwrap(), &p.0); } if i { for &mut (p, ref mut coeffs) in &mut pairs { ell(&mut f, coeffs.next().unwrap(), &p.0); } } f.square(); } for &mut (p, ref mut coeffs) in &mut pairs { ell(&mut f, coeffs.next().unwrap(), &p.0); } if BLS_X_IS_NEGATIVE { f.conjugate(); } f } fn final_exponentiation(r: &Fq12) -> Option<Fq12> { let mut f1 = *r; 
f1.conjugate(); match r.inverse() { Some(mut f2) => { let mut r = f1; r.mul_assign(&f2); f2 = r; r.frobenius_map(2); r.mul_assign(&f2); fn exp_by_x(f: &mut Fq12, x: u64) { *f = f.pow(&[x]); if BLS_X_IS_NEGATIVE { f.conjugate(); } } let mut x = BLS_X; let mut y0 = r; y0.square(); let mut y1 = y0; exp_by_x(&mut y1, x); x >>= 1; let mut y2 = y1; exp_by_x(&mut y2, x); x <<= 1; let mut y3 = r; y3.conjugate(); y1.mul_assign(&y3); y1.conjugate(); y1.mul_assign(&y2); y2 = y1; exp_by_x(&mut y2, x); y3 = y2; exp_by_x(&mut y3, x); y1.conjugate(); y3.mul_assign(&y1); y1.conjugate(); y1.frobenius_map(3); y2.frobenius_map(2); y1.mul_assign(&y2); y2 = y3; exp_by_x(&mut y2, x); y2.mul_assign(&y0); y2.mul_assign(&r); y1.mul_assign(&y2); y2 = y3; y2.frobenius_map(1); y1.mul_assign(&y2); Some(y1) } None => None, } } } impl G2Prepared { pub fn is_zero(&self) -> bool { self.infinity } pub fn from_affine(q: G2Affine) -> Self { if q.is_zero() { return G2Prepared { coeffs: vec![], infinity: true, }; } fn doubling_step(r: &mut G2) -> (Fq2, Fq2, Fq2) { // Adaptation of Algorithm 26, https://eprint.iacr.org/2010/354.pdf let mut tmp0 = r.x; tmp0.square(); let mut tmp1 = r.y; tmp1.square(); let mut tmp2 = tmp1; tmp2.square(); let mut tmp3 = tmp1; tmp3.add_assign(&r.x); tmp3.square(); tmp3.sub_assign(&tmp0); tmp3.sub_assign(&tmp2); tmp3.double(); let mut tmp4 = tmp0; tmp4.double(); tmp4.add_assign(&tmp0); let mut tmp6 = r.x; tmp6.add_assign(&tmp4); let mut tmp5 = tmp4; tmp5.square(); let mut zsquared = r.z; zsquared.square(); r.x = tmp5; r.x.sub_assign(&tmp3); r.x.sub_assign(&tmp3); r.z.add_assign(&r.y); r.z.square(); r.z.sub_assign(&tmp1); r.z.sub_assign(&zsquared); r.y = tmp3; r.y.sub_assign(&r.x); r.y.mul_assign(&tmp4); tmp2.double(); tmp2.double(); tmp2.double(); r.y.sub_assign(&tmp2); tmp3 = tmp4; tmp3.mul_assign(&zsquared); tmp3.double(); tmp3.negate(); tmp6.square(); tmp6.sub_assign(&tmp0); tmp6.sub_assign(&tmp5); tmp1.double(); tmp1.double(); tmp6.sub_assign(&tmp1); tmp0 = r.z; 
tmp0.mul_assign(&zsquared); tmp0.double(); (tmp0, tmp3, tmp6) } fn addition_step(r: &mut G2, q: &G2Affine) -> (Fq2, Fq2, Fq2) { // Adaptation of Algorithm 27, https://eprint.iacr.org/2010/354.pdf let mut zsquared = r.z; zsquared.square(); let mut ysquared = q.y; ysquared.square(); let mut t0 = zsquared; t0.mul_assign(&q.x); let mut t1 = q.y; t1.add_assign(&r.z); t1.square(); t1.sub_assign(&ysquared); t1.sub_assign(&zsquared); t1.mul_assign(&zsquared); let mut t2 = t0; t2.sub_assign(&r.x); let mut t3 = t2; t3.square(); let mut t4 = t3; t4.double(); t4.double(); let mut t5 = t4; t5.mul_assign(&t2); let mut t6 = t1; t6.sub_assign(&r.y); t6.sub_assign(&r.y); let mut t9 = t6; t9.mul_assign(&q.x); let mut t7 = t4; t7.mul_assign(&r.x); r.x = t6; r.x.square(); r.x.sub_assign(&t5); r.x.sub_assign(&t7); r.x.sub_assign(&t7); r.z.add_assign(&t2); r.z.square(); r.z.sub_assign(&zsquared); r.z.sub_assign(&t3); let mut t10 = q.y; t10.add_assign(&r.z); let mut t8 = t7; t8.sub_assign(&r.x); t8.mul_assign(&t6); t0 = r.y; t0.mul_assign(&t5); t0.double(); r.y = t8; r.y.sub_assign(&t0); t10.square(); t10.sub_assign(&ysquared); let mut ztsquared = r.z; ztsquared.square(); t10.sub_assign(&ztsquared); t9.double(); t9.sub_assign(&t10); t10 = r.z; t10.double(); t6.negate(); t1 = t6; t1.double(); (t10, t1, t9) } let mut coeffs = vec![]; let mut r: G2 = q.into(); let mut found_one = false; for i in BitIterator::new([BLS_X >> 1]) { if !found_one { found_one = i; continue; } coeffs.push(doubling_step(&mut r)); if i { coeffs.push(addition_step(&mut r, &q)); } } coeffs.push(doubling_step(&mut r)); G2Prepared { coeffs, infinity: false, } } } #[test] fn bls12_engine_tests() { crate::tests::engine::engine_tests::<Bls12>(); }
23.824798
91
0.427311
21045e44bbbd612d47c2222b3d2784bf8ad29f26
6,470
// Copyright 2018 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Wallet seed handling: random generation, mnemonic conversion, and
//! encrypted storage of the seed on disk (`wallet.seed`), using a
//! PBKDF2-derived key and ChaCha20-Poly1305 AEAD via `ring`.

use failure::{Error, ResultExt};
use grin_keychain::{mnemonic, Keychain};
use grin_util::{ZeroingString, from_hex, to_hex};
use rand::{Rng, thread_rng};
use ring::{aead, digest, pbkdf2};
use serde_json;
use std::fs::{self, File};
use std::io::{Read, Write};
use std::path::{Path, MAIN_SEPARATOR};

use crate::common::config::WalletConfig;
use super::ErrorKind;

/// File name of the (encrypted) seed inside the wallet data directory.
pub const SEED_FILE: &'static str = "wallet.seed";

/// Raw wallet seed bytes (plaintext; lives in memory only — the on-disk
/// form is `EncryptedWalletSeed`).
#[derive(Clone, Debug, PartialEq)]
pub struct WalletSeed(Vec<u8>);

impl WalletSeed {
	/// Wraps raw bytes as a seed (copies the slice).
	pub fn from_bytes(bytes: &[u8]) -> WalletSeed {
		WalletSeed(bytes.to_vec())
	}

	/// Recovers a seed from a mnemonic word list via
	/// `grin_keychain::mnemonic` (presumably BIP39 — confirm in that crate).
	/// Any underlying failure is collapsed into `ErrorKind::Mnemonic`.
	pub fn from_mnemonic(word_list: &str) -> Result<WalletSeed, Error> {
		let res = mnemonic::to_entropy(word_list);
		match res {
			Ok(s) => Ok(WalletSeed::from_bytes(&s)),
			Err(_) => Err(ErrorKind::Mnemonic.into()),
		}
	}

	/// Renders this seed as a mnemonic word list (inverse of `from_mnemonic`).
	pub fn to_mnemonic(&self) -> Result<String, Error> {
		let result = mnemonic::from_entropy(&self.0);
		match result {
			Ok(r) => Ok(r),
			Err(_) => Err(ErrorKind::Mnemonic.into()),
		}
	}

	/// Derives a keychain of type `K` from this seed.
	pub fn derive_keychain<K: Keychain>(&self, is_floonet: bool) -> Result<K, Error> {
		let result = K::from_seed(&self.0, is_floonet)?;
		Ok(result)
	}

	/// Generates a fresh random seed of `seed_length` bytes using `thread_rng`.
	pub fn init_new(seed_length: usize) -> WalletSeed {
		let mut seed: Vec<u8> = vec![];
		let mut rng = thread_rng();
		for _ in 0..seed_length {
			seed.push(rng.gen());
		}
		WalletSeed(seed)
	}

	/// Errors with `WalletSeedExists` if a seed file is already present in
	/// the wallet data dir; `Ok(())` otherwise. Used by `init_file` as an
	/// overwrite guard.
	pub fn seed_file_exists(wallet_config: &WalletConfig) -> Result<(), Error> {
		let seed_file_path = &format!(
			"{}{}{}",
			wallet_config.data_file_dir, MAIN_SEPARATOR, SEED_FILE,
		);
		if Path::new(seed_file_path).exists() {
			return Err(ErrorKind::WalletSeedExists.into());
		}
		Ok(())
	}

	/// Creates (or, with `overwrite`, replaces) the encrypted seed file.
	///
	/// The seed comes from `recovery_phrase` when given, otherwise a fresh
	/// random seed of `seed_length` bytes is generated. The seed is
	/// encrypted with `password` and written as pretty-printed JSON.
	/// Returns the plaintext seed so the caller can e.g. display the mnemonic.
	pub fn init_file(
		wallet_config: &WalletConfig,
		seed_length: usize,
		recovery_phrase: Option<ZeroingString>,
		password: &str,
		overwrite: bool,
	) -> Result<WalletSeed, Error> {
		// create directory if it doesn't exist
		fs::create_dir_all(&wallet_config.data_file_dir).context(ErrorKind::IO)?;
		let seed_file_path = &format!(
			"{}{}{}",
			wallet_config.data_file_dir, MAIN_SEPARATOR, SEED_FILE,
		);
		warn!("Generating wallet seed file at: {}", seed_file_path);
		if !overwrite {
			// refuse to clobber an existing seed file
			let _ = WalletSeed::seed_file_exists(wallet_config)?;
		}
		let seed = match recovery_phrase {
			Some(p) => WalletSeed::from_mnemonic(&p)?,
			None => WalletSeed::init_new(seed_length),
		};
		let enc_seed = EncryptedWalletSeed::from_seed(&seed, password)?;
		let enc_seed_json = serde_json::to_string_pretty(&enc_seed).context(ErrorKind::Format)?;
		let mut file = File::create(seed_file_path).context(ErrorKind::IO)?;
		file.write_all(&enc_seed_json.as_bytes())
			.context(ErrorKind::IO)?;
		Ok(seed)
	}

	/// Loads and decrypts the seed file with `password`.
	/// Errors with `WalletSeedDoesntExist` when no seed file is present.
	pub fn from_file(wallet_config: &WalletConfig, password: &str) -> Result<WalletSeed, Error> {
		// create directory if it doesn't exist
		fs::create_dir_all(&wallet_config.data_file_dir).context(ErrorKind::IO)?;
		let seed_file_path = &format!(
			"{}{}{}",
			wallet_config.data_file_dir, MAIN_SEPARATOR, SEED_FILE,
		);
		debug!("Using wallet seed file at: {}", seed_file_path);
		if Path::new(seed_file_path).exists() {
			let mut file = File::open(seed_file_path).context(ErrorKind::IO)?;
			let mut buffer = String::new();
			file.read_to_string(&mut buffer).context(ErrorKind::IO)?;
			let enc_seed: EncryptedWalletSeed =
				serde_json::from_str(&buffer).context(ErrorKind::Format)?;
			let wallet_seed = enc_seed.decrypt(password)?;
			Ok(wallet_seed)
		} else {
			error!(
				"wallet seed file {} could not be opened (grin wallet init). \
				 Run \"grin wallet init\" to initialize a new wallet.",
				seed_file_path
			);
			Err(ErrorKind::WalletSeedDoesntExist)?
		}
	}
}

/// Encrypted wallet seed, for storing on disk and decrypting
/// with provided password
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct EncryptedWalletSeed {
	// hex-encoded ciphertext: seed bytes followed by the AEAD tag
	encrypted_seed: String,
	/// Salt, not so useful in single case but include anyhow for situations
	/// where someone wants to store many of these
	pub salt: String,
	/// Nonce
	pub nonce: String,
}

impl EncryptedWalletSeed {
	/// Create a new encrypted seed from the given seed + password
	///
	/// Key derivation: PBKDF2-HMAC-SHA512 over `password` with a fresh
	/// random 8-byte salt; encryption: ChaCha20-Poly1305 with a fresh
	/// random 12-byte nonce. Salt, nonce and ciphertext are hex-encoded
	/// for JSON storage.
	// NOTE(review): 100 PBKDF2 iterations is very low by modern standards;
	// raising it would require a versioned seed-file format — confirm
	// against the project's threat model before changing.
	pub fn from_seed(seed: &WalletSeed, password: &str) -> Result<EncryptedWalletSeed, Error> {
		let salt: [u8; 8] = thread_rng().gen();
		let nonce: [u8; 12] = thread_rng().gen();
		let password = password.as_bytes();
		let mut key = [0; 32];
		pbkdf2::derive(&digest::SHA512, 100, &salt, password, &mut key);
		let content = seed.0.to_vec();
		let mut enc_bytes = content.clone();
		// reserve room at the end of the buffer for the AEAD
		// authentication tag, as ring's in-place sealing API requires
		let suffix_len = aead::CHACHA20_POLY1305.tag_len();
		for _ in 0..suffix_len {
			enc_bytes.push(0);
		}
		let sealing_key =
			aead::SealingKey::new(&aead::CHACHA20_POLY1305, &key).context(ErrorKind::Encryption)?;
		aead::seal_in_place(&sealing_key, &nonce, &[], &mut enc_bytes, suffix_len)
			.context(ErrorKind::Encryption)?;
		Ok(EncryptedWalletSeed {
			encrypted_seed: to_hex(enc_bytes.to_vec()),
			salt: to_hex(salt.to_vec()),
			nonce: to_hex(nonce.to_vec()),
		})
	}

	/// Decrypt seed
	///
	/// Reverses `from_seed`: re-derives the key from `password` and the
	/// stored salt, then opens the ciphertext with the stored nonce.
	/// Any hex-decoding or AEAD failure (including a wrong password)
	/// surfaces uniformly as `ErrorKind::Encryption`.
	pub fn decrypt(&self, password: &str) -> Result<WalletSeed, Error> {
		let mut encrypted_seed = match from_hex(self.encrypted_seed.clone()) {
			Ok(s) => s,
			Err(_) => return Err(ErrorKind::Encryption)?,
		};
		let salt = match from_hex(self.salt.clone()) {
			Ok(s) => s,
			Err(_) => return Err(ErrorKind::Encryption)?,
		};
		let nonce = match from_hex(self.nonce.clone()) {
			Ok(s) => s,
			Err(_) => return Err(ErrorKind::Encryption)?,
		};
		let password = password.as_bytes();
		let mut key = [0; 32];
		pbkdf2::derive(&digest::SHA512, 100, &salt, password, &mut key);
		let opening_key =
			aead::OpeningKey::new(&aead::CHACHA20_POLY1305, &key).context(ErrorKind::Encryption)?;
		let decrypted_data = aead::open_in_place(&opening_key, &nonce, &[], 0, &mut encrypted_seed)
			.context(ErrorKind::Encryption)?;
		Ok(WalletSeed::from_bytes(&decrypted_data))
	}
}
31.715686
94
0.689645
f4eac3858e4db1fb38dabd2eb34b5016b6abb1aa
8,721
// This file was generated by gir (https://github.com/gtk-rs/gir) // from gir-files (https://github.com/gtk-rs/gir-files.git) // DO NOT EDIT use crate::EventController; use crate::EventControllerScrollFlags; use crate::PropagationLimit; use crate::PropagationPhase; use glib::object::Cast; use glib::object::ObjectType as ObjectType_; use glib::signal::connect_raw; use glib::signal::SignalHandlerId; use glib::translate::*; use glib::StaticType; use glib::ToValue; use std::boxed::Box as Box_; use std::fmt; use std::mem::transmute; glib::wrapper! { #[doc(alias = "GtkEventControllerScroll")] pub struct EventControllerScroll(Object<ffi::GtkEventControllerScroll, ffi::GtkEventControllerScrollClass>) @extends EventController; match fn { type_ => || ffi::gtk_event_controller_scroll_get_type(), } } impl EventControllerScroll { #[doc(alias = "gtk_event_controller_scroll_new")] pub fn new(flags: EventControllerScrollFlags) -> EventControllerScroll { assert_initialized_main_thread!(); unsafe { EventController::from_glib_full(ffi::gtk_event_controller_scroll_new(flags.into_glib())) .unsafe_cast() } } // rustdoc-stripper-ignore-next /// Creates a new builder-pattern struct instance to construct [`EventControllerScroll`] objects. /// /// This method returns an instance of [`EventControllerScrollBuilder`] which can be used to create [`EventControllerScroll`] objects. 
pub fn builder() -> EventControllerScrollBuilder { EventControllerScrollBuilder::default() } #[doc(alias = "gtk_event_controller_scroll_get_flags")] #[doc(alias = "get_flags")] pub fn flags(&self) -> EventControllerScrollFlags { unsafe { from_glib(ffi::gtk_event_controller_scroll_get_flags( self.to_glib_none().0, )) } } #[doc(alias = "gtk_event_controller_scroll_set_flags")] pub fn set_flags(&self, flags: EventControllerScrollFlags) { unsafe { ffi::gtk_event_controller_scroll_set_flags(self.to_glib_none().0, flags.into_glib()); } } #[doc(alias = "decelerate")] pub fn connect_decelerate<F: Fn(&Self, f64, f64) + 'static>(&self, f: F) -> SignalHandlerId { unsafe extern "C" fn decelerate_trampoline< F: Fn(&EventControllerScroll, f64, f64) + 'static, >( this: *mut ffi::GtkEventControllerScroll, vel_x: libc::c_double, vel_y: libc::c_double, f: glib::ffi::gpointer, ) { let f: &F = &*(f as *const F); f(&from_glib_borrow(this), vel_x, vel_y) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"decelerate\0".as_ptr() as *const _, Some(transmute::<_, unsafe extern "C" fn()>( decelerate_trampoline::<F> as *const (), )), Box_::into_raw(f), ) } } #[doc(alias = "scroll")] pub fn connect_scroll<F: Fn(&Self, f64, f64) -> glib::signal::Inhibit + 'static>( &self, f: F, ) -> SignalHandlerId { unsafe extern "C" fn scroll_trampoline< F: Fn(&EventControllerScroll, f64, f64) -> glib::signal::Inhibit + 'static, >( this: *mut ffi::GtkEventControllerScroll, dx: libc::c_double, dy: libc::c_double, f: glib::ffi::gpointer, ) -> glib::ffi::gboolean { let f: &F = &*(f as *const F); f(&from_glib_borrow(this), dx, dy).into_glib() } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"scroll\0".as_ptr() as *const _, Some(transmute::<_, unsafe extern "C" fn()>( scroll_trampoline::<F> as *const (), )), Box_::into_raw(f), ) } } #[doc(alias = "scroll-begin")] pub fn connect_scroll_begin<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId { 
unsafe extern "C" fn scroll_begin_trampoline<F: Fn(&EventControllerScroll) + 'static>( this: *mut ffi::GtkEventControllerScroll, f: glib::ffi::gpointer, ) { let f: &F = &*(f as *const F); f(&from_glib_borrow(this)) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"scroll-begin\0".as_ptr() as *const _, Some(transmute::<_, unsafe extern "C" fn()>( scroll_begin_trampoline::<F> as *const (), )), Box_::into_raw(f), ) } } #[doc(alias = "scroll-end")] pub fn connect_scroll_end<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId { unsafe extern "C" fn scroll_end_trampoline<F: Fn(&EventControllerScroll) + 'static>( this: *mut ffi::GtkEventControllerScroll, f: glib::ffi::gpointer, ) { let f: &F = &*(f as *const F); f(&from_glib_borrow(this)) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"scroll-end\0".as_ptr() as *const _, Some(transmute::<_, unsafe extern "C" fn()>( scroll_end_trampoline::<F> as *const (), )), Box_::into_raw(f), ) } } #[doc(alias = "flags")] pub fn connect_flags_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId { unsafe extern "C" fn notify_flags_trampoline<F: Fn(&EventControllerScroll) + 'static>( this: *mut ffi::GtkEventControllerScroll, _param_spec: glib::ffi::gpointer, f: glib::ffi::gpointer, ) { let f: &F = &*(f as *const F); f(&from_glib_borrow(this)) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"notify::flags\0".as_ptr() as *const _, Some(transmute::<_, unsafe extern "C" fn()>( notify_flags_trampoline::<F> as *const (), )), Box_::into_raw(f), ) } } } #[derive(Clone, Default)] // rustdoc-stripper-ignore-next /// A [builder-pattern] type to construct [`EventControllerScroll`] objects. 
///
/// [builder-pattern]: https://doc.rust-lang.org/1.0.0/style/ownership/builders.html
pub struct EventControllerScrollBuilder {
    // Every field starts as `None`; only fields that were explicitly set via
    // the corresponding setter are forwarded to the GObject constructor in
    // `build()`, so unset properties keep their GTK-side defaults.
    flags: Option<EventControllerScrollFlags>,
    name: Option<String>,
    propagation_limit: Option<PropagationLimit>,
    propagation_phase: Option<PropagationPhase>,
}

impl EventControllerScrollBuilder {
    // rustdoc-stripper-ignore-next
    /// Create a new [`EventControllerScrollBuilder`].
    pub fn new() -> Self {
        Self::default()
    }

    // rustdoc-stripper-ignore-next
    /// Build the [`EventControllerScroll`].
    ///
    /// Collects the set properties into `(name, value)` pairs and hands them
    /// to `glib::Object::new`. Panics (via `expect`) if object construction
    /// fails, which generated gtk-rs builders treat as unrecoverable.
    pub fn build(self) -> EventControllerScroll {
        let mut properties: Vec<(&str, &dyn ToValue)> = vec![];
        if let Some(ref flags) = self.flags {
            properties.push(("flags", flags));
        }
        if let Some(ref name) = self.name {
            properties.push(("name", name));
        }
        if let Some(ref propagation_limit) = self.propagation_limit {
            properties.push(("propagation-limit", propagation_limit));
        }
        if let Some(ref propagation_phase) = self.propagation_phase {
            properties.push(("propagation-phase", propagation_phase));
        }
        glib::Object::new::<EventControllerScroll>(&properties)
            .expect("Failed to create an instance of EventControllerScroll")
    }

    // Setters consume and return `self` so calls can be chained fluently.
    pub fn flags(mut self, flags: EventControllerScrollFlags) -> Self {
        self.flags = Some(flags);
        self
    }

    pub fn name(mut self, name: &str) -> Self {
        self.name = Some(name.to_string());
        self
    }

    pub fn propagation_limit(mut self, propagation_limit: PropagationLimit) -> Self {
        self.propagation_limit = Some(propagation_limit);
        self
    }

    pub fn propagation_phase(mut self, propagation_phase: PropagationPhase) -> Self {
        self.propagation_phase = Some(propagation_phase);
        self
    }
}

impl fmt::Display for EventControllerScroll {
    // Displays the GTK type name only; use `Debug` for instance details.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str("EventControllerScroll")
    }
}
34.607143
138
0.567022
f93ccd794117dffc46ad5bcdcdc3bc12d2975f04
184
#![crate_type="lib"] #[repr(i8)] pub enum Type { Type1 = 0, Type2 = 1 } // CHECK: define signext i8 @test() #[no_mangle] pub extern "C" fn test() -> Type { Type::Type1 }
13.142857
35
0.565217
56a2038abf204bc2335aeb7c7b60983acfcc583b
699
use std::io::Result;

/// Solves the circular "captcha" puzzle (Advent of Code 2017, day 1).
///
/// Reads a single line of ASCII digits from `input` and prints two sums:
/// * part one adds every digit that matches the next digit (wrapping around
///   to the front at the end of the sequence);
/// * part two adds every digit that matches the digit halfway around the
///   circular sequence.
///
/// Panics if `input` is exhausted or yields an I/O error (callers supply a
/// line iterator over stdin or a file). Inputs shorter than two bytes are
/// rejected with a message instead of being processed.
pub fn solve<L: Iterator<Item=Result<String>>>(input: &mut L) -> () {
    let digits = input.next().unwrap().unwrap().into_bytes();
    let len = digits.len();

    if len < 2 {
        println!("The input is too short.");
        return;
    }

    // ASCII digit byte -> numeric value (48 == b'0').
    let value_of = |b: u8| (b as u64) - 48;

    let mut next_sum: u64 = 0;
    let mut halfway_sum: u64 = 0;

    for (idx, &b) in digits.iter().enumerate() {
        if b == digits[(idx + 1) % len] {
            next_sum += value_of(b);
        }
        if b == digits[(idx + len / 2) % len] {
            halfway_sum += value_of(b);
        }
    }

    println!("The captcha is {}", next_sum);
    println!("The secondary captcha is {}", halfway_sum);
}
24.964286
69
0.48784
ac1fbcffde470897532057c3fa67ab271a41fcbe
805
// Example exercising `ticketed_lock`: tickets are acquired in a fixed order
// on the main thread, then waited on from separate threads.
extern crate env_logger;
extern crate ticketed_lock;

use std::thread;

use ticketed_lock as tl;

// With the "futuring" feature enabled, the blocking `wait()` API used below
// is unavailable, so the example compiles to an empty program.
#[cfg(feature = "futuring")]
fn main() {
    //empty
}

#[cfg(not(feature = "futuring"))]
fn main() {
    env_logger::init().unwrap();
    let mut storage = tl::TicketedLock::new(4u8);
    // Acquire tickets up front, in order: two reads, then one write.
    // NOTE(review): presumably the lock grants access in ticket order, so t3
    // runs after both reads regardless of thread scheduling — confirm against
    // the ticketed_lock docs.
    let t1 = storage.read();
    let t2 = storage.read();
    let t3 = storage.write();
    // Each ticket is moved into its own thread and blocked on with `wait()`.
    let g3 = thread::spawn(move|| {
        let mut guard = t3.wait();
        *guard += 1;
        println!("t3: {}", *guard);
    });
    let g2 = thread::spawn(move|| {
        let guard = t2.wait();
        println!("t2: {}", *guard);
    });
    let g1 = thread::spawn(move|| {
        let guard = t1.wait();
        println!("t1: {}", *guard);
    });
    // Join all workers so the process does not exit before they print.
    g1.join().unwrap();
    g2.join().unwrap();
    g3.join().unwrap();
}
20.641026
49
0.526708
288b57e53fe1bf14f8d8544cdb08ee34e7d683be
15,676
//! Models relating to channels and types within channels.

mod attachment;
mod channel_category;
mod channel_id;
mod embed;
mod guild_channel;
mod message;
mod private_channel;
mod reaction;
mod sticker;

#[cfg(feature = "model")]
use std::fmt::{Display, Formatter, Result as FmtResult};

#[cfg(all(feature = "cache", feature = "model", feature = "utils"))]
use async_trait::async_trait;
use serde::de::{Error as DeError, Unexpected};
use serde::ser::{Serialize, SerializeStruct, Serializer};

pub use self::attachment::*;
pub use self::channel_category::*;
pub use self::channel_id::*;
pub use self::embed::*;
pub use self::guild_channel::*;
pub use self::message::*;
pub use self::private_channel::*;
pub use self::reaction::*;
pub use self::sticker::*;

use super::utils::deserialize_u64;
#[cfg(all(feature = "cache", feature = "model"))]
use crate::cache::Cache;
#[cfg(all(feature = "cache", feature = "model", feature = "utils"))]
use crate::cache::FromStrAndCache;
#[cfg(feature = "model")]
use crate::http::CacheHttp;
#[cfg(all(feature = "cache", feature = "model", feature = "utils"))]
use crate::model::misc::ChannelParseError;
use crate::model::prelude::*;
#[cfg(all(feature = "cache", feature = "model", feature = "utils"))]
use crate::utils::parse_channel;

/// A container for any channel.
#[derive(Clone, Debug)]
#[non_exhaustive]
pub enum Channel {
    /// A [text] or [voice] channel within a [`Guild`].
    ///
    /// [text]: ChannelType::Text
    /// [voice]: ChannelType::Voice
    Guild(GuildChannel),
    /// A private channel to another [`User`]. No other users may access the
    /// channel. For multi-user "private channels", use a group.
    Private(PrivateChannel),
    /// A category of [`GuildChannel`]s
    Category(ChannelCategory),
}

#[cfg(feature = "model")]
impl Channel {
    /// Converts from `Channel` to `Option<GuildChannel>`.
    ///
    /// Converts `self` into an `Option<GuildChannel>`, consuming
    /// `self`, and discarding a `PrivateChannel`, or
    /// `ChannelCategory`, if any.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```rust,no_run
    /// # #[cfg(all(feature = "model", feature = "cache"))]
    /// # async fn run() {
    /// # use serenity::{cache::Cache, model::id::ChannelId};
    /// # use tokio::sync::RwLock;
    /// # use std::sync::Arc;
    /// #
    /// # let cache = Cache::default();
    /// # let channel = ChannelId(0).to_channel_cached(&cache).await.unwrap();
    /// #
    /// match channel.guild() {
    ///     Some(guild) => {
    ///         println!("It's a guild named {}!", guild.name);
    ///     },
    ///     None => { println!("It's not a guild!"); },
    /// }
    /// # }
    /// ```
    pub fn guild(self) -> Option<GuildChannel> {
        match self {
            Channel::Guild(lock) => Some(lock),
            _ => None,
        }
    }

    /// Converts from `Channel` to `Option<PrivateChannel>`.
    ///
    /// Converts `self` into an `Option<PrivateChannel>`, consuming
    /// `self`, and discarding a `GuildChannel`, or `ChannelCategory`,
    /// if any.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```rust,no_run
    /// # #[cfg(all(feature = "model", feature = "cache"))]
    /// # async fn run() {
    /// # use serenity::{cache::Cache, model::id::ChannelId};
    /// # use tokio::sync::RwLock;
    /// # use std::sync::Arc;
    /// #
    /// # let cache = Cache::default();
    /// # let channel = ChannelId(0).to_channel_cached(&cache).await.unwrap();
    /// #
    /// match channel.private() {
    ///     Some(private) => {
    ///         println!("It's a private channel with {}!", &private.recipient);
    ///     },
    ///     None => { println!("It's not a private channel!"); },
    /// }
    /// # }
    /// ```
    pub fn private(self) -> Option<PrivateChannel> {
        match self {
            Channel::Private(lock) => Some(lock),
            _ => None,
        }
    }

    /// Converts from `Channel` to `Option<ChannelCategory>`.
    ///
    /// Converts `self` into an `Option<ChannelCategory>`,
    /// consuming `self`, and discarding a `GuildChannel`, or
    /// `PrivateChannel`, if any.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```rust,no_run
    /// # #[cfg(all(feature = "model", feature = "cache"))]
    /// # async fn run() {
    /// # use serenity::{cache::Cache, model::id::ChannelId};
    /// # use tokio::sync::RwLock;
    /// # use std::sync::Arc;
    /// #
    /// # let cache = Cache::default();
    /// # let channel = ChannelId(0).to_channel_cached(&cache).await.unwrap();
    /// #
    /// match channel.category() {
    ///     Some(category) => {
    ///         println!("It's a category named {}!", category.name);
    ///     },
    ///     None => { println!("It's not a category!"); },
    /// }
    /// #
    /// # }
    /// ```
    pub fn category(self) -> Option<ChannelCategory> {
        match self {
            Channel::Category(lock) => Some(lock),
            _ => None,
        }
    }

    /// Deletes the inner channel.
    ///
    /// # Errors
    ///
    /// If the `cache` is enabled, returns [`ModelError::InvalidPermissions`],
    /// if the current user lacks permission.
    ///
    /// Otherwise will return [`Error::Http`] if the current user does not
    /// have permission.
    pub async fn delete(&self, cache_http: impl CacheHttp) -> Result<()> {
        // Delegate to the concrete channel type; only the private-channel
        // variant takes a bare `Http` handle rather than the full CacheHttp.
        match self {
            Channel::Guild(public_channel) => {
                public_channel.delete(cache_http).await?;
            },
            Channel::Private(private_channel) => {
                private_channel.delete(cache_http.http()).await?;
            },
            Channel::Category(category) => {
                category.delete(cache_http).await?;
            },
        }

        Ok(())
    }

    /// Determines if the channel is NSFW.
    #[inline]
    pub fn is_nsfw(&self) -> bool {
        // Private channels can never be NSFW.
        match self {
            Channel::Guild(channel) => channel.is_nsfw(),
            Channel::Category(category) => category.is_nsfw(),
            Channel::Private(_) => false,
        }
    }

    /// Retrieves the Id of the inner [`GuildChannel`], or
    /// [`PrivateChannel`].
    #[inline]
    pub fn id(&self) -> ChannelId {
        match self {
            Channel::Guild(ch) => ch.id,
            Channel::Private(ch) => ch.id,
            Channel::Category(ch) => ch.id,
        }
    }

    /// Retrieves the position of the inner [`GuildChannel`] or
    /// [`ChannelCategory`].
    ///
    /// If other channel types are used it will return None.
    #[inline]
    pub fn position(&self) -> Option<i64> {
        match self {
            Channel::Guild(channel) => Some(channel.position),
            Channel::Category(catagory) => Some(catagory.position),
            _ => None,
        }
    }
}

impl<'de> Deserialize<'de> for Channel {
    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> StdResult<Self, D::Error> {
        // Deserialize into a generic JSON map first so the numeric "type"
        // discriminant can be inspected before picking the concrete variant.
        let v = JsonMap::deserialize(deserializer)?;
        let kind = {
            let kind = v.get("type").ok_or_else(|| DeError::missing_field("type"))?;

            match kind.as_u64() {
                Some(kind) => kind,
                None => {
                    return Err(DeError::invalid_type(
                        Unexpected::Other("non-positive integer"),
                        &"a positive integer",
                    ));
                },
            }
        };

        // Discriminants 0 (text), 2 (voice), 5 (news) and 6 (store) all map
        // onto GuildChannel; 1 is a DM; 4 is a category. Anything else
        // (e.g. types this version does not model) is rejected.
        match kind {
            0 | 2 | 5 | 6 => serde_json::from_value::<GuildChannel>(Value::Object(v))
                .map(Channel::Guild)
                .map_err(DeError::custom),
            1 => serde_json::from_value::<PrivateChannel>(Value::Object(v))
                .map(Channel::Private)
                .map_err(DeError::custom),
            4 => serde_json::from_value::<ChannelCategory>(Value::Object(v))
                .map(Channel::Category)
                .map_err(DeError::custom),
            _ => Err(DeError::custom("Unknown channel type")),
        }
    }
}

impl Serialize for Channel {
    // Serialization is fully delegated to the inner channel type, which
    // writes its own "type" discriminant back out.
    fn serialize<S>(&self, serializer: S) -> StdResult<S::Ok, S::Error>
    where
        S: Serializer,
    {
        match self {
            Channel::Category(c) => ChannelCategory::serialize(c, serializer),
            Channel::Guild(c) => GuildChannel::serialize(c, serializer),
            Channel::Private(c) => PrivateChannel::serialize(c, serializer),
        }
    }
}

impl Display for Channel {
    /// Formats the channel into a "mentioned" string.
    ///
    /// This will return a different format for each type of channel:
    ///
    /// - [`PrivateChannel`]s: the recipient's name;
    /// - [`GuildChannel`]s: a string mentioning the channel that users who can
    /// see the channel can click on.
    fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
        match self {
            Channel::Guild(ch) => Display::fmt(&ch.id.mention(), f),
            Channel::Private(ch) => Display::fmt(&ch.recipient.name, f),
            Channel::Category(ch) => Display::fmt(&ch.name, f),
        }
    }
}

/// A representation of a type of channel.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd, Ord)]
#[non_exhaustive]
pub enum ChannelType {
    /// An indicator that the channel is a text [`GuildChannel`].
    Text = 0,
    /// An indicator that the channel is a [`PrivateChannel`].
    Private = 1,
    /// An indicator that the channel is a voice [`GuildChannel`].
    Voice = 2,
    // Discriminant 3 is intentionally absent here.
    // NOTE(review): presumably 3 is the group-DM type this version does not
    // model — confirm against the Discord channel-type table.
    /// An indicator that the channel is the channel of a [`ChannelCategory`].
    Category = 4,
    /// An indicator that the channel is a `NewsChannel`.
    ///
    /// Note: `NewsChannel` is serialized into a [`GuildChannel`]
    News = 5,
    /// An indicator that the channel is a `StoreChannel`
    ///
    /// Note: `StoreChannel` is serialized into a [`GuildChannel`]
    Store = 6,
}

// Generates serde support mapping each variant to/from its numeric
// discriminant above.
enum_number!(ChannelType {
    Text,
    Private,
    Voice,
    Category,
    News,
    Store
});

impl ChannelType {
    /// A lowercase, human-readable name for the channel type.
    #[inline]
    pub fn name(&self) -> &str {
        match *self {
            ChannelType::Private => "private",
            ChannelType::Text => "text",
            ChannelType::Voice => "voice",
            ChannelType::Category => "category",
            ChannelType::News => "news",
            ChannelType::Store => "store",
        }
    }

    /// The numeric wire value of the channel type. These values must stay in
    /// sync with the enum discriminants and with `Channel`'s deserializer.
    #[inline]
    pub fn num(self) -> u64 {
        match self {
            ChannelType::Text => 0,
            ChannelType::Private => 1,
            ChannelType::Voice => 2,
            ChannelType::Category => 4,
            ChannelType::News => 5,
            ChannelType::Store => 6,
        }
    }
}

// Wire-format mirror of `PermissionOverwrite`: the overwrite target is a raw
// u64 id plus a numeric "type" tag (0 = role, 1 = member).
#[derive(Deserialize, Serialize)]
struct PermissionOverwriteData {
    allow: Permissions,
    deny: Permissions,
    #[serde(serialize_with = "serialize_u64", deserialize_with = "deserialize_u64")]
    id: u64,
    #[serde(rename = "type")]
    kind: u8,
}

/// A channel-specific permission overwrite for a member or role.
#[derive(Clone, Debug)]
pub struct PermissionOverwrite {
    pub allow: Permissions,
    pub deny: Permissions,
    pub kind: PermissionOverwriteType,
}

impl<'de> Deserialize<'de> for PermissionOverwrite {
    fn deserialize<D: Deserializer<'de>>(
        deserializer: D,
    ) -> StdResult<PermissionOverwrite, D::Error> {
        // Go through the raw wire form, then turn the (id, type-tag) pair
        // into a typed id: 0 = role overwrite, 1 = member overwrite.
        let data = PermissionOverwriteData::deserialize(deserializer)?;

        let kind = match &data.kind {
            0 => PermissionOverwriteType::Role(RoleId(data.id)),
            1 => PermissionOverwriteType::Member(UserId(data.id)),
            _ => return Err(DeError::custom("Unknown PermissionOverwriteType")),
        };

        Ok(PermissionOverwrite {
            allow: data.allow,
            deny: data.deny,
            kind,
        })
    }
}

impl Serialize for PermissionOverwrite {
    fn serialize<S>(&self, serializer: S) -> StdResult<S::Ok, S::Error>
    where
        S: Serializer,
    {
        // Inverse of the deserializer: flatten the typed id back into a raw
        // u64 plus the numeric tag expected on the wire.
        let (id, kind) = match self.kind {
            PermissionOverwriteType::Role(id) => (id.0, 0),
            PermissionOverwriteType::Member(id) => (id.0, 1),
        };

        let mut state = serializer.serialize_struct("PermissionOverwrite", 4)?;
        state.serialize_field("allow", &self.allow)?;
        state.serialize_field("deny", &self.deny)?;
        state.serialize_field("id", &id)?;
        state.serialize_field("type", &kind)?;
        state.end()
    }
}

/// The type of edit being made to a Channel's permissions.
///
/// This is for use with methods such as [`GuildChannel::create_permission`].
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
#[non_exhaustive]
pub enum PermissionOverwriteType {
    /// A member which is having its permission overwrites edited.
    Member(UserId),
    /// A role which is having its permission overwrites edited.
    Role(RoleId),
}

#[cfg(test)]
mod test {
    #[cfg(all(feature = "model", feature = "utils"))]
    mod model_utils {
        use crate::model::prelude::*;

        // Minimal guild channel fixture for the NSFW checks below.
        fn guild_channel() -> GuildChannel {
            GuildChannel {
                id: ChannelId(1),
                bitrate: None,
                category_id: None,
                guild_id: GuildId(2),
                kind: ChannelType::Text,
                last_message_id: None,
                last_pin_timestamp: None,
                name: "nsfw-stuff".to_string(),
                permission_overwrites: vec![],
                position: 0,
                topic: None,
                user_limit: None,
                nsfw: false,
                slow_mode_rate: Some(0),
            }
        }

        // Minimal private channel fixture; DMs are never NSFW.
        fn private_channel() -> PrivateChannel {
            PrivateChannel {
                id: ChannelId(1),
                last_message_id: None,
                last_pin_timestamp: None,
                kind: ChannelType::Private,
                recipient: User {
                    id: UserId(2),
                    avatar: None,
                    bot: false,
                    discriminator: 1,
                    name: "ab".to_string(),
                },
            }
        }

        // Verifies that NSFW status depends solely on the explicit `nsfw`
        // flag of a text channel — not on the channel name or kind.
        #[test]
        fn nsfw_checks() {
            let mut channel = guild_channel();
            assert!(!channel.is_nsfw());
            channel.kind = ChannelType::Voice;
            assert!(!channel.is_nsfw());

            channel.kind = ChannelType::Text;
            channel.name = "nsfw-".to_string();
            assert!(!channel.is_nsfw());

            channel.name = "nsfw".to_string();
            assert!(!channel.is_nsfw());
            channel.kind = ChannelType::Voice;
            assert!(!channel.is_nsfw());
            channel.kind = ChannelType::Text;

            channel.name = "nsf".to_string();
            channel.nsfw = true;
            assert!(channel.is_nsfw());
            channel.nsfw = false;
            assert!(!channel.is_nsfw());

            let channel = Channel::Guild(channel);
            assert!(!channel.is_nsfw());

            let private_channel = private_channel();
            assert!(!private_channel.is_nsfw());
        }
    }
}

#[cfg(all(feature = "cache", feature = "model", feature = "utils"))]
#[async_trait]
impl FromStrAndCache for Channel {
    type Err = ChannelParseError;

    // Parses a channel mention (e.g. "<#123>") and resolves it against the
    // cache; distinguishes "not a mention" from "mention of unknown channel".
    async fn from_str<C>(cache: C, s: &str) -> StdResult<Self, Self::Err>
    where
        C: AsRef<Cache> + Send + Sync,
    {
        match parse_channel(s) {
            Some(x) => match ChannelId(x).to_channel_cached(&cache).await {
                Some(channel) => Ok(channel),
                _ => Err(ChannelParseError::NotPresentInCache),
            },
            _ => Err(ChannelParseError::InvalidChannel),
        }
    }
}
30.919132
88
0.544782
dda14fb10af027d25086a84a70569316e8f443f7
6,112
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

// NOTE: this crate is written in a pre-1.0 (circa 2014) Rust dialect —
// `uint`, `StrBuf`, `@`-pointers, `|..|` closure types, `#[macro_registrar]`
// — and only builds with a contemporaneous compiler.

/*!
Syntax extension to create floating point literals from hexadecimal strings

Once loaded, hexfloat!() is called with a string containing the hexadecimal
floating-point literal, and an optional type (f32 or f64).
If the type is omitted, the literal is treated the same as a normal unsuffixed
literal.

# Examples

To load the extension and use it:

```rust,ignore
#[phase(syntax)]
extern crate hexfloat;

fn main() {
    let val = hexfloat!("0x1.ffffb4", f32);
}
```

# References

* [ExploringBinary: hexadecimal floating point constants]
    (http://www.exploringbinary.com/hexadecimal-floating-point-constants/)

*/

#![crate_id = "hexfloat#0.11.0-pre"]
#![crate_type = "rlib"]
#![crate_type = "dylib"]
#![license = "MIT/ASL2"]
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
       html_favicon_url = "http://www.rust-lang.org/favicon.ico",
       html_root_url = "http://static.rust-lang.org/doc/master")]
#![deny(deprecated_owned_vector)]
#![feature(macro_registrar, managed_boxes)]

extern crate syntax;

use syntax::ast;
use syntax::ast::Name;
use syntax::codemap::{Span, mk_sp};
use syntax::ext::base;
use syntax::ext::base::{SyntaxExtension, BasicMacroExpander, NormalTT, ExtCtxt, MacExpr};
use syntax::ext::build::AstBuilder;
use syntax::parse;
use syntax::parse::token;

// Registers `hexfloat!` as a normal (expression-position) macro whose
// expansion is handled by `expand_syntax_ext` below.
#[macro_registrar]
pub fn macro_registrar(register: |Name, SyntaxExtension|) {
    register(token::intern("hexfloat"),
             NormalTT(box BasicMacroExpander {
                 expander: expand_syntax_ext,
                 span: None,
             },
             None));
}

//Check if the literal is valid (as LLVM expects),
//and return a descriptive error if not.
//
// Returns None on success, or Some((byte_offset, message)) pointing at the
// first offending character. The expected grammar is roughly:
//   [-] 0x hexdigits* . hexdigits* p [-] digits+
// with at least one hex digit on either side of the '.'.
fn hex_float_lit_err(s: &str) -> Option<(uint, StrBuf)> {
    let mut chars = s.chars().peekable();
    let mut i = 0;
    // Optional leading sign.
    if chars.peek() == Some(&'-') { chars.next(); i+= 1 }
    if chars.next() != Some('0') {
        return Some((i, "Expected '0'".to_strbuf()));
    }
    i+=1;
    if chars.next() != Some('x') {
        return Some((i, "Expected 'x'".to_strbuf()));
    }
    i+=1;
    // Integer part (may be empty if a fractional part follows).
    let mut d_len = 0;
    for _ in chars.take_while(|c| c.is_digit_radix(16)) { chars.next(); i+=1; d_len += 1;}
    if chars.next() != Some('.') {
        return Some((i, "Expected '.'".to_strbuf()));
    }
    i+=1;
    // Fractional part (may be empty if an integer part was present).
    let mut f_len = 0;
    for _ in chars.take_while(|c| c.is_digit_radix(16)) { chars.next(); i+=1; f_len += 1;}
    if d_len == 0 && f_len == 0 {
        return Some((i, "Expected digits before or after decimal \
                         point".to_strbuf()));
    }
    // Mandatory binary exponent: 'p' followed by an optional sign and digits.
    if chars.next() != Some('p') {
        return Some((i, "Expected 'p'".to_strbuf()));
    }
    i+=1;
    if chars.peek() == Some(&'-') { chars.next(); i+= 1 }
    let mut e_len = 0;
    for _ in chars.take_while(|c| c.is_digit()) { chars.next(); i+=1; e_len += 1}
    if e_len == 0 {
        return Some((i, "Expected exponent digits".to_strbuf()));
    }
    match chars.next() {
        None => None,
        Some(_) => Some((i, "Expected end of string".to_strbuf()))
    }
}

// Macro expander: parses the argument tokens, validates the literal string,
// and expands to a float literal AST node (suffixed if a type was given).
pub fn expand_syntax_ext(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
                         -> Box<base::MacResult> {
    let (expr, ty_lit) = parse_tts(cx, tts);

    // Optional second argument selects the float type suffix.
    let ty = match ty_lit {
        None => None,
        Some(Ident{ident, span}) => match token::get_ident(ident).get() {
            "f32" => Some(ast::TyF32),
            "f64" => Some(ast::TyF64),
            "f128" => Some(ast::TyF128),
            _ => {
                cx.span_err(span, "invalid floating point type in hexfloat!");
                None
            }
        }
    };

    let s = match expr.node {
        // expression is a literal
        ast::ExprLit(lit) => match lit.node {
            // string literal
            ast::LitStr(ref s, _) => {
                s.clone()
            }
            _ => {
                cx.span_err(expr.span, "unsupported literal in hexfloat!");
                return base::DummyResult::expr(sp);
            }
        },
        _ => {
            cx.span_err(expr.span, "non-literal in hexfloat!");
            return base::DummyResult::expr(sp);
        }
    };

    {
        let err = hex_float_lit_err(s.get());
        match err {
            Some((err_pos, err_str)) => {
                // Point the diagnostic at the offending character inside the
                // string literal (+1 skips the opening quote).
                let pos = expr.span.lo + syntax::codemap::Pos::from_uint(err_pos + 1);
                let span = syntax::codemap::mk_sp(pos,pos);
                cx.span_err(span,
                            format!("invalid hex float literal in hexfloat!: {}",
                                    err_str));
                return base::DummyResult::expr(sp);
            }
            _ => ()
        }
    }

    let lit = match ty {
        None => ast::LitFloatUnsuffixed(s),
        Some (ty) => ast::LitFloat(s, ty)
    };
    MacExpr::new(cx.expr_lit(sp, lit))
}

// The optional type argument, carrying its span for error reporting.
struct Ident {
    ident: ast::Ident,
    span: Span
}

// Parses `hexfloat!`'s token stream into the literal expression and the
// optional trailing `, <type>` identifier.
fn parse_tts(cx: &ExtCtxt, tts: &[ast::TokenTree]) -> (@ast::Expr, Option<Ident>) {
    let p = &mut parse::new_parser_from_tts(cx.parse_sess(),
                                            cx.cfg(),
                                            tts.iter()
                                               .map(|x| (*x).clone())
                                               .collect());
    let ex = p.parse_expr();
    let id = if p.token == token::EOF {
        None
    } else {
        p.expect(&token::COMMA);
        let lo = p.span.lo;
        let ident = p.parse_ident();
        let hi = p.last_span.hi;
        Some(Ident{ident: ident, span: mk_sp(lo, hi)})
    };
    if p.token != token::EOF {
        p.unexpected();
    }
    (ex, id)
}

// FIXME (10872): This is required to prevent an LLVM assert on Windows
#[test]
fn dummy_test() { }
31.505155
98
0.560373
d7a366798bc98dadc83f387e59416006b58af489
645
// Crate root for an Elixir NIF wrapper around the sled embedded database.
#[macro_use]
extern crate rustler;
#[macro_use]
extern crate rustler_codegen;
extern crate sled;
#[macro_use]
extern crate err_derive;

use rustler::{Env, Term};

mod atoms;
mod config;
mod db;
mod error;

// Crate-wide result alias using the NIF-facing error type.
type SledExResult<T> = Result<T, error::SledExError>;

// Exports the NIF functions under the `Elixir.Sled.Native` module.
// Each tuple is (elixir_name, arity, rust_implementation); the arities must
// match the stubs declared on the Elixir side.
rustler_export_nifs! {
    "Elixir.Sled.Native",
    [("start_default", 1, db::start_default),("set", 3, db::set),("get", 2, db::get),("del", 2, db::del),("scan", 3, db::scan),("iter_next", 1, db::iter_next)],
    Some(on_load)
}

// Called by the BEAM when the NIF library is loaded; registers the resource
// types (opaque handles passed back to Elixir). Returning true signals
// successful initialization.
fn on_load<'a>(env: Env<'a>, _info: Term) -> bool {
    resource_struct_init!(db::DBHandle, env);
    resource_struct_init!(db::Cursor, env);
    true
}
22.241379
160
0.660465
f859e835425c435f0bb8002ceb60d24f92175503
62,010
//! This file builds up the `ScopeTree`, which describes
//! the parent links in the region hierarchy.
//!
//! For more information about how MIR-based region-checking works,
//! see the [rustc guide].
//!
//! [rustc guide]: https://rust-lang.github.io/rustc-guide/mir/borrowck.html

use crate::ich::{StableHashingContext, NodeIdHashingMode};
use crate::util::nodemap::{FxHashMap, FxHashSet};
use crate::ty;

use std::mem;
use std::fmt;

use rustc_macros::HashStable;
use syntax::source_map;
use syntax_pos::{Span, DUMMY_SP};
use crate::ty::{DefIdTree, TyCtxt};
use crate::ty::query::Providers;

use crate::hir;
use crate::hir::Node;
use crate::hir::def_id::DefId;
use crate::hir::intravisit::{self, Visitor, NestedVisitorMap};
use crate::hir::{Block, Arm, Pat, PatKind, Stmt, Expr, Local};
use rustc_data_structures::indexed_vec::Idx;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher,
                                           StableHasherResult};

/// Scope represents a statically-describable scope that can be
/// used to bound the lifetime/region for values.
///
/// `Node(node_id)`: Any AST node that has any scope at all has the
/// `Node(node_id)` scope. Other variants represent special cases not
/// immediately derivable from the abstract syntax tree structure.
///
/// `DestructionScope(node_id)` represents the scope of destructors
/// implicitly-attached to `node_id` that run immediately after the
/// expression for `node_id` itself. Not every AST node carries a
/// `DestructionScope`, but those that are `terminating_scopes` do;
/// see discussion with `ScopeTree`.
///
/// `Remainder { block, statement_index }` represents
/// the scope of user code running immediately after the initializer
/// expression for the indexed statement, until the end of the block.
///
/// So: the following code can be broken down into the scopes beneath:
///
/// ```text
/// let a = f().g( 'b: { let x = d(); let y = d(); x.h(y)  }   ) ;
///
///                                                              +-+ (D12.)
///                                                        +-+       (D11.)
///                                              +---------+         (R10.)
///                                              +-+                 (D9.)
///                                   +----------+                   (M8.)
///                 +----------------------+                         (R7.)
///                 +-+                                              (D6.)
///      +----------+                                                (M5.)
/// +-----------------------------------+                            (M4.)
/// +--------------------------------------------------+             (M3.)
/// +--+                                                             (M2.)
/// +-----------------------------------------------------------+    (M1.)
///
///  (M1.): Node scope of the whole `let a = ...;` statement.
///  (M2.): Node scope of the `f()` expression.
///  (M3.): Node scope of the `f().g(..)` expression.
///  (M4.): Node scope of the block labeled `'b:`.
///  (M5.): Node scope of the `let x = d();` statement
///  (D6.): DestructionScope for temporaries created during M5.
///  (R7.): Remainder scope for block `'b:`, stmt 0 (let x = ...).
///  (M8.): Node scope of the `let y = d();` statement.
///  (D9.): DestructionScope for temporaries created during M8.
/// (R10.): Remainder scope for block `'b:`, stmt 1 (let y = ...).
/// (D11.): DestructionScope for temporaries and bindings from block `'b:`.
/// (D12.): DestructionScope for temporaries created during M1 (e.g., f()).
/// ```
///
/// Note that while the above picture shows the destruction scopes
/// as following their corresponding node scopes, in the internal
/// data structures of the compiler the destruction scopes are
/// represented as enclosing parents. This is sound because we use the
/// enclosing parent relationship just to ensure that referenced
/// values live long enough; phrased another way, the starting point
/// of each range is not really the important thing in the above
/// picture, but rather the ending point.
//
// FIXME(pnkfelix): this currently derives `PartialOrd` and `Ord` to
// placate the same deriving in `ty::FreeRegion`, but we may want to
// actually attach a more meaningful ordering to scopes than the one
// generated via deriving here.
#[derive(Clone, PartialEq, PartialOrd, Eq, Ord, Hash, Copy, RustcEncodable,
         RustcDecodable, HashStable)]
pub struct Scope {
    pub id: hir::ItemLocalId,
    pub data: ScopeData,
}

impl fmt::Debug for Scope {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self.data {
            ScopeData::Node => write!(fmt, "Node({:?})", self.id),
            ScopeData::CallSite => write!(fmt, "CallSite({:?})", self.id),
            ScopeData::Arguments => write!(fmt, "Arguments({:?})", self.id),
            ScopeData::Destruction => write!(fmt, "Destruction({:?})", self.id),
            ScopeData::Remainder(fsi) => write!(
                fmt,
                "Remainder {{ block: {:?}, first_statement_index: {}}}",
                self.id,
                fsi.as_u32(),
            ),
        }
    }
}

#[derive(Clone, PartialEq, PartialOrd, Eq, Ord, Hash, Debug, Copy,
         RustcEncodable, RustcDecodable, HashStable)]
pub enum ScopeData {
    Node,

    /// Scope of the call-site for a function or closure
    /// (outlives the arguments as well as the body).
    CallSite,

    /// Scope of arguments passed to a function or closure
    /// (they outlive its body).
    Arguments,

    /// Scope of destructors for temporaries of node-id.
    Destruction,

    /// Scope following a `let id = expr;` binding in a block.
    Remainder(FirstStatementIndex)
}

newtype_index! {
    /// Represents a subscope of `block` for a binding that is introduced
    /// by `block.stmts[first_statement_index]`. Such subscopes represent
    /// a suffix of the block. Note that each subscope does not include
    /// the initializer expression, if any, for the statement indexed by
    /// `first_statement_index`.
    ///
    /// For example, given `{ let (a, b) = EXPR_1; let c = EXPR_2; ... }`:
    ///
    /// * The subscope with `first_statement_index == 0` is scope of both
    ///   `a` and `b`; it does not include EXPR_1, but does include
    ///   everything after that first `let`. (If you want a scope that
    ///   includes EXPR_1 as well, then do not use `Scope::Remainder`,
    ///   but instead another `Scope` that encompasses the whole block,
    ///   e.g., `Scope::Node`.
    ///
    /// * The subscope with `first_statement_index == 1` is scope of `c`,
    ///   and thus does not include EXPR_2, but covers the `...`.
    pub struct FirstStatementIndex {
        derive [HashStable]
    }
}

// compilation error if size of `ScopeData` is not the same as a `u32`
static_assert_size!(ScopeData, 4);

impl Scope {
    /// Returns a item-local ID associated with this scope.
    ///
    /// N.B., likely to be replaced as API is refined; e.g., pnkfelix
    /// anticipates `fn entry_node_id` and `fn each_exit_node_id`.
    pub fn item_local_id(&self) -> hir::ItemLocalId {
        self.id
    }

    // Rebuilds the full HirId from the scope's item-local id and the owner
    // of the tree's root body; yields DUMMY_HIR_ID when no root body is set.
    pub fn hir_id(&self, scope_tree: &ScopeTree) -> hir::HirId {
        match scope_tree.root_body {
            Some(hir_id) => {
                hir::HirId {
                    owner: hir_id.owner,
                    local_id: self.item_local_id()
                }
            }
            None => hir::DUMMY_HIR_ID
        }
    }

    /// Returns the span of this `Scope`. Note that in general the
    /// returned span may not correspond to the span of any `NodeId` in
    /// the AST.
    pub fn span(&self, tcx: TyCtxt<'_>, scope_tree: &ScopeTree) -> Span {
        let hir_id = self.hir_id(scope_tree);
        if hir_id == hir::DUMMY_HIR_ID {
            return DUMMY_SP;
        }
        let span = tcx.hir().span(hir_id);
        if let ScopeData::Remainder(first_statement_index) = self.data {
            if let Node::Block(ref blk) = tcx.hir().get(hir_id) {
                // Want span for scope starting after the
                // indexed statement and ending at end of
                // `blk`; reuse span of `blk` and shift `lo`
                // forward to end of indexed statement.
                //
                // (This is the special case aluded to in the
                // doc-comment for this method)

                let stmt_span = blk.stmts[first_statement_index.index()].span;

                // To avoid issues with macro-generated spans, the span
                // of the statement must be nested in that of the block.
                if span.lo() <= stmt_span.lo() && stmt_span.lo() <= span.hi() {
                    return Span::new(stmt_span.lo(), span.hi(), span.ctxt());
                }
            }
        }
        span
    }
}

pub type ScopeDepth = u32;

/// The region scope tree encodes information about region relationships.
#[derive(Default, Debug)]
pub struct ScopeTree {
    /// If not empty, this body is the root of this region hierarchy.
    root_body: Option<hir::HirId>,

    /// The parent of the root body owner, if the latter is
    /// an associated const or method, as impls/traits can also
    /// have lifetime parameters free in this body.
    root_parent: Option<hir::HirId>,

    /// `parent_map` maps from a scope ID to the enclosing scope id;
    /// this is usually corresponding to the lexical nesting, though
    /// in the case of closures the parent scope is the innermost
    /// conditional expression or repeating block. (Note that the
    /// enclosing scope ID for the block associated with a closure is
    /// the closure itself.)
    parent_map: FxHashMap<Scope, (Scope, ScopeDepth)>,

    /// `var_map` maps from a variable or binding ID to the block in
    /// which that variable is declared.
    var_map: FxHashMap<hir::ItemLocalId, Scope>,

    /// Maps from a `NodeId` to the associated destruction scope (if any).
    destruction_scopes: FxHashMap<hir::ItemLocalId, Scope>,

    /// `rvalue_scopes` includes entries for those expressions whose cleanup scope is
    /// larger than the default. The map goes from the expression id
    /// to the cleanup scope id. For rvalues not present in this
    /// table, the appropriate cleanup scope is the innermost
    /// enclosing statement, conditional expression, or repeating
    /// block (see `terminating_scopes`).
    /// In constants, None is used to indicate that certain expressions
    /// escape into 'static and should have no local cleanup scope.
    rvalue_scopes: FxHashMap<hir::ItemLocalId, Option<Scope>>,

    /// Encodes the hierarchy of fn bodies. Every fn body (including
    /// closures) forms its own distinct region hierarchy, rooted in
    /// the block that is the fn body. This map points from the ID of
    /// that root block to the ID of the root block for the enclosing
    /// fn, if any. Thus the map structures the fn bodies into a
    /// hierarchy based on their lexical mapping. This is used to
    /// handle the relationships between regions in a fn and in a
    /// closure defined by that fn. See the "Modeling closures"
    /// section of the README in infer::region_constraints for
    /// more details.
    closure_tree: FxHashMap<hir::ItemLocalId, hir::ItemLocalId>,

    /// If there are any `yield` nested within a scope, this map
    /// stores the `Span` of the last one and its index in the
    /// postorder of the Visitor traversal on the HIR.
    ///
    /// HIR Visitor postorder indexes might seem like a peculiar
    /// thing to care about. But it turns out that HIR bindings
    /// and the temporary results of HIR expressions are never
    /// storage-live at the end of HIR nodes with postorder indexes
    /// lower than theirs, and therefore don't need to be suspended
    /// at yield-points at these indexes.
    ///
    /// For an example, suppose we have some code such as:
    /// ```rust,ignore (example)
    ///     foo(f(), yield y, bar(g()))
    /// ```
    ///
    /// With the HIR tree (calls numbered for expository purposes)
    /// ```
    ///     Call#0(foo, [Call#1(f), Yield(y), Call#2(bar, Call#3(g))])
    /// ```
    ///
    /// Obviously, the result of `f()` was created before the yield
    /// (and therefore needs to be kept valid over the yield) while
    /// the result of `g()` occurs after the yield (and therefore
    /// doesn't). If we want to infer that, we can look at the
    /// postorder traversal:
    /// ```plain,ignore
    ///     `foo` `f` Call#1 `y` Yield `bar` `g` Call#3 Call#2 Call#0
    /// ```
    ///
    /// In which we can easily see that `Call#1` occurs before the yield,
    /// and `Call#3` after it.
    ///
    /// To see that this method works, consider:
    ///
    /// Let `D` be our binding/temporary and `U` be our other HIR node, with
    /// `HIR-postorder(U) < HIR-postorder(D)` (in our example, U would be
    /// the yield and D would be one of the calls). Let's show that
    /// `D` is storage-dead at `U`.
    ///
    /// Remember that storage-live/storage-dead refers to the state of
    /// the *storage*, and does not consider moves/drop flags.
    ///
    /// Then:
    ///     1. From the ordering guarantee of HIR visitors (see
    ///     `rustc::hir::intravisit`), `D` does not dominate `U`.
    ///     2. Therefore, `D` is *potentially* storage-dead at `U` (because
    ///     we might visit `U` without ever getting to `D`).
    ///     3. However, we guarantee that at each HIR point, each
    ///     binding/temporary is always either always storage-live
    ///     or always storage-dead. This is what is being guaranteed
    ///     by `terminating_scopes` including all blocks where the
    ///     count of executions is not guaranteed.
    ///     4. By `2.` and `3.`, `D` is *statically* storage-dead at `U`,
    ///     QED.
    ///
    /// I don't think this property relies on `3.` in an essential way - it
    /// is probably still correct even if we have "unrestricted" terminating
    /// scopes. However, why use the complicated proof when a simple one
    /// works?
    ///
    /// A subtle thing: `box` expressions, such as `box (&x, yield 2, &y)`. It
    /// might seem that a `box` expression creates a `Box<T>` temporary
    /// when it *starts* executing, at `HIR-preorder(BOX-EXPR)`. That might
    /// be true in the MIR desugaring, but it is not important in the semantics.
    ///
    /// The reason is that semantically, until the `box` expression returns,
    /// the values are still owned by their containing expressions. So
    /// we'll see that `&x`.
    yield_in_scope: FxHashMap<Scope, YieldData>,

    /// The number of visit_expr and visit_pat calls done in the body.
    /// Used to sanity check visit_expr/visit_pat call count when
    /// calculating generator interiors.
    body_expr_count: FxHashMap<hir::BodyId, usize>,
}

/// Information about the most recent `yield` found within a scope;
/// see the `yield_in_scope` field of `ScopeTree`.
#[derive(Debug, Copy, Clone, RustcEncodable, RustcDecodable, HashStable)]
pub struct YieldData {
    /// `Span` of the yield.
    pub span: Span,
    /// The number of expressions and patterns appearing before the `yield` in the body + 1.
    pub expr_and_pat_count: usize,
    /// Where the yield came from (see `hir::YieldSource`).
    pub source: hir::YieldSource,
}

/// The (mutable) traversal state carried by `RegionResolutionVisitor`:
/// the current root body plus the current variable and expression parents.
#[derive(Debug, Copy, Clone)]
pub struct Context {
    /// the root of the current region tree. This is typically the id
    /// of the innermost fn body. Each fn forms its own disjoint tree
    /// in the region hierarchy. These fn bodies are themselves
    /// arranged into a tree. See the "Modeling closures" section of
    /// the README in infer::region_constraints for more
    /// details.
    root_id: Option<hir::ItemLocalId>,

    /// The scope that contains any new variables declared, plus its depth in
    /// the scope tree.
    var_parent: Option<(Scope, ScopeDepth)>,

    /// Region parent of expressions, etc., plus its depth in the scope tree.
    parent: Option<(Scope, ScopeDepth)>,
}

struct RegionResolutionVisitor<'tcx> {
    tcx: TyCtxt<'tcx>,

    // The number of expressions and patterns visited in the current body
    expr_and_pat_count: usize,
    // When this is `true`, we record the `Scopes` we encounter
    // when processing a Yield expression. This allows us to fix
    // up their indices.
    pessimistic_yield: bool,
    // Stores scopes when pessimistic_yield is true.
    fixup_scopes: Vec<Scope>,
    // Generated scope tree:
    scope_tree: ScopeTree,

    cx: Context,

    /// `terminating_scopes` is a set containing the ids of each
    /// statement, or conditional/repeating expression. These scopes
    /// are called "terminating scopes" because, when attempting to
    /// find the scope of a temporary, by default we search up the
    /// enclosing scopes until we encounter the terminating scope. A
    /// conditional/repeating expression is one which is not
    /// guaranteed to execute exactly once upon entering the parent
    /// scope. This could be because the expression only executes
    /// conditionally, such as the expression `b` in `a && b`, or
    /// because the expression may execute many times, such as a loop
    /// body. The reason that we distinguish such expressions is that,
    /// upon exiting the parent scope, we cannot statically know how
    /// many times the expression executed, and thus if the expression
    /// creates temporaries we cannot know statically how many such
    /// temporaries we would have to cleanup. Therefore, we ensure that
    /// the temporaries never outlast the conditional/repeating
    /// expression, preventing the need for dynamic checks and/or
    /// arbitrary amounts of stack space. Terminating scopes end
    /// up being contained in a DestructionScope that contains the
    /// destructor's execution.
    terminating_scopes: FxHashSet<hir::ItemLocalId>,
}

// Locates the visit-order index (`result`) of the expression or pattern
// identified by `hir_id` within a body.
struct ExprLocatorVisitor {
    hir_id: hir::HirId,
    result: Option<usize>,
    expr_and_pat_count: usize,
}

// This visitor has to have the same visit_expr calls as RegionResolutionVisitor
// since `expr_count` is compared against the results there.
impl<'tcx> Visitor<'tcx> for ExprLocatorVisitor {
    fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> {
        NestedVisitorMap::None
    }

    fn visit_pat(&mut self, pat: &'tcx Pat) {
        intravisit::walk_pat(self, pat);

        // Counted post-order, matching `resolve_pat` in RegionResolutionVisitor.
        self.expr_and_pat_count += 1;

        if pat.hir_id == self.hir_id {
            self.result = Some(self.expr_and_pat_count);
        }
    }

    fn visit_expr(&mut self, expr: &'tcx Expr) {
        debug!("ExprLocatorVisitor - pre-increment {} expr = {:?}",
               self.expr_and_pat_count,
               expr);

        intravisit::walk_expr(self, expr);

        self.expr_and_pat_count += 1;

        debug!("ExprLocatorVisitor - post-increment {} expr = {:?}",
               self.expr_and_pat_count,
               expr);

        if expr.hir_id == self.hir_id {
            self.result = Some(self.expr_and_pat_count);
        }
    }
}

impl<'tcx> ScopeTree {
    /// Records that `parent` is the enclosing scope of `child` (and registers
    /// `child` in `destruction_scopes` when it is a destruction scope).
    pub fn record_scope_parent(&mut self, child: Scope, parent: Option<(Scope, ScopeDepth)>) {
        debug!("{:?}.parent = {:?}", child, parent);

        if let Some(p) = parent {
            let prev = self.parent_map.insert(child, p);
            assert!(prev.is_none());
        }

        // record the destruction scopes for later so we can query them
        if let ScopeData::Destruction = child.data {
            self.destruction_scopes.insert(child.item_local_id(), child);
        }
    }

    /// Invokes `e` with each (child, parent) pair in the parent map.
    pub fn each_encl_scope<E>(&self, mut e: E) where E: FnMut(Scope, Scope) {
        for (&child, &parent) in &self.parent_map {
            e(child, parent.0)
        }
    }

    /// Invokes `e` with each (variable, declaring scope) pair in the var map.
    pub fn each_var_scope<E>(&self, mut e: E) where E: FnMut(&hir::ItemLocalId, Scope) {
        for (child, &parent) in
self.var_map.iter() {
            e(child, parent)
        }
    }

    /// Returns the destruction scope recorded for `n`, if any.
    pub fn opt_destruction_scope(&self, n: hir::ItemLocalId) -> Option<Scope> {
        self.destruction_scopes.get(&n).cloned()
    }

    /// Records that `sub_closure` is defined within `sup_closure`. These ids
    /// should be the ID of the block that is the fn body, which is
    /// also the root of the region hierarchy for that fn.
    fn record_closure_parent(&mut self,
                             sub_closure: hir::ItemLocalId,
                             sup_closure: hir::ItemLocalId) {
        debug!("record_closure_parent(sub_closure={:?}, sup_closure={:?})",
               sub_closure, sup_closure);
        assert!(sub_closure != sup_closure);
        let previous = self.closure_tree.insert(sub_closure, sup_closure);
        assert!(previous.is_none());
    }

    /// Records that the variable/binding `var` is declared in scope `lifetime`.
    fn record_var_scope(&mut self, var: hir::ItemLocalId, lifetime: Scope) {
        debug!("record_var_scope(sub={:?}, sup={:?})", var, lifetime);
        assert!(var != lifetime.item_local_id());
        self.var_map.insert(var, lifetime);
    }

    /// Records a custom cleanup scope for the rvalue `var`; `None` marks
    /// expressions (in constants) that escape into 'static.
    fn record_rvalue_scope(&mut self, var: hir::ItemLocalId, lifetime: Option<Scope>) {
        debug!("record_rvalue_scope(sub={:?}, sup={:?})", var, lifetime);
        if let Some(lifetime) = lifetime {
            assert!(var != lifetime.item_local_id());
        }
        self.rvalue_scopes.insert(var, lifetime);
    }

    pub fn opt_encl_scope(&self, id: Scope) -> Option<Scope> {
        //! Returns the narrowest scope that encloses `id`, if any.
        self.parent_map.get(&id).cloned().map(|(p, _)| p)
    }

    #[allow(dead_code)] // used in cfg
    pub fn encl_scope(&self, id: Scope) -> Scope {
        //! Returns the narrowest scope that encloses `id`; panics if `id`
        //! has no enclosing scope.
        self.opt_encl_scope(id).unwrap()
    }

    /// Returns the lifetime of the local variable `var_id`
    pub fn var_scope(&self, var_id: hir::ItemLocalId) -> Scope {
        self.var_map.get(&var_id).cloned().unwrap_or_else(||
            bug!("no enclosing scope for id {:?}", var_id))
    }

    pub fn temporary_scope(&self, expr_id: hir::ItemLocalId) -> Option<Scope> {
        //! Returns the scope when temp created by expr_id will be cleaned up

        // check for a designated rvalue scope
        if let Some(&s) = self.rvalue_scopes.get(&expr_id) {
            debug!("temporary_scope({:?}) = {:?} [custom]", expr_id, s);
            return s;
        }

        // else, locate the innermost terminating scope
        // if there's one. Static items, for instance, won't
        // have an enclosing scope, hence no scope will be
        // returned.
        let mut id = Scope { id: expr_id, data: ScopeData::Node };

        // Walk up the parent chain; the temporary lives until just inside
        // the first enclosing destruction scope.
        while let Some(&(p, _)) = self.parent_map.get(&id) {
            match p.data {
                ScopeData::Destruction => {
                    debug!("temporary_scope({:?}) = {:?} [enclosing]",
                           expr_id, id);
                    return Some(id);
                }
                _ => id = p
            }
        }

        debug!("temporary_scope({:?}) = None", expr_id);
        return None;
    }

    pub fn var_region(&self, id: hir::ItemLocalId) -> ty::RegionKind {
        //! Returns the lifetime of the variable `id`.

        let scope = ty::ReScope(self.var_scope(id));
        debug!("var_region({:?}) = {:?}", id, scope);
        scope
    }

    /// Returns `true` if either scope encloses the other (in either direction).
    pub fn scopes_intersect(&self, scope1: Scope, scope2: Scope) -> bool {
        self.is_subscope_of(scope1, scope2) ||
        self.is_subscope_of(scope2, scope1)
    }

    /// Returns `true` if `subscope` is equal to or is lexically nested inside `superscope`, and
    /// `false` otherwise.
    pub fn is_subscope_of(&self,
                          subscope: Scope,
                          superscope: Scope)
                          -> bool {
        let mut s = subscope;
        debug!("is_subscope_of({:?}, {:?})", subscope, superscope);
        while superscope != s {
            match self.opt_encl_scope(s) {
                None => {
                    debug!("is_subscope_of({:?}, {:?}, s={:?})=false",
                           subscope, superscope, s);
                    return false;
                }
                Some(scope) => s = scope
            }
        }

        debug!("is_subscope_of({:?}, {:?})=true", subscope, superscope);

        return true;
    }

    /// Returns the ID of the innermost containing body
    pub fn containing_body(&self, mut scope: Scope) -> Option<hir::ItemLocalId> {
        loop {
            if let ScopeData::CallSite = scope.data {
                return Some(scope.item_local_id());
            }

            scope = self.opt_encl_scope(scope)?;
        }
    }

    /// Finds the nearest common ancestor of two scopes. That is, finds the
    /// smallest scope which is greater than or equal to both `scope_a` and
    /// `scope_b`.
    pub fn nearest_common_ancestor(&self, scope_a: Scope, scope_b: Scope) -> Scope {
        if scope_a == scope_b { return scope_a; }

        let mut a = scope_a;
        let mut b = scope_b;

        // Get the depth of each scope's parent. If either scope has no parent,
        // it must be the root, which means we can stop immediately because the
        // root must be the nearest common ancestor. (In practice, this is
        // moderately common.)
        let (parent_a, parent_a_depth) = match self.parent_map.get(&a) {
            Some(pd) => *pd,
            None => return a,
        };
        let (parent_b, parent_b_depth) = match self.parent_map.get(&b) {
            Some(pd) => *pd,
            None => return b,
        };

        if parent_a_depth > parent_b_depth {
            // `a` is lower than `b`. Move `a` up until it's at the same depth
            // as `b`. The first move up is trivial because we already found
            // `parent_a` above; the loop does the remaining N-1 moves.
            a = parent_a;
            for _ in 0..(parent_a_depth - parent_b_depth - 1) {
                a = self.parent_map.get(&a).unwrap().0;
            }
        } else if parent_b_depth > parent_a_depth {
            // `b` is lower than `a`.
            b = parent_b;
            for _ in 0..(parent_b_depth - parent_a_depth - 1) {
                b = self.parent_map.get(&b).unwrap().0;
            }
        } else {
            // Both scopes are at the same depth, and we know they're not equal
            // because that case was tested for at the top of this function. So
            // we can trivially move them both up one level now.
            assert!(parent_a_depth != 0);
            a = parent_a;
            b = parent_b;
        }

        // Now both scopes are at the same level. We move upwards in lockstep
        // until they match. In practice, this loop is almost always executed
        // zero times because `a` is almost always a direct ancestor of `b` or
        // vice versa.
        while a != b {
            a = self.parent_map.get(&a).unwrap().0;
            b = self.parent_map.get(&b).unwrap().0;
        };

        a
    }

    /// Assuming that the provided region was defined within this `ScopeTree`,
    /// returns the outermost `Scope` that the region outlives.
pub fn early_free_scope(&self, tcx: TyCtxt<'tcx>, br: &ty::EarlyBoundRegion)
        -> Scope {
        let param_owner = tcx.parent(br.def_id).unwrap();

        let param_owner_id = tcx.hir().as_local_hir_id(param_owner).unwrap();
        let scope = tcx.hir().maybe_body_owned_by(param_owner_id).map(|body_id| {
            tcx.hir().body(body_id).value.hir_id.local_id
        }).unwrap_or_else(|| {
            // The lifetime was defined on node that doesn't own a body,
            // which in practice can only mean a trait or an impl, that
            // is the parent of a method, and that is enforced below.
            if Some(param_owner_id) != self.root_parent {
                tcx.sess.delay_span_bug(
                    DUMMY_SP,
                    &format!("free_scope: {:?} not recognized by the \
                              region scope tree for {:?} / {:?}",
                             param_owner,
                             self.root_parent.map(|id| tcx.hir().local_def_id_from_hir_id(id)),
                             self.root_body.map(|hir_id| DefId::local(hir_id.owner))));
            }

            // The trait/impl lifetime is in scope for the method's body.
            self.root_body.unwrap().local_id
        });

        Scope { id: scope, data: ScopeData::CallSite }
    }

    /// Assuming that the provided region was defined within this `ScopeTree`,
    /// returns the outermost `Scope` that the region outlives.
    pub fn free_scope(&self, tcx: TyCtxt<'tcx>, fr: &ty::FreeRegion) -> Scope {
        let param_owner = match fr.bound_region {
            ty::BoundRegion::BrNamed(def_id, _) => {
                tcx.parent(def_id).unwrap()
            }
            _ => fr.scope
        };

        // Ensure that the named late-bound lifetimes were defined
        // on the same function that they ended up being freed in.
        assert_eq!(param_owner, fr.scope);

        let param_owner_id = tcx.hir().as_local_hir_id(param_owner).unwrap();
        let body_id = tcx.hir().body_owned_by(param_owner_id);
        Scope { id: tcx.hir().body(body_id).value.hir_id.local_id, data: ScopeData::CallSite }
    }

    /// Checks whether the given scope contains a `yield`. If so,
    /// returns `Some((span, expr_count))` with the span of a yield we found and
    /// the number of expressions and patterns appearing before the `yield` in the body + 1.
    /// If there are multiple yields in a scope, the one with the highest number is returned.
    pub fn yield_in_scope(&self, scope: Scope) -> Option<YieldData> {
        self.yield_in_scope.get(&scope).cloned()
    }

    /// Checks whether the given scope contains a `yield` and if that yield could execute
    /// after `expr`. If so, it returns the span of that `yield`.
    /// `scope` must be inside the body.
    pub fn yield_in_scope_for_expr(&self,
                                   scope: Scope,
                                   expr_hir_id: hir::HirId,
                                   body: &'tcx hir::Body) -> Option<Span> {
        self.yield_in_scope(scope).and_then(|YieldData { span, expr_and_pat_count, .. }| {
            // Locate `expr`'s postorder index, then compare it against the
            // yield's index: the yield can execute after `expr` only if the
            // yield's index is at least as large.
            let mut visitor = ExprLocatorVisitor {
                hir_id: expr_hir_id,
                result: None,
                expr_and_pat_count: 0,
            };
            visitor.visit_body(body);
            if expr_and_pat_count >= visitor.result.unwrap() {
                Some(span)
            } else {
                None
            }
        })
    }

    /// Gives the number of expressions visited in a body.
    /// Used to sanity check visit_expr call count when
    /// calculating generator interiors.
    pub fn body_expr_count(&self, body_id: hir::BodyId) -> Option<usize> {
        self.body_expr_count.get(&body_id).map(|r| *r)
    }
}

/// Records the lifetime of a local variable as `cx.var_parent`
fn record_var_lifetime(
    visitor: &mut RegionResolutionVisitor<'_>,
    var_id: hir::ItemLocalId,
    _sp: Span,
) {
    match visitor.cx.var_parent {
        None => {
            // this can happen in extern fn declarations like
            //
            //    extern fn isalnum(c: c_int) -> c_int
        }
        Some((parent_scope, _)) =>
            visitor.scope_tree.record_var_scope(var_id, parent_scope),
    }
}

fn resolve_block<'tcx>(visitor: &mut RegionResolutionVisitor<'tcx>, blk: &'tcx hir::Block) {
    debug!("resolve_block(blk.hir_id={:?})", blk.hir_id);

    let prev_cx = visitor.cx;

    // We treat the tail expression in the block (if any) somewhat
    // differently from the statements. The issue has to do with
    // temporary lifetimes. Consider the following:
    //
    //    quux({
    //        let inner = ... (&bar()) ...;
    //
    //        (... (&foo()) ...)
// (the tail expression)
    //    }, other_argument());
    //
    // Each of the statements within the block is a terminating
    // scope, and thus a temporary (e.g., the result of calling
    // `bar()` in the initializer expression for `let inner = ...;`)
    // will be cleaned up immediately after its corresponding
    // statement (i.e., `let inner = ...;`) executes.
    //
    // On the other hand, temporaries associated with evaluating the
    // tail expression for the block are assigned lifetimes so that
    // they will be cleaned up as part of the terminating scope
    // *surrounding* the block expression. Here, the terminating
    // scope for the block expression is the `quux(..)` call; so
    // those temporaries will only be cleaned up *after* both
    // `other_argument()` has run and also the call to `quux(..)`
    // itself has returned.

    visitor.enter_node_scope_with_dtor(blk.hir_id.local_id);
    visitor.cx.var_parent = visitor.cx.parent;

    {
        // This block should be kept approximately in sync with
        // `intravisit::walk_block`. (We manually walk the block, rather
        // than call `walk_block`, in order to maintain precise
        // index information.)

        for (i, statement) in blk.stmts.iter().enumerate() {
            match statement.node {
                hir::StmtKind::Local(..) |
                hir::StmtKind::Item(..) => {
                    // Each declaration introduces a subscope for bindings
                    // introduced by the declaration; this subscope covers a
                    // suffix of the block. Each subscope in a block has the
                    // previous subscope in the block as a parent, except for
                    // the first such subscope, which has the block itself as a
                    // parent.
                    visitor.enter_scope(
                        Scope {
                            id: blk.hir_id.local_id,
                            data: ScopeData::Remainder(FirstStatementIndex::new(i))
                        }
                    );
                    visitor.cx.var_parent = visitor.cx.parent;
                }
                hir::StmtKind::Expr(..) |
                hir::StmtKind::Semi(..) => {}
            }
            visitor.visit_stmt(statement)
        }
        walk_list!(visitor, visit_expr, &blk.expr);
    }

    visitor.cx = prev_cx;
}

fn resolve_arm<'tcx>(visitor: &mut RegionResolutionVisitor<'tcx>, arm: &'tcx hir::Arm) {
    let prev_cx = visitor.cx;

    visitor.enter_scope(
        Scope {
            id: arm.hir_id.local_id,
            data: ScopeData::Node,
        }
    );
    visitor.cx.var_parent = visitor.cx.parent;

    // The arm body, and any guard expression, are terminating scopes:
    // they execute conditionally, so their temporaries must not escape.
    visitor.terminating_scopes.insert(arm.body.hir_id.local_id);

    if let Some(hir::Guard::If(ref expr)) = arm.guard {
        visitor.terminating_scopes.insert(expr.hir_id.local_id);
    }

    intravisit::walk_arm(visitor, arm);

    visitor.cx = prev_cx;
}

fn resolve_pat<'tcx>(visitor: &mut RegionResolutionVisitor<'tcx>, pat: &'tcx hir::Pat) {
    visitor.record_child_scope(Scope { id: pat.hir_id.local_id, data: ScopeData::Node });

    // If this is a binding then record the lifetime of that binding.
    if let PatKind::Binding(..) = pat.node {
        record_var_lifetime(visitor, pat.hir_id.local_id, pat.span);
    }

    debug!("resolve_pat - pre-increment {} pat = {:?}", visitor.expr_and_pat_count, pat);

    intravisit::walk_pat(visitor, pat);

    // Counted post-order; must stay in sync with `ExprLocatorVisitor::visit_pat`.
    visitor.expr_and_pat_count += 1;

    debug!("resolve_pat - post-increment {} pat = {:?}", visitor.expr_and_pat_count, pat);
}

fn resolve_stmt<'tcx>(visitor: &mut RegionResolutionVisitor<'tcx>, stmt: &'tcx hir::Stmt) {
    let stmt_id = stmt.hir_id.local_id;
    debug!("resolve_stmt(stmt.id={:?})", stmt_id);

    // Every statement will clean up the temporaries created during
    // execution of that statement. Therefore each statement has an
    // associated destruction scope that represents the scope of the
    // statement plus its destructors, and thus the scope for which
    // regions referenced by the destructors need to survive.
visitor.terminating_scopes.insert(stmt_id); let prev_parent = visitor.cx.parent; visitor.enter_node_scope_with_dtor(stmt_id); intravisit::walk_stmt(visitor, stmt); visitor.cx.parent = prev_parent; } fn resolve_expr<'tcx>(visitor: &mut RegionResolutionVisitor<'tcx>, expr: &'tcx hir::Expr) { debug!("resolve_expr - pre-increment {} expr = {:?}", visitor.expr_and_pat_count, expr); let prev_cx = visitor.cx; visitor.enter_node_scope_with_dtor(expr.hir_id.local_id); { let terminating_scopes = &mut visitor.terminating_scopes; let mut terminating = |id: hir::ItemLocalId| { terminating_scopes.insert(id); }; match expr.node { // Conditional or repeating scopes are always terminating // scopes, meaning that temporaries cannot outlive them. // This ensures fixed size stacks. hir::ExprKind::Binary( source_map::Spanned { node: hir::BinOpKind::And, .. }, _, ref r) | hir::ExprKind::Binary( source_map::Spanned { node: hir::BinOpKind::Or, .. }, _, ref r) => { // For shortcircuiting operators, mark the RHS as a terminating // scope since it only executes conditionally. terminating(r.hir_id.local_id); } hir::ExprKind::Loop(ref body, _, _) => { terminating(body.hir_id.local_id); } hir::ExprKind::While(ref expr, ref body, _) => { terminating(expr.hir_id.local_id); terminating(body.hir_id.local_id); } hir::ExprKind::DropTemps(ref expr) => { // `DropTemps(expr)` does not denote a conditional scope. // Rather, we want to achieve the same behavior as `{ let _t = expr; _t }`. terminating(expr.hir_id.local_id); } hir::ExprKind::AssignOp(..) | hir::ExprKind::Index(..) | hir::ExprKind::Unary(..) | hir::ExprKind::Call(..) | hir::ExprKind::MethodCall(..) => { // FIXME(https://github.com/rust-lang/rfcs/issues/811) Nested method calls // // The lifetimes for a call or method call look as follows: // // call.id // - arg0.id // - ... 
// - argN.id // - call.callee_id // // The idea is that call.callee_id represents *the time when // the invoked function is actually running* and call.id // represents *the time to prepare the arguments and make the // call*. See the section "Borrows in Calls" borrowck/README.md // for an extended explanation of why this distinction is // important. // // record_superlifetime(new_cx, expr.callee_id); } _ => {} } } let prev_pessimistic = visitor.pessimistic_yield; // Ordinarily, we can rely on the visit order of HIR intravisit // to correspond to the actual execution order of statements. // However, there's a weird corner case with compund assignment // operators (e.g. `a += b`). The evaluation order depends on whether // or not the operator is overloaded (e.g. whether or not a trait // like AddAssign is implemented). // For primitive types (which, despite having a trait impl, don't actually // end up calling it), the evluation order is right-to-left. For example, // the following code snippet: // // let y = &mut 0; // *{println!("LHS!"); y} += {println!("RHS!"); 1}; // // will print: // // RHS! // LHS! // // However, if the operator is used on a non-primitive type, // the evaluation order will be left-to-right, since the operator // actually get desugared to a method call. For example, this // nearly identical code snippet: // // let y = &mut String::new(); // *{println!("LHS String"); y} += {println!("RHS String"); "hi"}; // // will print: // LHS String // RHS String // // To determine the actual execution order, we need to perform // trait resolution. Unfortunately, we need to be able to compute // yield_in_scope before type checking is even done, as it gets // used by AST borrowcheck. // // Fortunately, we don't need to know the actual execution order. // It suffices to know the 'worst case' order with respect to yields. // Specifically, we need to know the highest 'expr_and_pat_count' // that we could assign to the yield expression. 
To do this, // we pick the greater of the two values from the left-hand // and right-hand expressions. This makes us overly conservative // about what types could possibly live across yield points, // but we will never fail to detect that a type does actually // live across a yield point. The latter part is critical - // we're already overly conservative about what types will live // across yield points, as the generated MIR will determine // when things are actually live. However, for typecheck to work // properly, we can't miss any types. match expr.node { // Manually recurse over closures, because they are the only // case of nested bodies that share the parent environment. hir::ExprKind::Closure(.., body, _, _) => { let body = visitor.tcx.hir().body(body); visitor.visit_body(body); }, hir::ExprKind::AssignOp(_, ref left_expr, ref right_expr) => { debug!("resolve_expr - enabling pessimistic_yield, was previously {}", prev_pessimistic); let start_point = visitor.fixup_scopes.len(); visitor.pessimistic_yield = true; // If the actual execution order turns out to be right-to-left, // then we're fine. However, if the actual execution order is left-to-right, // then we'll assign too low a count to any `yield` expressions // we encounter in 'right_expression' - they should really occur after all of the // expressions in 'left_expression'. visitor.visit_expr(&right_expr); visitor.pessimistic_yield = prev_pessimistic; debug!("resolve_expr - restoring pessimistic_yield to {}", prev_pessimistic); visitor.visit_expr(&left_expr); debug!("resolve_expr - fixing up counts to {}", visitor.expr_and_pat_count); // Remove and process any scopes pushed by the visitor let target_scopes = visitor.fixup_scopes.drain(start_point..); for scope in target_scopes { let mut yield_data = visitor.scope_tree.yield_in_scope.get_mut(&scope).unwrap(); let count = yield_data.expr_and_pat_count; let span = yield_data.span; // expr_and_pat_count never decreases. 
Since we recorded counts in yield_in_scope // before walking the left-hand side, it should be impossible for the recorded // count to be greater than the left-hand side count. if count > visitor.expr_and_pat_count { bug!("Encountered greater count {} at span {:?} - expected no greater than {}", count, span, visitor.expr_and_pat_count); } let new_count = visitor.expr_and_pat_count; debug!("resolve_expr - increasing count for scope {:?} from {} to {} at span {:?}", scope, count, new_count, span); yield_data.expr_and_pat_count = new_count; } } _ => intravisit::walk_expr(visitor, expr) } visitor.expr_and_pat_count += 1; debug!("resolve_expr post-increment {}, expr = {:?}", visitor.expr_and_pat_count, expr); if let hir::ExprKind::Yield(_, source) = &expr.node { // Mark this expr's scope and all parent scopes as containing `yield`. let mut scope = Scope { id: expr.hir_id.local_id, data: ScopeData::Node }; loop { let data = YieldData { span: expr.span, expr_and_pat_count: visitor.expr_and_pat_count, source: *source, }; visitor.scope_tree.yield_in_scope.insert(scope, data); if visitor.pessimistic_yield { debug!("resolve_expr in pessimistic_yield - marking scope {:?} for fixup", scope); visitor.fixup_scopes.push(scope); } // Keep traversing up while we can. match visitor.scope_tree.parent_map.get(&scope) { // Don't cross from closure bodies to their parent. Some(&(superscope, _)) => match superscope.data { ScopeData::CallSite => break, _ => scope = superscope }, None => break } } } visitor.cx = prev_cx; } fn resolve_local<'tcx>( visitor: &mut RegionResolutionVisitor<'tcx>, pat: Option<&'tcx hir::Pat>, init: Option<&'tcx hir::Expr>, ) { debug!("resolve_local(pat={:?}, init={:?})", pat, init); let blk_scope = visitor.cx.var_parent.map(|(p, _)| p); // As an exception to the normal rules governing temporary // lifetimes, initializers in a let have a temporary lifetime // of the enclosing block. 
This means that e.g., a program // like the following is legal: // // let ref x = HashMap::new(); // // Because the hash map will be freed in the enclosing block. // // We express the rules more formally based on 3 grammars (defined // fully in the helpers below that implement them): // // 1. `E&`, which matches expressions like `&<rvalue>` that // own a pointer into the stack. // // 2. `P&`, which matches patterns like `ref x` or `(ref x, ref // y)` that produce ref bindings into the value they are // matched against or something (at least partially) owned by // the value they are matched against. (By partially owned, // I mean that creating a binding into a ref-counted or managed value // would still count.) // // 3. `ET`, which matches both rvalues like `foo()` as well as places // based on rvalues like `foo().x[2].y`. // // A subexpression `<rvalue>` that appears in a let initializer // `let pat [: ty] = expr` has an extended temporary lifetime if // any of the following conditions are met: // // A. `pat` matches `P&` and `expr` matches `ET` // (covers cases where `pat` creates ref bindings into an rvalue // produced by `expr`) // B. `ty` is a borrowed pointer and `expr` matches `ET` // (covers cases where coercion creates a borrow) // C. `expr` matches `E&` // (covers cases `expr` borrows an rvalue that is then assigned // to memory (at least partially) owned by the binding) // // Here are some examples hopefully giving an intuition where each // rule comes into play and why: // // Rule A. `let (ref x, ref y) = (foo().x, 44)`. The rvalue `(22, 44)` // would have an extended lifetime, but not `foo()`. // // Rule B. `let x = &foo().x`. The rvalue ``foo()` would have extended // lifetime. // // In some cases, multiple rules may apply (though not to the same // rvalue). 
For example: // // let ref x = [&a(), &b()]; // // Here, the expression `[...]` has an extended lifetime due to rule // A, but the inner rvalues `a()` and `b()` have an extended lifetime // due to rule C. if let Some(expr) = init { record_rvalue_scope_if_borrow_expr(visitor, &expr, blk_scope); if let Some(pat) = pat { if is_binding_pat(pat) { record_rvalue_scope(visitor, &expr, blk_scope); } } } // Make sure we visit the initializer first, so expr_and_pat_count remains correct if let Some(expr) = init { visitor.visit_expr(expr); } if let Some(pat) = pat { visitor.visit_pat(pat); } /// Returns `true` if `pat` match the `P&` non-terminal. /// /// P& = ref X /// | StructName { ..., P&, ... } /// | VariantName(..., P&, ...) /// | [ ..., P&, ... ] /// | ( ..., P&, ... ) /// | box P& fn is_binding_pat(pat: &hir::Pat) -> bool { // Note that the code below looks for *explicit* refs only, that is, it won't // know about *implicit* refs as introduced in #42640. // // This is not a problem. For example, consider // // let (ref x, ref y) = (Foo { .. }, Bar { .. }); // // Due to the explicit refs on the left hand side, the below code would signal // that the temporary value on the right hand side should live until the end of // the enclosing block (as opposed to being dropped after the let is complete). // // To create an implicit ref, however, you must have a borrowed value on the RHS // already, as in this example (which won't compile before #42640): // // let Foo { x, .. } = &Foo { x: ..., ... }; // // in place of // // let Foo { ref x, .. } = Foo { ... }; // // In the former case (the implicit ref version), the temporary is created by the // & expression, and its lifetime would be extended to the end of the block (due // to a different rule, not the below code). match pat.node { PatKind::Binding(hir::BindingAnnotation::Ref, ..) | PatKind::Binding(hir::BindingAnnotation::RefMut, ..) 
=> true, PatKind::Struct(_, ref field_pats, _) => { field_pats.iter().any(|fp| is_binding_pat(&fp.node.pat)) } PatKind::Slice(ref pats1, ref pats2, ref pats3) => { pats1.iter().any(|p| is_binding_pat(&p)) || pats2.iter().any(|p| is_binding_pat(&p)) || pats3.iter().any(|p| is_binding_pat(&p)) } PatKind::TupleStruct(_, ref subpats, _) | PatKind::Tuple(ref subpats, _) => { subpats.iter().any(|p| is_binding_pat(&p)) } PatKind::Box(ref subpat) => { is_binding_pat(&subpat) } _ => false, } } /// If `expr` matches the `E&` grammar, then records an extended rvalue scope as appropriate: /// /// E& = & ET /// | StructName { ..., f: E&, ... } /// | [ ..., E&, ... ] /// | ( ..., E&, ... ) /// | {...; E&} /// | box E& /// | E& as ... /// | ( E& ) fn record_rvalue_scope_if_borrow_expr<'tcx>( visitor: &mut RegionResolutionVisitor<'tcx>, expr: &hir::Expr, blk_id: Option<Scope>, ) { match expr.node { hir::ExprKind::AddrOf(_, ref subexpr) => { record_rvalue_scope_if_borrow_expr(visitor, &subexpr, blk_id); record_rvalue_scope(visitor, &subexpr, blk_id); } hir::ExprKind::Struct(_, ref fields, _) => { for field in fields { record_rvalue_scope_if_borrow_expr( visitor, &field.expr, blk_id); } } hir::ExprKind::Array(ref subexprs) | hir::ExprKind::Tup(ref subexprs) => { for subexpr in subexprs { record_rvalue_scope_if_borrow_expr( visitor, &subexpr, blk_id); } } hir::ExprKind::Cast(ref subexpr, _) => { record_rvalue_scope_if_borrow_expr(visitor, &subexpr, blk_id) } hir::ExprKind::Block(ref block, _) => { if let Some(ref subexpr) = block.expr { record_rvalue_scope_if_borrow_expr( visitor, &subexpr, blk_id); } } _ => {} } } /// Applied to an expression `expr` if `expr` -- or something owned or partially owned by /// `expr` -- is going to be indirectly referenced by a variable in a let statement. In that /// case, the "temporary lifetime" or `expr` is extended to be the block enclosing the `let` /// statement. 
/// /// More formally, if `expr` matches the grammar `ET`, record the rvalue scope of the matching /// `<rvalue>` as `blk_id`: /// /// ET = *ET /// | ET[...] /// | ET.f /// | (ET) /// | <rvalue> /// /// Note: ET is intended to match "rvalues or places based on rvalues". fn record_rvalue_scope<'tcx>( visitor: &mut RegionResolutionVisitor<'tcx>, expr: &hir::Expr, blk_scope: Option<Scope>, ) { let mut expr = expr; loop { // Note: give all the expressions matching `ET` with the // extended temporary lifetime, not just the innermost rvalue, // because in codegen if we must compile e.g., `*rvalue()` // into a temporary, we request the temporary scope of the // outer expression. visitor.scope_tree.record_rvalue_scope(expr.hir_id.local_id, blk_scope); match expr.node { hir::ExprKind::AddrOf(_, ref subexpr) | hir::ExprKind::Unary(hir::UnDeref, ref subexpr) | hir::ExprKind::Field(ref subexpr, _) | hir::ExprKind::Index(ref subexpr, _) => { expr = &subexpr; } _ => { return; } } } } } impl<'tcx> RegionResolutionVisitor<'tcx> { /// Records the current parent (if any) as the parent of `child_scope`. /// Returns the depth of `child_scope`. fn record_child_scope(&mut self, child_scope: Scope) -> ScopeDepth { let parent = self.cx.parent; self.scope_tree.record_scope_parent(child_scope, parent); // If `child_scope` has no parent, it must be the root node, and so has // a depth of 1. Otherwise, its depth is one more than its parent's. parent.map_or(1, |(_p, d)| d + 1) } /// Records the current parent (if any) as the parent of `child_scope`, /// and sets `child_scope` as the new current parent. 
fn enter_scope(&mut self, child_scope: Scope) { let child_depth = self.record_child_scope(child_scope); self.cx.parent = Some((child_scope, child_depth)); } fn enter_node_scope_with_dtor(&mut self, id: hir::ItemLocalId) { // If node was previously marked as a terminating scope during the // recursive visit of its parent node in the AST, then we need to // account for the destruction scope representing the scope of // the destructors that run immediately after it completes. if self.terminating_scopes.contains(&id) { self.enter_scope(Scope { id, data: ScopeData::Destruction }); } self.enter_scope(Scope { id, data: ScopeData::Node }); } } impl<'tcx> Visitor<'tcx> for RegionResolutionVisitor<'tcx> { fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> { NestedVisitorMap::None } fn visit_block(&mut self, b: &'tcx Block) { resolve_block(self, b); } fn visit_body(&mut self, body: &'tcx hir::Body) { let body_id = body.id(); let owner_id = self.tcx.hir().body_owner(body_id); debug!("visit_body(id={:?}, span={:?}, body.id={:?}, cx.parent={:?})", owner_id, self.tcx.sess.source_map().span_to_string(body.value.span), body_id, self.cx.parent); let outer_ec = mem::replace(&mut self.expr_and_pat_count, 0); let outer_cx = self.cx; let outer_ts = mem::take(&mut self.terminating_scopes); self.terminating_scopes.insert(body.value.hir_id.local_id); if let Some(root_id) = self.cx.root_id { self.scope_tree.record_closure_parent(body.value.hir_id.local_id, root_id); } self.cx.root_id = Some(body.value.hir_id.local_id); self.enter_scope(Scope { id: body.value.hir_id.local_id, data: ScopeData::CallSite }); self.enter_scope(Scope { id: body.value.hir_id.local_id, data: ScopeData::Arguments }); // The arguments and `self` are parented to the fn. self.cx.var_parent = self.cx.parent.take(); for argument in &body.arguments { self.visit_pat(&argument.pat); } // The body of the every fn is a root scope. 
self.cx.parent = self.cx.var_parent; if self.tcx.hir().body_owner_kind(owner_id).is_fn_or_closure() { self.visit_expr(&body.value) } else { // Only functions have an outer terminating (drop) scope, while // temporaries in constant initializers may be 'static, but only // according to rvalue lifetime semantics, using the same // syntactical rules used for let initializers. // // e.g., in `let x = &f();`, the temporary holding the result from // the `f()` call lives for the entirety of the surrounding block. // // Similarly, `const X: ... = &f();` would have the result of `f()` // live for `'static`, implying (if Drop restrictions on constants // ever get lifted) that the value *could* have a destructor, but // it'd get leaked instead of the destructor running during the // evaluation of `X` (if at all allowed by CTFE). // // However, `const Y: ... = g(&f());`, like `let y = g(&f());`, // would *not* let the `f()` temporary escape into an outer scope // (i.e., `'static`), which means that after `g` returns, it drops, // and all the associated destruction scope rules apply. self.cx.var_parent = None; resolve_local(self, None, Some(&body.value)); } if body.generator_kind.is_some() { self.scope_tree.body_expr_count.insert(body_id, self.expr_and_pat_count); } // Restore context we had at the start. 
self.expr_and_pat_count = outer_ec; self.cx = outer_cx; self.terminating_scopes = outer_ts; } fn visit_arm(&mut self, a: &'tcx Arm) { resolve_arm(self, a); } fn visit_pat(&mut self, p: &'tcx Pat) { resolve_pat(self, p); } fn visit_stmt(&mut self, s: &'tcx Stmt) { resolve_stmt(self, s); } fn visit_expr(&mut self, ex: &'tcx Expr) { resolve_expr(self, ex); } fn visit_local(&mut self, l: &'tcx Local) { resolve_local(self, Some(&l.pat), l.init.as_ref().map(|e| &**e)); } } fn region_scope_tree(tcx: TyCtxt<'_>, def_id: DefId) -> &ScopeTree { let closure_base_def_id = tcx.closure_base_def_id(def_id); if closure_base_def_id != def_id { return tcx.region_scope_tree(closure_base_def_id); } let id = tcx.hir().as_local_hir_id(def_id).unwrap(); let scope_tree = if let Some(body_id) = tcx.hir().maybe_body_owned_by(id) { let mut visitor = RegionResolutionVisitor { tcx, scope_tree: ScopeTree::default(), expr_and_pat_count: 0, cx: Context { root_id: None, parent: None, var_parent: None, }, terminating_scopes: Default::default(), pessimistic_yield: false, fixup_scopes: vec![], }; let body = tcx.hir().body(body_id); visitor.scope_tree.root_body = Some(body.value.hir_id); // If the item is an associated const or a method, // record its impl/trait parent, as it can also have // lifetime parameters free in this body. 
match tcx.hir().get(id) { Node::ImplItem(_) | Node::TraitItem(_) => { visitor.scope_tree.root_parent = Some(tcx.hir().get_parent_item(id)); } _ => {} } visitor.visit_body(body); visitor.scope_tree } else { ScopeTree::default() }; tcx.arena.alloc(scope_tree) } pub fn provide(providers: &mut Providers<'_>) { *providers = Providers { region_scope_tree, ..*providers }; } impl<'a> HashStable<StableHashingContext<'a>> for ScopeTree { fn hash_stable<W: StableHasherResult>(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher<W>) { let ScopeTree { root_body, root_parent, ref body_expr_count, ref parent_map, ref var_map, ref destruction_scopes, ref rvalue_scopes, ref closure_tree, ref yield_in_scope, } = *self; hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| { root_body.hash_stable(hcx, hasher); root_parent.hash_stable(hcx, hasher); }); body_expr_count.hash_stable(hcx, hasher); parent_map.hash_stable(hcx, hasher); var_map.hash_stable(hcx, hasher); destruction_scopes.hash_stable(hcx, hasher); rvalue_scopes.hash_stable(hcx, hasher); closure_tree.hash_stable(hcx, hasher); yield_in_scope.hash_stable(hcx, hasher); } }
40.476501
99
0.5821
384def3f61707749672180fe04925f276782f5df
544
impl Solution { pub fn count_good_triplets(arr: Vec<i32>, a: i32, b: i32, c: i32) -> i32 { let mut ans = 0; for i in 0..arr.len() { for j in (i + 1)..arr.len() { for k in (j + 1)..arr.len() { if (arr[i] - arr[j]).abs() <= a && (arr[j] - arr[k]).abs() <= b && (arr[i] - arr[k]).abs() <= c { ans += 1; } } } } ans } }
28.631579
78
0.286765
5b4b8a7bc6658d863f284e4f99bcac7ead9722de
1,716
//! This module contains the required definitions and //! functions for the dual of the basic role A. use crate::role::a::RoleA; use crate::role::Role; use crossbeam_channel::{bounded, Sender}; /// Gives the order to the /// [`MeshedChannels`] related to /// the `Dual` of A. /// /// This `struct` should only be used in the `stack` field /// of the [`MeshedChannels`] related /// to the `Dual` of A. /// /// [`MeshedChannels`]: crate::meshedchannels::MeshedChannels /// /// # Example /// /// ``` /// use mpstthree::role::a_dual::RoleADual; /// use mpstthree::role::end::RoleEnd; /// use mpstthree::role::Role; // Only used for example /// /// type NameADual = RoleADual<RoleEnd>; /// /// let _ = NameADual::new(); // Only used for example /// ``` #[derive(Debug)] pub struct RoleADual<R> where R: Role, R::Dual: Role, { #[doc(hidden)] pub sender: Sender<R::Dual>, } impl<R: Role> Role for RoleADual<R> { type Dual = RoleA<R::Dual>; #[doc(hidden)] fn new() -> (Self, Self::Dual) { let (sender_normal, _) = bounded::<R>(1); let (sender_dual, _) = bounded::<R::Dual>(1); ( RoleADual { sender: sender_dual, }, RoleA { sender: sender_normal, }, ) } #[doc(hidden)] fn head_str() -> String { "RoleADual".to_string() } #[doc(hidden)] fn tail_str() -> String { format!("{}<{}>", R::head_str(), R::tail_str()) } #[doc(hidden)] fn self_head_str(&self) -> String { "RoleADual".to_string() } #[doc(hidden)] fn self_tail_str(&self) -> String { format!("{}<{}>", R::head_str(), R::tail_str()) } }
22.285714
61
0.547786
21b8d7101a30477c71114c19e1e6257ca8d1b36c
42,151
//! This module contains implements of the `Lift` and `TypeFoldable` //! traits for various types in the Rust compiler. Most are written by //! hand, though we've recently added some macros and proc-macros to help with the tedium. use crate::mir::interpret; use crate::mir::ProjectionKind; use crate::ty::fold::{TypeFoldable, TypeFolder, TypeVisitor}; use crate::ty::print::{FmtPrinter, Printer}; use crate::ty::{self, InferConst, Lift, Ty, TyCtxt}; use rustc_hir as hir; use rustc_hir::def::Namespace; use rustc_hir::def_id::CRATE_DEF_INDEX; use rustc_index::vec::{Idx, IndexVec}; use smallvec::SmallVec; use std::fmt; use std::rc::Rc; use std::sync::Arc; impl fmt::Debug for ty::TraitDef { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { ty::tls::with(|tcx| { FmtPrinter::new(tcx, f, Namespace::TypeNS).print_def_path(self.def_id, &[])?; Ok(()) }) } } impl fmt::Debug for ty::AdtDef { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { ty::tls::with(|tcx| { FmtPrinter::new(tcx, f, Namespace::TypeNS).print_def_path(self.did, &[])?; Ok(()) }) } } impl fmt::Debug for ty::UpvarId { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let name = ty::tls::with(|tcx| tcx.hir().name(self.var_path.hir_id)); write!(f, "UpvarId({:?};`{}`;{:?})", self.var_path.hir_id, name, self.closure_expr_id) } } impl fmt::Debug for ty::UpvarBorrow<'tcx> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "UpvarBorrow({:?}, {:?})", self.kind, self.region) } } impl fmt::Debug for ty::ExistentialTraitRef<'tcx> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Display::fmt(self, f) } } impl fmt::Debug for ty::adjustment::Adjustment<'tcx> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{:?} -> {}", self.kind, self.target) } } impl fmt::Debug for ty::BoundRegion { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { ty::BrAnon(n) => write!(f, "BrAnon({:?})", n), ty::BrNamed(did, name) => { if did.index == 
CRATE_DEF_INDEX { write!(f, "BrNamed({})", name) } else { write!(f, "BrNamed({:?}, {})", did, name) } } ty::BrEnv => write!(f, "BrEnv"), } } } impl fmt::Debug for ty::RegionKind { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { ty::ReEarlyBound(ref data) => write!(f, "ReEarlyBound({}, {})", data.index, data.name), ty::ReLateBound(binder_id, ref bound_region) => { write!(f, "ReLateBound({:?}, {:?})", binder_id, bound_region) } ty::ReFree(ref fr) => fr.fmt(f), ty::ReStatic => write!(f, "ReStatic"), ty::ReVar(ref vid) => vid.fmt(f), ty::RePlaceholder(placeholder) => write!(f, "RePlaceholder({:?})", placeholder), ty::ReEmpty(ui) => write!(f, "ReEmpty({:?})", ui), ty::ReErased => write!(f, "ReErased"), } } } impl fmt::Debug for ty::FreeRegion { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "ReFree({:?}, {:?})", self.scope, self.bound_region) } } impl fmt::Debug for ty::Variance { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str(match *self { ty::Covariant => "+", ty::Contravariant => "-", ty::Invariant => "o", ty::Bivariant => "*", }) } } impl fmt::Debug for ty::FnSig<'tcx> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "({:?}; c_variadic: {})->{:?}", self.inputs(), self.c_variadic, self.output()) } } impl fmt::Debug for ty::TyVid { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "_#{}t", self.index) } } impl<'tcx> fmt::Debug for ty::ConstVid<'tcx> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "_#{}c", self.index) } } impl fmt::Debug for ty::IntVid { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "_#{}i", self.index) } } impl fmt::Debug for ty::FloatVid { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "_#{}f", self.index) } } impl fmt::Debug for ty::RegionVid { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "'_#{}r", self.index()) } } impl fmt::Debug for ty::InferTy { 
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { ty::TyVar(ref v) => v.fmt(f), ty::IntVar(ref v) => v.fmt(f), ty::FloatVar(ref v) => v.fmt(f), ty::FreshTy(v) => write!(f, "FreshTy({:?})", v), ty::FreshIntTy(v) => write!(f, "FreshIntTy({:?})", v), ty::FreshFloatTy(v) => write!(f, "FreshFloatTy({:?})", v), } } } impl fmt::Debug for ty::IntVarValue { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { ty::IntType(ref v) => v.fmt(f), ty::UintType(ref v) => v.fmt(f), } } } impl fmt::Debug for ty::FloatVarValue { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.0.fmt(f) } } impl fmt::Debug for ty::TraitRef<'tcx> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Display::fmt(self, f) } } impl fmt::Debug for Ty<'tcx> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Display::fmt(self, f) } } impl fmt::Debug for ty::ParamTy { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}/#{}", self.name, self.index) } } impl fmt::Debug for ty::ParamConst { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}/#{}", self.name, self.index) } } impl fmt::Debug for ty::TraitPredicate<'tcx> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "TraitPredicate({:?})", self.trait_ref) } } impl fmt::Debug for ty::ProjectionPredicate<'tcx> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "ProjectionPredicate({:?}, {:?})", self.projection_ty, self.ty) } } impl fmt::Debug for ty::Predicate<'tcx> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{:?}", self.kind()) } } impl fmt::Debug for ty::PredicateKind<'tcx> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { ty::PredicateKind::ForAll(binder) => write!(f, "ForAll({:?})", binder), ty::PredicateKind::Atom(atom) => write!(f, "{:?}", atom), } } } impl fmt::Debug for ty::PredicateAtom<'tcx> { fn fmt(&self, f: &mut fmt::Formatter<'_>) 
-> fmt::Result { match *self { ty::PredicateAtom::Trait(ref a, constness) => { if let hir::Constness::Const = constness { write!(f, "const ")?; } a.fmt(f) } ty::PredicateAtom::Subtype(ref pair) => pair.fmt(f), ty::PredicateAtom::RegionOutlives(ref pair) => pair.fmt(f), ty::PredicateAtom::TypeOutlives(ref pair) => pair.fmt(f), ty::PredicateAtom::Projection(ref pair) => pair.fmt(f), ty::PredicateAtom::WellFormed(data) => write!(f, "WellFormed({:?})", data), ty::PredicateAtom::ObjectSafe(trait_def_id) => { write!(f, "ObjectSafe({:?})", trait_def_id) } ty::PredicateAtom::ClosureKind(closure_def_id, closure_substs, kind) => { write!(f, "ClosureKind({:?}, {:?}, {:?})", closure_def_id, closure_substs, kind) } ty::PredicateAtom::ConstEvaluatable(def_id, substs) => { write!(f, "ConstEvaluatable({:?}, {:?})", def_id, substs) } ty::PredicateAtom::ConstEquate(c1, c2) => write!(f, "ConstEquate({:?}, {:?})", c1, c2), } } } /////////////////////////////////////////////////////////////////////////// // Atomic structs // // For things that don't carry any arena-allocated data (and are // copy...), just add them to this list. CloneTypeFoldableAndLiftImpls! { (), bool, usize, ::rustc_target::abi::VariantIdx, u64, String, crate::middle::region::Scope, ::rustc_ast::ast::FloatTy, ::rustc_ast::ast::InlineAsmOptions, ::rustc_ast::ast::InlineAsmTemplatePiece, ::rustc_ast::ast::NodeId, ::rustc_span::symbol::Symbol, ::rustc_hir::def::Res, ::rustc_hir::def_id::DefId, ::rustc_hir::def_id::LocalDefId, ::rustc_hir::LlvmInlineAsmInner, ::rustc_hir::MatchSource, ::rustc_hir::Mutability, ::rustc_hir::Unsafety, ::rustc_target::asm::InlineAsmRegOrRegClass, ::rustc_target::spec::abi::Abi, crate::mir::Local, crate::mir::Promoted, crate::traits::Reveal, crate::ty::adjustment::AutoBorrowMutability, crate::ty::AdtKind, // Including `BoundRegion` is a *bit* dubious, but direct // references to bound region appear in `ty::Error`, and aren't // really meant to be folded. 
In general, we can only fold a fully // general `Region`. crate::ty::BoundRegion, crate::ty::Placeholder<crate::ty::BoundRegion>, crate::ty::ClosureKind, crate::ty::FreeRegion, crate::ty::InferTy, crate::ty::IntVarValue, crate::ty::ParamConst, crate::ty::ParamTy, crate::ty::adjustment::PointerCast, crate::ty::RegionVid, crate::ty::UniverseIndex, crate::ty::Variance, ::rustc_span::Span, } /////////////////////////////////////////////////////////////////////////// // Lift implementations // FIXME(eddyb) replace all the uses of `Option::map` with `?`. impl<'tcx, A: Lift<'tcx>, B: Lift<'tcx>> Lift<'tcx> for (A, B) { type Lifted = (A::Lifted, B::Lifted); fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> { tcx.lift(&self.0).and_then(|a| tcx.lift(&self.1).map(|b| (a, b))) } } impl<'tcx, A: Lift<'tcx>, B: Lift<'tcx>, C: Lift<'tcx>> Lift<'tcx> for (A, B, C) { type Lifted = (A::Lifted, B::Lifted, C::Lifted); fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> { tcx.lift(&self.0) .and_then(|a| tcx.lift(&self.1).and_then(|b| tcx.lift(&self.2).map(|c| (a, b, c)))) } } impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for Option<T> { type Lifted = Option<T::Lifted>; fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> { match *self { Some(ref x) => tcx.lift(x).map(Some), None => Some(None), } } } impl<'tcx, T: Lift<'tcx>, E: Lift<'tcx>> Lift<'tcx> for Result<T, E> { type Lifted = Result<T::Lifted, E::Lifted>; fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> { match *self { Ok(ref x) => tcx.lift(x).map(Ok), Err(ref e) => tcx.lift(e).map(Err), } } } impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for Box<T> { type Lifted = Box<T::Lifted>; fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> { tcx.lift(&**self).map(Box::new) } } impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for Rc<T> { type Lifted = Rc<T::Lifted>; fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> { tcx.lift(&**self).map(Rc::new) } } impl<'tcx, T: Lift<'tcx>> 
Lift<'tcx> for Arc<T> { type Lifted = Arc<T::Lifted>; fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> { tcx.lift(&**self).map(Arc::new) } } impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for [T] { type Lifted = Vec<T::Lifted>; fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> { // type annotation needed to inform `projection_must_outlive` let mut result: Vec<<T as Lift<'tcx>>::Lifted> = Vec::with_capacity(self.len()); for x in self { if let Some(value) = tcx.lift(x) { result.push(value); } else { return None; } } Some(result) } } impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for Vec<T> { type Lifted = Vec<T::Lifted>; fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> { tcx.lift(&self[..]) } } impl<'tcx, I: Idx, T: Lift<'tcx>> Lift<'tcx> for IndexVec<I, T> { type Lifted = IndexVec<I, T::Lifted>; fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> { self.iter().map(|e| tcx.lift(e)).collect() } } impl<'a, 'tcx> Lift<'tcx> for ty::TraitRef<'a> { type Lifted = ty::TraitRef<'tcx>; fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> { tcx.lift(&self.substs).map(|substs| ty::TraitRef { def_id: self.def_id, substs }) } } impl<'a, 'tcx> Lift<'tcx> for ty::ExistentialTraitRef<'a> { type Lifted = ty::ExistentialTraitRef<'tcx>; fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> { tcx.lift(&self.substs).map(|substs| ty::ExistentialTraitRef { def_id: self.def_id, substs }) } } impl<'a, 'tcx> Lift<'tcx> for ty::ExistentialPredicate<'a> { type Lifted = ty::ExistentialPredicate<'tcx>; fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> { match self { ty::ExistentialPredicate::Trait(x) => tcx.lift(x).map(ty::ExistentialPredicate::Trait), ty::ExistentialPredicate::Projection(x) => { tcx.lift(x).map(ty::ExistentialPredicate::Projection) } ty::ExistentialPredicate::AutoTrait(def_id) => { Some(ty::ExistentialPredicate::AutoTrait(*def_id)) } } } } impl<'a, 'tcx> Lift<'tcx> for ty::TraitPredicate<'a> { type 
Lifted = ty::TraitPredicate<'tcx>; fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<ty::TraitPredicate<'tcx>> { tcx.lift(&self.trait_ref).map(|trait_ref| ty::TraitPredicate { trait_ref }) } } impl<'a, 'tcx> Lift<'tcx> for ty::SubtypePredicate<'a> { type Lifted = ty::SubtypePredicate<'tcx>; fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<ty::SubtypePredicate<'tcx>> { tcx.lift(&(self.a, self.b)).map(|(a, b)| ty::SubtypePredicate { a_is_expected: self.a_is_expected, a, b, }) } } impl<'tcx, A: Copy + Lift<'tcx>, B: Copy + Lift<'tcx>> Lift<'tcx> for ty::OutlivesPredicate<A, B> { type Lifted = ty::OutlivesPredicate<A::Lifted, B::Lifted>; fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> { tcx.lift(&(self.0, self.1)).map(|(a, b)| ty::OutlivesPredicate(a, b)) } } impl<'a, 'tcx> Lift<'tcx> for ty::ProjectionTy<'a> { type Lifted = ty::ProjectionTy<'tcx>; fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<ty::ProjectionTy<'tcx>> { tcx.lift(&self.substs) .map(|substs| ty::ProjectionTy { item_def_id: self.item_def_id, substs }) } } impl<'a, 'tcx> Lift<'tcx> for ty::ProjectionPredicate<'a> { type Lifted = ty::ProjectionPredicate<'tcx>; fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<ty::ProjectionPredicate<'tcx>> { tcx.lift(&(self.projection_ty, self.ty)) .map(|(projection_ty, ty)| ty::ProjectionPredicate { projection_ty, ty }) } } impl<'a, 'tcx> Lift<'tcx> for ty::ExistentialProjection<'a> { type Lifted = ty::ExistentialProjection<'tcx>; fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> { tcx.lift(&self.substs).map(|substs| ty::ExistentialProjection { substs, ty: tcx.lift(&self.ty).expect("type must lift when substs do"), item_def_id: self.item_def_id, }) } } impl<'a, 'tcx> Lift<'tcx> for ty::PredicateKind<'a> { type Lifted = ty::PredicateKind<'tcx>; fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> { match self { ty::PredicateKind::ForAll(binder) => tcx.lift(binder).map(ty::PredicateKind::ForAll), 
ty::PredicateKind::Atom(atom) => tcx.lift(atom).map(ty::PredicateKind::Atom), } } } impl<'a, 'tcx> Lift<'tcx> for ty::PredicateAtom<'a> { type Lifted = ty::PredicateAtom<'tcx>; fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> { match *self { ty::PredicateAtom::Trait(ref data, constness) => { tcx.lift(data).map(|data| ty::PredicateAtom::Trait(data, constness)) } ty::PredicateAtom::Subtype(ref data) => tcx.lift(data).map(ty::PredicateAtom::Subtype), ty::PredicateAtom::RegionOutlives(ref data) => { tcx.lift(data).map(ty::PredicateAtom::RegionOutlives) } ty::PredicateAtom::TypeOutlives(ref data) => { tcx.lift(data).map(ty::PredicateAtom::TypeOutlives) } ty::PredicateAtom::Projection(ref data) => { tcx.lift(data).map(ty::PredicateAtom::Projection) } ty::PredicateAtom::WellFormed(ty) => tcx.lift(&ty).map(ty::PredicateAtom::WellFormed), ty::PredicateAtom::ClosureKind(closure_def_id, closure_substs, kind) => { tcx.lift(&closure_substs).map(|closure_substs| { ty::PredicateAtom::ClosureKind(closure_def_id, closure_substs, kind) }) } ty::PredicateAtom::ObjectSafe(trait_def_id) => { Some(ty::PredicateAtom::ObjectSafe(trait_def_id)) } ty::PredicateAtom::ConstEvaluatable(def_id, substs) => { tcx.lift(&substs).map(|substs| ty::PredicateAtom::ConstEvaluatable(def_id, substs)) } ty::PredicateAtom::ConstEquate(c1, c2) => { tcx.lift(&(c1, c2)).map(|(c1, c2)| ty::PredicateAtom::ConstEquate(c1, c2)) } } } } impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for ty::Binder<T> { type Lifted = ty::Binder<T::Lifted>; fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> { tcx.lift(self.as_ref().skip_binder()).map(ty::Binder::bind) } } impl<'a, 'tcx> Lift<'tcx> for ty::ParamEnv<'a> { type Lifted = ty::ParamEnv<'tcx>; fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> { tcx.lift(&self.caller_bounds()) .map(|caller_bounds| ty::ParamEnv::new(caller_bounds, self.reveal(), self.def_id)) } } impl<'a, 'tcx, T: Lift<'tcx>> Lift<'tcx> for ty::ParamEnvAnd<'a, T> { type 
Lifted = ty::ParamEnvAnd<'tcx, T::Lifted>; fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> { tcx.lift(&self.param_env).and_then(|param_env| { tcx.lift(&self.value).map(|value| ty::ParamEnvAnd { param_env, value }) }) } } impl<'a, 'tcx> Lift<'tcx> for ty::ClosureSubsts<'a> { type Lifted = ty::ClosureSubsts<'tcx>; fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> { tcx.lift(&self.substs).map(|substs| ty::ClosureSubsts { substs }) } } impl<'a, 'tcx> Lift<'tcx> for ty::GeneratorSubsts<'a> { type Lifted = ty::GeneratorSubsts<'tcx>; fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> { tcx.lift(&self.substs).map(|substs| ty::GeneratorSubsts { substs }) } } impl<'a, 'tcx> Lift<'tcx> for ty::adjustment::Adjustment<'a> { type Lifted = ty::adjustment::Adjustment<'tcx>; fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> { tcx.lift(&self.kind).and_then(|kind| { tcx.lift(&self.target).map(|target| ty::adjustment::Adjustment { kind, target }) }) } } impl<'a, 'tcx> Lift<'tcx> for ty::adjustment::Adjust<'a> { type Lifted = ty::adjustment::Adjust<'tcx>; fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> { match *self { ty::adjustment::Adjust::NeverToAny => Some(ty::adjustment::Adjust::NeverToAny), ty::adjustment::Adjust::Pointer(ptr) => Some(ty::adjustment::Adjust::Pointer(ptr)), ty::adjustment::Adjust::Deref(ref overloaded) => { tcx.lift(overloaded).map(ty::adjustment::Adjust::Deref) } ty::adjustment::Adjust::Borrow(ref autoref) => { tcx.lift(autoref).map(ty::adjustment::Adjust::Borrow) } } } } impl<'a, 'tcx> Lift<'tcx> for ty::adjustment::OverloadedDeref<'a> { type Lifted = ty::adjustment::OverloadedDeref<'tcx>; fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> { tcx.lift(&self.region) .map(|region| ty::adjustment::OverloadedDeref { region, mutbl: self.mutbl }) } } impl<'a, 'tcx> Lift<'tcx> for ty::adjustment::AutoBorrow<'a> { type Lifted = ty::adjustment::AutoBorrow<'tcx>; fn 
lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> { match *self { ty::adjustment::AutoBorrow::Ref(r, m) => { tcx.lift(&r).map(|r| ty::adjustment::AutoBorrow::Ref(r, m)) } ty::adjustment::AutoBorrow::RawPtr(m) => Some(ty::adjustment::AutoBorrow::RawPtr(m)), } } } impl<'a, 'tcx> Lift<'tcx> for ty::GenSig<'a> { type Lifted = ty::GenSig<'tcx>; fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> { tcx.lift(&(self.resume_ty, self.yield_ty, self.return_ty)) .map(|(resume_ty, yield_ty, return_ty)| ty::GenSig { resume_ty, yield_ty, return_ty }) } } impl<'a, 'tcx> Lift<'tcx> for ty::FnSig<'a> { type Lifted = ty::FnSig<'tcx>; fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> { tcx.lift(&self.inputs_and_output).map(|x| ty::FnSig { inputs_and_output: x, c_variadic: self.c_variadic, unsafety: self.unsafety, abi: self.abi, }) } } impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for ty::error::ExpectedFound<T> { type Lifted = ty::error::ExpectedFound<T::Lifted>; fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> { tcx.lift(&self.expected).and_then(|expected| { tcx.lift(&self.found).map(|found| ty::error::ExpectedFound { expected, found }) }) } } impl<'a, 'tcx> Lift<'tcx> for ty::error::TypeError<'a> { type Lifted = ty::error::TypeError<'tcx>; fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> { use crate::ty::error::TypeError::*; Some(match *self { Mismatch => Mismatch, UnsafetyMismatch(x) => UnsafetyMismatch(x), AbiMismatch(x) => AbiMismatch(x), Mutability => Mutability, TupleSize(x) => TupleSize(x), FixedArraySize(x) => FixedArraySize(x), ArgCount => ArgCount, RegionsDoesNotOutlive(a, b) => { return tcx.lift(&(a, b)).map(|(a, b)| RegionsDoesNotOutlive(a, b)); } RegionsInsufficientlyPolymorphic(a, b) => { return tcx.lift(&b).map(|b| RegionsInsufficientlyPolymorphic(a, b)); } RegionsOverlyPolymorphic(a, b) => { return tcx.lift(&b).map(|b| RegionsOverlyPolymorphic(a, b)); } RegionsPlaceholderMismatch => 
RegionsPlaceholderMismatch, IntMismatch(x) => IntMismatch(x), FloatMismatch(x) => FloatMismatch(x), Traits(x) => Traits(x), VariadicMismatch(x) => VariadicMismatch(x), CyclicTy(t) => return tcx.lift(&t).map(|t| CyclicTy(t)), ProjectionMismatched(x) => ProjectionMismatched(x), Sorts(ref x) => return tcx.lift(x).map(Sorts), ExistentialMismatch(ref x) => return tcx.lift(x).map(ExistentialMismatch), ConstMismatch(ref x) => return tcx.lift(x).map(ConstMismatch), IntrinsicCast => IntrinsicCast, TargetFeatureCast(ref x) => TargetFeatureCast(*x), ObjectUnsafeCoercion(ref x) => return tcx.lift(x).map(ObjectUnsafeCoercion), }) } } impl<'a, 'tcx> Lift<'tcx> for ty::InstanceDef<'a> { type Lifted = ty::InstanceDef<'tcx>; fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> { match *self { ty::InstanceDef::Item(def_id) => Some(ty::InstanceDef::Item(def_id)), ty::InstanceDef::VtableShim(def_id) => Some(ty::InstanceDef::VtableShim(def_id)), ty::InstanceDef::ReifyShim(def_id) => Some(ty::InstanceDef::ReifyShim(def_id)), ty::InstanceDef::Intrinsic(def_id) => Some(ty::InstanceDef::Intrinsic(def_id)), ty::InstanceDef::FnPtrShim(def_id, ref ty) => { Some(ty::InstanceDef::FnPtrShim(def_id, tcx.lift(ty)?)) } ty::InstanceDef::Virtual(def_id, n) => Some(ty::InstanceDef::Virtual(def_id, n)), ty::InstanceDef::ClosureOnceShim { call_once } => { Some(ty::InstanceDef::ClosureOnceShim { call_once }) } ty::InstanceDef::DropGlue(def_id, ref ty) => { Some(ty::InstanceDef::DropGlue(def_id, tcx.lift(ty)?)) } ty::InstanceDef::CloneShim(def_id, ref ty) => { Some(ty::InstanceDef::CloneShim(def_id, tcx.lift(ty)?)) } } } } /////////////////////////////////////////////////////////////////////////// // TypeFoldable implementations. // // Ideally, each type should invoke `folder.fold_foo(self)` and // nothing else. In some cases, though, we haven't gotten around to // adding methods on the `folder` yet, and thus the folding is // hard-coded here. 
This is less-flexible, because folders cannot // override the behavior, but there are a lot of random types and one // can easily refactor the folding into the TypeFolder trait as // needed. /// AdtDefs are basically the same as a DefId. impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::AdtDef { fn super_fold_with<F: TypeFolder<'tcx>>(&self, _folder: &mut F) -> Self { *self } fn super_visit_with<V: TypeVisitor<'tcx>>(&self, _visitor: &mut V) -> bool { false } } impl<'tcx, T: TypeFoldable<'tcx>, U: TypeFoldable<'tcx>> TypeFoldable<'tcx> for (T, U) { fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> (T, U) { (self.0.fold_with(folder), self.1.fold_with(folder)) } fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool { self.0.visit_with(visitor) || self.1.visit_with(visitor) } } impl<'tcx, A: TypeFoldable<'tcx>, B: TypeFoldable<'tcx>, C: TypeFoldable<'tcx>> TypeFoldable<'tcx> for (A, B, C) { fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> (A, B, C) { (self.0.fold_with(folder), self.1.fold_with(folder), self.2.fold_with(folder)) } fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool { self.0.visit_with(visitor) || self.1.visit_with(visitor) || self.2.visit_with(visitor) } } EnumTypeFoldableImpl! { impl<'tcx, T> TypeFoldable<'tcx> for Option<T> { (Some)(a), (None), } where T: TypeFoldable<'tcx> } EnumTypeFoldableImpl! 
{ impl<'tcx, T, E> TypeFoldable<'tcx> for Result<T, E> { (Ok)(a), (Err)(a), } where T: TypeFoldable<'tcx>, E: TypeFoldable<'tcx>, } impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Rc<T> { fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self { Rc::new((**self).fold_with(folder)) } fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool { (**self).visit_with(visitor) } } impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Arc<T> { fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self { Arc::new((**self).fold_with(folder)) } fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool { (**self).visit_with(visitor) } } impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Box<T> { fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self { let content: T = (**self).fold_with(folder); box content } fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool { (**self).visit_with(visitor) } } impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Vec<T> { fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self { self.iter().map(|t| t.fold_with(folder)).collect() } fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool { self.iter().any(|t| t.visit_with(visitor)) } } impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Box<[T]> { fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self { self.iter().map(|t| t.fold_with(folder)).collect::<Vec<_>>().into_boxed_slice() } fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool { self.iter().any(|t| t.visit_with(visitor)) } } impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for ty::Binder<T> { fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self { self.map_bound_ref(|ty| ty.fold_with(folder)) } fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self { folder.fold_binder(self) } fn super_visit_with<V: 
TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool { self.as_ref().skip_binder().visit_with(visitor) } fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool { visitor.visit_binder(self) } } impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::List<ty::ExistentialPredicate<'tcx>> { fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self { fold_list(*self, folder, |tcx, v| tcx.intern_existential_predicates(v)) } fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool { self.iter().any(|p| p.visit_with(visitor)) } } impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::List<Ty<'tcx>> { fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self { fold_list(*self, folder, |tcx, v| tcx.intern_type_list(v)) } fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool { self.iter().any(|t| t.visit_with(visitor)) } } impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::List<ProjectionKind> { fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self { fold_list(*self, folder, |tcx, v| tcx.intern_projs(v)) } fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool { self.iter().any(|t| t.visit_with(visitor)) } } impl<'tcx> TypeFoldable<'tcx> for ty::instance::Instance<'tcx> { fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self { use crate::ty::InstanceDef::*; Self { substs: self.substs.fold_with(folder), def: match self.def { Item(def) => Item(def.fold_with(folder)), VtableShim(did) => VtableShim(did.fold_with(folder)), ReifyShim(did) => ReifyShim(did.fold_with(folder)), Intrinsic(did) => Intrinsic(did.fold_with(folder)), FnPtrShim(did, ty) => FnPtrShim(did.fold_with(folder), ty.fold_with(folder)), Virtual(did, i) => Virtual(did.fold_with(folder), i), ClosureOnceShim { call_once } => { ClosureOnceShim { call_once: call_once.fold_with(folder) } } DropGlue(did, ty) => DropGlue(did.fold_with(folder), ty.fold_with(folder)), CloneShim(did, ty) => CloneShim(did.fold_with(folder), 
ty.fold_with(folder)), }, } } fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool { use crate::ty::InstanceDef::*; self.substs.visit_with(visitor) || match self.def { Item(def) => def.visit_with(visitor), VtableShim(did) | ReifyShim(did) | Intrinsic(did) | Virtual(did, _) => { did.visit_with(visitor) } FnPtrShim(did, ty) | CloneShim(did, ty) => { did.visit_with(visitor) || ty.visit_with(visitor) } DropGlue(did, ty) => did.visit_with(visitor) || ty.visit_with(visitor), ClosureOnceShim { call_once } => call_once.visit_with(visitor), } } } impl<'tcx> TypeFoldable<'tcx> for interpret::GlobalId<'tcx> { fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self { Self { instance: self.instance.fold_with(folder), promoted: self.promoted } } fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool { self.instance.visit_with(visitor) } } impl<'tcx> TypeFoldable<'tcx> for Ty<'tcx> { fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self { let kind = match self.kind { ty::RawPtr(tm) => ty::RawPtr(tm.fold_with(folder)), ty::Array(typ, sz) => ty::Array(typ.fold_with(folder), sz.fold_with(folder)), ty::Slice(typ) => ty::Slice(typ.fold_with(folder)), ty::Adt(tid, substs) => ty::Adt(tid, substs.fold_with(folder)), ty::Dynamic(ref trait_ty, ref region) => { ty::Dynamic(trait_ty.fold_with(folder), region.fold_with(folder)) } ty::Tuple(ts) => ty::Tuple(ts.fold_with(folder)), ty::FnDef(def_id, substs) => ty::FnDef(def_id, substs.fold_with(folder)), ty::FnPtr(f) => ty::FnPtr(f.fold_with(folder)), ty::Ref(ref r, ty, mutbl) => ty::Ref(r.fold_with(folder), ty.fold_with(folder), mutbl), ty::Generator(did, substs, movability) => { ty::Generator(did, substs.fold_with(folder), movability) } ty::GeneratorWitness(types) => ty::GeneratorWitness(types.fold_with(folder)), ty::Closure(did, substs) => ty::Closure(did, substs.fold_with(folder)), ty::Projection(ref data) => ty::Projection(data.fold_with(folder)), ty::Opaque(did, 
substs) => ty::Opaque(did, substs.fold_with(folder)), ty::Bool | ty::Char | ty::Str | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Error(_) | ty::Infer(_) | ty::Param(..) | ty::Bound(..) | ty::Placeholder(..) | ty::Never | ty::Foreign(..) => return self, }; if self.kind == kind { self } else { folder.tcx().mk_ty(kind) } } fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self { folder.fold_ty(*self) } fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool { match self.kind { ty::RawPtr(ref tm) => tm.visit_with(visitor), ty::Array(typ, sz) => typ.visit_with(visitor) || sz.visit_with(visitor), ty::Slice(typ) => typ.visit_with(visitor), ty::Adt(_, substs) => substs.visit_with(visitor), ty::Dynamic(ref trait_ty, ref reg) => { trait_ty.visit_with(visitor) || reg.visit_with(visitor) } ty::Tuple(ts) => ts.visit_with(visitor), ty::FnDef(_, substs) => substs.visit_with(visitor), ty::FnPtr(ref f) => f.visit_with(visitor), ty::Ref(r, ty, _) => r.visit_with(visitor) || ty.visit_with(visitor), ty::Generator(_did, ref substs, _) => substs.visit_with(visitor), ty::GeneratorWitness(ref types) => types.visit_with(visitor), ty::Closure(_did, ref substs) => substs.visit_with(visitor), ty::Projection(ref data) => data.visit_with(visitor), ty::Opaque(_, ref substs) => substs.visit_with(visitor), ty::Bool | ty::Char | ty::Str | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Error(_) | ty::Infer(_) | ty::Bound(..) | ty::Placeholder(..) | ty::Param(..) | ty::Never | ty::Foreign(..) 
=> false, } } fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool { visitor.visit_ty(self) } } impl<'tcx> TypeFoldable<'tcx> for ty::Region<'tcx> { fn super_fold_with<F: TypeFolder<'tcx>>(&self, _folder: &mut F) -> Self { *self } fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self { folder.fold_region(*self) } fn super_visit_with<V: TypeVisitor<'tcx>>(&self, _visitor: &mut V) -> bool { false } fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool { visitor.visit_region(*self) } } impl<'tcx> TypeFoldable<'tcx> for ty::Predicate<'tcx> { fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self { let new = ty::PredicateKind::super_fold_with(&self.inner.kind, folder); folder.tcx().reuse_or_mk_predicate(*self, new) } fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool { ty::PredicateKind::super_visit_with(&self.inner.kind, visitor) } fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool { visitor.visit_predicate(*self) } fn has_vars_bound_at_or_above(&self, binder: ty::DebruijnIndex) -> bool { self.inner.outer_exclusive_binder > binder } fn has_type_flags(&self, flags: ty::TypeFlags) -> bool { self.inner.flags.intersects(flags) } } pub(super) trait PredicateVisitor<'tcx>: TypeVisitor<'tcx> { fn visit_predicate(&mut self, predicate: ty::Predicate<'tcx>) -> bool; } impl<T: TypeVisitor<'tcx>> PredicateVisitor<'tcx> for T { default fn visit_predicate(&mut self, predicate: ty::Predicate<'tcx>) -> bool { predicate.super_visit_with(self) } } impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::List<ty::Predicate<'tcx>> { fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self { fold_list(*self, folder, |tcx, v| tcx.intern_predicates(v)) } fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool { self.iter().any(|p| p.visit_with(visitor)) } } impl<'tcx, T: TypeFoldable<'tcx>, I: Idx> TypeFoldable<'tcx> for IndexVec<I, T> { fn super_fold_with<F: 
TypeFolder<'tcx>>(&self, folder: &mut F) -> Self { self.iter().map(|x| x.fold_with(folder)).collect() } fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool { self.iter().any(|t| t.visit_with(visitor)) } } impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::Const<'tcx> { fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self { let ty = self.ty.fold_with(folder); let val = self.val.fold_with(folder); if ty != self.ty || val != self.val { folder.tcx().mk_const(ty::Const { ty, val }) } else { *self } } fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self { folder.fold_const(*self) } fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool { self.ty.visit_with(visitor) || self.val.visit_with(visitor) } fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool { visitor.visit_const(self) } } impl<'tcx> TypeFoldable<'tcx> for ty::ConstKind<'tcx> { fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self { match *self { ty::ConstKind::Infer(ic) => ty::ConstKind::Infer(ic.fold_with(folder)), ty::ConstKind::Param(p) => ty::ConstKind::Param(p.fold_with(folder)), ty::ConstKind::Unevaluated(did, substs, promoted) => { ty::ConstKind::Unevaluated(did, substs.fold_with(folder), promoted) } ty::ConstKind::Value(_) | ty::ConstKind::Bound(..) | ty::ConstKind::Placeholder(..) | ty::ConstKind::Error(_) => *self, } } fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool { match *self { ty::ConstKind::Infer(ic) => ic.visit_with(visitor), ty::ConstKind::Param(p) => p.visit_with(visitor), ty::ConstKind::Unevaluated(_, substs, _) => substs.visit_with(visitor), ty::ConstKind::Value(_) | ty::ConstKind::Bound(..) 
| ty::ConstKind::Placeholder(_) | ty::ConstKind::Error(_) => false, } } } impl<'tcx> TypeFoldable<'tcx> for InferConst<'tcx> { fn super_fold_with<F: TypeFolder<'tcx>>(&self, _folder: &mut F) -> Self { *self } fn super_visit_with<V: TypeVisitor<'tcx>>(&self, _visitor: &mut V) -> bool { false } } // Does the equivalent of // ``` // let v = self.iter().map(|p| p.fold_with(folder)).collect::<SmallVec<[_; 8]>>(); // folder.tcx().intern_*(&v) // ``` fn fold_list<'tcx, F, T>( list: &'tcx ty::List<T>, folder: &mut F, intern: impl FnOnce(TyCtxt<'tcx>, &[T]) -> &'tcx ty::List<T>, ) -> &'tcx ty::List<T> where F: TypeFolder<'tcx>, T: TypeFoldable<'tcx> + PartialEq + Copy, { let mut iter = list.iter(); // Look for the first element that changed if let Some((i, new_t)) = iter.by_ref().enumerate().find_map(|(i, t)| { let new_t = t.fold_with(folder); if new_t == t { None } else { Some((i, new_t)) } }) { // An element changed, prepare to intern the resulting list let mut new_list = SmallVec::<[_; 8]>::with_capacity(list.len()); new_list.extend_from_slice(&list[..i]); new_list.push(new_t); new_list.extend(iter.map(|t| t.fold_with(folder))); intern(folder.tcx(), &new_list) } else { list } }
36.337069
100
0.565253
7172a34c8cfeba6a4c5994482251a387fc6252c2
1,176
// run-pass #![allow(dead_code)] #![allow(unused_unsafe)] use std::marker::Sync; struct Foo { a: usize, b: *const () } unsafe impl Sync for Foo {} fn foo<T>(a: T) -> T { a } static BLOCK_INTEGRAL: usize = { 1 }; static BLOCK_EXPLICIT_UNIT: () = { () }; static BLOCK_IMPLICIT_UNIT: () = { }; static BLOCK_FLOAT: f64 = { 1.0 }; static BLOCK_ENUM: Option<usize> = { Some(100) }; static BLOCK_STRUCT: Foo = { Foo { a: 12, b: std::ptr::null::<()>() } }; static BLOCK_UNSAFE: usize = unsafe { 1000 }; static BLOCK_FN_INFERRED: fn(usize) -> usize = { foo }; static BLOCK_FN: fn(usize) -> usize = { foo::<usize> }; static BLOCK_ENUM_CONSTRUCTOR: fn(usize) -> Option<usize> = { Some }; pub fn main() { assert_eq!(BLOCK_INTEGRAL, 1); assert_eq!(BLOCK_EXPLICIT_UNIT, ()); assert_eq!(BLOCK_IMPLICIT_UNIT, ()); assert_eq!(BLOCK_FLOAT, 1.0_f64); assert_eq!(BLOCK_STRUCT.a, 12); assert_eq!(BLOCK_STRUCT.b, std::ptr::null::<()>()); assert_eq!(BLOCK_ENUM, Some(100)); assert_eq!(BLOCK_UNSAFE, 1000); assert_eq!(BLOCK_FN_INFERRED(300), 300); assert_eq!(BLOCK_FN(300), 300); assert_eq!(BLOCK_ENUM_CONSTRUCTOR(200), Some(200)); }
25.565217
72
0.636905
4ac70ea39111e1d37b2fbeecbae050f8bc30ca29
39,790
use super::*; const POS_STAND: u8 = 1; const POS_CROUCH: u8 = 2; const POS_PRONE: u8 = 3; const RUNSPEED: f32 = 0.118; const RUNSPEEDUP: f32 = RUNSPEED / 6.0; const FLYSPEED: f32 = 0.03; const JUMPSPEED: f32 = 0.66; const CROUCHRUNSPEED: f32 = RUNSPEED / 0.6; const PRONESPEED: f32 = RUNSPEED * 4.0; const ROLLSPEED: f32 = RUNSPEED / 1.2; const JUMPDIRSPEED: f32 = 0.30; const JETSPEED: f32 = 0.10; const SECOND: i32 = 60; const DEFAULT_IDLETIME: i32 = SECOND * 8; #[derive(Default, Debug)] pub struct Control { pub left: bool, pub right: bool, pub up: bool, pub down: bool, pub fire: bool, pub jets: bool, pub grenade: bool, pub change: bool, pub throw: bool, pub drop: bool, pub reload: bool, pub prone: bool, pub flag_throw: bool, pub mouse_aim_x: i32, pub mouse_aim_y: i32, pub mouse_dist: i32, pub was_running_left: bool, pub was_jumping: bool, pub was_throwing_weapon: bool, pub was_changing_weapon: bool, pub was_throwing_grenade: bool, pub was_reloading_weapon: bool, } impl Soldier { #[allow(clippy::collapsible_if)] pub fn control(&mut self, emitter: &mut Vec<EmitterItem>, gravity: f32) { let mut player_pressed_left_right = false; if self.legs_animation.speed < 1 { self.legs_animation.speed = 1; } if self.body_animation.speed < 1 { self.body_animation.speed = 1; } let (mut cleft, mut cright) = (self.control.left, self.control.right); // If both left and right directions are pressed, then decide which direction to go in if cleft && cright { // Remember that both directions were pressed, as it's useful for some moves player_pressed_left_right = true; if self.control.was_jumping { // If jumping, keep going in the old direction if self.control.was_running_left { cright = false; } else { cleft = false; } } else { // If not jumping, instead go in the new direction if self.control.was_running_left { cleft = false; } else { cright = false; } } } else { self.control.was_running_left = cleft; self.control.was_jumping = self.control.up; } let conflicting_keys_pressed = |c: 
&Control| (c.grenade as u8 + c.change as u8 + c.throw as u8 + c.reload as u8) > 1; // Handle simultaneous key presses that would conflict if conflicting_keys_pressed(&self.control) { // At least two buttons pressed, so deactivate any previous one if self.control.was_throwing_grenade { self.control.grenade = false; } else if self.control.was_changing_weapon { self.control.change = false; } else if self.control.was_throwing_weapon { self.control.throw = false; } else if self.control.was_reloading_weapon { self.control.reload = false; } // If simultaneously pressing two or more new buttons, then deactivate them in order // of least preference while conflicting_keys_pressed(&self.control) { if self.control.reload { self.control.reload = false; } else if self.control.change { self.control.change = false; } else if self.control.throw { self.control.throw = false; } else if self.control.grenade { self.control.grenade = false; } } } else { self.control.was_throwing_grenade = self.control.grenade; self.control.was_changing_weapon = self.control.change; self.control.was_throwing_weapon = self.control.throw; self.control.was_reloading_weapon = self.control.reload; } if self.dead_meat { self.control.free_controls(); } //self.fired = 0; self.control.mouse_aim_x = (self.control.mouse_aim_x as f32 + self.particle.velocity.x).round() as i32; self.control.mouse_aim_y = (self.control.mouse_aim_y as f32 + self.particle.velocity.y).round() as i32; if self.control.jets && (((self.legs_animation.id == Anim::JumpSide) && (((self.direction == -1) && cright) || ((self.direction == 1) && cleft) || player_pressed_left_right)) || ((self.legs_animation.id == Anim::RollBack) && self.control.up)) { self.body_apply_animation(Anim::RollBack, 1); self.legs_apply_animation(Anim::RollBack, 1); } else if self.control.jets && (self.jets_count > 0) { if self.on_ground { self.particle.force.y = -2.5 * iif!(gravity > 0.05, JETSPEED, gravity * 2.0); } else if self.position != POS_PRONE { 
self.particle.force.y -= iif!(gravity > 0.05, JETSPEED, gravity * 2.0); } else { self.particle.force.x += f32::from(self.direction) * iif!(gravity > 0.05, JETSPEED / 2.0, gravity); } if (self.legs_animation.id != Anim::GetUp) && (self.body_animation.id != Anim::Roll) && (self.body_animation.id != Anim::RollBack) { self.legs_apply_animation(Anim::Fall, 1); } self.jets_count -= 1; } // FIRE!!!! if self.primary_weapon().kind == WeaponKind::Chainsaw || (self.body_animation.id != Anim::Roll) && (self.body_animation.id != Anim::RollBack) && (self.body_animation.id != Anim::Melee) && (self.body_animation.id != Anim::Change) { if ((self.body_animation.id == Anim::HandsUpAim) && (self.body_animation.frame == 11)) || (self.body_animation.id != Anim::HandsUpAim) { if self.control.fire // and (SpriteC.CeaseFireCounter < 0) */ { if self.primary_weapon().kind == WeaponKind::NoWeapon || self.primary_weapon().kind == WeaponKind::Knife { self.body_apply_animation(Anim::Punch, 1); } else { self.fire(emitter); self.control.fire = false; } } } } // change weapon animation if (self.body_animation.id != Anim::Roll) && (self.body_animation.id != Anim::RollBack) { if self.control.change { self.body_apply_animation(Anim::Change, 1); } } // change weapon if self.body_animation.id == Anim::Change { if self.body_animation.frame == 2 { // TODO: play sound self.body_animation.frame += 1; } else if self.body_animation.frame == 25 { self.switch_weapon(); } else if (self.body_animation.frame == Anim::Change.num_frames()) && (self.primary_weapon().ammo_count == 0) { self.body_apply_animation(Anim::Stand, 1); } } // throw weapon if self.control.drop && (self.body_animation.id != Anim::Change || self.body_animation.frame > 25) && !self.body_animation.is_any(&[Anim::Roll, Anim::RollBack, Anim::ThrowWeapon]) // && !flamegod bonus && !self.primary_weapon().is_any( &[ WeaponKind::Bow, WeaponKind::FlameBow, WeaponKind::NoWeapon, ] ) { self.body_apply_animation(Anim::ThrowWeapon, 1); if 
self.primary_weapon().kind == WeaponKind::Knife { self.body_animation.speed = 2; } } // throw knife if self.body_animation.id == Anim::ThrowWeapon && self.primary_weapon().kind == WeaponKind::Knife && (!self.control.drop || self.body_animation.frame == 16) { let weapon = Weapon::new(WeaponKind::ThrownKnife, false); let aim_x = self.control.mouse_aim_x as f32; let aim_y = self.control.mouse_aim_y as f32; let dir = vec2normalize(vec2(aim_x, aim_y) - self.skeleton.pos(15)); let frame = self.body_animation.frame as f32; let thrown_mul = 1.5 * f32::min(16.0, f32::max(8.0, frame)) / 16.0; let bullet_vel = dir * weapon.speed * thrown_mul; let inherited_vel = self.particle.velocity * weapon.inherited_velocity; let velocity = bullet_vel + inherited_vel; emitter.push(EmitterItem::Bullet(BulletParams { style: weapon.bullet_style, weapon: weapon.kind, position: self.skeleton.pos(16) + velocity, velocity, timeout: weapon.timeout as i16, hit_multiply: weapon.hit_multiply, team: Team::None, sprite: weapon.bullet_sprite, })); self.control.drop = false; self.body_apply_animation(Anim::Stand, 1); } // Punch! if !self.dead_meat { if (self.body_animation.id == Anim::Punch) && (self.body_animation.frame == 11) { self.body_animation.frame += 1; } } // Buttstock! 
if self.dead_meat { if (self.body_animation.id == Anim::Melee) && (self.body_animation.frame == 12) { // weapons } } if self.body_animation.id == Anim::Melee && self.body_animation.frame > 20 { self.body_apply_animation(Anim::Stand, 1); } // Prone if self.control.prone { if (self.legs_animation.id != Anim::GetUp) && (self.legs_animation.id != Anim::Prone) && (self.legs_animation.id != Anim::ProneMove) { self.legs_apply_animation(Anim::Prone, 1); if (self.body_animation.id != Anim::Reload) && (self.body_animation.id != Anim::Change) && (self.body_animation.id != Anim::ThrowWeapon) { self.body_apply_animation(Anim::Prone, 1); } self.old_direction = self.direction; self.control.prone = false; } } // Get up if self.position == POS_PRONE { if self.control.prone || (self.direction != self.old_direction) { if ((self.legs_animation.id == Anim::Prone) && (self.legs_animation.frame > 23)) || (self.legs_animation.id == Anim::ProneMove) { if self.legs_animation.id != Anim::GetUp { self.legs_animation = AnimState::new(Anim::GetUp); self.legs_animation.frame = 9; self.control.prone = false; } if (self.body_animation.id != Anim::Reload) && (self.body_animation.id != Anim::Change) && (self.body_animation.id != Anim::ThrowWeapon) { self.body_apply_animation(Anim::GetUp, 9); } } } } let mut unprone = false; // Immediately switch from unprone to jump/sidejump, because the end of the unprone // animation can be seen as the "wind up" for the jump if (self.legs_animation.id == Anim::GetUp) && (self.legs_animation.frame > 23 - (4 - 1)) && self.on_ground && self.control.up && (cright || cleft) { // Set sidejump frame 1 to 4 depending on which unprone frame we're in let id = self.legs_animation.frame - (23 - (4 - 1)); self.legs_apply_animation(Anim::JumpSide, id); unprone = true; } else if (self.legs_animation.id == Anim::GetUp) && (self.legs_animation.frame > 23 - (4 - 1)) && self.on_ground && self.control.up && !(cright || cleft) { // Set jump frame 6 to 9 depending on which unprone 
frame we're in let id = self.legs_animation.frame - (23 - (9 - 1)); self.legs_apply_animation(Anim::Jump, id); unprone = true; } else if (self.legs_animation.id == Anim::GetUp) && (self.legs_animation.frame > 23) { if cright || cleft { if (self.direction == 1) ^ cleft { self.legs_apply_animation(Anim::Run, 1); } else { self.legs_apply_animation(Anim::RunBack, 1); } } else if !self.on_ground && self.control.up { self.legs_apply_animation(Anim::Run, 1); } else { self.legs_apply_animation(Anim::Stand, 1); } unprone = true; } if unprone { self.position = POS_STAND; if (self.body_animation.id != Anim::Reload) && (self.body_animation.id != Anim::Change) && (self.body_animation.id != Anim::ThrowWeapon) { self.body_apply_animation(Anim::Stand, 1); } } if true { // self.stat == 0 { if ((self.body_animation.id == Anim::Stand) && (self.legs_animation.id == Anim::Stand) && !self.dead_meat && (self.idle_time > 0)) || (self.idle_time > DEFAULT_IDLETIME) { if self.idle_random >= 0 { self.idle_time -= 1; } } else { self.idle_time = DEFAULT_IDLETIME; } if self.idle_random == 0 { if self.idle_time == 0 { self.body_apply_animation(Anim::Smoke, 1); self.idle_time = DEFAULT_IDLETIME; } if (self.body_animation.id == Anim::Smoke) && (self.body_animation.frame == 17) { self.body_animation.frame += 1; } if !self.dead_meat { if (self.idle_time == 1) && (self.body_animation.id != Anim::Smoke) && (self.legs_animation.id == Anim::Stand) { self.idle_time = DEFAULT_IDLETIME; self.idle_random = -1; } } } // *CHEAT* if self.legs_animation.speed > 1 { if (self.legs_animation.id == Anim::Jump) || (self.legs_animation.id == Anim::JumpSide) || (self.legs_animation.id == Anim::Roll) || (self.legs_animation.id == Anim::RollBack) || (self.legs_animation.id == Anim::Prone) || (self.legs_animation.id == Anim::Run) || (self.legs_animation.id == Anim::RunBack) { self.particle.velocity.x /= self.legs_animation.speed as f32; self.particle.velocity.y /= self.legs_animation.speed as f32; } if 
self.legs_animation.speed > 2 { if (self.legs_animation.id == Anim::ProneMove) || (self.legs_animation.id == Anim::CrouchRun) { self.particle.velocity.x /= self.legs_animation.speed as f32; self.particle.velocity.y /= self.legs_animation.speed as f32; } } } // TODO: Check if near collider // TODO if targetmode > freecontrols // End any ongoing idle animations if a key is pressed if (self.body_animation.id == Anim::Cigar) || (self.body_animation.id == Anim::Match) || (self.body_animation.id == Anim::Smoke) || (self.body_animation.id == Anim::Wipe) || (self.body_animation.id == Anim::Groin) { if cleft || cright || self.control.up || self.control.down || self.control.fire || self.control.jets || self.control.grenade || self.control.change || self.control.change || self.control.throw || self.control.reload || self.control.prone { self.body_animation.frame = self.body_animation.num_frames(); } } // make anims out of controls // rolling if (self.body_animation.id != Anim::TakeOff) && (self.body_animation.id != Anim::Piss) && (self.body_animation.id != Anim::Mercy) && (self.body_animation.id != Anim::Mercy2) && (self.body_animation.id != Anim::Victory) && (self.body_animation.id != Anim::Own) { if (self.body_animation.id == Anim::Roll) || (self.body_animation.id == Anim::RollBack) { if self.legs_animation.id == Anim::Roll { if self.on_ground { self.particle.force.x = f32::from(self.direction) * ROLLSPEED; } else { self.particle.force.x = f32::from(self.direction) * 2.0 * FLYSPEED; } } else if self.legs_animation.id == Anim::RollBack { if self.on_ground { self.particle.force.x = -f32::from(self.direction) * ROLLSPEED; } else { self.particle.force.x = -f32::from(self.direction) * 2.0 * FLYSPEED; } // if appropriate frames to move if (self.legs_animation.frame > 1) && (self.legs_animation.frame < 8) { if self.control.up { self.particle.force.y -= JUMPDIRSPEED * 1.5; self.particle.force.x *= 0.5; self.particle.velocity.x *= 0.8; } } } // downright } else if (cright) && 
(self.control.down) { if self.on_ground { // roll to the side if (self.legs_animation.id == Anim::Run) || (self.legs_animation.id == Anim::RunBack) || (self.legs_animation.id == Anim::Fall) || (self.legs_animation.id == Anim::ProneMove) || ((self.legs_animation.id == Anim::Prone) && (self.legs_animation.frame >= 24)) { if (self.legs_animation.id == Anim::ProneMove) || ((self.legs_animation.id == Anim::Prone) && (self.legs_animation.frame == self.legs_animation.num_frames())) { self.control.prone = false; self.position = POS_STAND; } if self.direction == 1 { self.body_apply_animation(Anim::Roll, 1); self.legs_animation = AnimState::new(Anim::Roll); } else { self.body_apply_animation(Anim::RollBack, 1); self.legs_animation = AnimState::new(Anim::RollBack); } self.legs_animation.frame = 1; } else if self.direction == 1 { self.legs_apply_animation(Anim::CrouchRun, 1); } else { self.legs_apply_animation(Anim::CrouchRunBack, 1); } if (self.legs_animation.id == Anim::CrouchRun) || (self.legs_animation.id == Anim::CrouchRunBack) { self.particle.force.x = CROUCHRUNSPEED; } else if (self.legs_animation.id == Anim::Roll) || (self.legs_animation.id == Anim::RollBack) { self.particle.force.x = 2.0 * CROUCHRUNSPEED; } } // downleft } else if cleft && self.control.down { if self.on_ground { // roll to the side if (self.legs_animation.id == Anim::Run) || (self.legs_animation.id == Anim::RunBack) || (self.legs_animation.id == Anim::Fall) || (self.legs_animation.id == Anim::ProneMove) || ((self.legs_animation.id == Anim::Prone) && (self.legs_animation.frame >= 24)) { if (self.legs_animation.id == Anim::ProneMove) || ((self.legs_animation.id == Anim::Prone) && (self.legs_animation.frame == self.legs_animation.num_frames())) { self.control.prone = false; self.position = POS_STAND; } if self.direction == 1 { self.body_apply_animation(Anim::RollBack, 1); self.legs_animation = AnimState::new(Anim::RollBack); } else { self.body_apply_animation(Anim::Roll, 1); self.legs_animation = 
AnimState::new(Anim::Roll); } self.legs_animation.frame = 1; } else if self.direction == 1 { self.legs_apply_animation(Anim::CrouchRunBack, 1); } else { self.legs_apply_animation(Anim::CrouchRun, 1); } if (self.legs_animation.id == Anim::CrouchRun) || (self.legs_animation.id == Anim::CrouchRunBack) { self.particle.force.x = -CROUCHRUNSPEED; } } // Proning } else if (self.legs_animation.id == Anim::Prone) || (self.legs_animation.id == Anim::ProneMove) || ((self.legs_animation.id == Anim::GetUp) && (self.body_animation.id != Anim::Throw) && (self.body_animation.id != Anim::Punch)) { if self.on_ground { if ((self.legs_animation.id == Anim::Prone) && (self.legs_animation.frame > 25)) || (self.legs_animation.id == Anim::ProneMove) { if cleft || cright { if (self.legs_animation.frame < 4) || (self.legs_animation.frame > 14) { self.particle.force.x = { if cleft { -PRONESPEED } else { PRONESPEED } } } self.legs_apply_animation(Anim::ProneMove, 1); if (self.body_animation.id != Anim::ClipIn) && (self.body_animation.id != Anim::ClipOut) && (self.body_animation.id != Anim::SlideBack) && (self.body_animation.id != Anim::Reload) && (self.body_animation.id != Anim::Change) && (self.body_animation.id != Anim::Throw) && (self.body_animation.id != Anim::ThrowWeapon) { self.body_apply_animation(Anim::ProneMove, 1); } if self.legs_animation.id != Anim::ProneMove { self.legs_animation = AnimState::new(Anim::ProneMove); } } else { if self.legs_animation.id != Anim::Prone { self.legs_animation = AnimState::new(Anim::Prone); } self.legs_animation.frame = 26; } } } } else if cright && self.control.up { if self.on_ground { if (self.legs_animation.id == Anim::Run) || (self.legs_animation.id == Anim::RunBack) || (self.legs_animation.id == Anim::Stand) || (self.legs_animation.id == Anim::Crouch) || (self.legs_animation.id == Anim::CrouchRun) || (self.legs_animation.id == Anim::CrouchRunBack) { self.legs_apply_animation(Anim::JumpSide, 1); } if self.legs_animation.frame == 
self.legs_animation.num_frames() { self.legs_apply_animation(Anim::Run, 1); } } else if (self.legs_animation.id == Anim::Roll) || (self.legs_animation.id == Anim::RollBack) { if self.direction == 1 { self.legs_apply_animation(Anim::Run, 1); } else { self.legs_apply_animation(Anim::RunBack, 1); } } if self.legs_animation.id == Anim::Jump { if self.legs_animation.frame < 10 { self.legs_apply_animation(Anim::JumpSide, 1); } } if self.legs_animation.id == Anim::JumpSide { if (self.legs_animation.frame > 3) && (self.legs_animation.frame < 11) { self.particle.force.x = JUMPDIRSPEED; self.particle.force.y = -JUMPDIRSPEED / 1.2; } } } else if cleft && self.control.up { if self.on_ground { if (self.legs_animation.id == Anim::Run) || (self.legs_animation.id == Anim::RunBack) || (self.legs_animation.id == Anim::Stand) || (self.legs_animation.id == Anim::Crouch) || (self.legs_animation.id == Anim::CrouchRun) || (self.legs_animation.id == Anim::CrouchRunBack) { self.legs_apply_animation(Anim::JumpSide, 1); } if self.legs_animation.frame == self.legs_animation.num_frames() { self.legs_apply_animation(Anim::Run, 1); } } else if (self.legs_animation.id == Anim::Roll) || (self.legs_animation.id == Anim::RollBack) { if self.direction == -1 { self.legs_apply_animation(Anim::Run, 1); } else { self.legs_apply_animation(Anim::RunBack, 1); } } if self.legs_animation.id == Anim::Jump { if self.legs_animation.frame < 10 { self.legs_apply_animation(Anim::JumpSide, 1); } } if self.legs_animation.id == Anim::JumpSide { if (self.legs_animation.frame > 3) && (self.legs_animation.frame < 11) { self.particle.force.x = -JUMPDIRSPEED; self.particle.force.y = -JUMPDIRSPEED / 1.2; } } } else if self.control.up { if self.on_ground { if self.legs_animation.id != Anim::Jump { self.legs_apply_animation(Anim::Jump, 1); } if self.legs_animation.frame == self.legs_animation.num_frames() { self.legs_apply_animation(Anim::Stand, 1); } } if self.legs_animation.id == Anim::Jump { if (self.legs_animation.frame > 
8) && (self.legs_animation.frame < 15) { self.particle.force.y = -JUMPSPEED; } if self.legs_animation.frame == self.legs_animation.num_frames() { self.legs_apply_animation(Anim::Fall, 1); } } } else if self.control.down { if self.on_ground { self.legs_apply_animation(Anim::Crouch, 1); } } else if cright { if true { // if self.para = 0 if self.direction == 1 { self.legs_apply_animation(Anim::Run, 1); } else { self.legs_apply_animation(Anim::RunBack, 1); } } if self.on_ground { self.particle.force.x = RUNSPEED; self.particle.force.y = -RUNSPEEDUP; } else { self.particle.force.x = FLYSPEED; } } else if cleft { if true { // if self.para = 0 if self.direction == -1 { self.legs_apply_animation(Anim::Run, 1); } else { self.legs_apply_animation(Anim::RunBack, 1); } } if self.on_ground { self.particle.force.x = -RUNSPEED; self.particle.force.y = -RUNSPEEDUP; } else { self.particle.force.x = -FLYSPEED; } } else if self.on_ground { self.legs_apply_animation(Anim::Stand, 1); } else { self.legs_apply_animation(Anim::Fall, 1); } } // Body animations if (self.legs_animation.id == Anim::Roll) && (self.body_animation.id != Anim::Roll) { self.body_apply_animation(Anim::Roll, 1) } if (self.body_animation.id == Anim::Roll) && (self.legs_animation.id != Anim::Roll) { self.legs_apply_animation(Anim::Roll, 1) } if (self.legs_animation.id == Anim::RollBack) && (self.body_animation.id != Anim::RollBack) { self.body_apply_animation(Anim::RollBack, 1) } if (self.body_animation.id == Anim::RollBack) && (self.legs_animation.id != Anim::RollBack) { self.legs_apply_animation(Anim::RollBack, 1) } if (self.body_animation.id == Anim::Roll) || (self.body_animation.id == Anim::RollBack) { if self.legs_animation.frame != self.body_animation.frame { if self.legs_animation.frame > self.body_animation.frame { self.body_animation.frame = self.legs_animation.frame; } else { self.legs_animation.frame = self.body_animation.frame; } } } // Gracefully end a roll animation if ((self.body_animation.id == 
Anim::Roll) || (self.body_animation.id == Anim::RollBack)) && (self.body_animation.frame == self.body_animation.num_frames()) { // Was probably a roll if self.on_ground { if self.control.down { if cleft || cright { if self.body_animation.id == Anim::Roll { self.legs_apply_animation(Anim::CrouchRun, 1); } else { self.legs_apply_animation(Anim::CrouchRunBack, 1); } } else { self.legs_apply_animation(Anim::Crouch, 15); } } // Was probably a backflip } else if (self.body_animation.id == Anim::RollBack) && self.control.up { if cleft || cright { // Run back or forward depending on facing direction and direction key pressed if (self.direction == 1) ^ (cleft) { self.legs_apply_animation(Anim::Run, 1); } else { self.legs_apply_animation(Anim::RunBack, 1); } } else { self.legs_apply_animation(Anim::Fall, 1); } // Was probably a roll (that ended mid-air) } else if self.control.down { if cleft || cright { if self.body_animation.id == Anim::Roll { self.legs_apply_animation(Anim::CrouchRun, 1); } else { self.legs_apply_animation(Anim::CrouchRunBack, 1); } } else { self.legs_apply_animation(Anim::Crouch, 15); } } self.body_apply_animation(Anim::Stand, 1); } if (!self.control.grenade && (self.body_animation.id != Anim::Recoil) && (self.body_animation.id != Anim::SmallRecoil) && (self.body_animation.id != Anim::AimRecoil) && (self.body_animation.id != Anim::HandsUpRecoil) && (self.body_animation.id != Anim::Shotgun) && (self.body_animation.id != Anim::Barret) && (self.body_animation.id != Anim::Change) && (self.body_animation.id != Anim::ThrowWeapon) && (self.body_animation.id != Anim::WeaponNone) && (self.body_animation.id != Anim::Punch) && (self.body_animation.id != Anim::Roll) && (self.body_animation.id != Anim::RollBack) && (self.body_animation.id != Anim::ReloadBow) && (self.body_animation.id != Anim::Cigar) && (self.body_animation.id != Anim::Match) && (self.body_animation.id != Anim::Smoke) && (self.body_animation.id != Anim::Wipe) && (self.body_animation.id != 
Anim::TakeOff) && (self.body_animation.id != Anim::Groin) && (self.body_animation.id != Anim::Piss) && (self.body_animation.id != Anim::Mercy) && (self.body_animation.id != Anim::Mercy2) && (self.body_animation.id != Anim::Victory) && (self.body_animation.id != Anim::Own) && (self.body_animation.id != Anim::Reload) && (self.body_animation.id != Anim::Prone) && (self.body_animation.id != Anim::GetUp) && (self.body_animation.id != Anim::ProneMove) && (self.body_animation.id != Anim::Melee)) || ((self.body_animation.frame == self.body_animation.num_frames()) && (self.body_animation.id != Anim::Prone)) { if self.position != POS_PRONE { if self.position == POS_STAND { self.body_apply_animation(Anim::Stand, 1); } if self.position == POS_CROUCH { if self.collider_distance < 255 { if self.body_animation.id == Anim::HandsUpRecoil { self.body_apply_animation(Anim::HandsUpAim, 11); } else { self.body_apply_animation(Anim::HandsUpAim, 1); } } else if self.body_animation.id == Anim::AimRecoil { self.body_apply_animation(Anim::Aim, 6); } else { self.body_apply_animation(Anim::Aim, 1); } } } else { self.body_apply_animation(Anim::Prone, 26); } } if (self.legs_animation.id == Anim::Crouch) || (self.legs_animation.id == Anim::CrouchRun) || (self.legs_animation.id == Anim::CrouchRunBack) { self.position = POS_CROUCH; } else { self.position = POS_STAND; } if (self.legs_animation.id == Anim::Prone) || (self.legs_animation.id == Anim::ProneMove) { self.position = POS_PRONE; } } } } impl Control { pub fn free_controls(&mut self) { *self = Default::default(); } }
44.162042
102
0.443453
e5ff6960715ec915046673c19e3ed1c853803336
1,393
use std::{collections::HashMap, sync::Arc}; use settings_utils::apps::api::ApiSettings; use tokio::sync::Mutex; use crate::events::{EventStream, EventSubscriber}; #[derive(Clone)] pub struct MQEvents { pub events: Arc<Mutex<HashMap<String, EventSubscriber>>>, } impl MQEvents { pub async fn subscribe_on_communication_method( &self, topic: &str, settings: &ApiSettings, ) -> anyhow::Result<EventStream> { tracing::debug!("subscribe on message queue: {}", topic); let mut event_map = self.events.lock().await; match event_map.get(topic) { Some(subscriber) => { let stream = subscriber.subscribe(); Ok(stream) } None => { let kafka_events = self.events.clone(); let (subscriber, stream) = EventSubscriber::new(settings, topic, move |topic| async move { tracing::warn!("Message queue stream has closed"); // Remove topic from hashmap so next time someone ask about this stream, // it will be recreated kafka_events.lock().await.remove(&topic); }) .await?; event_map.insert(topic.into(), subscriber); Ok(stream) } } } }
32.395349
96
0.534099
b9be879f2bc1d002a6730bd1dcd796390ad66697
21,270
//! Batch loading support, used to solve N+1 problem. //! //! # Examples //! //! ```rust //! use async_graphql::*; //! use async_graphql::dataloader::*; //! use std::collections::{HashSet, HashMap}; //! use std::convert::Infallible; //! use async_graphql::dataloader::Loader; //! //! /// This loader simply converts the integer key into a string value. //! struct MyLoader; //! //! #[async_trait::async_trait] //! impl Loader<i32> for MyLoader { //! type Value = String; //! type Error = Infallible; //! //! async fn load(&self, keys: &[i32]) -> Result<HashMap<i32, Self::Value>, Self::Error> { //! // Use `MyLoader` to load data. //! Ok(keys.iter().copied().map(|n| (n, n.to_string())).collect()) //! } //! } //! //! struct Query; //! //! #[Object] //! impl Query { //! async fn value(&self, ctx: &Context<'_>, n: i32) -> Option<String> { //! ctx.data_unchecked::<DataLoader<MyLoader>>().load_one(n).await.unwrap() //! } //! } //! //! # tokio::runtime::Runtime::new().unwrap().block_on(async move { //! let schema = Schema::new(Query, EmptyMutation, EmptySubscription); //! let query = r#" //! { //! v1: value(n: 1) //! v2: value(n: 2) //! v3: value(n: 3) //! v4: value(n: 4) //! v5: value(n: 5) //! } //! "#; //! let request = Request::new(query).data(DataLoader::new(MyLoader, tokio::spawn)); //! let res = schema.execute(request).await.into_result().unwrap().data; //! //! assert_eq!(res, value!({ //! "v1": "1", //! "v2": "2", //! "v3": "3", //! "v4": "4", //! "v5": "5", //! })); //! # }); //! 
``` mod cache; use std::any::{Any, TypeId}; use std::borrow::Cow; use std::collections::{HashMap, HashSet}; use std::hash::Hash; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::{Arc, Mutex}; use std::time::Duration; use fnv::FnvHashMap; use futures_channel::oneshot; use futures_timer::Delay; use futures_util::future::BoxFuture; pub use cache::{CacheFactory, CacheStorage, HashMapCache, LruCache, NoCache}; #[allow(clippy::type_complexity)] struct ResSender<K: Send + Sync + Hash + Eq + Clone + 'static, T: Loader<K>> { use_cache_values: HashMap<K, T::Value>, tx: oneshot::Sender<Result<HashMap<K, T::Value>, T::Error>>, } struct Requests<K: Send + Sync + Hash + Eq + Clone + 'static, T: Loader<K>> { keys: HashSet<K>, pending: Vec<(HashSet<K>, ResSender<K, T>)>, cache_storage: Box<dyn CacheStorage<Key = K, Value = T::Value>>, disable_cache: bool, } type KeysAndSender<K, T> = (HashSet<K>, Vec<(HashSet<K>, ResSender<K, T>)>); impl<K: Send + Sync + Hash + Eq + Clone + 'static, T: Loader<K>> Requests<K, T> { fn new<C: CacheFactory>(cache_factory: &C) -> Self { Self { keys: Default::default(), pending: Vec::new(), cache_storage: cache_factory.create::<K, T::Value>(), disable_cache: false, } } fn take(&mut self) -> KeysAndSender<K, T> { ( std::mem::take(&mut self.keys), std::mem::take(&mut self.pending), ) } } /// Trait for batch loading. #[async_trait::async_trait] pub trait Loader<K: Send + Sync + Hash + Eq + Clone + 'static>: Send + Sync + 'static { /// type of value. type Value: Send + Sync + Clone + 'static; /// Type of error. type Error: Send + Clone + 'static; /// Load the data set specified by the `keys`. 
async fn load(&self, keys: &[K]) -> Result<HashMap<K, Self::Value>, Self::Error>; } struct DataLoaderInner<T> { requests: Mutex<FnvHashMap<TypeId, Box<dyn Any + Sync + Send>>>, loader: T, } impl<T> DataLoaderInner<T> { async fn do_load<K>(&self, disable_cache: bool, (keys, senders): KeysAndSender<K, T>) where K: Send + Sync + Hash + Eq + Clone + 'static, T: Loader<K>, { let tid = TypeId::of::<K>(); let keys = keys.into_iter().collect::<Vec<_>>(); match self.loader.load(&keys).await { Ok(values) => { // update cache let mut request = self.requests.lock().unwrap(); let typed_requests = request .get_mut(&tid) .unwrap() .downcast_mut::<Requests<K, T>>() .unwrap(); let disable_cache = typed_requests.disable_cache || disable_cache; if !disable_cache { for (key, value) in &values { typed_requests .cache_storage .insert(Cow::Borrowed(key), Cow::Borrowed(value)); } } // send response for (keys, sender) in senders { let mut res = HashMap::new(); res.extend(sender.use_cache_values); for key in &keys { res.extend(values.get(key).map(|value| (key.clone(), value.clone()))); } sender.tx.send(Ok(res)).ok(); } } Err(err) => { for (_, sender) in senders { sender.tx.send(Err(err.clone())).ok(); } } } } } /// Data loader. /// /// Reference: <https://github.com/facebook/dataloader> pub struct DataLoader<T, C = NoCache> { inner: Arc<DataLoaderInner<T>>, cache_factory: C, delay: Duration, max_batch_size: usize, disable_cache: AtomicBool, spawner: Box<dyn Fn(BoxFuture<'static, ()>) + Send + Sync>, } impl<T> DataLoader<T, NoCache> { /// Use `Loader` to create a [DataLoader] that does not cache records. 
pub fn new<S, R>(loader: T, spawner: S) -> Self where S: Fn(BoxFuture<'static, ()>) -> R + Send + Sync + 'static, { Self { inner: Arc::new(DataLoaderInner { requests: Mutex::new(Default::default()), loader, }), cache_factory: NoCache, delay: Duration::from_millis(1), max_batch_size: 1000, disable_cache: false.into(), spawner: Box::new(move |fut| { spawner(fut); }), } } } impl<T, C: CacheFactory> DataLoader<T, C> { /// Use `Loader` to create a [DataLoader] with a cache factory. pub fn with_cache<S, R>(loader: T, spawner: S, cache_factory: C) -> Self where S: Fn(BoxFuture<'static, ()>) -> R + Send + Sync + 'static, { Self { inner: Arc::new(DataLoaderInner { requests: Mutex::new(Default::default()), loader, }), cache_factory, delay: Duration::from_millis(1), max_batch_size: 1000, disable_cache: false.into(), spawner: Box::new(move |fut| { spawner(fut); }), } } /// Specify the delay time for loading data, the default is `1ms`. pub fn delay(self, delay: Duration) -> Self { Self { delay, ..self } } /// pub fn Specify the max batch size for loading data, the default is `1000`. /// /// If the keys waiting to be loaded reach the threshold, they are loaded immediately. pub fn max_batch_size(self, max_batch_size: usize) -> Self { Self { max_batch_size, ..self } } /// Get the loader. #[inline] pub fn loader(&self) -> &T { &self.inner.loader } /// Enable/Disable cache of all loaders. pub fn enable_all_cache(&self, enable: bool) { self.disable_cache.store(!enable, Ordering::SeqCst); } /// Enable/Disable cache of specified loader. pub fn enable_cache<K>(&self, enable: bool) where K: Send + Sync + Hash + Eq + Clone + 'static, T: Loader<K>, { let tid = TypeId::of::<K>(); let mut requests = self.inner.requests.lock().unwrap(); let typed_requests = requests .get_mut(&tid) .unwrap() .downcast_mut::<Requests<K, T>>() .unwrap(); typed_requests.disable_cache = !enable; } /// Use this `DataLoader` load a data. 
pub async fn load_one<K>(&self, key: K) -> Result<Option<T::Value>, T::Error> where K: Send + Sync + Hash + Eq + Clone + 'static, T: Loader<K>, { let mut values = self.load_many(std::iter::once(key.clone())).await?; Ok(values.remove(&key)) } /// Use this `DataLoader` to load some data. pub async fn load_many<K, I>(&self, keys: I) -> Result<HashMap<K, T::Value>, T::Error> where K: Send + Sync + Hash + Eq + Clone + 'static, I: IntoIterator<Item = K>, T: Loader<K>, { enum Action<K: Send + Sync + Hash + Eq + Clone + 'static, T: Loader<K>> { ImmediateLoad(KeysAndSender<K, T>), StartFetch, Delay, } let tid = TypeId::of::<K>(); let (action, rx) = { let mut requests = self.inner.requests.lock().unwrap(); let typed_requests = requests .entry(tid) .or_insert_with(|| Box::new(Requests::<K, T>::new(&self.cache_factory))) .downcast_mut::<Requests<K, T>>() .unwrap(); let prev_count = typed_requests.keys.len(); let mut keys_set = HashSet::new(); let mut use_cache_values = HashMap::new(); if typed_requests.disable_cache || self.disable_cache.load(Ordering::SeqCst) { keys_set = keys.into_iter().collect(); } else { for key in keys { if let Some(value) = typed_requests.cache_storage.get(&key) { // Already in cache use_cache_values.insert(key.clone(), value.clone()); } else { keys_set.insert(key); } } } if !use_cache_values.is_empty() && keys_set.is_empty() { return Ok(use_cache_values); } else if use_cache_values.is_empty() && keys_set.is_empty() { return Ok(Default::default()); } typed_requests.keys.extend(keys_set.clone()); let (tx, rx) = oneshot::channel(); typed_requests.pending.push(( keys_set, ResSender { use_cache_values, tx, }, )); if typed_requests.keys.len() >= self.max_batch_size { (Action::ImmediateLoad(typed_requests.take()), rx) } else { ( if !typed_requests.keys.is_empty() && prev_count == 0 { Action::StartFetch } else { Action::Delay }, rx, ) } }; match action { Action::ImmediateLoad(keys) => { let inner = self.inner.clone(); let disable_cache = 
self.disable_cache.load(Ordering::SeqCst); (self.spawner)(Box::pin( async move { inner.do_load(disable_cache, keys).await }, )); } Action::StartFetch => { let inner = self.inner.clone(); let disable_cache = self.disable_cache.load(Ordering::SeqCst); let delay = self.delay; (self.spawner)(Box::pin(async move { Delay::new(delay).await; let keys = { let mut request = inner.requests.lock().unwrap(); let typed_requests = request .get_mut(&tid) .unwrap() .downcast_mut::<Requests<K, T>>() .unwrap(); typed_requests.take() }; if !keys.0.is_empty() { inner.do_load(disable_cache, keys).await } })) } Action::Delay => {} } rx.await.unwrap() } /// Feed some data into the cache. /// /// **NOTE: If the cache type is [NoCache], this function will not take effect. ** pub async fn feed_many<K, I>(&self, values: I) where K: Send + Sync + Hash + Eq + Clone + 'static, I: IntoIterator<Item = (K, T::Value)>, T: Loader<K>, { let tid = TypeId::of::<K>(); let mut requests = self.inner.requests.lock().unwrap(); let typed_requests = requests .entry(tid) .or_insert_with(|| Box::new(Requests::<K, T>::new(&self.cache_factory))) .downcast_mut::<Requests<K, T>>() .unwrap(); for (key, value) in values { typed_requests .cache_storage .insert(Cow::Owned(key), Cow::Owned(value)); } } /// Feed some data into the cache. /// /// **NOTE: If the cache type is [NoCache], this function will not take effect. ** pub async fn feed_one<K>(&self, key: K, value: T::Value) where K: Send + Sync + Hash + Eq + Clone + 'static, T: Loader<K>, { self.feed_many(std::iter::once((key, value))).await; } /// Clears the cache. /// /// **NOTE: If the cache type is [NoCache], this function will not take effect. 
** pub fn clear<K>(&self) where K: Send + Sync + Hash + Eq + Clone + 'static, T: Loader<K>, { let tid = TypeId::of::<K>(); let mut requests = self.inner.requests.lock().unwrap(); let typed_requests = requests .entry(tid) .or_insert_with(|| Box::new(Requests::<K, T>::new(&self.cache_factory))) .downcast_mut::<Requests<K, T>>() .unwrap(); typed_requests.cache_storage.clear(); } } #[cfg(test)] mod tests { use super::*; use fnv::FnvBuildHasher; use std::sync::Arc; struct MyLoader; #[async_trait::async_trait] impl Loader<i32> for MyLoader { type Value = i32; type Error = (); async fn load(&self, keys: &[i32]) -> Result<HashMap<i32, Self::Value>, Self::Error> { assert!(keys.len() <= 10); Ok(keys.iter().copied().map(|k| (k, k)).collect()) } } #[async_trait::async_trait] impl Loader<i64> for MyLoader { type Value = i64; type Error = (); async fn load(&self, keys: &[i64]) -> Result<HashMap<i64, Self::Value>, Self::Error> { assert!(keys.len() <= 10); Ok(keys.iter().copied().map(|k| (k, k)).collect()) } } #[tokio::test] async fn test_dataloader() { let loader = Arc::new(DataLoader::new(MyLoader, tokio::spawn).max_batch_size(10)); assert_eq!( futures_util::future::try_join_all((0..100i32).map({ let loader = loader.clone(); move |n| { let loader = loader.clone(); async move { loader.load_one(n).await } } })) .await .unwrap(), (0..100).map(Option::Some).collect::<Vec<_>>() ); assert_eq!( futures_util::future::try_join_all((0..100i64).map({ let loader = loader.clone(); move |n| { let loader = loader.clone(); async move { loader.load_one(n).await } } })) .await .unwrap(), (0..100).map(Option::Some).collect::<Vec<_>>() ); } #[tokio::test] async fn test_duplicate_keys() { let loader = Arc::new(DataLoader::new(MyLoader, tokio::spawn).max_batch_size(10)); assert_eq!( futures_util::future::try_join_all([1, 3, 5, 1, 7, 8, 3, 7].iter().copied().map({ let loader = loader.clone(); move |n| { let loader = loader.clone(); async move { loader.load_one(n).await } } })) .await .unwrap(), [1, 3, 
5, 1, 7, 8, 3, 7] .iter() .copied() .map(Option::Some) .collect::<Vec<_>>() ); } #[tokio::test] async fn test_dataloader_load_empty() { let loader = DataLoader::new(MyLoader, tokio::spawn); assert!(loader.load_many::<i32, _>(vec![]).await.unwrap().is_empty()); } #[tokio::test] async fn test_dataloader_with_cache() { let loader = DataLoader::with_cache(MyLoader, tokio::spawn, HashMapCache::default()); loader.feed_many(vec![(1, 10), (2, 20), (3, 30)]).await; // All from the cache assert_eq!( loader.load_many(vec![1, 2, 3]).await.unwrap(), vec![(1, 10), (2, 20), (3, 30)].into_iter().collect() ); // Part from the cache assert_eq!( loader.load_many(vec![1, 5, 6]).await.unwrap(), vec![(1, 10), (5, 5), (6, 6)].into_iter().collect() ); // All from the loader assert_eq!( loader.load_many(vec![8, 9, 10]).await.unwrap(), vec![(8, 8), (9, 9), (10, 10)].into_iter().collect() ); // Clear cache loader.clear::<i32>(); assert_eq!( loader.load_many(vec![1, 2, 3]).await.unwrap(), vec![(1, 1), (2, 2), (3, 3)].into_iter().collect() ); } #[tokio::test] async fn test_dataloader_with_cache_hashmap_fnv() { let loader = DataLoader::with_cache( MyLoader, tokio::spawn, HashMapCache::<FnvBuildHasher>::new(), ); loader.feed_many(vec![(1, 10), (2, 20), (3, 30)]).await; // All from the cache assert_eq!( loader.load_many(vec![1, 2, 3]).await.unwrap(), vec![(1, 10), (2, 20), (3, 30)].into_iter().collect() ); // Part from the cache assert_eq!( loader.load_many(vec![1, 5, 6]).await.unwrap(), vec![(1, 10), (5, 5), (6, 6)].into_iter().collect() ); // All from the loader assert_eq!( loader.load_many(vec![8, 9, 10]).await.unwrap(), vec![(8, 8), (9, 9), (10, 10)].into_iter().collect() ); // Clear cache loader.clear::<i32>(); assert_eq!( loader.load_many(vec![1, 2, 3]).await.unwrap(), vec![(1, 1), (2, 2), (3, 3)].into_iter().collect() ); } #[tokio::test] async fn test_dataloader_disable_all_cache() { let loader = DataLoader::with_cache(MyLoader, tokio::spawn, HashMapCache::default()); 
loader.feed_many(vec![(1, 10), (2, 20), (3, 30)]).await; // All from the loader loader.enable_all_cache(false); assert_eq!( loader.load_many(vec![1, 2, 3]).await.unwrap(), vec![(1, 1), (2, 2), (3, 3)].into_iter().collect() ); // All from the cache loader.enable_all_cache(true); assert_eq!( loader.load_many(vec![1, 2, 3]).await.unwrap(), vec![(1, 10), (2, 20), (3, 30)].into_iter().collect() ); } #[tokio::test] async fn test_dataloader_disable_cache() { let loader = DataLoader::with_cache(MyLoader, tokio::spawn, HashMapCache::default()); loader.feed_many(vec![(1, 10), (2, 20), (3, 30)]).await; // All from the loader loader.enable_cache::<i32>(false); assert_eq!( loader.load_many(vec![1, 2, 3]).await.unwrap(), vec![(1, 1), (2, 2), (3, 3)].into_iter().collect() ); // All from the cache loader.enable_cache::<i32>(true); assert_eq!( loader.load_many(vec![1, 2, 3]).await.unwrap(), vec![(1, 10), (2, 20), (3, 30)].into_iter().collect() ); } #[tokio::test] async fn test_dataloader_dead_lock() { struct MyDelayLoader; #[async_trait::async_trait] impl Loader<i32> for MyDelayLoader { type Value = i32; type Error = (); async fn load(&self, keys: &[i32]) -> Result<HashMap<i32, Self::Value>, Self::Error> { tokio::time::sleep(Duration::from_secs(1)).await; Ok(keys.iter().copied().map(|k| (k, k)).collect()) } } let loader = Arc::new( DataLoader::with_cache(MyDelayLoader, tokio::spawn, NoCache) .delay(Duration::from_secs(1)), ); let handle = tokio::spawn({ let loader = loader.clone(); async move { loader.load_many(vec![1, 2, 3]).await.unwrap(); } }); tokio::time::sleep(Duration::from_millis(500)).await; handle.abort(); loader.load_many(vec![4, 5, 6]).await.unwrap(); } }
32.081448
98
0.497273
916f90f193aaaa1404783b026f2cca0a204dc018
3,005
#![allow(unused_imports)] use super::*; use wasm_bindgen::prelude::*; #[wasm_bindgen] extern "wasm-bindgen" { # [wasm_bindgen (extends = :: js_sys :: Object , js_name = ProfileTimelineLayerRect)] #[derive(Debug, Clone, PartialEq, Eq)] #[doc = "The `ProfileTimelineLayerRect` dictionary."] #[doc = ""] #[doc = "*This API requires the following crate features to be activated: `ProfileTimelineLayerRect`*"] pub type ProfileTimelineLayerRect; } impl ProfileTimelineLayerRect { #[doc = "Construct a new `ProfileTimelineLayerRect`."] #[doc = ""] #[doc = "*This API requires the following crate features to be activated: `ProfileTimelineLayerRect`*"] pub fn new() -> Self { #[allow(unused_mut)] let mut ret: Self = ::wasm_bindgen::JsCast::unchecked_into(::js_sys::Object::new()); ret } #[doc = "Change the `height` field of this object."] #[doc = ""] #[doc = "*This API requires the following crate features to be activated: `ProfileTimelineLayerRect`*"] pub fn height(&mut self, val: i32) -> &mut Self { use wasm_bindgen::JsValue; let r = ::js_sys::Reflect::set(self.as_ref(), &JsValue::from("height"), &JsValue::from(val)); debug_assert!( r.is_ok(), "setting properties should never fail on our dictionary objects" ); let _ = r; self } #[doc = "Change the `width` field of this object."] #[doc = ""] #[doc = "*This API requires the following crate features to be activated: `ProfileTimelineLayerRect`*"] pub fn width(&mut self, val: i32) -> &mut Self { use wasm_bindgen::JsValue; let r = ::js_sys::Reflect::set(self.as_ref(), &JsValue::from("width"), &JsValue::from(val)); debug_assert!( r.is_ok(), "setting properties should never fail on our dictionary objects" ); let _ = r; self } #[doc = "Change the `x` field of this object."] #[doc = ""] #[doc = "*This API requires the following crate features to be activated: `ProfileTimelineLayerRect`*"] pub fn x(&mut self, val: i32) -> &mut Self { use wasm_bindgen::JsValue; let r = ::js_sys::Reflect::set(self.as_ref(), &JsValue::from("x"), 
&JsValue::from(val)); debug_assert!( r.is_ok(), "setting properties should never fail on our dictionary objects" ); let _ = r; self } #[doc = "Change the `y` field of this object."] #[doc = ""] #[doc = "*This API requires the following crate features to be activated: `ProfileTimelineLayerRect`*"] pub fn y(&mut self, val: i32) -> &mut Self { use wasm_bindgen::JsValue; let r = ::js_sys::Reflect::set(self.as_ref(), &JsValue::from("y"), &JsValue::from(val)); debug_assert!( r.is_ok(), "setting properties should never fail on our dictionary objects" ); let _ = r; self } }
39.539474
107
0.592679
9ba37f78971307df1d63661267e8f83c06f7ae25
3,241
#![allow(unused_imports)] use super::*; use wasm_bindgen::prelude::*; #[cfg(web_sys_unstable_apis)] #[wasm_bindgen] extern "C" { # [wasm_bindgen (extends = :: js_sys :: Object , js_name = VideoFrameCopyToOptions)] #[derive(Debug, Clone, PartialEq, Eq)] #[doc = "The `VideoFrameCopyToOptions` dictionary."] #[doc = ""] #[doc = "*This API requires the following crate features to be activated: `VideoFrameCopyToOptions`*"] #[doc = ""] #[doc = "*This API is unstable and requires `--cfg=web_sys_unstable_apis` to be activated, as"] #[doc = "[described in the `wasm-bindgen` guide](https://rustwasm.github.io/docs/wasm-bindgen/web-sys/unstable-apis.html)*"] pub type VideoFrameCopyToOptions; } #[cfg(web_sys_unstable_apis)] impl VideoFrameCopyToOptions { #[doc = "Construct a new `VideoFrameCopyToOptions`."] #[doc = ""] #[doc = "*This API requires the following crate features to be activated: `VideoFrameCopyToOptions`*"] #[doc = ""] #[doc = "*This API is unstable and requires `--cfg=web_sys_unstable_apis` to be activated, as"] #[doc = "[described in the `wasm-bindgen` guide](https://rustwasm.github.io/docs/wasm-bindgen/web-sys/unstable-apis.html)*"] pub fn new() -> Self { #[allow(unused_mut)] let mut ret: Self = ::wasm_bindgen::JsCast::unchecked_into(::js_sys::Object::new()); ret } #[cfg(web_sys_unstable_apis)] #[doc = "Change the `layout` field of this object."] #[doc = ""] #[doc = "*This API requires the following crate features to be activated: `VideoFrameCopyToOptions`*"] #[doc = ""] #[doc = "*This API is unstable and requires `--cfg=web_sys_unstable_apis` to be activated, as"] #[doc = "[described in the `wasm-bindgen` guide](https://rustwasm.github.io/docs/wasm-bindgen/web-sys/unstable-apis.html)*"] pub fn layout(&mut self, val: &::wasm_bindgen::JsValue) -> &mut Self { use wasm_bindgen::JsValue; let r = ::js_sys::Reflect::set(self.as_ref(), &JsValue::from("layout"), &JsValue::from(val)); debug_assert!( r.is_ok(), "setting properties should never fail on our dictionary 
objects" ); let _ = r; self } #[cfg(web_sys_unstable_apis)] #[cfg(feature = "DomRectInit")] #[doc = "Change the `rect` field of this object."] #[doc = ""] #[doc = "*This API requires the following crate features to be activated: `DomRectInit`, `VideoFrameCopyToOptions`*"] #[doc = ""] #[doc = "*This API is unstable and requires `--cfg=web_sys_unstable_apis` to be activated, as"] #[doc = "[described in the `wasm-bindgen` guide](https://rustwasm.github.io/docs/wasm-bindgen/web-sys/unstable-apis.html)*"] pub fn rect(&mut self, val: &DomRectInit) -> &mut Self { use wasm_bindgen::JsValue; let r = ::js_sys::Reflect::set(self.as_ref(), &JsValue::from("rect"), &JsValue::from(val)); debug_assert!( r.is_ok(), "setting properties should never fail on our dictionary objects" ); let _ = r; self } } #[cfg(web_sys_unstable_apis)] impl Default for VideoFrameCopyToOptions { fn default() -> Self { Self::new() } }
44.39726
128
0.633138
11b706b625acda3a82fc62686572e56ec6bc3b3f
37,753
use crate::future::poll_fn; use crate::io::{AsyncRead, AsyncWrite, Interest, PollEvented, ReadBuf, Ready}; use crate::net::tcp::split::{split, ReadHalf, WriteHalf}; use crate::net::tcp::split_owned::{split_owned, OwnedReadHalf, OwnedWriteHalf}; use crate::net::{to_socket_addrs, ToSocketAddrs}; use std::convert::TryFrom; use std::fmt; use std::io; use std::net::{Shutdown, SocketAddr}; #[cfg(windows)] use std::os::windows::io::{AsRawSocket, FromRawSocket, IntoRawSocket}; #[cfg(unix)] use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd}; use std::pin::Pin; use std::task::{Context, Poll}; use std::time::Duration; cfg_io_util! { use bytes::BufMut; } cfg_net! { /// A TCP stream between a local and a remote socket. /// /// A TCP stream can either be created by connecting to an endpoint, via the /// [`connect`] method, or by [accepting] a connection from a [listener]. /// /// Reading and writing to a `TcpStream` is usually done using the /// convenience methods found on the [`AsyncReadExt`] and [`AsyncWriteExt`] /// traits. /// /// [`connect`]: method@TcpStream::connect /// [accepting]: method@crate::net::TcpListener::accept /// [listener]: struct@crate::net::TcpListener /// [`AsyncReadExt`]: trait@crate::io::AsyncReadExt /// [`AsyncWriteExt`]: trait@crate::io::AsyncWriteExt /// /// # Examples /// /// ```no_run /// use tokio::net::TcpStream; /// use tokio::io::AsyncWriteExt; /// use std::error::Error; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn Error>> { /// // Connect to a peer /// let mut stream = TcpStream::connect("127.0.0.1:8080").await?; /// /// // Write some data. /// stream.write_all(b"hello world!").await?; /// /// Ok(()) /// } /// ``` /// /// The [`write_all`] method is defined on the [`AsyncWriteExt`] trait. /// /// [`write_all`]: fn@crate::io::AsyncWriteExt::write_all /// [`AsyncWriteExt`]: trait@crate::io::AsyncWriteExt /// /// To shut down the stream in the write direction, you can call the /// [`shutdown()`] method. 
This will cause the other peer to receive a read of /// length 0, indicating that no more data will be sent. This only closes /// the stream in one direction. /// /// [`shutdown()`]: fn@crate::io::AsyncWriteExt::shutdown pub struct TcpStream { io: PollEvented<mio::net::TcpStream>, } } impl TcpStream { /// Opens a TCP connection to a remote host. /// /// `addr` is an address of the remote host. Anything which implements the /// [`ToSocketAddrs`] trait can be supplied as the address. Note that /// strings only implement this trait when the **`net`** feature is enabled, /// as strings may contain domain names that need to be resolved. /// /// If `addr` yields multiple addresses, connect will be attempted with each /// of the addresses until a connection is successful. If none of the /// addresses result in a successful connection, the error returned from the /// last connection attempt (the last address) is returned. /// /// [`ToSocketAddrs`]: trait@crate::net::ToSocketAddrs /// /// # Examples /// /// ```no_run /// use tokio::net::TcpStream; /// use tokio::io::AsyncWriteExt; /// use std::error::Error; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn Error>> { /// // Connect to a peer /// let mut stream = TcpStream::connect("127.0.0.1:8080").await?; /// /// // Write some data. /// stream.write_all(b"hello world!").await?; /// /// Ok(()) /// } /// ``` /// /// The [`write_all`] method is defined on the [`AsyncWriteExt`] trait. 
/// /// [`write_all`]: fn@crate::io::AsyncWriteExt::write_all /// [`AsyncWriteExt`]: trait@crate::io::AsyncWriteExt pub async fn connect<A: ToSocketAddrs>(addr: A) -> io::Result<TcpStream> { let addrs = to_socket_addrs(addr).await?; let mut last_err = None; for addr in addrs { match TcpStream::connect_addr(addr).await { Ok(stream) => return Ok(stream), Err(e) => last_err = Some(e), } } Err(last_err.unwrap_or_else(|| { io::Error::new( io::ErrorKind::InvalidInput, "could not resolve to any address", ) })) } /// Establishes a connection to the specified `addr`. async fn connect_addr(addr: SocketAddr) -> io::Result<TcpStream> { let sys = mio::net::TcpStream::connect(addr)?; TcpStream::connect_mio(sys).await } pub(crate) async fn connect_mio(sys: mio::net::TcpStream) -> io::Result<TcpStream> { let stream = TcpStream::new(sys)?; // Once we've connected, wait for the stream to be writable as // that's when the actual connection has been initiated. Once we're // writable we check for `take_socket_error` to see if the connect // actually hit an error or not. // // If all that succeeded then we ship everything on up. poll_fn(|cx| stream.io.registration().poll_write_ready(cx)).await?; if let Some(e) = stream.io.take_error()? { return Err(e); } Ok(stream) } pub(crate) fn new(connected: mio::net::TcpStream) -> io::Result<TcpStream> { let io = PollEvented::new(connected)?; Ok(TcpStream { io }) } /// Creates new `TcpStream` from a `std::net::TcpStream`. /// /// This function is intended to be used to wrap a TCP stream from the /// standard library in the Tokio equivalent. The conversion assumes nothing /// about the underlying stream; it is left up to the user to set it in /// non-blocking mode. 
/// /// # Examples /// /// ```rust,no_run /// use std::error::Error; /// use tokio::net::TcpStream; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn Error>> { /// let std_stream = std::net::TcpStream::connect("127.0.0.1:34254")?; /// std_stream.set_nonblocking(true)?; /// let stream = TcpStream::from_std(std_stream)?; /// Ok(()) /// } /// ``` /// /// # Panics /// /// This function panics if thread-local runtime is not set. /// /// The runtime is usually set implicitly when this function is called /// from a future driven by a tokio runtime, otherwise runtime can be set /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. pub fn from_std(stream: std::net::TcpStream) -> io::Result<TcpStream> { let io = mio::net::TcpStream::from_std(stream); let io = PollEvented::new(io)?; Ok(TcpStream { io }) } /// Turn a [`tokio::net::TcpStream`] into a [`std::net::TcpStream`]. /// /// The returned [`std::net::TcpStream`] will have `nonblocking mode` set as `true`. /// Use [`set_nonblocking`] to change the blocking mode if needed. 
/// /// # Examples /// /// ``` /// use std::error::Error; /// use std::io::Read; /// use tokio::net::TcpListener; /// # use tokio::net::TcpStream; /// # use tokio::io::AsyncWriteExt; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn Error>> { /// let mut data = [0u8; 12]; /// let listener = TcpListener::bind("127.0.0.1:34254").await?; /// # let handle = tokio::spawn(async { /// # let mut stream: TcpStream = TcpStream::connect("127.0.0.1:34254").await.unwrap(); /// # stream.write(b"Hello world!").await.unwrap(); /// # }); /// let (tokio_tcp_stream, _) = listener.accept().await?; /// let mut std_tcp_stream = tokio_tcp_stream.into_std()?; /// # handle.await.expect("The task being joined has panicked"); /// std_tcp_stream.set_nonblocking(false)?; /// std_tcp_stream.read_exact(&mut data)?; /// # assert_eq!(b"Hello world!", &data); /// Ok(()) /// } /// ``` /// [`tokio::net::TcpStream`]: TcpStream /// [`std::net::TcpStream`]: std::net::TcpStream /// [`set_nonblocking`]: fn@std::net::TcpStream::set_nonblocking pub fn into_std(self) -> io::Result<std::net::TcpStream> { #[cfg(unix)] { self.io .into_inner() .map(|io| io.into_raw_fd()) .map(|raw_fd| unsafe { std::net::TcpStream::from_raw_fd(raw_fd) }) } #[cfg(windows)] { self.io .into_inner() .map(|io| io.into_raw_socket()) .map(|raw_socket| unsafe { std::net::TcpStream::from_raw_socket(raw_socket) }) } } /// Returns the local address that this stream is bound to. /// /// # Examples /// /// ```no_run /// use tokio::net::TcpStream; /// /// # async fn dox() -> Result<(), Box<dyn std::error::Error>> { /// let stream = TcpStream::connect("127.0.0.1:8080").await?; /// /// println!("{:?}", stream.local_addr()?); /// # Ok(()) /// # } /// ``` pub fn local_addr(&self) -> io::Result<SocketAddr> { self.io.local_addr() } /// Returns the remote address that this stream is connected to. 
/// /// # Examples /// /// ```no_run /// use tokio::net::TcpStream; /// /// # async fn dox() -> Result<(), Box<dyn std::error::Error>> { /// let stream = TcpStream::connect("127.0.0.1:8080").await?; /// /// println!("{:?}", stream.peer_addr()?); /// # Ok(()) /// # } /// ``` pub fn peer_addr(&self) -> io::Result<SocketAddr> { self.io.peer_addr() } /// Attempts to receive data on the socket, without removing that data from /// the queue, registering the current task for wakeup if data is not yet /// available. /// /// Note that on multiple calls to `poll_peek`, `poll_read` or /// `poll_read_ready`, only the `Waker` from the `Context` passed to the /// most recent call is scheduled to receive a wakeup. (However, /// `poll_write` retains a second, independent waker.) /// /// # Return value /// /// The function returns: /// /// * `Poll::Pending` if data is not yet available. /// * `Poll::Ready(Ok(n))` if data is available. `n` is the number of bytes peeked. /// * `Poll::Ready(Err(e))` if an error is encountered. /// /// # Errors /// /// This function may encounter any standard I/O error except `WouldBlock`. 
/// /// # Examples /// /// ```no_run /// use tokio::io::{self, ReadBuf}; /// use tokio::net::TcpStream; /// /// use futures::future::poll_fn; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// let stream = TcpStream::connect("127.0.0.1:8000").await?; /// let mut buf = [0; 10]; /// let mut buf = ReadBuf::new(&mut buf); /// /// poll_fn(|cx| { /// stream.poll_peek(cx, &mut buf) /// }).await?; /// /// Ok(()) /// } /// ``` pub fn poll_peek( &self, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll<io::Result<usize>> { loop { let ev = ready!(self.io.registration().poll_read_ready(cx))?; let b = unsafe { &mut *(buf.unfilled_mut() as *mut [std::mem::MaybeUninit<u8>] as *mut [u8]) }; match self.io.peek(b) { Ok(ret) => { unsafe { buf.assume_init(ret) }; buf.advance(ret); return Poll::Ready(Ok(ret)); } Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { self.io.registration().clear_readiness(ev); } Err(e) => return Poll::Ready(Err(e)), } } } /// Wait for any of the requested ready states. /// /// This function is usually paired with `try_read()` or `try_write()`. It /// can be used to concurrently read / write to the same socket on a single /// task without splitting the socket. /// /// # Examples /// /// Concurrently read and write to the stream on the same task without /// splitting. /// /// ```no_run /// use tokio::io::Interest; /// use tokio::net::TcpStream; /// use std::error::Error; /// use std::io; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn Error>> { /// let stream = TcpStream::connect("127.0.0.1:8080").await?; /// /// loop { /// let ready = stream.ready(Interest::READABLE | Interest::WRITABLE).await?; /// /// if ready.is_readable() { /// let mut data = vec![0; 1024]; /// // Try to read data, this may still fail with `WouldBlock` /// // if the readiness event is a false positive. 
/// match stream.try_read(&mut data) { /// Ok(n) => { /// println!("read {} bytes", n); /// } /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { /// continue; /// } /// Err(e) => { /// return Err(e.into()); /// } /// } /// /// } /// /// if ready.is_writable() { /// // Try to write data, this may still fail with `WouldBlock` /// // if the readiness event is a false positive. /// match stream.try_write(b"hello world") { /// Ok(n) => { /// println!("write {} bytes", n); /// } /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { /// continue /// } /// Err(e) => { /// return Err(e.into()); /// } /// } /// } /// } /// } /// ``` pub async fn ready(&self, interest: Interest) -> io::Result<Ready> { let event = self.io.registration().readiness(interest).await?; Ok(event.ready) } /// Wait for the socket to become readable. /// /// This function is equivalent to `ready(Interest::READABLE)` and is usually /// paired with `try_read()`. /// /// # Examples /// /// ```no_run /// use tokio::net::TcpStream; /// use std::error::Error; /// use std::io; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn Error>> { /// // Connect to a peer /// let stream = TcpStream::connect("127.0.0.1:8080").await?; /// /// let mut msg = vec![0; 1024]; /// /// loop { /// // Wait for the socket to be readable /// stream.readable().await?; /// /// // Try to read data, this may still fail with `WouldBlock` /// // if the readiness event is a false positive. /// match stream.try_read(&mut msg) { /// Ok(n) => { /// msg.truncate(n); /// break; /// } /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { /// continue; /// } /// Err(e) => { /// return Err(e.into()); /// } /// } /// } /// /// println!("GOT = {:?}", msg); /// Ok(()) /// } /// ``` pub async fn readable(&self) -> io::Result<()> { self.ready(Interest::READABLE).await?; Ok(()) } /// Polls for read readiness. 
/// /// If the tcp stream is not currently ready for reading, this method will /// store a clone of the `Waker` from the provided `Context`. When the tcp /// stream becomes ready for reading, `Waker::wake` will be called on the /// waker. /// /// Note that on multiple calls to `poll_read_ready`, `poll_read` or /// `poll_peek`, only the `Waker` from the `Context` passed to the most /// recent call is scheduled to receive a wakeup. (However, /// `poll_write_ready` retains a second, independent waker.) /// /// This function is intended for cases where creating and pinning a future /// via [`readable`] is not feasible. Where possible, using [`readable`] is /// preferred, as this supports polling from multiple tasks at once. /// /// # Return value /// /// The function returns: /// /// * `Poll::Pending` if the tcp stream is not ready for reading. /// * `Poll::Ready(Ok(()))` if the tcp stream is ready for reading. /// * `Poll::Ready(Err(e))` if an error is encountered. /// /// # Errors /// /// This function may encounter any standard I/O error except `WouldBlock`. /// /// [`readable`]: method@Self::readable pub fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<()>> { self.io.registration().poll_read_ready(cx).map_ok(|_| ()) } /// Try to read data from the stream into the provided buffer, returning how /// many bytes were read. /// /// Receives any pending data from the socket but does not wait for new data /// to arrive. On success, returns the number of bytes read. Because /// `try_read()` is non-blocking, the buffer does not have to be stored by /// the async task and can exist entirely on the stack. /// /// Usually, [`readable()`] or [`ready()`] is used with this function. /// /// [`readable()`]: TcpStream::readable() /// [`ready()`]: TcpStream::ready() /// /// # Return /// /// If data is successfully read, `Ok(n)` is returned, where `n` is the /// number of bytes read. 
`Ok(0)` indicates the stream's read half is closed /// and will no longer yield data. If the stream is not ready to read data /// `Err(io::ErrorKind::WouldBlock)` is returned. /// /// # Examples /// /// ```no_run /// use tokio::net::TcpStream; /// use std::error::Error; /// use std::io; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn Error>> { /// // Connect to a peer /// let stream = TcpStream::connect("127.0.0.1:8080").await?; /// /// loop { /// // Wait for the socket to be readable /// stream.readable().await?; /// /// // Creating the buffer **after** the `await` prevents it from /// // being stored in the async task. /// let mut buf = [0; 4096]; /// /// // Try to read data, this may still fail with `WouldBlock` /// // if the readiness event is a false positive. /// match stream.try_read(&mut buf) { /// Ok(0) => break, /// Ok(n) => { /// println!("read {} bytes", n); /// } /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { /// continue; /// } /// Err(e) => { /// return Err(e.into()); /// } /// } /// } /// /// Ok(()) /// } /// ``` pub fn try_read(&self, buf: &mut [u8]) -> io::Result<usize> { use std::io::Read; self.io .registration() .try_io(Interest::READABLE, || (&*self.io).read(buf)) } cfg_io_util! { /// Try to read data from the stream into the provided buffer, advancing the /// buffer's internal cursor, returning how many bytes were read. /// /// Receives any pending data from the socket but does not wait for new data /// to arrive. On success, returns the number of bytes read. Because /// `try_read_buf()` is non-blocking, the buffer does not have to be stored by /// the async task and can exist entirely on the stack. /// /// Usually, [`readable()`] or [`ready()`] is used with this function. /// /// [`readable()`]: TcpStream::readable() /// [`ready()`]: TcpStream::ready() /// /// # Return /// /// If data is successfully read, `Ok(n)` is returned, where `n` is the /// number of bytes read. 
`Ok(0)` indicates the stream's read half is closed /// and will no longer yield data. If the stream is not ready to read data /// `Err(io::ErrorKind::WouldBlock)` is returned. /// /// # Examples /// /// ```no_run /// use tokio::net::TcpStream; /// use std::error::Error; /// use std::io; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn Error>> { /// // Connect to a peer /// let stream = TcpStream::connect("127.0.0.1:8080").await?; /// /// loop { /// // Wait for the socket to be readable /// stream.readable().await?; /// /// let mut buf = Vec::with_capacity(4096); /// /// // Try to read data, this may still fail with `WouldBlock` /// // if the readiness event is a false positive. /// match stream.try_read_buf(&mut buf) { /// Ok(0) => break, /// Ok(n) => { /// println!("read {} bytes", n); /// } /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { /// continue; /// } /// Err(e) => { /// return Err(e.into()); /// } /// } /// } /// /// Ok(()) /// } /// ``` pub fn try_read_buf<B: BufMut>(&self, buf: &mut B) -> io::Result<usize> { self.io.registration().try_io(Interest::READABLE, || { use std::io::Read; let dst = buf.chunk_mut(); let dst = unsafe { &mut *(dst as *mut _ as *mut [std::mem::MaybeUninit<u8>] as *mut [u8]) }; // Safety: We trust `TcpStream::read` to have filled up `n` bytes in the // buffer. let n = (&*self.io).read(dst)?; unsafe { buf.advance_mut(n); } Ok(n) }) } } /// Wait for the socket to become writable. /// /// This function is equivalent to `ready(Interest::WRITABLE)` and is usually /// paired with `try_write()`. 
/// /// # Examples /// /// ```no_run /// use tokio::net::TcpStream; /// use std::error::Error; /// use std::io; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn Error>> { /// // Connect to a peer /// let stream = TcpStream::connect("127.0.0.1:8080").await?; /// /// loop { /// // Wait for the socket to be writable /// stream.writable().await?; /// /// // Try to write data, this may still fail with `WouldBlock` /// // if the readiness event is a false positive. /// match stream.try_write(b"hello world") { /// Ok(n) => { /// break; /// } /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { /// continue; /// } /// Err(e) => { /// return Err(e.into()); /// } /// } /// } /// /// Ok(()) /// } /// ``` pub async fn writable(&self) -> io::Result<()> { self.ready(Interest::WRITABLE).await?; Ok(()) } /// Polls for write readiness. /// /// If the tcp stream is not currently ready for writing, this method will /// store a clone of the `Waker` from the provided `Context`. When the tcp /// stream becomes ready for writing, `Waker::wake` will be called on the /// waker. /// /// Note that on multiple calls to `poll_write_ready` or `poll_write`, only /// the `Waker` from the `Context` passed to the most recent call is /// scheduled to receive a wakeup. (However, `poll_read_ready` retains a /// second, independent waker.) /// /// This function is intended for cases where creating and pinning a future /// via [`writable`] is not feasible. Where possible, using [`writable`] is /// preferred, as this supports polling from multiple tasks at once. /// /// # Return value /// /// The function returns: /// /// * `Poll::Pending` if the tcp stream is not ready for writing. /// * `Poll::Ready(Ok(()))` if the tcp stream is ready for writing. /// * `Poll::Ready(Err(e))` if an error is encountered. /// /// # Errors /// /// This function may encounter any standard I/O error except `WouldBlock`. 
/// /// [`writable`]: method@Self::writable pub fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<()>> { self.io.registration().poll_write_ready(cx).map_ok(|_| ()) } /// Try to write a buffer to the stream, returning how many bytes were /// written. /// /// The function will attempt to write the entire contents of `buf`, but /// only part of the buffer may be written. /// /// This function is usually paired with `writable()`. /// /// # Return /// /// If data is successfully written, `Ok(n)` is returned, where `n` is the /// number of bytes written. If the stream is not ready to write data, /// `Err(io::ErrorKind::WouldBlock)` is returned. /// /// # Examples /// /// ```no_run /// use tokio::net::TcpStream; /// use std::error::Error; /// use std::io; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn Error>> { /// // Connect to a peer /// let stream = TcpStream::connect("127.0.0.1:8080").await?; /// /// loop { /// // Wait for the socket to be writable /// stream.writable().await?; /// /// // Try to write data, this may still fail with `WouldBlock` /// // if the readiness event is a false positive. /// match stream.try_write(b"hello world") { /// Ok(n) => { /// break; /// } /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { /// continue; /// } /// Err(e) => { /// return Err(e.into()); /// } /// } /// } /// /// Ok(()) /// } /// ``` pub fn try_write(&self, buf: &[u8]) -> io::Result<usize> { use std::io::Write; self.io .registration() .try_io(Interest::WRITABLE, || (&*self.io).write(buf)) } /// Receives data on the socket from the remote address to which it is /// connected, without removing that data from the queue. On success, /// returns the number of bytes peeked. /// /// Successive calls return the same data. This is accomplished by passing /// `MSG_PEEK` as a flag to the underlying recv system call. 
/// /// # Examples /// /// ```no_run /// use tokio::net::TcpStream; /// use tokio::io::AsyncReadExt; /// use std::error::Error; /// /// #[tokio::main] /// async fn main() -> Result<(), Box<dyn Error>> { /// // Connect to a peer /// let mut stream = TcpStream::connect("127.0.0.1:8080").await?; /// /// let mut b1 = [0; 10]; /// let mut b2 = [0; 10]; /// /// // Peek at the data /// let n = stream.peek(&mut b1).await?; /// /// // Read the data /// assert_eq!(n, stream.read(&mut b2[..n]).await?); /// assert_eq!(&b1[..n], &b2[..n]); /// /// Ok(()) /// } /// ``` /// /// The [`read`] method is defined on the [`AsyncReadExt`] trait. /// /// [`read`]: fn@crate::io::AsyncReadExt::read /// [`AsyncReadExt`]: trait@crate::io::AsyncReadExt pub async fn peek(&self, buf: &mut [u8]) -> io::Result<usize> { self.io .registration() .async_io(Interest::READABLE, || self.io.peek(buf)) .await } /// Shuts down the read, write, or both halves of this connection. /// /// This function will cause all pending and future I/O on the specified /// portions to return immediately with an appropriate value (see the /// documentation of `Shutdown`). pub(super) fn shutdown_std(&self, how: Shutdown) -> io::Result<()> { self.io.shutdown(how) } /// Gets the value of the `TCP_NODELAY` option on this socket. /// /// For more information about this option, see [`set_nodelay`]. /// /// [`set_nodelay`]: TcpStream::set_nodelay /// /// # Examples /// /// ```no_run /// use tokio::net::TcpStream; /// /// # async fn dox() -> Result<(), Box<dyn std::error::Error>> { /// let stream = TcpStream::connect("127.0.0.1:8080").await?; /// /// println!("{:?}", stream.nodelay()?); /// # Ok(()) /// # } /// ``` pub fn nodelay(&self) -> io::Result<bool> { self.io.nodelay() } /// Sets the value of the `TCP_NODELAY` option on this socket. /// /// If set, this option disables the Nagle algorithm. This means that /// segments are always sent as soon as possible, even if there is only a /// small amount of data. 
When not set, data is buffered until there is a /// sufficient amount to send out, thereby avoiding the frequent sending of /// small packets. /// /// # Examples /// /// ```no_run /// use tokio::net::TcpStream; /// /// # async fn dox() -> Result<(), Box<dyn std::error::Error>> { /// let stream = TcpStream::connect("127.0.0.1:8080").await?; /// /// stream.set_nodelay(true)?; /// # Ok(()) /// # } /// ``` pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> { self.io.set_nodelay(nodelay) } /// Reads the linger duration for this socket by getting the `SO_LINGER` /// option. /// /// For more information about this option, see [`set_linger`]. /// /// [`set_linger`]: TcpStream::set_linger /// /// # Examples /// /// ```no_run /// use tokio::net::TcpStream; /// /// # async fn dox() -> Result<(), Box<dyn std::error::Error>> { /// let stream = TcpStream::connect("127.0.0.1:8080").await?; /// /// println!("{:?}", stream.linger()?); /// # Ok(()) /// # } /// ``` pub fn linger(&self) -> io::Result<Option<Duration>> { let mio_socket = std::mem::ManuallyDrop::new(self.to_mio()); mio_socket.get_linger() } /// Sets the linger duration of this socket by setting the SO_LINGER option. /// /// This option controls the action taken when a stream has unsent messages and the stream is /// closed. If SO_LINGER is set, the system shall block the process until it can transmit the /// data or until the time expires. /// /// If SO_LINGER is not specified, and the stream is closed, the system handles the call in a /// way that allows the process to continue as quickly as possible. 
/// /// # Examples /// /// ```no_run /// use tokio::net::TcpStream; /// /// # async fn dox() -> Result<(), Box<dyn std::error::Error>> { /// let stream = TcpStream::connect("127.0.0.1:8080").await?; /// /// stream.set_linger(None)?; /// # Ok(()) /// # } /// ``` pub fn set_linger(&self, dur: Option<Duration>) -> io::Result<()> { let mio_socket = std::mem::ManuallyDrop::new(self.to_mio()); mio_socket.set_linger(dur) } fn to_mio(&self) -> mio::net::TcpSocket { #[cfg(windows)] { unsafe { mio::net::TcpSocket::from_raw_socket(self.as_raw_socket()) } } #[cfg(unix)] { unsafe { mio::net::TcpSocket::from_raw_fd(self.as_raw_fd()) } } } /// Gets the value of the `IP_TTL` option for this socket. /// /// For more information about this option, see [`set_ttl`]. /// /// [`set_ttl`]: TcpStream::set_ttl /// /// # Examples /// /// ```no_run /// use tokio::net::TcpStream; /// /// # async fn dox() -> Result<(), Box<dyn std::error::Error>> { /// let stream = TcpStream::connect("127.0.0.1:8080").await?; /// /// println!("{:?}", stream.ttl()?); /// # Ok(()) /// # } /// ``` pub fn ttl(&self) -> io::Result<u32> { self.io.ttl() } /// Sets the value for the `IP_TTL` option on this socket. /// /// This value sets the time-to-live field that is used in every packet sent /// from this socket. /// /// # Examples /// /// ```no_run /// use tokio::net::TcpStream; /// /// # async fn dox() -> Result<(), Box<dyn std::error::Error>> { /// let stream = TcpStream::connect("127.0.0.1:8080").await?; /// /// stream.set_ttl(123)?; /// # Ok(()) /// # } /// ``` pub fn set_ttl(&self, ttl: u32) -> io::Result<()> { self.io.set_ttl(ttl) } // These lifetime markers also appear in the generated documentation, and make // it more clear that this is a *borrowed* split. #[allow(clippy::needless_lifetimes)] /// Splits a `TcpStream` into a read half and a write half, which can be used /// to read and write the stream concurrently. 
/// /// This method is more efficient than [`into_split`], but the halves cannot be /// moved into independently spawned tasks. /// /// [`into_split`]: TcpStream::into_split() pub fn split<'a>(&'a mut self) -> (ReadHalf<'a>, WriteHalf<'a>) { split(self) } /// Splits a `TcpStream` into a read half and a write half, which can be used /// to read and write the stream concurrently. /// /// Unlike [`split`], the owned halves can be moved to separate tasks, however /// this comes at the cost of a heap allocation. /// /// **Note:** Dropping the write half will shut down the write half of the TCP /// stream. This is equivalent to calling [`shutdown()`] on the `TcpStream`. /// /// [`split`]: TcpStream::split() /// [`shutdown()`]: fn@crate::io::AsyncWriteExt::shutdown pub fn into_split(self) -> (OwnedReadHalf, OwnedWriteHalf) { split_owned(self) } pub(crate) fn poll_read_priv( &self, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll<io::Result<()>> { // Safety: `TcpStream::read` correctly handles reads into uninitialized memory unsafe { self.io.poll_read(cx, buf) } } pub(super) fn poll_write_priv( &self, cx: &mut Context<'_>, buf: &[u8], ) -> Poll<io::Result<usize>> { self.io.poll_write(cx, buf) } pub(super) fn poll_write_vectored_priv( &self, cx: &mut Context<'_>, bufs: &[io::IoSlice<'_>], ) -> Poll<io::Result<usize>> { self.io.poll_write_vectored(cx, bufs) } } impl TryFrom<std::net::TcpStream> for TcpStream { type Error = io::Error; /// Consumes stream, returning the tokio I/O object. /// /// This is equivalent to /// [`TcpStream::from_std(stream)`](TcpStream::from_std). 
fn try_from(stream: std::net::TcpStream) -> Result<Self, Self::Error> { Self::from_std(stream) } } // ===== impl Read / Write ===== impl AsyncRead for TcpStream { fn poll_read( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll<io::Result<()>> { self.poll_read_priv(cx, buf) } } impl AsyncWrite for TcpStream { fn poll_write( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8], ) -> Poll<io::Result<usize>> { self.poll_write_priv(cx, buf) } fn poll_write_vectored( self: Pin<&mut Self>, cx: &mut Context<'_>, bufs: &[io::IoSlice<'_>], ) -> Poll<io::Result<usize>> { self.poll_write_vectored_priv(cx, bufs) } fn is_write_vectored(&self) -> bool { true } #[inline] fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> { // tcp flush is a no-op Poll::Ready(Ok(())) } fn poll_shutdown(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> { self.shutdown_std(std::net::Shutdown::Write)?; Poll::Ready(Ok(())) } } impl fmt::Debug for TcpStream { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.io.fmt(f) } } #[cfg(unix)] mod sys { use super::TcpStream; use std::os::unix::prelude::*; impl AsRawFd for TcpStream { fn as_raw_fd(&self) -> RawFd { self.io.as_raw_fd() } } } #[cfg(windows)] mod sys { use super::TcpStream; use std::os::windows::prelude::*; impl AsRawSocket for TcpStream { fn as_raw_socket(&self) -> RawSocket { self.io.as_raw_socket() } } }
33.558222
102
0.514635
5da6c3acbb63cd0ed058cbe51e06c45054207de4
613
/* * * * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: 1.0.0 * * Generated by: https://openapi-generator.tech */ #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct LolLobbyGameflowGameData { #[serde(rename = "queue", skip_serializing_if = "Option::is_none")] pub queue: Option<crate::models::LolLobbyQueue>, } impl LolLobbyGameflowGameData { pub fn new() -> LolLobbyGameflowGameData { LolLobbyGameflowGameData { queue: None, } } }
21.137931
109
0.67863
6a1f6cc48648153531899c339dcae84c6465ae11
2,214
#[doc = "Register `TASKS_SAMPLE` writer"] pub struct W(crate::W<TASKS_SAMPLE_SPEC>); impl core::ops::Deref for W { type Target = crate::W<TASKS_SAMPLE_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl core::ops::DerefMut for W { #[inline(always)] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl From<crate::W<TASKS_SAMPLE_SPEC>> for W { #[inline(always)] fn from(writer: crate::W<TASKS_SAMPLE_SPEC>) -> Self { W(writer) } } #[doc = "Field `TASKS_SAMPLE` writer - "] pub struct TASKS_SAMPLE_W<'a> { w: &'a mut W, } impl<'a> TASKS_SAMPLE_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !0x01) | (value as u32 & 0x01); self.w } } impl W { #[doc = "Bit 0"] #[inline(always)] pub fn tasks_sample(&mut self) -> TASKS_SAMPLE_W { TASKS_SAMPLE_W { w: self } } #[doc = "Writes raw bits to the register."] #[inline(always)] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.0.bits(bits); self } } #[doc = "Sample comparator value\n\nThis register you can [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [tasks_sample](index.html) module"] pub struct TASKS_SAMPLE_SPEC; impl crate::RegisterSpec for TASKS_SAMPLE_SPEC { type Ux = u32; } #[doc = "`write(|w| ..)` method takes [tasks_sample::W](W) writer structure"] impl crate::Writable for TASKS_SAMPLE_SPEC { type Writer = W; } #[doc = "`reset()` method sets TASKS_SAMPLE to value 0"] impl crate::Resettable for TASKS_SAMPLE_SPEC { #[inline(always)] fn reset_value() -> Self::Ux { 0 } }
30.328767
338
0.604336
bbe61e7203b909d625c75cd6eb719ede89122d90
898
use std::time::Duration; use crate::test_set::{Aoj, Jury, Oj}; use scanner::Scanner; pub struct AojAldsOne14D {} impl Jury for AojAldsOne14D { type Input = (String, Vec<String>); type Output = Vec<bool>; const TL: Duration = Duration::from_millis(3000); const PROBLEM: Oj = Aoj("ALDS1_14_D"); fn parse_input(input: String) -> Self::Input { let mut input: Scanner = input.into(); let t = input.next().unwrap(); let q = input.next().unwrap(); let p = input.next_n(q).unwrap(); (t, p) } fn parse_output((_, p): &Self::Input, output: String) -> Self::Output { let mut output: Scanner = output.into(); p.into_iter() .map(|_| match output.get_line().trim() { "0" => false, "1" => true, _ => unreachable!(), }) .collect() } }
25.657143
75
0.53118
e4ac023bede5d0d2482cfa047d3f1ea35e1dadc1
124,345
// automatically generated by the FlatBuffers compiler, do not modify #![allow(dead_code)] #![allow(unused_imports)] extern crate flatbuffers; pub mod ckb { #![allow(dead_code)] #![allow(unused_imports)] use std::mem; use std::cmp::Ordering; extern crate flatbuffers; use self::flatbuffers::EndianScalar; pub mod protocol { #![allow(dead_code)] #![allow(unused_imports)] use std::mem; use std::cmp::Ordering; extern crate flatbuffers; use self::flatbuffers::EndianScalar; #[allow(non_camel_case_types)] #[repr(u8)] #[derive(Clone, Copy, PartialEq, Debug)] pub enum SyncPayload { NONE = 0, GetHeaders = 1, Headers = 2, GetBlocks = 3, Block = 4, SetFilter = 5, AddFilter = 6, ClearFilter = 7, FilteredBlock = 8, } const ENUM_MIN_SYNC_PAYLOAD: u8 = 0; const ENUM_MAX_SYNC_PAYLOAD: u8 = 8; impl<'a> flatbuffers::Follow<'a> for SyncPayload { type Inner = Self; #[inline] fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { flatbuffers::read_scalar_at::<Self>(buf, loc) } } impl flatbuffers::EndianScalar for SyncPayload { #[inline] fn to_little_endian(self) -> Self { let n = u8::to_le(self as u8); let p = &n as *const u8 as *const SyncPayload; unsafe { *p } } #[inline] fn from_little_endian(self) -> Self { let n = u8::from_le(self as u8); let p = &n as *const u8 as *const SyncPayload; unsafe { *p } } } impl flatbuffers::Push for SyncPayload { type Output = SyncPayload; #[inline] fn push(&self, dst: &mut [u8], _rest: &[u8]) { flatbuffers::emplace_scalar::<SyncPayload>(dst, *self); } } #[allow(non_camel_case_types)] const ENUM_VALUES_SYNC_PAYLOAD:[SyncPayload; 9] = [ SyncPayload::NONE, SyncPayload::GetHeaders, SyncPayload::Headers, SyncPayload::GetBlocks, SyncPayload::Block, SyncPayload::SetFilter, SyncPayload::AddFilter, SyncPayload::ClearFilter, SyncPayload::FilteredBlock ]; #[allow(non_camel_case_types)] const ENUM_NAMES_SYNC_PAYLOAD:[&'static str; 9] = [ "NONE", "GetHeaders", "Headers", "GetBlocks", "Block", "SetFilter", "AddFilter", "ClearFilter", "FilteredBlock" ]; pub fn 
enum_name_sync_payload(e: SyncPayload) -> &'static str { let index: usize = e as usize; ENUM_NAMES_SYNC_PAYLOAD[index] } pub struct SyncPayloadUnionTableOffset {} #[allow(non_camel_case_types)] #[repr(u8)] #[derive(Clone, Copy, PartialEq, Debug)] pub enum RelayPayload { NONE = 0, CompactBlock = 1, RelayTransaction = 2, RelayTransactionHash = 3, GetRelayTransaction = 4, GetBlockTransactions = 5, BlockTransactions = 6, GetBlockProposal = 7, BlockProposal = 8, } const ENUM_MIN_RELAY_PAYLOAD: u8 = 0; const ENUM_MAX_RELAY_PAYLOAD: u8 = 8; impl<'a> flatbuffers::Follow<'a> for RelayPayload { type Inner = Self; #[inline] fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { flatbuffers::read_scalar_at::<Self>(buf, loc) } } impl flatbuffers::EndianScalar for RelayPayload { #[inline] fn to_little_endian(self) -> Self { let n = u8::to_le(self as u8); let p = &n as *const u8 as *const RelayPayload; unsafe { *p } } #[inline] fn from_little_endian(self) -> Self { let n = u8::from_le(self as u8); let p = &n as *const u8 as *const RelayPayload; unsafe { *p } } } impl flatbuffers::Push for RelayPayload { type Output = RelayPayload; #[inline] fn push(&self, dst: &mut [u8], _rest: &[u8]) { flatbuffers::emplace_scalar::<RelayPayload>(dst, *self); } } #[allow(non_camel_case_types)] const ENUM_VALUES_RELAY_PAYLOAD:[RelayPayload; 9] = [ RelayPayload::NONE, RelayPayload::CompactBlock, RelayPayload::RelayTransaction, RelayPayload::RelayTransactionHash, RelayPayload::GetRelayTransaction, RelayPayload::GetBlockTransactions, RelayPayload::BlockTransactions, RelayPayload::GetBlockProposal, RelayPayload::BlockProposal ]; #[allow(non_camel_case_types)] const ENUM_NAMES_RELAY_PAYLOAD:[&'static str; 9] = [ "NONE", "CompactBlock", "RelayTransaction", "RelayTransactionHash", "GetRelayTransaction", "GetBlockTransactions", "BlockTransactions", "GetBlockProposal", "BlockProposal" ]; pub fn enum_name_relay_payload(e: RelayPayload) -> &'static str { let index: usize = e as usize; 
ENUM_NAMES_RELAY_PAYLOAD[index] } pub struct RelayPayloadUnionTableOffset {} // struct ProposalShortId, aligned to 1 #[repr(C, align(1))] #[derive(Clone, Copy, Debug, PartialEq)] pub struct ProposalShortId { u0_: u8, u1_: u8, u2_: u8, u3_: u8, u4_: u8, u5_: u8, u6_: u8, u7_: u8, u8__: u8, u9_: u8, } // pub struct ProposalShortId impl flatbuffers::SafeSliceAccess for ProposalShortId {} impl<'a> flatbuffers::Follow<'a> for ProposalShortId { type Inner = &'a ProposalShortId; #[inline] fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { <&'a ProposalShortId>::follow(buf, loc) } } impl<'a> flatbuffers::Follow<'a> for &'a ProposalShortId { type Inner = &'a ProposalShortId; #[inline] fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { flatbuffers::follow_cast_ref::<ProposalShortId>(buf, loc) } } impl<'b> flatbuffers::Push for ProposalShortId { type Output = ProposalShortId; #[inline] fn push(&self, dst: &mut [u8], _rest: &[u8]) { let src = unsafe { ::std::slice::from_raw_parts(self as *const ProposalShortId as *const u8, Self::size()) }; dst.copy_from_slice(src); } } impl<'b> flatbuffers::Push for &'b ProposalShortId { type Output = ProposalShortId; #[inline] fn push(&self, dst: &mut [u8], _rest: &[u8]) { let src = unsafe { ::std::slice::from_raw_parts(*self as *const ProposalShortId as *const u8, Self::size()) }; dst.copy_from_slice(src); } } impl ProposalShortId { pub fn new<'a>(_u0: u8, _u1: u8, _u2: u8, _u3: u8, _u4: u8, _u5: u8, _u6: u8, _u7: u8, _u8_: u8, _u9: u8) -> Self { ProposalShortId { u0_: _u0.to_little_endian(), u1_: _u1.to_little_endian(), u2_: _u2.to_little_endian(), u3_: _u3.to_little_endian(), u4_: _u4.to_little_endian(), u5_: _u5.to_little_endian(), u6_: _u6.to_little_endian(), u7_: _u7.to_little_endian(), u8__: _u8_.to_little_endian(), u9_: _u9.to_little_endian(), } } pub fn u0<'a>(&'a self) -> u8 { self.u0_.from_little_endian() } pub fn u1<'a>(&'a self) -> u8 { self.u1_.from_little_endian() } pub fn u2<'a>(&'a self) -> u8 { 
self.u2_.from_little_endian() } pub fn u3<'a>(&'a self) -> u8 { self.u3_.from_little_endian() } pub fn u4<'a>(&'a self) -> u8 { self.u4_.from_little_endian() } pub fn u5<'a>(&'a self) -> u8 { self.u5_.from_little_endian() } pub fn u6<'a>(&'a self) -> u8 { self.u6_.from_little_endian() } pub fn u7<'a>(&'a self) -> u8 { self.u7_.from_little_endian() } pub fn u8_<'a>(&'a self) -> u8 { self.u8__.from_little_endian() } pub fn u9<'a>(&'a self) -> u8 { self.u9_.from_little_endian() } } // struct H256, aligned to 1 #[repr(C, align(1))] #[derive(Clone, Copy, Debug, PartialEq)] pub struct H256 { u0_: u8, u1_: u8, u2_: u8, u3_: u8, u4_: u8, u5_: u8, u6_: u8, u7_: u8, u8__: u8, u9_: u8, u10_: u8, u11_: u8, u12_: u8, u13_: u8, u14_: u8, u15_: u8, u16__: u8, u17_: u8, u18_: u8, u19_: u8, u20_: u8, u21_: u8, u22_: u8, u23_: u8, u24_: u8, u25_: u8, u26_: u8, u27_: u8, u28_: u8, u29_: u8, u30_: u8, u31_: u8, } // pub struct H256 impl flatbuffers::SafeSliceAccess for H256 {} impl<'a> flatbuffers::Follow<'a> for H256 { type Inner = &'a H256; #[inline] fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { <&'a H256>::follow(buf, loc) } } impl<'a> flatbuffers::Follow<'a> for &'a H256 { type Inner = &'a H256; #[inline] fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { flatbuffers::follow_cast_ref::<H256>(buf, loc) } } impl<'b> flatbuffers::Push for H256 { type Output = H256; #[inline] fn push(&self, dst: &mut [u8], _rest: &[u8]) { let src = unsafe { ::std::slice::from_raw_parts(self as *const H256 as *const u8, Self::size()) }; dst.copy_from_slice(src); } } impl<'b> flatbuffers::Push for &'b H256 { type Output = H256; #[inline] fn push(&self, dst: &mut [u8], _rest: &[u8]) { let src = unsafe { ::std::slice::from_raw_parts(*self as *const H256 as *const u8, Self::size()) }; dst.copy_from_slice(src); } } impl H256 { pub fn new<'a>(_u0: u8, _u1: u8, _u2: u8, _u3: u8, _u4: u8, _u5: u8, _u6: u8, _u7: u8, _u8_: u8, _u9: u8, _u10: u8, _u11: u8, _u12: u8, _u13: u8, _u14: u8, _u15: u8, _u16_: 
u8, _u17: u8, _u18: u8, _u19: u8, _u20: u8, _u21: u8, _u22: u8, _u23: u8, _u24: u8, _u25: u8, _u26: u8, _u27: u8, _u28: u8, _u29: u8, _u30: u8, _u31: u8) -> Self { H256 { u0_: _u0.to_little_endian(), u1_: _u1.to_little_endian(), u2_: _u2.to_little_endian(), u3_: _u3.to_little_endian(), u4_: _u4.to_little_endian(), u5_: _u5.to_little_endian(), u6_: _u6.to_little_endian(), u7_: _u7.to_little_endian(), u8__: _u8_.to_little_endian(), u9_: _u9.to_little_endian(), u10_: _u10.to_little_endian(), u11_: _u11.to_little_endian(), u12_: _u12.to_little_endian(), u13_: _u13.to_little_endian(), u14_: _u14.to_little_endian(), u15_: _u15.to_little_endian(), u16__: _u16_.to_little_endian(), u17_: _u17.to_little_endian(), u18_: _u18.to_little_endian(), u19_: _u19.to_little_endian(), u20_: _u20.to_little_endian(), u21_: _u21.to_little_endian(), u22_: _u22.to_little_endian(), u23_: _u23.to_little_endian(), u24_: _u24.to_little_endian(), u25_: _u25.to_little_endian(), u26_: _u26.to_little_endian(), u27_: _u27.to_little_endian(), u28_: _u28.to_little_endian(), u29_: _u29.to_little_endian(), u30_: _u30.to_little_endian(), u31_: _u31.to_little_endian(), } } pub fn u0<'a>(&'a self) -> u8 { self.u0_.from_little_endian() } pub fn u1<'a>(&'a self) -> u8 { self.u1_.from_little_endian() } pub fn u2<'a>(&'a self) -> u8 { self.u2_.from_little_endian() } pub fn u3<'a>(&'a self) -> u8 { self.u3_.from_little_endian() } pub fn u4<'a>(&'a self) -> u8 { self.u4_.from_little_endian() } pub fn u5<'a>(&'a self) -> u8 { self.u5_.from_little_endian() } pub fn u6<'a>(&'a self) -> u8 { self.u6_.from_little_endian() } pub fn u7<'a>(&'a self) -> u8 { self.u7_.from_little_endian() } pub fn u8_<'a>(&'a self) -> u8 { self.u8__.from_little_endian() } pub fn u9<'a>(&'a self) -> u8 { self.u9_.from_little_endian() } pub fn u10<'a>(&'a self) -> u8 { self.u10_.from_little_endian() } pub fn u11<'a>(&'a self) -> u8 { self.u11_.from_little_endian() } pub fn u12<'a>(&'a self) -> u8 { self.u12_.from_little_endian() } pub fn 
// NOTE(review): this chunk appears to be flatc-generated FlatBuffers accessor
// code — do not edit by hand; regenerate from the schema instead. Original
// lines are kept byte-identical; only comments are added.
// Below: the tail of little-endian byte accessors (u13..u31, each `u8`) of a
// fixed-size struct whose definition starts above this chunk — presumably a
// 32-byte hash struct (H256); confirm against the preceding lines.
u13<'a>(&'a self) -> u8 { self.u13_.from_little_endian() } pub fn u14<'a>(&'a self) -> u8 { self.u14_.from_little_endian() } pub fn u15<'a>(&'a self) -> u8 { self.u15_.from_little_endian() } pub fn u16_<'a>(&'a self) -> u8 { self.u16__.from_little_endian() } pub fn u17<'a>(&'a self) -> u8 { self.u17_.from_little_endian() } pub fn u18<'a>(&'a self) -> u8 { self.u18_.from_little_endian() } pub fn u19<'a>(&'a self) -> u8 { self.u19_.from_little_endian() } pub fn u20<'a>(&'a self) -> u8 { self.u20_.from_little_endian() } pub fn u21<'a>(&'a self) -> u8 { self.u21_.from_little_endian() } pub fn u22<'a>(&'a self) -> u8 { self.u22_.from_little_endian() } pub fn u23<'a>(&'a self) -> u8 { self.u23_.from_little_endian() } pub fn u24<'a>(&'a self) -> u8 { self.u24_.from_little_endian() } pub fn u25<'a>(&'a self) -> u8 { self.u25_.from_little_endian() } pub fn u26<'a>(&'a self) -> u8 { self.u26_.from_little_endian() } pub fn u27<'a>(&'a self) -> u8 { self.u27_.from_little_endian() } pub fn u28<'a>(&'a self) -> u8 { self.u28_.from_little_endian() } pub fn u29<'a>(&'a self) -> u8 { self.u29_.from_little_endian() } pub fn u30<'a>(&'a self) -> u8 { self.u30_.from_little_endian() } pub fn u31<'a>(&'a self) -> u8 { self.u31_.from_little_endian() } } pub enum SyncMessageOffset {} #[derive(Copy, Clone, Debug, PartialEq)] pub struct SyncMessage<'a> { pub _tab: flatbuffers::Table<'a>, } impl<'a> flatbuffers::Follow<'a> for SyncMessage<'a> { type Inner = SyncMessage<'a>; #[inline] fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { Self { _tab: flatbuffers::Table { buf: buf, loc: loc }, } } } impl<'a> SyncMessage<'a> { #[inline] pub fn init_from_table(table: flatbuffers::Table<'a>) -> Self { SyncMessage { _tab: table, } } #[allow(unused_mut)] pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>( _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>, args: &'args SyncMessageArgs) -> flatbuffers::WIPOffset<SyncMessage<'bldr>> { let mut builder = SyncMessageBuilder::new(_fbb); if 
// (continued) SyncMessage is a union-carrying table: `payload_type` is the
// SyncPayload discriminant at vtable slot 4 (default SyncPayload::NONE) and
// `payload` is the union table offset at slot 6. `create` writes the payload
// offset first, then the type tag.
let Some(x) = args.payload { builder.add_payload(x); } builder.add_payload_type(args.payload_type); builder.finish() } pub const VT_PAYLOAD_TYPE: flatbuffers::VOffsetT = 4; pub const VT_PAYLOAD: flatbuffers::VOffsetT = 6; #[inline] pub fn payload_type(&self) -> SyncPayload { self._tab.get::<SyncPayload>(SyncMessage::VT_PAYLOAD_TYPE, Some(SyncPayload::NONE)).unwrap() } #[inline] pub fn payload(&self) -> Option<flatbuffers::Table<'a>> { self._tab.get::<flatbuffers::ForwardsUOffset<flatbuffers::Table<'a>>>(SyncMessage::VT_PAYLOAD, None) } #[inline] #[allow(non_snake_case)] pub fn payload_as_get_headers(&'a self) -> Option<GetHeaders> { if self.payload_type() == SyncPayload::GetHeaders { self.payload().map(|u| GetHeaders::init_from_table(u)) } else { None } } #[inline] #[allow(non_snake_case)] pub fn payload_as_headers(&'a self) -> Option<Headers> { if self.payload_type() == SyncPayload::Headers { self.payload().map(|u| Headers::init_from_table(u)) } else { None } } #[inline] #[allow(non_snake_case)] pub fn payload_as_get_blocks(&'a self) -> Option<GetBlocks> { if self.payload_type() == SyncPayload::GetBlocks { self.payload().map(|u| GetBlocks::init_from_table(u)) } else { None } } #[inline] #[allow(non_snake_case)] pub fn payload_as_block(&'a self) -> Option<Block> { if self.payload_type() == SyncPayload::Block { self.payload().map(|u| Block::init_from_table(u)) } else { None } } #[inline] #[allow(non_snake_case)] pub fn payload_as_set_filter(&'a self) -> Option<SetFilter> { if self.payload_type() == SyncPayload::SetFilter { self.payload().map(|u| SetFilter::init_from_table(u)) } else { None } } #[inline] #[allow(non_snake_case)] pub fn payload_as_add_filter(&'a self) -> Option<AddFilter> { if self.payload_type() == SyncPayload::AddFilter { self.payload().map(|u| AddFilter::init_from_table(u)) } else { None } } #[inline] #[allow(non_snake_case)] pub fn payload_as_clear_filter(&'a self) -> Option<ClearFilter> { if self.payload_type() == SyncPayload::ClearFilter { 
// (continued) Each payload_as_* helper downcasts the union only after
// checking the discriminant, returning None on a mismatch. SyncMessageArgs
// defaults to SyncPayload::NONE with no payload; SyncMessageBuilder is the
// standard two-slot flatc builder. The `Bytes` table definition begins at the
// end of this line.
self.payload().map(|u| ClearFilter::init_from_table(u)) } else { None } } #[inline] #[allow(non_snake_case)] pub fn payload_as_filtered_block(&'a self) -> Option<FilteredBlock> { if self.payload_type() == SyncPayload::FilteredBlock { self.payload().map(|u| FilteredBlock::init_from_table(u)) } else { None } } } pub struct SyncMessageArgs { pub payload_type: SyncPayload, pub payload: Option<flatbuffers::WIPOffset<flatbuffers::UnionWIPOffset>>, } impl<'a> Default for SyncMessageArgs { #[inline] fn default() -> Self { SyncMessageArgs { payload_type: SyncPayload::NONE, payload: None, } } } pub struct SyncMessageBuilder<'a: 'b, 'b> { fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>, start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>, } impl<'a: 'b, 'b> SyncMessageBuilder<'a, 'b> { #[inline] pub fn add_payload_type(&mut self, payload_type: SyncPayload) { self.fbb_.push_slot::<SyncPayload>(SyncMessage::VT_PAYLOAD_TYPE, payload_type, SyncPayload::NONE); } #[inline] pub fn add_payload(&mut self, payload: flatbuffers::WIPOffset<flatbuffers::UnionWIPOffset>) { self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(SyncMessage::VT_PAYLOAD, payload); } #[inline] pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>) -> SyncMessageBuilder<'a, 'b> { let start = _fbb.start_table(); SyncMessageBuilder { fbb_: _fbb, start_: start, } } #[inline] pub fn finish(self) -> flatbuffers::WIPOffset<SyncMessage<'a>> { let o = self.fbb_.end_table(self.start_); flatbuffers::WIPOffset::new(o.value()) } } pub enum BytesOffset {} #[derive(Copy, Clone, Debug, PartialEq)] pub struct Bytes<'a> { pub _tab: flatbuffers::Table<'a>, } impl<'a> flatbuffers::Follow<'a> for Bytes<'a> { type Inner = Bytes<'a>; #[inline] fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { Self { _tab: flatbuffers::Table { buf: buf, loc: loc }, } } } impl<'a> Bytes<'a> { #[inline] pub fn init_from_table(table: flatbuffers::Table<'a>) -> Self { Bytes { _tab: table, } } #[allow(unused_mut)] pub fn 
// flatc-generated (continued from the previous line — do not edit by hand).
// `Bytes` table: a single optional `seq` ubyte vector at vtable slot 4, read
// back as &[u8] via safe_slice(). Its args/builder follow, then `GetHeaders`
// begins.
create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>( _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>, args: &'args BytesArgs<'args>) -> flatbuffers::WIPOffset<Bytes<'bldr>> { let mut builder = BytesBuilder::new(_fbb); if let Some(x) = args.seq { builder.add_seq(x); } builder.finish() } pub const VT_SEQ: flatbuffers::VOffsetT = 4; #[inline] pub fn seq(&self) -> Option<&'a [u8]> { self._tab.get::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'a, u8>>>(Bytes::VT_SEQ, None).map(|v| v.safe_slice()) } } pub struct BytesArgs<'a> { pub seq: Option<flatbuffers::WIPOffset<flatbuffers::Vector<'a , u8>>>, } impl<'a> Default for BytesArgs<'a> { #[inline] fn default() -> Self { BytesArgs { seq: None, } } } pub struct BytesBuilder<'a: 'b, 'b> { fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>, start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>, } impl<'a: 'b, 'b> BytesBuilder<'a, 'b> { #[inline] pub fn add_seq(&mut self, seq: flatbuffers::WIPOffset<flatbuffers::Vector<'b , u8>>) { self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(Bytes::VT_SEQ, seq); } #[inline] pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>) -> BytesBuilder<'a, 'b> { let start = _fbb.start_table(); BytesBuilder { fbb_: _fbb, start_: start, } } #[inline] pub fn finish(self) -> flatbuffers::WIPOffset<Bytes<'a>> { let o = self.fbb_.end_table(self.start_); flatbuffers::WIPOffset::new(o.value()) } } pub enum GetHeadersOffset {} #[derive(Copy, Clone, Debug, PartialEq)] pub struct GetHeaders<'a> { pub _tab: flatbuffers::Table<'a>, } impl<'a> flatbuffers::Follow<'a> for GetHeaders<'a> { type Inner = GetHeaders<'a>; #[inline] fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { Self { _tab: flatbuffers::Table { buf: buf, loc: loc }, } } } impl<'a> GetHeaders<'a> { #[inline] pub fn init_from_table(table: flatbuffers::Table<'a>) -> Self { GetHeaders { _tab: table, } } #[allow(unused_mut)] pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>( _fbb: &'mut_bldr mut 
// (continued) GetHeaders::create writes hash_stop and block_locator_hashes
// offsets, then the scalar version. Accessors: version (u32, default 0,
// slot 4), block_locator_hashes (vector of H256 structs, slot 6), hash_stop
// (inline H256 struct, slot 8). All non-scalar fields are optional.
flatbuffers::FlatBufferBuilder<'bldr>, args: &'args GetHeadersArgs<'args>) -> flatbuffers::WIPOffset<GetHeaders<'bldr>> { let mut builder = GetHeadersBuilder::new(_fbb); if let Some(x) = args.hash_stop { builder.add_hash_stop(x); } if let Some(x) = args.block_locator_hashes { builder.add_block_locator_hashes(x); } builder.add_version(args.version); builder.finish() } pub const VT_VERSION: flatbuffers::VOffsetT = 4; pub const VT_BLOCK_LOCATOR_HASHES: flatbuffers::VOffsetT = 6; pub const VT_HASH_STOP: flatbuffers::VOffsetT = 8; #[inline] pub fn version(&self) -> u32 { self._tab.get::<u32>(GetHeaders::VT_VERSION, Some(0)).unwrap() } #[inline] pub fn block_locator_hashes(&self) -> Option<&'a [H256]> { self._tab.get::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<H256>>>(GetHeaders::VT_BLOCK_LOCATOR_HASHES, None).map(|v| v.safe_slice() ) } #[inline] pub fn hash_stop(&self) -> Option<&'a H256> { self._tab.get::<H256>(GetHeaders::VT_HASH_STOP, None) } } pub struct GetHeadersArgs<'a> { pub version: u32, pub block_locator_hashes: Option<flatbuffers::WIPOffset<flatbuffers::Vector<'a , H256>>>, pub hash_stop: Option<&'a H256>, } impl<'a> Default for GetHeadersArgs<'a> { #[inline] fn default() -> Self { GetHeadersArgs { version: 0, block_locator_hashes: None, hash_stop: None, } } } pub struct GetHeadersBuilder<'a: 'b, 'b> { fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>, start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>, } impl<'a: 'b, 'b> GetHeadersBuilder<'a, 'b> { #[inline] pub fn add_version(&mut self, version: u32) { self.fbb_.push_slot::<u32>(GetHeaders::VT_VERSION, version, 0); } #[inline] pub fn add_block_locator_hashes(&mut self, block_locator_hashes: flatbuffers::WIPOffset<flatbuffers::Vector<'b , H256>>) { self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(GetHeaders::VT_BLOCK_LOCATOR_HASHES, block_locator_hashes); } #[inline] pub fn add_hash_stop(&mut self, hash_stop: &'b H256) { 
// (continued) Remaining GetHeadersBuilder writers and finish(); then the
// `GetBlocks` table: a single `block_hashes` vector of H256 structs at
// vtable slot 4.
self.fbb_.push_slot_always::<&H256>(GetHeaders::VT_HASH_STOP, hash_stop); } #[inline] pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>) -> GetHeadersBuilder<'a, 'b> { let start = _fbb.start_table(); GetHeadersBuilder { fbb_: _fbb, start_: start, } } #[inline] pub fn finish(self) -> flatbuffers::WIPOffset<GetHeaders<'a>> { let o = self.fbb_.end_table(self.start_); flatbuffers::WIPOffset::new(o.value()) } } pub enum GetBlocksOffset {} #[derive(Copy, Clone, Debug, PartialEq)] pub struct GetBlocks<'a> { pub _tab: flatbuffers::Table<'a>, } impl<'a> flatbuffers::Follow<'a> for GetBlocks<'a> { type Inner = GetBlocks<'a>; #[inline] fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { Self { _tab: flatbuffers::Table { buf: buf, loc: loc }, } } } impl<'a> GetBlocks<'a> { #[inline] pub fn init_from_table(table: flatbuffers::Table<'a>) -> Self { GetBlocks { _tab: table, } } #[allow(unused_mut)] pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>( _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>, args: &'args GetBlocksArgs<'args>) -> flatbuffers::WIPOffset<GetBlocks<'bldr>> { let mut builder = GetBlocksBuilder::new(_fbb); if let Some(x) = args.block_hashes { builder.add_block_hashes(x); } builder.finish() } pub const VT_BLOCK_HASHES: flatbuffers::VOffsetT = 4; #[inline] pub fn block_hashes(&self) -> Option<&'a [H256]> { self._tab.get::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<H256>>>(GetBlocks::VT_BLOCK_HASHES, None).map(|v| v.safe_slice() ) } } pub struct GetBlocksArgs<'a> { pub block_hashes: Option<flatbuffers::WIPOffset<flatbuffers::Vector<'a , H256>>>, } impl<'a> Default for GetBlocksArgs<'a> { #[inline] fn default() -> Self { GetBlocksArgs { block_hashes: None, } } } pub struct GetBlocksBuilder<'a: 'b, 'b> { fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>, start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>, } impl<'a: 'b, 'b> GetBlocksBuilder<'a, 'b> { #[inline] pub fn add_block_hashes(&mut self, block_hashes: 
// flatc-generated (continued from the previous line — do not edit by hand).
// GetBlocksBuilder's single slot writer and finish(); then the `Headers`
// table: one `headers` field at vtable slot 4 holding a vector of offsets to
// Header tables.
flatbuffers::WIPOffset<flatbuffers::Vector<'b , H256>>) { self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(GetBlocks::VT_BLOCK_HASHES, block_hashes); } #[inline] pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>) -> GetBlocksBuilder<'a, 'b> { let start = _fbb.start_table(); GetBlocksBuilder { fbb_: _fbb, start_: start, } } #[inline] pub fn finish(self) -> flatbuffers::WIPOffset<GetBlocks<'a>> { let o = self.fbb_.end_table(self.start_); flatbuffers::WIPOffset::new(o.value()) } } pub enum HeadersOffset {} #[derive(Copy, Clone, Debug, PartialEq)] pub struct Headers<'a> { pub _tab: flatbuffers::Table<'a>, } impl<'a> flatbuffers::Follow<'a> for Headers<'a> { type Inner = Headers<'a>; #[inline] fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { Self { _tab: flatbuffers::Table { buf: buf, loc: loc }, } } } impl<'a> Headers<'a> { #[inline] pub fn init_from_table(table: flatbuffers::Table<'a>) -> Self { Headers { _tab: table, } } #[allow(unused_mut)] pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>( _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>, args: &'args HeadersArgs<'args>) -> flatbuffers::WIPOffset<Headers<'bldr>> { let mut builder = HeadersBuilder::new(_fbb); if let Some(x) = args.headers { builder.add_headers(x); } builder.finish() } pub const VT_HEADERS: flatbuffers::VOffsetT = 4; #[inline] pub fn headers(&self) -> Option<flatbuffers::Vector<flatbuffers::ForwardsUOffset<Header<'a>>>> { self._tab.get::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<flatbuffers::ForwardsUOffset<Header<'a>>>>>(Headers::VT_HEADERS, None) } } pub struct HeadersArgs<'a> { pub headers: Option<flatbuffers::WIPOffset<flatbuffers::Vector<'a , flatbuffers::ForwardsUOffset<Header<'a >>>>>, } impl<'a> Default for HeadersArgs<'a> { #[inline] fn default() -> Self { HeadersArgs { headers: None, } } } pub struct HeadersBuilder<'a: 'b, 'b> { fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>, start_: 
// (continued) HeadersBuilder, then the `Header` table begins. Header::create
// pushes the 64-bit scalars first (epoch, nonce, number, timestamp) and the
// offset/struct fields after — the usual flatc size-ordering of slot writes.
flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>, } impl<'a: 'b, 'b> HeadersBuilder<'a, 'b> { #[inline] pub fn add_headers(&mut self, headers: flatbuffers::WIPOffset<flatbuffers::Vector<'b , flatbuffers::ForwardsUOffset<Header<'b >>>>) { self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(Headers::VT_HEADERS, headers); } #[inline] pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>) -> HeadersBuilder<'a, 'b> { let start = _fbb.start_table(); HeadersBuilder { fbb_: _fbb, start_: start, } } #[inline] pub fn finish(self) -> flatbuffers::WIPOffset<Headers<'a>> { let o = self.fbb_.end_table(self.start_); flatbuffers::WIPOffset::new(o.value()) } } pub enum HeaderOffset {} #[derive(Copy, Clone, Debug, PartialEq)] pub struct Header<'a> { pub _tab: flatbuffers::Table<'a>, } impl<'a> flatbuffers::Follow<'a> for Header<'a> { type Inner = Header<'a>; #[inline] fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { Self { _tab: flatbuffers::Table { buf: buf, loc: loc }, } } } impl<'a> Header<'a> { #[inline] pub fn init_from_table(table: flatbuffers::Table<'a>) -> Self { Header { _tab: table, } } #[allow(unused_mut)] pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>( _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>, args: &'args HeaderArgs<'args>) -> flatbuffers::WIPOffset<Header<'bldr>> { let mut builder = HeaderBuilder::new(_fbb); builder.add_epoch(args.epoch); builder.add_nonce(args.nonce); builder.add_number(args.number); builder.add_timestamp(args.timestamp); if let Some(x) = args.dao { builder.add_dao(x); } builder.add_uncles_count(args.uncles_count); if let Some(x) = args.uncles_hash { builder.add_uncles_hash(x); } if let Some(x) = args.proof { builder.add_proof(x); } if let Some(x) = args.difficulty { builder.add_difficulty(x); } if let Some(x) = args.proposals_hash { builder.add_proposals_hash(x); } if let Some(x) = args.witnesses_root { builder.add_witnesses_root(x); } if let Some(x) = args.transactions_root { 
// flatc-generated (continued from the previous line — do not edit by hand).
// End of Header::create, then Header's vtable slot constants (4..=30 in
// schema order) and accessors. Scalars (version u32; timestamp, number,
// nonce, epoch u64; uncles_count u32) default to 0. Struct fields
// (parent_hash, transactions_root, witnesses_root, proposals_hash,
// uncles_hash: H256) and table fields (difficulty, proof, dao: Bytes) are
// optional.
builder.add_transactions_root(x); } if let Some(x) = args.parent_hash { builder.add_parent_hash(x); } builder.add_version(args.version); builder.finish() } pub const VT_VERSION: flatbuffers::VOffsetT = 4; pub const VT_PARENT_HASH: flatbuffers::VOffsetT = 6; pub const VT_TIMESTAMP: flatbuffers::VOffsetT = 8; pub const VT_NUMBER: flatbuffers::VOffsetT = 10; pub const VT_TRANSACTIONS_ROOT: flatbuffers::VOffsetT = 12; pub const VT_WITNESSES_ROOT: flatbuffers::VOffsetT = 14; pub const VT_PROPOSALS_HASH: flatbuffers::VOffsetT = 16; pub const VT_DIFFICULTY: flatbuffers::VOffsetT = 18; pub const VT_NONCE: flatbuffers::VOffsetT = 20; pub const VT_PROOF: flatbuffers::VOffsetT = 22; pub const VT_UNCLES_HASH: flatbuffers::VOffsetT = 24; pub const VT_UNCLES_COUNT: flatbuffers::VOffsetT = 26; pub const VT_EPOCH: flatbuffers::VOffsetT = 28; pub const VT_DAO: flatbuffers::VOffsetT = 30; #[inline] pub fn version(&self) -> u32 { self._tab.get::<u32>(Header::VT_VERSION, Some(0)).unwrap() } #[inline] pub fn parent_hash(&self) -> Option<&'a H256> { self._tab.get::<H256>(Header::VT_PARENT_HASH, None) } #[inline] pub fn timestamp(&self) -> u64 { self._tab.get::<u64>(Header::VT_TIMESTAMP, Some(0)).unwrap() } #[inline] pub fn number(&self) -> u64 { self._tab.get::<u64>(Header::VT_NUMBER, Some(0)).unwrap() } #[inline] pub fn transactions_root(&self) -> Option<&'a H256> { self._tab.get::<H256>(Header::VT_TRANSACTIONS_ROOT, None) } #[inline] pub fn witnesses_root(&self) -> Option<&'a H256> { self._tab.get::<H256>(Header::VT_WITNESSES_ROOT, None) } #[inline] pub fn proposals_hash(&self) -> Option<&'a H256> { self._tab.get::<H256>(Header::VT_PROPOSALS_HASH, None) } #[inline] pub fn difficulty(&self) -> Option<Bytes<'a>> { self._tab.get::<flatbuffers::ForwardsUOffset<Bytes<'a>>>(Header::VT_DIFFICULTY, None) } #[inline] pub fn nonce(&self) -> u64 { self._tab.get::<u64>(Header::VT_NONCE, Some(0)).unwrap() } #[inline] pub fn proof(&self) -> Option<Bytes<'a>> { 
// (continued) Remaining Header accessors, HeaderArgs with an all-default
// Default impl, and the start of HeaderBuilder's per-field slot writers.
self._tab.get::<flatbuffers::ForwardsUOffset<Bytes<'a>>>(Header::VT_PROOF, None) } #[inline] pub fn uncles_hash(&self) -> Option<&'a H256> { self._tab.get::<H256>(Header::VT_UNCLES_HASH, None) } #[inline] pub fn uncles_count(&self) -> u32 { self._tab.get::<u32>(Header::VT_UNCLES_COUNT, Some(0)).unwrap() } #[inline] pub fn epoch(&self) -> u64 { self._tab.get::<u64>(Header::VT_EPOCH, Some(0)).unwrap() } #[inline] pub fn dao(&self) -> Option<Bytes<'a>> { self._tab.get::<flatbuffers::ForwardsUOffset<Bytes<'a>>>(Header::VT_DAO, None) } } pub struct HeaderArgs<'a> { pub version: u32, pub parent_hash: Option<&'a H256>, pub timestamp: u64, pub number: u64, pub transactions_root: Option<&'a H256>, pub witnesses_root: Option<&'a H256>, pub proposals_hash: Option<&'a H256>, pub difficulty: Option<flatbuffers::WIPOffset<Bytes<'a >>>, pub nonce: u64, pub proof: Option<flatbuffers::WIPOffset<Bytes<'a >>>, pub uncles_hash: Option<&'a H256>, pub uncles_count: u32, pub epoch: u64, pub dao: Option<flatbuffers::WIPOffset<Bytes<'a >>>, } impl<'a> Default for HeaderArgs<'a> { #[inline] fn default() -> Self { HeaderArgs { version: 0, parent_hash: None, timestamp: 0, number: 0, transactions_root: None, witnesses_root: None, proposals_hash: None, difficulty: None, nonce: 0, proof: None, uncles_hash: None, uncles_count: 0, epoch: 0, dao: None, } } } pub struct HeaderBuilder<'a: 'b, 'b> { fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>, start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>, } impl<'a: 'b, 'b> HeaderBuilder<'a, 'b> { #[inline] pub fn add_version(&mut self, version: u32) { self.fbb_.push_slot::<u32>(Header::VT_VERSION, version, 0); } #[inline] pub fn add_parent_hash(&mut self, parent_hash: &'b H256) { self.fbb_.push_slot_always::<&H256>(Header::VT_PARENT_HASH, parent_hash); } #[inline] pub fn add_timestamp(&mut self, timestamp: u64) { self.fbb_.push_slot::<u64>(Header::VT_TIMESTAMP, timestamp, 0); } #[inline] pub fn add_number(&mut self, number: u64) { 
// (continued) Remaining HeaderBuilder writers and finish(); then the `Block`
// table begins at the end of this line.
self.fbb_.push_slot::<u64>(Header::VT_NUMBER, number, 0); } #[inline] pub fn add_transactions_root(&mut self, transactions_root: &'b H256) { self.fbb_.push_slot_always::<&H256>(Header::VT_TRANSACTIONS_ROOT, transactions_root); } #[inline] pub fn add_witnesses_root(&mut self, witnesses_root: &'b H256) { self.fbb_.push_slot_always::<&H256>(Header::VT_WITNESSES_ROOT, witnesses_root); } #[inline] pub fn add_proposals_hash(&mut self, proposals_hash: &'b H256) { self.fbb_.push_slot_always::<&H256>(Header::VT_PROPOSALS_HASH, proposals_hash); } #[inline] pub fn add_difficulty(&mut self, difficulty: flatbuffers::WIPOffset<Bytes<'b >>) { self.fbb_.push_slot_always::<flatbuffers::WIPOffset<Bytes>>(Header::VT_DIFFICULTY, difficulty); } #[inline] pub fn add_nonce(&mut self, nonce: u64) { self.fbb_.push_slot::<u64>(Header::VT_NONCE, nonce, 0); } #[inline] pub fn add_proof(&mut self, proof: flatbuffers::WIPOffset<Bytes<'b >>) { self.fbb_.push_slot_always::<flatbuffers::WIPOffset<Bytes>>(Header::VT_PROOF, proof); } #[inline] pub fn add_uncles_hash(&mut self, uncles_hash: &'b H256) { self.fbb_.push_slot_always::<&H256>(Header::VT_UNCLES_HASH, uncles_hash); } #[inline] pub fn add_uncles_count(&mut self, uncles_count: u32) { self.fbb_.push_slot::<u32>(Header::VT_UNCLES_COUNT, uncles_count, 0); } #[inline] pub fn add_epoch(&mut self, epoch: u64) { self.fbb_.push_slot::<u64>(Header::VT_EPOCH, epoch, 0); } #[inline] pub fn add_dao(&mut self, dao: flatbuffers::WIPOffset<Bytes<'b >>) { self.fbb_.push_slot_always::<flatbuffers::WIPOffset<Bytes>>(Header::VT_DAO, dao); } #[inline] pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>) -> HeaderBuilder<'a, 'b> { let start = _fbb.start_table(); HeaderBuilder { fbb_: _fbb, start_: start, } } #[inline] pub fn finish(self) -> flatbuffers::WIPOffset<Header<'a>> { let o = self.fbb_.end_table(self.start_); flatbuffers::WIPOffset::new(o.value()) } } pub enum BlockOffset {} #[derive(Copy, Clone, Debug, PartialEq)] pub struct Block<'a> { pub 
// flatc-generated (continued from the previous line — do not edit by hand).
// `Block` table: header (Header table, slot 4), uncles (vector of UncleBlock
// tables, slot 6), transactions (vector of Transaction tables, slot 8),
// proposals (vector of ProposalShortId structs, slot 10). All fields
// optional.
_tab: flatbuffers::Table<'a>, } impl<'a> flatbuffers::Follow<'a> for Block<'a> { type Inner = Block<'a>; #[inline] fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { Self { _tab: flatbuffers::Table { buf: buf, loc: loc }, } } } impl<'a> Block<'a> { #[inline] pub fn init_from_table(table: flatbuffers::Table<'a>) -> Self { Block { _tab: table, } } #[allow(unused_mut)] pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>( _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>, args: &'args BlockArgs<'args>) -> flatbuffers::WIPOffset<Block<'bldr>> { let mut builder = BlockBuilder::new(_fbb); if let Some(x) = args.proposals { builder.add_proposals(x); } if let Some(x) = args.transactions { builder.add_transactions(x); } if let Some(x) = args.uncles { builder.add_uncles(x); } if let Some(x) = args.header { builder.add_header(x); } builder.finish() } pub const VT_HEADER: flatbuffers::VOffsetT = 4; pub const VT_UNCLES: flatbuffers::VOffsetT = 6; pub const VT_TRANSACTIONS: flatbuffers::VOffsetT = 8; pub const VT_PROPOSALS: flatbuffers::VOffsetT = 10; #[inline] pub fn header(&self) -> Option<Header<'a>> { self._tab.get::<flatbuffers::ForwardsUOffset<Header<'a>>>(Block::VT_HEADER, None) } #[inline] pub fn uncles(&self) -> Option<flatbuffers::Vector<flatbuffers::ForwardsUOffset<UncleBlock<'a>>>> { self._tab.get::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<flatbuffers::ForwardsUOffset<UncleBlock<'a>>>>>(Block::VT_UNCLES, None) } #[inline] pub fn transactions(&self) -> Option<flatbuffers::Vector<flatbuffers::ForwardsUOffset<Transaction<'a>>>> { self._tab.get::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<flatbuffers::ForwardsUOffset<Transaction<'a>>>>>(Block::VT_TRANSACTIONS, None) } #[inline] pub fn proposals(&self) -> Option<&'a [ProposalShortId]> { self._tab.get::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<ProposalShortId>>>(Block::VT_PROPOSALS, None).map(|v| v.safe_slice() ) } } pub struct BlockArgs<'a> { pub header: 
// (continued) BlockArgs/BlockBuilder; the `UncleBlock` derive list starts at
// the end of this line (the attribute is split across physical lines).
Option<flatbuffers::WIPOffset<Header<'a >>>, pub uncles: Option<flatbuffers::WIPOffset<flatbuffers::Vector<'a , flatbuffers::ForwardsUOffset<UncleBlock<'a >>>>>, pub transactions: Option<flatbuffers::WIPOffset<flatbuffers::Vector<'a , flatbuffers::ForwardsUOffset<Transaction<'a >>>>>, pub proposals: Option<flatbuffers::WIPOffset<flatbuffers::Vector<'a , ProposalShortId>>>, } impl<'a> Default for BlockArgs<'a> { #[inline] fn default() -> Self { BlockArgs { header: None, uncles: None, transactions: None, proposals: None, } } } pub struct BlockBuilder<'a: 'b, 'b> { fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>, start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>, } impl<'a: 'b, 'b> BlockBuilder<'a, 'b> { #[inline] pub fn add_header(&mut self, header: flatbuffers::WIPOffset<Header<'b >>) { self.fbb_.push_slot_always::<flatbuffers::WIPOffset<Header>>(Block::VT_HEADER, header); } #[inline] pub fn add_uncles(&mut self, uncles: flatbuffers::WIPOffset<flatbuffers::Vector<'b , flatbuffers::ForwardsUOffset<UncleBlock<'b >>>>) { self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(Block::VT_UNCLES, uncles); } #[inline] pub fn add_transactions(&mut self, transactions: flatbuffers::WIPOffset<flatbuffers::Vector<'b , flatbuffers::ForwardsUOffset<Transaction<'b >>>>) { self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(Block::VT_TRANSACTIONS, transactions); } #[inline] pub fn add_proposals(&mut self, proposals: flatbuffers::WIPOffset<flatbuffers::Vector<'b , ProposalShortId>>) { self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(Block::VT_PROPOSALS, proposals); } #[inline] pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>) -> BlockBuilder<'a, 'b> { let start = _fbb.start_table(); BlockBuilder { fbb_: _fbb, start_: start, } } #[inline] pub fn finish(self) -> flatbuffers::WIPOffset<Block<'a>> { let o = self.fbb_.end_table(self.start_); flatbuffers::WIPOffset::new(o.value()) } } pub enum UncleBlockOffset {} #[derive(Copy, Clone, Debug, 
// (continued) `UncleBlock` table: header (Header table, slot 4) and
// proposals (vector of ProposalShortId structs, slot 6).
PartialEq)] pub struct UncleBlock<'a> { pub _tab: flatbuffers::Table<'a>, } impl<'a> flatbuffers::Follow<'a> for UncleBlock<'a> { type Inner = UncleBlock<'a>; #[inline] fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { Self { _tab: flatbuffers::Table { buf: buf, loc: loc }, } } } impl<'a> UncleBlock<'a> { #[inline] pub fn init_from_table(table: flatbuffers::Table<'a>) -> Self { UncleBlock { _tab: table, } } #[allow(unused_mut)] pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>( _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>, args: &'args UncleBlockArgs<'args>) -> flatbuffers::WIPOffset<UncleBlock<'bldr>> { let mut builder = UncleBlockBuilder::new(_fbb); if let Some(x) = args.proposals { builder.add_proposals(x); } if let Some(x) = args.header { builder.add_header(x); } builder.finish() } pub const VT_HEADER: flatbuffers::VOffsetT = 4; pub const VT_PROPOSALS: flatbuffers::VOffsetT = 6; #[inline] pub fn header(&self) -> Option<Header<'a>> { self._tab.get::<flatbuffers::ForwardsUOffset<Header<'a>>>(UncleBlock::VT_HEADER, None) } #[inline] pub fn proposals(&self) -> Option<&'a [ProposalShortId]> { self._tab.get::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<ProposalShortId>>>(UncleBlock::VT_PROPOSALS, None).map(|v| v.safe_slice() ) } } pub struct UncleBlockArgs<'a> { pub header: Option<flatbuffers::WIPOffset<Header<'a >>>, pub proposals: Option<flatbuffers::WIPOffset<flatbuffers::Vector<'a , ProposalShortId>>>, } impl<'a> Default for UncleBlockArgs<'a> { #[inline] fn default() -> Self { UncleBlockArgs { header: None, proposals: None, } } } pub struct UncleBlockBuilder<'a: 'b, 'b> { fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>, start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>, } impl<'a: 'b, 'b> UncleBlockBuilder<'a, 'b> { #[inline] pub fn add_header(&mut self, header: flatbuffers::WIPOffset<Header<'b >>) { self.fbb_.push_slot_always::<flatbuffers::WIPOffset<Header>>(UncleBlock::VT_HEADER, header); } #[inline] pub fn 
// flatc-generated (continued from the previous line — do not edit by hand).
// End of UncleBlockBuilder, then the `Transaction` table: version (u32,
// default 0, slot 4), deps (OutPoint table vector, slot 6), inputs
// (CellInput table vector, slot 8), outputs (CellOutput table vector,
// slot 10), witnesses (Witness table vector, slot 12).
add_proposals(&mut self, proposals: flatbuffers::WIPOffset<flatbuffers::Vector<'b , ProposalShortId>>) { self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(UncleBlock::VT_PROPOSALS, proposals); } #[inline] pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>) -> UncleBlockBuilder<'a, 'b> { let start = _fbb.start_table(); UncleBlockBuilder { fbb_: _fbb, start_: start, } } #[inline] pub fn finish(self) -> flatbuffers::WIPOffset<UncleBlock<'a>> { let o = self.fbb_.end_table(self.start_); flatbuffers::WIPOffset::new(o.value()) } } pub enum TransactionOffset {} #[derive(Copy, Clone, Debug, PartialEq)] pub struct Transaction<'a> { pub _tab: flatbuffers::Table<'a>, } impl<'a> flatbuffers::Follow<'a> for Transaction<'a> { type Inner = Transaction<'a>; #[inline] fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { Self { _tab: flatbuffers::Table { buf: buf, loc: loc }, } } } impl<'a> Transaction<'a> { #[inline] pub fn init_from_table(table: flatbuffers::Table<'a>) -> Self { Transaction { _tab: table, } } #[allow(unused_mut)] pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>( _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>, args: &'args TransactionArgs<'args>) -> flatbuffers::WIPOffset<Transaction<'bldr>> { let mut builder = TransactionBuilder::new(_fbb); if let Some(x) = args.witnesses { builder.add_witnesses(x); } if let Some(x) = args.outputs { builder.add_outputs(x); } if let Some(x) = args.inputs { builder.add_inputs(x); } if let Some(x) = args.deps { builder.add_deps(x); } builder.add_version(args.version); builder.finish() } pub const VT_VERSION: flatbuffers::VOffsetT = 4; pub const VT_DEPS: flatbuffers::VOffsetT = 6; pub const VT_INPUTS: flatbuffers::VOffsetT = 8; pub const VT_OUTPUTS: flatbuffers::VOffsetT = 10; pub const VT_WITNESSES: flatbuffers::VOffsetT = 12; #[inline] pub fn version(&self) -> u32 { self._tab.get::<u32>(Transaction::VT_VERSION, Some(0)).unwrap() } #[inline] pub fn deps(&self) -> 
// (continued) Transaction's vector accessors and TransactionArgs defaults.
Option<flatbuffers::Vector<flatbuffers::ForwardsUOffset<OutPoint<'a>>>> { self._tab.get::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<flatbuffers::ForwardsUOffset<OutPoint<'a>>>>>(Transaction::VT_DEPS, None) } #[inline] pub fn inputs(&self) -> Option<flatbuffers::Vector<flatbuffers::ForwardsUOffset<CellInput<'a>>>> { self._tab.get::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<flatbuffers::ForwardsUOffset<CellInput<'a>>>>>(Transaction::VT_INPUTS, None) } #[inline] pub fn outputs(&self) -> Option<flatbuffers::Vector<flatbuffers::ForwardsUOffset<CellOutput<'a>>>> { self._tab.get::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<flatbuffers::ForwardsUOffset<CellOutput<'a>>>>>(Transaction::VT_OUTPUTS, None) } #[inline] pub fn witnesses(&self) -> Option<flatbuffers::Vector<flatbuffers::ForwardsUOffset<Witness<'a>>>> { self._tab.get::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<flatbuffers::ForwardsUOffset<Witness<'a>>>>>(Transaction::VT_WITNESSES, None) } } pub struct TransactionArgs<'a> { pub version: u32, pub deps: Option<flatbuffers::WIPOffset<flatbuffers::Vector<'a , flatbuffers::ForwardsUOffset<OutPoint<'a >>>>>, pub inputs: Option<flatbuffers::WIPOffset<flatbuffers::Vector<'a , flatbuffers::ForwardsUOffset<CellInput<'a >>>>>, pub outputs: Option<flatbuffers::WIPOffset<flatbuffers::Vector<'a , flatbuffers::ForwardsUOffset<CellOutput<'a >>>>>, pub witnesses: Option<flatbuffers::WIPOffset<flatbuffers::Vector<'a , flatbuffers::ForwardsUOffset<Witness<'a >>>>>, } impl<'a> Default for TransactionArgs<'a> { #[inline] fn default() -> Self { TransactionArgs { version: 0, deps: None, inputs: None, outputs: None, witnesses: None, } } } pub struct TransactionBuilder<'a: 'b, 'b> { fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>, start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>, } impl<'a: 'b, 'b> TransactionBuilder<'a, 'b> { #[inline] pub fn add_version(&mut self, version: u32) { self.fbb_.push_slot::<u32>(Transaction::VT_VERSION, version, 
// (continued) TransactionBuilder's slot writers and finish(); then the
// `Witness` table begins at the end of this line.
0); } #[inline] pub fn add_deps(&mut self, deps: flatbuffers::WIPOffset<flatbuffers::Vector<'b , flatbuffers::ForwardsUOffset<OutPoint<'b >>>>) { self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(Transaction::VT_DEPS, deps); } #[inline] pub fn add_inputs(&mut self, inputs: flatbuffers::WIPOffset<flatbuffers::Vector<'b , flatbuffers::ForwardsUOffset<CellInput<'b >>>>) { self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(Transaction::VT_INPUTS, inputs); } #[inline] pub fn add_outputs(&mut self, outputs: flatbuffers::WIPOffset<flatbuffers::Vector<'b , flatbuffers::ForwardsUOffset<CellOutput<'b >>>>) { self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(Transaction::VT_OUTPUTS, outputs); } #[inline] pub fn add_witnesses(&mut self, witnesses: flatbuffers::WIPOffset<flatbuffers::Vector<'b , flatbuffers::ForwardsUOffset<Witness<'b >>>>) { self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(Transaction::VT_WITNESSES, witnesses); } #[inline] pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>) -> TransactionBuilder<'a, 'b> { let start = _fbb.start_table(); TransactionBuilder { fbb_: _fbb, start_: start, } } #[inline] pub fn finish(self) -> flatbuffers::WIPOffset<Transaction<'a>> { let o = self.fbb_.end_table(self.start_); flatbuffers::WIPOffset::new(o.value()) } } pub enum WitnessOffset {} #[derive(Copy, Clone, Debug, PartialEq)] pub struct Witness<'a> { pub _tab: flatbuffers::Table<'a>, } impl<'a> flatbuffers::Follow<'a> for Witness<'a> { type Inner = Witness<'a>; #[inline] fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { Self { _tab: flatbuffers::Table { buf: buf, loc: loc }, } } } impl<'a> Witness<'a> { #[inline] pub fn init_from_table(table: flatbuffers::Table<'a>) -> Self { Witness { _tab: table, } } #[allow(unused_mut)] pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>( _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>, args: &'args WitnessArgs<'args>) -> flatbuffers::WIPOffset<Witness<'bldr>> { let mut builder = 
// End of Witness::create, the Witness field accessor, its Args/Builder
// types, and the complete OutPoint table. Generated code — tokens unchanged,
// comments and line breaks added only.
WitnessBuilder::new(_fbb); if let Some(x) = args.data { builder.add_data(x); } builder.finish() }
pub const VT_DATA: flatbuffers::VOffsetT = 4;
#[inline] pub fn data(&self) -> Option<flatbuffers::Vector<flatbuffers::ForwardsUOffset<Bytes<'a>>>> { self._tab.get::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<flatbuffers::ForwardsUOffset<Bytes<'a>>>>>(Witness::VT_DATA, None) } }
pub struct WitnessArgs<'a> { pub data: Option<flatbuffers::WIPOffset<flatbuffers::Vector<'a , flatbuffers::ForwardsUOffset<Bytes<'a >>>>>, }
impl<'a> Default for WitnessArgs<'a> { #[inline] fn default() -> Self { WitnessArgs { data: None, } } }
pub struct WitnessBuilder<'a: 'b, 'b> { fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>, start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>, }
impl<'a: 'b, 'b> WitnessBuilder<'a, 'b> { #[inline] pub fn add_data(&mut self, data: flatbuffers::WIPOffset<flatbuffers::Vector<'b , flatbuffers::ForwardsUOffset<Bytes<'b >>>>) { self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(Witness::VT_DATA, data); }
#[inline] pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>) -> WitnessBuilder<'a, 'b> { let start = _fbb.start_table(); WitnessBuilder { fbb_: _fbb, start_: start, } }
#[inline] pub fn finish(self) -> flatbuffers::WIPOffset<Witness<'a>> { let o = self.fbb_.end_table(self.start_); flatbuffers::WIPOffset::new(o.value()) } }
// OutPoint table: optional block_hash / tx_hash (inline H256 structs,
// returned by reference) plus a u32 `index` whose stored default is 0.
pub enum OutPointOffset {}
#[derive(Copy, Clone, Debug, PartialEq)] pub struct OutPoint<'a> { pub _tab: flatbuffers::Table<'a>, }
impl<'a> flatbuffers::Follow<'a> for OutPoint<'a> { type Inner = OutPoint<'a>; #[inline] fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { Self { _tab: flatbuffers::Table { buf: buf, loc: loc }, } } }
impl<'a> OutPoint<'a> { #[inline] pub fn init_from_table(table: flatbuffers::Table<'a>) -> Self { OutPoint { _tab: table, } }
#[allow(unused_mut)] pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>( _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>, args: &'args OutPointArgs<'args>) -> flatbuffers::WIPOffset<OutPoint<'bldr>> { let mut builder = OutPointBuilder::new(_fbb); builder.add_index(args.index); if let Some(x) = args.tx_hash { builder.add_tx_hash(x); } if let Some(x) = args.block_hash { builder.add_block_hash(x); } builder.finish() }
pub const VT_BLOCK_HASH: flatbuffers::VOffsetT = 4; pub const VT_TX_HASH: flatbuffers::VOffsetT = 6; pub const VT_INDEX: flatbuffers::VOffsetT = 8;
#[inline] pub fn block_hash(&self) -> Option<&'a H256> { self._tab.get::<H256>(OutPoint::VT_BLOCK_HASH, None) }
#[inline] pub fn tx_hash(&self) -> Option<&'a H256> { self._tab.get::<H256>(OutPoint::VT_TX_HASH, None) }
#[inline] pub fn index(&self) -> u32 { self._tab.get::<u32>(OutPoint::VT_INDEX, Some(0)).unwrap() } }
pub struct OutPointArgs<'a> { pub block_hash: Option<&'a H256>, pub tx_hash: Option<&'a H256>, pub index: u32, }
impl<'a> Default for OutPointArgs<'a> { #[inline] fn default() -> Self { OutPointArgs { block_hash: None, tx_hash: None, index: 0, } } }
pub struct OutPointBuilder<'a: 'b, 'b> { fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>, start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>, }
impl<'a: 'b, 'b> OutPointBuilder<'a, 'b> { #[inline] pub fn add_block_hash(&mut self, block_hash: &'b H256) { self.fbb_.push_slot_always::<&H256>(OutPoint::VT_BLOCK_HASH, block_hash); }
#[inline] pub fn add_tx_hash(&mut self, tx_hash: &'b H256) { self.fbb_.push_slot_always::<&H256>(OutPoint::VT_TX_HASH, tx_hash); }
#[inline] pub fn add_index(&mut self, index: u32) { self.fbb_.push_slot::<u32>(OutPoint::VT_INDEX, index, 0); }
#[inline] pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>) -> OutPointBuilder<'a, 'b> { let start = _fbb.start_table(); OutPointBuilder { fbb_: _fbb, start_: start, } }
#[inline] pub fn finish(self) -> flatbuffers::WIPOffset<OutPoint<'a>> { let o = self.fbb_.end_table(self.start_); flatbuffers::WIPOffset::new(o.value()) } }
// CellInput table (continues on the following lines).
pub enum CellInputOffset {}
#[derive(Copy, Clone, Debug, 
// CellInput table: optional block_hash / tx_hash (inline H256), u32 index,
// and a u64 `since` value (both scalars default to 0). Generated code —
// tokens unchanged, comments and line breaks added only.
PartialEq)] pub struct CellInput<'a> { pub _tab: flatbuffers::Table<'a>, }
impl<'a> flatbuffers::Follow<'a> for CellInput<'a> { type Inner = CellInput<'a>; #[inline] fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { Self { _tab: flatbuffers::Table { buf: buf, loc: loc }, } } }
impl<'a> CellInput<'a> { #[inline] pub fn init_from_table(table: flatbuffers::Table<'a>) -> Self { CellInput { _tab: table, } }
#[allow(unused_mut)] pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>( _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>, args: &'args CellInputArgs<'args>) -> flatbuffers::WIPOffset<CellInput<'bldr>> { let mut builder = CellInputBuilder::new(_fbb); builder.add_since(args.since); builder.add_index(args.index); if let Some(x) = args.tx_hash { builder.add_tx_hash(x); } if let Some(x) = args.block_hash { builder.add_block_hash(x); } builder.finish() }
pub const VT_BLOCK_HASH: flatbuffers::VOffsetT = 4; pub const VT_TX_HASH: flatbuffers::VOffsetT = 6; pub const VT_INDEX: flatbuffers::VOffsetT = 8; pub const VT_SINCE: flatbuffers::VOffsetT = 10;
#[inline] pub fn block_hash(&self) -> Option<&'a H256> { self._tab.get::<H256>(CellInput::VT_BLOCK_HASH, None) }
#[inline] pub fn tx_hash(&self) -> Option<&'a H256> { self._tab.get::<H256>(CellInput::VT_TX_HASH, None) }
#[inline] pub fn index(&self) -> u32 { self._tab.get::<u32>(CellInput::VT_INDEX, Some(0)).unwrap() }
#[inline] pub fn since(&self) -> u64 { self._tab.get::<u64>(CellInput::VT_SINCE, Some(0)).unwrap() } }
pub struct CellInputArgs<'a> { pub block_hash: Option<&'a H256>, pub tx_hash: Option<&'a H256>, pub index: u32, pub since: u64, }
impl<'a> Default for CellInputArgs<'a> { #[inline] fn default() -> Self { CellInputArgs { block_hash: None, tx_hash: None, index: 0, since: 0, } } }
pub struct CellInputBuilder<'a: 'b, 'b> { fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>, start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>, }
impl<'a: 'b, 'b> CellInputBuilder<'a, 'b> { #[inline]
pub fn add_block_hash(&mut self, block_hash: &'b H256) { self.fbb_.push_slot_always::<&H256>(CellInput::VT_BLOCK_HASH, block_hash); }
#[inline] pub fn add_tx_hash(&mut self, tx_hash: &'b H256) { self.fbb_.push_slot_always::<&H256>(CellInput::VT_TX_HASH, tx_hash); }
#[inline] pub fn add_index(&mut self, index: u32) { self.fbb_.push_slot::<u32>(CellInput::VT_INDEX, index, 0); }
#[inline] pub fn add_since(&mut self, since: u64) { self.fbb_.push_slot::<u64>(CellInput::VT_SINCE, since, 0); }
#[inline] pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>) -> CellInputBuilder<'a, 'b> { let start = _fbb.start_table(); CellInputBuilder { fbb_: _fbb, start_: start, } }
#[inline] pub fn finish(self) -> flatbuffers::WIPOffset<CellInput<'a>> { let o = self.fbb_.end_table(self.start_); flatbuffers::WIPOffset::new(o.value()) } }
// CellOutput table: u64 capacity (default 0) plus optional data (Bytes),
// lock and type_ (Script) table offsets.
pub enum CellOutputOffset {}
#[derive(Copy, Clone, Debug, PartialEq)] pub struct CellOutput<'a> { pub _tab: flatbuffers::Table<'a>, }
impl<'a> flatbuffers::Follow<'a> for CellOutput<'a> { type Inner = CellOutput<'a>; #[inline] fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { Self { _tab: flatbuffers::Table { buf: buf, loc: loc }, } } }
impl<'a> CellOutput<'a> { #[inline] pub fn init_from_table(table: flatbuffers::Table<'a>) -> Self { CellOutput { _tab: table, } }
#[allow(unused_mut)] pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>( _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>, args: &'args CellOutputArgs<'args>) -> flatbuffers::WIPOffset<CellOutput<'bldr>> { let mut builder = CellOutputBuilder::new(_fbb); builder.add_capacity(args.capacity); if let Some(x) = args.type_ { builder.add_type_(x); } if let Some(x) = args.lock { builder.add_lock(x); } if let Some(x) = args.data { builder.add_data(x); } builder.finish() }
pub const VT_CAPACITY: flatbuffers::VOffsetT = 4; pub const VT_DATA: flatbuffers::VOffsetT = 6; pub const VT_LOCK: flatbuffers::VOffsetT = 8; pub const VT_TYPE_: flatbuffers::VOffsetT = 10;
#[inline] pub fn 
capacity(&self) -> u64 { self._tab.get::<u64>(CellOutput::VT_CAPACITY, Some(0)).unwrap() }
#[inline] pub fn data(&self) -> Option<Bytes<'a>> { self._tab.get::<flatbuffers::ForwardsUOffset<Bytes<'a>>>(CellOutput::VT_DATA, None) }
#[inline] pub fn lock(&self) -> Option<Script<'a>> { self._tab.get::<flatbuffers::ForwardsUOffset<Script<'a>>>(CellOutput::VT_LOCK, None) }
#[inline] pub fn type_(&self) -> Option<Script<'a>> { self._tab.get::<flatbuffers::ForwardsUOffset<Script<'a>>>(CellOutput::VT_TYPE_, None) } }
// CellOutput args/builder. Generated code — tokens unchanged, comments and
// line breaks added only.
pub struct CellOutputArgs<'a> { pub capacity: u64, pub data: Option<flatbuffers::WIPOffset<Bytes<'a >>>, pub lock: Option<flatbuffers::WIPOffset<Script<'a >>>, pub type_: Option<flatbuffers::WIPOffset<Script<'a >>>, }
impl<'a> Default for CellOutputArgs<'a> { #[inline] fn default() -> Self { CellOutputArgs { capacity: 0, data: None, lock: None, type_: None, } } }
pub struct CellOutputBuilder<'a: 'b, 'b> { fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>, start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>, }
impl<'a: 'b, 'b> CellOutputBuilder<'a, 'b> { #[inline] pub fn add_capacity(&mut self, capacity: u64) { self.fbb_.push_slot::<u64>(CellOutput::VT_CAPACITY, capacity, 0); }
#[inline] pub fn add_data(&mut self, data: flatbuffers::WIPOffset<Bytes<'b >>) { self.fbb_.push_slot_always::<flatbuffers::WIPOffset<Bytes>>(CellOutput::VT_DATA, data); }
#[inline] pub fn add_lock(&mut self, lock: flatbuffers::WIPOffset<Script<'b >>) { self.fbb_.push_slot_always::<flatbuffers::WIPOffset<Script>>(CellOutput::VT_LOCK, lock); }
#[inline] pub fn add_type_(&mut self, type_: flatbuffers::WIPOffset<Script<'b >>) { self.fbb_.push_slot_always::<flatbuffers::WIPOffset<Script>>(CellOutput::VT_TYPE_, type_); }
#[inline] pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>) -> CellOutputBuilder<'a, 'b> { let start = _fbb.start_table(); CellOutputBuilder { fbb_: _fbb, start_: start, } }
#[inline] pub fn finish(self) -> flatbuffers::WIPOffset<CellOutput<'a>> { let o = self.fbb_.end_table(self.start_); flatbuffers::WIPOffset::new(o.value()) } }
// Script table: args (vector of Bytes) plus code_hash (inline H256).
pub enum ScriptOffset {}
#[derive(Copy, Clone, Debug, PartialEq)] pub struct Script<'a> { pub _tab: flatbuffers::Table<'a>, }
impl<'a> flatbuffers::Follow<'a> for Script<'a> { type Inner = Script<'a>; #[inline] fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { Self { _tab: flatbuffers::Table { buf: buf, loc: loc }, } } }
impl<'a> Script<'a> { #[inline] pub fn init_from_table(table: flatbuffers::Table<'a>) -> Self { Script { _tab: table, } }
#[allow(unused_mut)] pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>( _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>, args: &'args ScriptArgs<'args>) -> flatbuffers::WIPOffset<Script<'bldr>> { let mut builder = ScriptBuilder::new(_fbb); if let Some(x) = args.code_hash { builder.add_code_hash(x); } if let Some(x) = args.args { builder.add_args(x); } builder.finish() }
pub const VT_ARGS: flatbuffers::VOffsetT = 4; pub const VT_CODE_HASH: flatbuffers::VOffsetT = 6;
#[inline] pub fn args(&self) -> Option<flatbuffers::Vector<flatbuffers::ForwardsUOffset<Bytes<'a>>>> { self._tab.get::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<flatbuffers::ForwardsUOffset<Bytes<'a>>>>>(Script::VT_ARGS, None) }
#[inline] pub fn code_hash(&self) -> Option<&'a H256> { self._tab.get::<H256>(Script::VT_CODE_HASH, None) } }
pub struct ScriptArgs<'a> { pub args: Option<flatbuffers::WIPOffset<flatbuffers::Vector<'a , flatbuffers::ForwardsUOffset<Bytes<'a >>>>>, pub code_hash: Option<&'a H256>, }
impl<'a> Default for ScriptArgs<'a> { #[inline] fn default() -> Self { ScriptArgs { args: None, code_hash: None, } } }
pub struct ScriptBuilder<'a: 'b, 'b> { fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>, start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>, }
impl<'a: 'b, 'b> ScriptBuilder<'a, 'b> { #[inline] pub fn add_args(&mut self, args: flatbuffers::WIPOffset<flatbuffers::Vector<'b , flatbuffers::ForwardsUOffset<Bytes<'b >>>>) { self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(Script::VT_ARGS, args); }
#[inline] pub fn add_code_hash(&mut self, code_hash: &'b H256) { self.fbb_.push_slot_always::<&H256>(Script::VT_CODE_HASH, code_hash); }
#[inline] pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>) -> ScriptBuilder<'a, 'b> { let start = _fbb.start_table(); ScriptBuilder { fbb_: _fbb, start_: start, } }
#[inline] pub fn finish(self) -> flatbuffers::WIPOffset<Script<'a>> { let o = self.fbb_.end_table(self.start_); flatbuffers::WIPOffset::new(o.value()) } }
// RelayMessage: envelope carrying a (payload_type, payload) union; the
// typed payload_as_* accessors check payload_type before casting.
pub enum RelayMessageOffset {}
#[derive(Copy, Clone, Debug, PartialEq)] pub struct RelayMessage<'a> { pub _tab: flatbuffers::Table<'a>, }
impl<'a> flatbuffers::Follow<'a> for RelayMessage<'a> { type Inner = RelayMessage<'a>; #[inline] fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { Self { _tab: flatbuffers::Table { buf: buf, loc: loc }, } } }
impl<'a> RelayMessage<'a> { #[inline] pub fn init_from_table(table: flatbuffers::Table<'a>) -> Self { RelayMessage { _tab: table, } }
#[allow(unused_mut)] pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>( _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>, args: &'args RelayMessageArgs) -> flatbuffers::WIPOffset<RelayMessage<'bldr>> { let mut builder = RelayMessageBuilder::new(_fbb); if let Some(x) = args.payload { builder.add_payload(x); } builder.add_payload_type(args.payload_type); builder.finish() }
pub const VT_PAYLOAD_TYPE: flatbuffers::VOffsetT = 4; pub const VT_PAYLOAD: flatbuffers::VOffsetT = 6;
#[inline] pub fn payload_type(&self) -> RelayPayload { self._tab.get::<RelayPayload>(RelayMessage::VT_PAYLOAD_TYPE, Some(RelayPayload::NONE)).unwrap() }
#[inline] pub fn payload(&self) -> Option<flatbuffers::Table<'a>> { self._tab.get::<flatbuffers::ForwardsUOffset<flatbuffers::Table<'a>>>(RelayMessage::VT_PAYLOAD, None) }
#[inline] #[allow(non_snake_case)] pub fn payload_as_compact_block(&'a self) -> Option<CompactBlock> { if self.payload_type() == 
RelayPayload::CompactBlock { self.payload().map(|u| CompactBlock::init_from_table(u)) } else { None } }
// Remaining typed union accessors: each returns Some only when the stored
// payload_type tag matches the requested variant. Generated code — tokens
// unchanged, comments and line breaks added only.
#[inline] #[allow(non_snake_case)] pub fn payload_as_relay_transaction(&'a self) -> Option<RelayTransaction> { if self.payload_type() == RelayPayload::RelayTransaction { self.payload().map(|u| RelayTransaction::init_from_table(u)) } else { None } }
#[inline] #[allow(non_snake_case)] pub fn payload_as_relay_transaction_hash(&'a self) -> Option<RelayTransactionHash> { if self.payload_type() == RelayPayload::RelayTransactionHash { self.payload().map(|u| RelayTransactionHash::init_from_table(u)) } else { None } }
#[inline] #[allow(non_snake_case)] pub fn payload_as_get_relay_transaction(&'a self) -> Option<GetRelayTransaction> { if self.payload_type() == RelayPayload::GetRelayTransaction { self.payload().map(|u| GetRelayTransaction::init_from_table(u)) } else { None } }
#[inline] #[allow(non_snake_case)] pub fn payload_as_get_block_transactions(&'a self) -> Option<GetBlockTransactions> { if self.payload_type() == RelayPayload::GetBlockTransactions { self.payload().map(|u| GetBlockTransactions::init_from_table(u)) } else { None } }
#[inline] #[allow(non_snake_case)] pub fn payload_as_block_transactions(&'a self) -> Option<BlockTransactions> { if self.payload_type() == RelayPayload::BlockTransactions { self.payload().map(|u| BlockTransactions::init_from_table(u)) } else { None } }
#[inline] #[allow(non_snake_case)] pub fn payload_as_get_block_proposal(&'a self) -> Option<GetBlockProposal> { if self.payload_type() == RelayPayload::GetBlockProposal { self.payload().map(|u| GetBlockProposal::init_from_table(u)) } else { None } }
#[inline] #[allow(non_snake_case)] pub fn payload_as_block_proposal(&'a self) -> Option<BlockProposal> { if self.payload_type() == RelayPayload::BlockProposal { self.payload().map(|u| BlockProposal::init_from_table(u)) } else { None } } }
pub struct RelayMessageArgs { pub payload_type: RelayPayload, pub payload: Option<flatbuffers::WIPOffset<flatbuffers::UnionWIPOffset>>, }
impl<'a> Default for RelayMessageArgs { #[inline] fn default() -> Self { RelayMessageArgs { payload_type: RelayPayload::NONE, payload: None, } } }
pub struct RelayMessageBuilder<'a: 'b, 'b> { fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>, start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>, }
impl<'a: 'b, 'b> RelayMessageBuilder<'a, 'b> { #[inline] pub fn add_payload_type(&mut self, payload_type: RelayPayload) { self.fbb_.push_slot::<RelayPayload>(RelayMessage::VT_PAYLOAD_TYPE, payload_type, RelayPayload::NONE); }
#[inline] pub fn add_payload(&mut self, payload: flatbuffers::WIPOffset<flatbuffers::UnionWIPOffset>) { self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(RelayMessage::VT_PAYLOAD, payload); }
#[inline] pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>) -> RelayMessageBuilder<'a, 'b> { let start = _fbb.start_table(); RelayMessageBuilder { fbb_: _fbb, start_: start, } }
#[inline] pub fn finish(self) -> flatbuffers::WIPOffset<RelayMessage<'a>> { let o = self.fbb_.end_table(self.start_); flatbuffers::WIPOffset::new(o.value()) } }
// CompactBlock table: header + nonce + short_ids + prefilled_transactions
// + uncles + proposals (compact block relay payload).
pub enum CompactBlockOffset {}
#[derive(Copy, Clone, Debug, PartialEq)] pub struct CompactBlock<'a> { pub _tab: flatbuffers::Table<'a>, }
impl<'a> flatbuffers::Follow<'a> for CompactBlock<'a> { type Inner = CompactBlock<'a>; #[inline] fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { Self { _tab: flatbuffers::Table { buf: buf, loc: loc }, } } }
impl<'a> CompactBlock<'a> { #[inline] pub fn init_from_table(table: flatbuffers::Table<'a>) -> Self { CompactBlock { _tab: table, } }
#[allow(unused_mut)] pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>( _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>, args: &'args CompactBlockArgs<'args>) -> flatbuffers::WIPOffset<CompactBlock<'bldr>> { let mut builder = CompactBlockBuilder::new(_fbb); builder.add_nonce(args.nonce); if let Some(x) = args.proposals { builder.add_proposals(x); } if let Some(x) = args.uncles { builder.add_uncles(x); } if let Some(x) = args.prefilled_transactions { builder.add_prefilled_transactions(x); } if let Some(x) = args.short_ids { builder.add_short_ids(x); } if let Some(x) = args.header { builder.add_header(x); } builder.finish() }
pub const VT_HEADER: flatbuffers::VOffsetT = 4; pub const VT_NONCE: flatbuffers::VOffsetT = 6; pub const VT_SHORT_IDS: flatbuffers::VOffsetT = 8; pub const VT_PREFILLED_TRANSACTIONS: flatbuffers::VOffsetT = 10; pub const VT_UNCLES: flatbuffers::VOffsetT = 12; pub const VT_PROPOSALS: flatbuffers::VOffsetT = 14;
#[inline] pub fn header(&self) -> Option<Header<'a>> { self._tab.get::<flatbuffers::ForwardsUOffset<Header<'a>>>(CompactBlock::VT_HEADER, None) }
#[inline] pub fn nonce(&self) -> u64 { self._tab.get::<u64>(CompactBlock::VT_NONCE, Some(0)).unwrap() }
#[inline] pub fn short_ids(&self) -> Option<flatbuffers::Vector<flatbuffers::ForwardsUOffset<Bytes<'a>>>> { self._tab.get::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<flatbuffers::ForwardsUOffset<Bytes<'a>>>>>(CompactBlock::VT_SHORT_IDS, None) }
#[inline] pub fn prefilled_transactions(&self) -> Option<flatbuffers::Vector<flatbuffers::ForwardsUOffset<IndexTransaction<'a>>>> { self._tab.get::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<flatbuffers::ForwardsUOffset<IndexTransaction<'a>>>>>(CompactBlock::VT_PREFILLED_TRANSACTIONS, None) }
#[inline] pub fn uncles(&self) -> Option<flatbuffers::Vector<flatbuffers::ForwardsUOffset<UncleBlock<'a>>>> { self._tab.get::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<flatbuffers::ForwardsUOffset<UncleBlock<'a>>>>>(CompactBlock::VT_UNCLES, None) }
#[inline] pub fn proposals(&self) -> Option<&'a [ProposalShortId]> { self._tab.get::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<ProposalShortId>>>(CompactBlock::VT_PROPOSALS, None).map(|v| v.safe_slice() ) } }
pub struct CompactBlockArgs<'a> { pub header: Option<flatbuffers::WIPOffset<Header<'a >>>, pub nonce: u64, pub short_ids: Option<flatbuffers::WIPOffset<flatbuffers::Vector<'a , flatbuffers::ForwardsUOffset<Bytes<'a >>>>>, pub prefilled_transactions: Option<flatbuffers::WIPOffset<flatbuffers::Vector<'a , flatbuffers::ForwardsUOffset<IndexTransaction<'a >>>>>, pub uncles: Option<flatbuffers::WIPOffset<flatbuffers::Vector<'a , flatbuffers::ForwardsUOffset<UncleBlock<'a >>>>>, pub proposals: Option<flatbuffers::WIPOffset<flatbuffers::Vector<'a , ProposalShortId>>>, }
impl<'a> Default for CompactBlockArgs<'a> { #[inline] fn default() -> Self { CompactBlockArgs { header: None, nonce: 0, short_ids: None, prefilled_transactions: None, uncles: None, proposals: None, } } }
pub struct CompactBlockBuilder<'a: 'b, 'b> { fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>, start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>, }
impl<'a: 'b, 'b> CompactBlockBuilder<'a, 'b> { #[inline] pub fn add_header(&mut self, header: flatbuffers::WIPOffset<Header<'b >>) { self.fbb_.push_slot_always::<flatbuffers::WIPOffset<Header>>(CompactBlock::VT_HEADER, header); }
#[inline] pub fn add_nonce(&mut self, nonce: u64) { self.fbb_.push_slot::<u64>(CompactBlock::VT_NONCE, nonce, 0); }
#[inline] pub fn add_short_ids(&mut self, short_ids: flatbuffers::WIPOffset<flatbuffers::Vector<'b , flatbuffers::ForwardsUOffset<Bytes<'b >>>>) { self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(CompactBlock::VT_SHORT_IDS, short_ids); }
#[inline] pub fn add_prefilled_transactions(&mut self, prefilled_transactions: flatbuffers::WIPOffset<flatbuffers::Vector<'b , flatbuffers::ForwardsUOffset<IndexTransaction<'b >>>>) { self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(CompactBlock::VT_PREFILLED_TRANSACTIONS, prefilled_transactions); }
#[inline] pub fn add_uncles(&mut self, uncles: flatbuffers::WIPOffset<flatbuffers::Vector<'b , flatbuffers::ForwardsUOffset<UncleBlock<'b >>>>) { self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(CompactBlock::VT_UNCLES, uncles); }
#[inline] pub fn 
add_proposals(&mut self, proposals: flatbuffers::WIPOffset<flatbuffers::Vector<'b , ProposalShortId>>) { self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(CompactBlock::VT_PROPOSALS, proposals); }
// Tail of CompactBlockBuilder, then the IndexTransaction / relay request
// and response tables. Generated code — tokens unchanged, comments and
// line breaks added only.
#[inline] pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>) -> CompactBlockBuilder<'a, 'b> { let start = _fbb.start_table(); CompactBlockBuilder { fbb_: _fbb, start_: start, } }
#[inline] pub fn finish(self) -> flatbuffers::WIPOffset<CompactBlock<'a>> { let o = self.fbb_.end_table(self.start_); flatbuffers::WIPOffset::new(o.value()) } }
// IndexTransaction table: (u32 index, Transaction) pair — the element type
// of CompactBlock.prefilled_transactions.
pub enum IndexTransactionOffset {}
#[derive(Copy, Clone, Debug, PartialEq)] pub struct IndexTransaction<'a> { pub _tab: flatbuffers::Table<'a>, }
impl<'a> flatbuffers::Follow<'a> for IndexTransaction<'a> { type Inner = IndexTransaction<'a>; #[inline] fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { Self { _tab: flatbuffers::Table { buf: buf, loc: loc }, } } }
impl<'a> IndexTransaction<'a> { #[inline] pub fn init_from_table(table: flatbuffers::Table<'a>) -> Self { IndexTransaction { _tab: table, } }
#[allow(unused_mut)] pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>( _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>, args: &'args IndexTransactionArgs<'args>) -> flatbuffers::WIPOffset<IndexTransaction<'bldr>> { let mut builder = IndexTransactionBuilder::new(_fbb); if let Some(x) = args.transaction { builder.add_transaction(x); } builder.add_index(args.index); builder.finish() }
pub const VT_INDEX: flatbuffers::VOffsetT = 4; pub const VT_TRANSACTION: flatbuffers::VOffsetT = 6;
#[inline] pub fn index(&self) -> u32 { self._tab.get::<u32>(IndexTransaction::VT_INDEX, Some(0)).unwrap() }
#[inline] pub fn transaction(&self) -> Option<Transaction<'a>> { self._tab.get::<flatbuffers::ForwardsUOffset<Transaction<'a>>>(IndexTransaction::VT_TRANSACTION, None) } }
pub struct IndexTransactionArgs<'a> { pub index: u32, pub transaction: Option<flatbuffers::WIPOffset<Transaction<'a >>>, }
impl<'a> Default for IndexTransactionArgs<'a> { #[inline] fn default() -> Self { IndexTransactionArgs { index: 0, transaction: None, } } }
pub struct IndexTransactionBuilder<'a: 'b, 'b> { fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>, start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>, }
impl<'a: 'b, 'b> IndexTransactionBuilder<'a, 'b> { #[inline] pub fn add_index(&mut self, index: u32) { self.fbb_.push_slot::<u32>(IndexTransaction::VT_INDEX, index, 0); }
#[inline] pub fn add_transaction(&mut self, transaction: flatbuffers::WIPOffset<Transaction<'b >>) { self.fbb_.push_slot_always::<flatbuffers::WIPOffset<Transaction>>(IndexTransaction::VT_TRANSACTION, transaction); }
#[inline] pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>) -> IndexTransactionBuilder<'a, 'b> { let start = _fbb.start_table(); IndexTransactionBuilder { fbb_: _fbb, start_: start, } }
#[inline] pub fn finish(self) -> flatbuffers::WIPOffset<IndexTransaction<'a>> { let o = self.fbb_.end_table(self.start_); flatbuffers::WIPOffset::new(o.value()) } }
// RelayTransactionHash table: a single optional tx_hash (inline H256).
pub enum RelayTransactionHashOffset {}
#[derive(Copy, Clone, Debug, PartialEq)] pub struct RelayTransactionHash<'a> { pub _tab: flatbuffers::Table<'a>, }
impl<'a> flatbuffers::Follow<'a> for RelayTransactionHash<'a> { type Inner = RelayTransactionHash<'a>; #[inline] fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { Self { _tab: flatbuffers::Table { buf: buf, loc: loc }, } } }
impl<'a> RelayTransactionHash<'a> { #[inline] pub fn init_from_table(table: flatbuffers::Table<'a>) -> Self { RelayTransactionHash { _tab: table, } }
#[allow(unused_mut)] pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>( _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>, args: &'args RelayTransactionHashArgs<'args>) -> flatbuffers::WIPOffset<RelayTransactionHash<'bldr>> { let mut builder = RelayTransactionHashBuilder::new(_fbb); if let Some(x) = args.tx_hash { builder.add_tx_hash(x); } builder.finish() }
pub const VT_TX_HASH: flatbuffers::VOffsetT = 4;
#[inline] pub fn tx_hash(&self) -> Option<&'a H256> { self._tab.get::<H256>(RelayTransactionHash::VT_TX_HASH, None) } }
pub struct RelayTransactionHashArgs<'a> { pub tx_hash: Option<&'a H256>, }
impl<'a> Default for RelayTransactionHashArgs<'a> { #[inline] fn default() -> Self { RelayTransactionHashArgs { tx_hash: None, } } }
pub struct RelayTransactionHashBuilder<'a: 'b, 'b> { fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>, start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>, }
impl<'a: 'b, 'b> RelayTransactionHashBuilder<'a, 'b> { #[inline] pub fn add_tx_hash(&mut self, tx_hash: &'b H256) { self.fbb_.push_slot_always::<&H256>(RelayTransactionHash::VT_TX_HASH, tx_hash); }
#[inline] pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>) -> RelayTransactionHashBuilder<'a, 'b> { let start = _fbb.start_table(); RelayTransactionHashBuilder { fbb_: _fbb, start_: start, } }
#[inline] pub fn finish(self) -> flatbuffers::WIPOffset<RelayTransactionHash<'a>> { let o = self.fbb_.end_table(self.start_); flatbuffers::WIPOffset::new(o.value()) } }
// GetRelayTransaction table: same single tx_hash shape as above.
pub enum GetRelayTransactionOffset {}
#[derive(Copy, Clone, Debug, PartialEq)] pub struct GetRelayTransaction<'a> { pub _tab: flatbuffers::Table<'a>, }
impl<'a> flatbuffers::Follow<'a> for GetRelayTransaction<'a> { type Inner = GetRelayTransaction<'a>; #[inline] fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { Self { _tab: flatbuffers::Table { buf: buf, loc: loc }, } } }
impl<'a> GetRelayTransaction<'a> { #[inline] pub fn init_from_table(table: flatbuffers::Table<'a>) -> Self { GetRelayTransaction { _tab: table, } }
#[allow(unused_mut)] pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>( _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>, args: &'args GetRelayTransactionArgs<'args>) -> flatbuffers::WIPOffset<GetRelayTransaction<'bldr>> { let mut builder = GetRelayTransactionBuilder::new(_fbb); if let Some(x) = args.tx_hash { builder.add_tx_hash(x); } builder.finish() }
pub const VT_TX_HASH: flatbuffers::VOffsetT = 4;
#[inline] pub fn tx_hash(&self) -> Option<&'a H256> { self._tab.get::<H256>(GetRelayTransaction::VT_TX_HASH, None) } }
pub struct GetRelayTransactionArgs<'a> { pub tx_hash: Option<&'a H256>, }
impl<'a> Default for GetRelayTransactionArgs<'a> { #[inline] fn default() -> Self { GetRelayTransactionArgs { tx_hash: None, } } }
pub struct GetRelayTransactionBuilder<'a: 'b, 'b> { fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>, start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>, }
impl<'a: 'b, 'b> GetRelayTransactionBuilder<'a, 'b> { #[inline] pub fn add_tx_hash(&mut self, tx_hash: &'b H256) { self.fbb_.push_slot_always::<&H256>(GetRelayTransaction::VT_TX_HASH, tx_hash); }
#[inline] pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>) -> GetRelayTransactionBuilder<'a, 'b> { let start = _fbb.start_table(); GetRelayTransactionBuilder { fbb_: _fbb, start_: start, } }
#[inline] pub fn finish(self) -> flatbuffers::WIPOffset<GetRelayTransaction<'a>> { let o = self.fbb_.end_table(self.start_); flatbuffers::WIPOffset::new(o.value()) } }
// RelayTransaction table: a full Transaction plus a u64 `cycles` value
// (default 0).
pub enum RelayTransactionOffset {}
#[derive(Copy, Clone, Debug, PartialEq)] pub struct RelayTransaction<'a> { pub _tab: flatbuffers::Table<'a>, }
impl<'a> flatbuffers::Follow<'a> for RelayTransaction<'a> { type Inner = RelayTransaction<'a>; #[inline] fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { Self { _tab: flatbuffers::Table { buf: buf, loc: loc }, } } }
impl<'a> RelayTransaction<'a> { #[inline] pub fn init_from_table(table: flatbuffers::Table<'a>) -> Self { RelayTransaction { _tab: table, } }
#[allow(unused_mut)] pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>( _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>, args: &'args RelayTransactionArgs<'args>) -> flatbuffers::WIPOffset<RelayTransaction<'bldr>> { let mut builder = RelayTransactionBuilder::new(_fbb); builder.add_cycles(args.cycles); if let Some(x) = args.transaction { builder.add_transaction(x); } builder.finish() }
pub const VT_CYCLES: flatbuffers::VOffsetT = 4; pub const VT_TRANSACTION: flatbuffers::VOffsetT = 6;
#[inline] pub fn cycles(&self) -> u64 { self._tab.get::<u64>(RelayTransaction::VT_CYCLES, Some(0)).unwrap() }
#[inline] pub fn transaction(&self) -> Option<Transaction<'a>> { self._tab.get::<flatbuffers::ForwardsUOffset<Transaction<'a>>>(RelayTransaction::VT_TRANSACTION, None) } }
pub struct RelayTransactionArgs<'a> { pub cycles: u64, pub transaction: Option<flatbuffers::WIPOffset<Transaction<'a >>>, }
impl<'a> Default for RelayTransactionArgs<'a> { #[inline] fn default() -> Self { RelayTransactionArgs { cycles: 0, transaction: None, } } }
pub struct RelayTransactionBuilder<'a: 'b, 'b> { fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>, start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>, }
impl<'a: 'b, 'b> RelayTransactionBuilder<'a, 'b> { #[inline] pub fn add_cycles(&mut self, cycles: u64) { self.fbb_.push_slot::<u64>(RelayTransaction::VT_CYCLES, cycles, 0); }
#[inline] pub fn add_transaction(&mut self, transaction: flatbuffers::WIPOffset<Transaction<'b >>) { self.fbb_.push_slot_always::<flatbuffers::WIPOffset<Transaction>>(RelayTransaction::VT_TRANSACTION, transaction); }
#[inline] pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>) -> RelayTransactionBuilder<'a, 'b> { let start = _fbb.start_table(); RelayTransactionBuilder { fbb_: _fbb, start_: start, } }
#[inline] pub fn finish(self) -> flatbuffers::WIPOffset<RelayTransaction<'a>> { let o = self.fbb_.end_table(self.start_); flatbuffers::WIPOffset::new(o.value()) } }
// GetBlockTransactions table (continues on the following lines).
pub enum GetBlockTransactionsOffset {}
#[derive(Copy, Clone, Debug, PartialEq)] pub struct GetBlockTransactions<'a> { pub _tab: flatbuffers::Table<'a>, }
impl<'a> flatbuffers::Follow<'a> for GetBlockTransactions<'a> { type Inner = GetBlockTransactions<'a>; #[inline] fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { Self { _tab: flatbuffers::Table { buf: buf, loc: loc }, } } }
impl<'a> 
GetBlockTransactions<'a> { #[inline] pub fn init_from_table(table: flatbuffers::Table<'a>) -> Self { GetBlockTransactions { _tab: table, } }
// GetBlockTransactions accessors: block_hash (inline H256) + a u32 vector
// of indexes. Generated code — tokens unchanged, comments and line breaks
// added only.
#[allow(unused_mut)] pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>( _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>, args: &'args GetBlockTransactionsArgs<'args>) -> flatbuffers::WIPOffset<GetBlockTransactions<'bldr>> { let mut builder = GetBlockTransactionsBuilder::new(_fbb); if let Some(x) = args.indexes { builder.add_indexes(x); } if let Some(x) = args.block_hash { builder.add_block_hash(x); } builder.finish() }
pub const VT_BLOCK_HASH: flatbuffers::VOffsetT = 4; pub const VT_INDEXES: flatbuffers::VOffsetT = 6;
#[inline] pub fn block_hash(&self) -> Option<&'a H256> { self._tab.get::<H256>(GetBlockTransactions::VT_BLOCK_HASH, None) }
#[inline] pub fn indexes(&self) -> Option<flatbuffers::Vector<'a, u32>> { self._tab.get::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'a, u32>>>(GetBlockTransactions::VT_INDEXES, None) } }
pub struct GetBlockTransactionsArgs<'a> { pub block_hash: Option<&'a H256>, pub indexes: Option<flatbuffers::WIPOffset<flatbuffers::Vector<'a , u32>>>, }
impl<'a> Default for GetBlockTransactionsArgs<'a> { #[inline] fn default() -> Self { GetBlockTransactionsArgs { block_hash: None, indexes: None, } } }
pub struct GetBlockTransactionsBuilder<'a: 'b, 'b> { fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>, start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>, }
impl<'a: 'b, 'b> GetBlockTransactionsBuilder<'a, 'b> { #[inline] pub fn add_block_hash(&mut self, block_hash: &'b H256) { self.fbb_.push_slot_always::<&H256>(GetBlockTransactions::VT_BLOCK_HASH, block_hash); }
#[inline] pub fn add_indexes(&mut self, indexes: flatbuffers::WIPOffset<flatbuffers::Vector<'b , u32>>) { self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(GetBlockTransactions::VT_INDEXES, indexes); }
#[inline] pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>) -> GetBlockTransactionsBuilder<'a, 'b> { let start = _fbb.start_table(); GetBlockTransactionsBuilder { fbb_: _fbb, start_: start, } }
#[inline] pub fn finish(self) -> flatbuffers::WIPOffset<GetBlockTransactions<'a>> { let o = self.fbb_.end_table(self.start_); flatbuffers::WIPOffset::new(o.value()) } }
// BlockTransactions table: block_hash plus a vector of Transaction tables.
pub enum BlockTransactionsOffset {}
#[derive(Copy, Clone, Debug, PartialEq)] pub struct BlockTransactions<'a> { pub _tab: flatbuffers::Table<'a>, }
impl<'a> flatbuffers::Follow<'a> for BlockTransactions<'a> { type Inner = BlockTransactions<'a>; #[inline] fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { Self { _tab: flatbuffers::Table { buf: buf, loc: loc }, } } }
impl<'a> BlockTransactions<'a> { #[inline] pub fn init_from_table(table: flatbuffers::Table<'a>) -> Self { BlockTransactions { _tab: table, } }
#[allow(unused_mut)] pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>( _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>, args: &'args BlockTransactionsArgs<'args>) -> flatbuffers::WIPOffset<BlockTransactions<'bldr>> { let mut builder = BlockTransactionsBuilder::new(_fbb); if let Some(x) = args.transactions { builder.add_transactions(x); } if let Some(x) = args.block_hash { builder.add_block_hash(x); } builder.finish() }
pub const VT_BLOCK_HASH: flatbuffers::VOffsetT = 4; pub const VT_TRANSACTIONS: flatbuffers::VOffsetT = 6;
#[inline] pub fn block_hash(&self) -> Option<&'a H256> { self._tab.get::<H256>(BlockTransactions::VT_BLOCK_HASH, None) }
#[inline] pub fn transactions(&self) -> Option<flatbuffers::Vector<flatbuffers::ForwardsUOffset<Transaction<'a>>>> { self._tab.get::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<flatbuffers::ForwardsUOffset<Transaction<'a>>>>>(BlockTransactions::VT_TRANSACTIONS, None) } }
pub struct BlockTransactionsArgs<'a> { pub block_hash: Option<&'a H256>, pub transactions: Option<flatbuffers::WIPOffset<flatbuffers::Vector<'a , flatbuffers::ForwardsUOffset<Transaction<'a >>>>>, }
impl<'a> Default for BlockTransactionsArgs<'a> { #[inline] fn default() -> Self { BlockTransactionsArgs { block_hash: None, transactions: None, } } }
pub struct BlockTransactionsBuilder<'a: 'b, 'b> { fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>, start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>, }
impl<'a: 'b, 'b> BlockTransactionsBuilder<'a, 'b> { #[inline] pub fn add_block_hash(&mut self, block_hash: &'b H256) { self.fbb_.push_slot_always::<&H256>(BlockTransactions::VT_BLOCK_HASH, block_hash); }
#[inline] pub fn add_transactions(&mut self, transactions: flatbuffers::WIPOffset<flatbuffers::Vector<'b , flatbuffers::ForwardsUOffset<Transaction<'b >>>>) { self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(BlockTransactions::VT_TRANSACTIONS, transactions); }
#[inline] pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>) -> BlockTransactionsBuilder<'a, 'b> { let start = _fbb.start_table(); BlockTransactionsBuilder { fbb_: _fbb, start_: start, } }
#[inline] pub fn finish(self) -> flatbuffers::WIPOffset<BlockTransactions<'a>> { let o = self.fbb_.end_table(self.start_); flatbuffers::WIPOffset::new(o.value()) } }
// GetBlockProposal table: u64 block_number (default 0) plus a vector of
// ProposalShortId structs.
pub enum GetBlockProposalOffset {}
#[derive(Copy, Clone, Debug, PartialEq)] pub struct GetBlockProposal<'a> { pub _tab: flatbuffers::Table<'a>, }
impl<'a> flatbuffers::Follow<'a> for GetBlockProposal<'a> { type Inner = GetBlockProposal<'a>; #[inline] fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { Self { _tab: flatbuffers::Table { buf: buf, loc: loc }, } } }
impl<'a> GetBlockProposal<'a> { #[inline] pub fn init_from_table(table: flatbuffers::Table<'a>) -> Self { GetBlockProposal { _tab: table, } }
#[allow(unused_mut)] pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>( _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>, args: &'args GetBlockProposalArgs<'args>) -> flatbuffers::WIPOffset<GetBlockProposal<'bldr>> { let mut builder = GetBlockProposalBuilder::new(_fbb); builder.add_block_number(args.block_number); if let Some(x) = args.proposals { builder.add_proposals(x); } builder.finish() }
pub const VT_BLOCK_NUMBER: flatbuffers::VOffsetT = 4; pub const VT_PROPOSALS: flatbuffers::VOffsetT = 6;
#[inline] pub fn block_number(&self) -> u64 { self._tab.get::<u64>(GetBlockProposal::VT_BLOCK_NUMBER, Some(0)).unwrap() }
#[inline] pub fn proposals(&self) -> Option<&'a [ProposalShortId]> { self._tab.get::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<ProposalShortId>>>(GetBlockProposal::VT_PROPOSALS, None).map(|v| v.safe_slice() ) } }
pub struct GetBlockProposalArgs<'a> { pub block_number: u64, pub proposals: Option<flatbuffers::WIPOffset<flatbuffers::Vector<'a , ProposalShortId>>>, }
impl<'a> Default for GetBlockProposalArgs<'a> { #[inline] fn default() -> Self { GetBlockProposalArgs { block_number: 0, proposals: None, } } }
pub struct GetBlockProposalBuilder<'a: 'b, 'b> { fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>, start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>, }
impl<'a: 'b, 'b> GetBlockProposalBuilder<'a, 'b> { #[inline] pub fn add_block_number(&mut self, block_number: u64) { self.fbb_.push_slot::<u64>(GetBlockProposal::VT_BLOCK_NUMBER, block_number, 0); }
#[inline] pub fn add_proposals(&mut self, proposals: flatbuffers::WIPOffset<flatbuffers::Vector<'b , ProposalShortId>>) { self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(GetBlockProposal::VT_PROPOSALS, proposals); }
#[inline] pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>) -> GetBlockProposalBuilder<'a, 'b> { let start = _fbb.start_table(); GetBlockProposalBuilder { fbb_: _fbb, start_: start, } }
#[inline] pub fn finish(self) -> flatbuffers::WIPOffset<GetBlockProposal<'a>> { let o = self.fbb_.end_table(self.start_); flatbuffers::WIPOffset::new(o.value()) } }
// BlockProposal table (definition continues past this chunk).
pub enum BlockProposalOffset {}
#[derive(Copy, Clone, Debug, PartialEq)] pub struct BlockProposal<'a> { pub _tab: flatbuffers::Table<'a>, }
impl<'a> flatbuffers::Follow<'a> for BlockProposal<'a> { type Inner = BlockProposal<'a>; 
#[inline] fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { Self { _tab: flatbuffers::Table { buf: buf, loc: loc }, } } } impl<'a> BlockProposal<'a> { #[inline] pub fn init_from_table(table: flatbuffers::Table<'a>) -> Self { BlockProposal { _tab: table, } } #[allow(unused_mut)] pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>( _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>, args: &'args BlockProposalArgs<'args>) -> flatbuffers::WIPOffset<BlockProposal<'bldr>> { let mut builder = BlockProposalBuilder::new(_fbb); if let Some(x) = args.transactions { builder.add_transactions(x); } builder.finish() } pub const VT_TRANSACTIONS: flatbuffers::VOffsetT = 4; #[inline] pub fn transactions(&self) -> Option<flatbuffers::Vector<flatbuffers::ForwardsUOffset<Transaction<'a>>>> { self._tab.get::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<flatbuffers::ForwardsUOffset<Transaction<'a>>>>>(BlockProposal::VT_TRANSACTIONS, None) } } pub struct BlockProposalArgs<'a> { pub transactions: Option<flatbuffers::WIPOffset<flatbuffers::Vector<'a , flatbuffers::ForwardsUOffset<Transaction<'a >>>>>, } impl<'a> Default for BlockProposalArgs<'a> { #[inline] fn default() -> Self { BlockProposalArgs { transactions: None, } } } pub struct BlockProposalBuilder<'a: 'b, 'b> { fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>, start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>, } impl<'a: 'b, 'b> BlockProposalBuilder<'a, 'b> { #[inline] pub fn add_transactions(&mut self, transactions: flatbuffers::WIPOffset<flatbuffers::Vector<'b , flatbuffers::ForwardsUOffset<Transaction<'b >>>>) { self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(BlockProposal::VT_TRANSACTIONS, transactions); } #[inline] pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>) -> BlockProposalBuilder<'a, 'b> { let start = _fbb.start_table(); BlockProposalBuilder { fbb_: _fbb, start_: start, } } #[inline] pub fn finish(self) -> flatbuffers::WIPOffset<BlockProposal<'a>> { let o = 
self.fbb_.end_table(self.start_); flatbuffers::WIPOffset::new(o.value()) } } pub enum SetFilterOffset {} #[derive(Copy, Clone, Debug, PartialEq)] pub struct SetFilter<'a> { pub _tab: flatbuffers::Table<'a>, } impl<'a> flatbuffers::Follow<'a> for SetFilter<'a> { type Inner = SetFilter<'a>; #[inline] fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { Self { _tab: flatbuffers::Table { buf: buf, loc: loc }, } } } impl<'a> SetFilter<'a> { #[inline] pub fn init_from_table(table: flatbuffers::Table<'a>) -> Self { SetFilter { _tab: table, } } #[allow(unused_mut)] pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>( _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>, args: &'args SetFilterArgs<'args>) -> flatbuffers::WIPOffset<SetFilter<'bldr>> { let mut builder = SetFilterBuilder::new(_fbb); builder.add_hash_seed(args.hash_seed); if let Some(x) = args.filter { builder.add_filter(x); } builder.add_num_hashes(args.num_hashes); builder.finish() } pub const VT_FILTER: flatbuffers::VOffsetT = 4; pub const VT_NUM_HASHES: flatbuffers::VOffsetT = 6; pub const VT_HASH_SEED: flatbuffers::VOffsetT = 8; #[inline] pub fn filter(&self) -> Option<&'a [u8]> { self._tab.get::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'a, u8>>>(SetFilter::VT_FILTER, None).map(|v| v.safe_slice()) } #[inline] pub fn num_hashes(&self) -> u8 { self._tab.get::<u8>(SetFilter::VT_NUM_HASHES, Some(0)).unwrap() } #[inline] pub fn hash_seed(&self) -> u32 { self._tab.get::<u32>(SetFilter::VT_HASH_SEED, Some(0)).unwrap() } } pub struct SetFilterArgs<'a> { pub filter: Option<flatbuffers::WIPOffset<flatbuffers::Vector<'a , u8>>>, pub num_hashes: u8, pub hash_seed: u32, } impl<'a> Default for SetFilterArgs<'a> { #[inline] fn default() -> Self { SetFilterArgs { filter: None, num_hashes: 0, hash_seed: 0, } } } pub struct SetFilterBuilder<'a: 'b, 'b> { fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>, start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>, } impl<'a: 'b, 'b> 
SetFilterBuilder<'a, 'b> { #[inline] pub fn add_filter(&mut self, filter: flatbuffers::WIPOffset<flatbuffers::Vector<'b , u8>>) { self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(SetFilter::VT_FILTER, filter); } #[inline] pub fn add_num_hashes(&mut self, num_hashes: u8) { self.fbb_.push_slot::<u8>(SetFilter::VT_NUM_HASHES, num_hashes, 0); } #[inline] pub fn add_hash_seed(&mut self, hash_seed: u32) { self.fbb_.push_slot::<u32>(SetFilter::VT_HASH_SEED, hash_seed, 0); } #[inline] pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>) -> SetFilterBuilder<'a, 'b> { let start = _fbb.start_table(); SetFilterBuilder { fbb_: _fbb, start_: start, } } #[inline] pub fn finish(self) -> flatbuffers::WIPOffset<SetFilter<'a>> { let o = self.fbb_.end_table(self.start_); flatbuffers::WIPOffset::new(o.value()) } } pub enum AddFilterOffset {} #[derive(Copy, Clone, Debug, PartialEq)] pub struct AddFilter<'a> { pub _tab: flatbuffers::Table<'a>, } impl<'a> flatbuffers::Follow<'a> for AddFilter<'a> { type Inner = AddFilter<'a>; #[inline] fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { Self { _tab: flatbuffers::Table { buf: buf, loc: loc }, } } } impl<'a> AddFilter<'a> { #[inline] pub fn init_from_table(table: flatbuffers::Table<'a>) -> Self { AddFilter { _tab: table, } } #[allow(unused_mut)] pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>( _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>, args: &'args AddFilterArgs<'args>) -> flatbuffers::WIPOffset<AddFilter<'bldr>> { let mut builder = AddFilterBuilder::new(_fbb); if let Some(x) = args.filter { builder.add_filter(x); } builder.finish() } pub const VT_FILTER: flatbuffers::VOffsetT = 4; #[inline] pub fn filter(&self) -> Option<&'a [u8]> { self._tab.get::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'a, u8>>>(AddFilter::VT_FILTER, None).map(|v| v.safe_slice()) } } pub struct AddFilterArgs<'a> { pub filter: Option<flatbuffers::WIPOffset<flatbuffers::Vector<'a , u8>>>, } impl<'a> Default for 
AddFilterArgs<'a> { #[inline] fn default() -> Self { AddFilterArgs { filter: None, } } } pub struct AddFilterBuilder<'a: 'b, 'b> { fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>, start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>, } impl<'a: 'b, 'b> AddFilterBuilder<'a, 'b> { #[inline] pub fn add_filter(&mut self, filter: flatbuffers::WIPOffset<flatbuffers::Vector<'b , u8>>) { self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(AddFilter::VT_FILTER, filter); } #[inline] pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>) -> AddFilterBuilder<'a, 'b> { let start = _fbb.start_table(); AddFilterBuilder { fbb_: _fbb, start_: start, } } #[inline] pub fn finish(self) -> flatbuffers::WIPOffset<AddFilter<'a>> { let o = self.fbb_.end_table(self.start_); flatbuffers::WIPOffset::new(o.value()) } } pub enum ClearFilterOffset {} #[derive(Copy, Clone, Debug, PartialEq)] pub struct ClearFilter<'a> { pub _tab: flatbuffers::Table<'a>, } impl<'a> flatbuffers::Follow<'a> for ClearFilter<'a> { type Inner = ClearFilter<'a>; #[inline] fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { Self { _tab: flatbuffers::Table { buf: buf, loc: loc }, } } } impl<'a> ClearFilter<'a> { #[inline] pub fn init_from_table(table: flatbuffers::Table<'a>) -> Self { ClearFilter { _tab: table, } } #[allow(unused_mut)] pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>( _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>, _args: &'args ClearFilterArgs) -> flatbuffers::WIPOffset<ClearFilter<'bldr>> { let mut builder = ClearFilterBuilder::new(_fbb); builder.finish() } } pub struct ClearFilterArgs { } impl<'a> Default for ClearFilterArgs { #[inline] fn default() -> Self { ClearFilterArgs { } } } pub struct ClearFilterBuilder<'a: 'b, 'b> { fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>, start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>, } impl<'a: 'b, 'b> ClearFilterBuilder<'a, 'b> { #[inline] pub fn new(_fbb: &'b mut 
flatbuffers::FlatBufferBuilder<'a>) -> ClearFilterBuilder<'a, 'b> { let start = _fbb.start_table(); ClearFilterBuilder { fbb_: _fbb, start_: start, } } #[inline] pub fn finish(self) -> flatbuffers::WIPOffset<ClearFilter<'a>> { let o = self.fbb_.end_table(self.start_); flatbuffers::WIPOffset::new(o.value()) } } pub enum FilteredBlockOffset {} #[derive(Copy, Clone, Debug, PartialEq)] pub struct FilteredBlock<'a> { pub _tab: flatbuffers::Table<'a>, } impl<'a> flatbuffers::Follow<'a> for FilteredBlock<'a> { type Inner = FilteredBlock<'a>; #[inline] fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { Self { _tab: flatbuffers::Table { buf: buf, loc: loc }, } } } impl<'a> FilteredBlock<'a> { #[inline] pub fn init_from_table(table: flatbuffers::Table<'a>) -> Self { FilteredBlock { _tab: table, } } #[allow(unused_mut)] pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>( _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>, args: &'args FilteredBlockArgs<'args>) -> flatbuffers::WIPOffset<FilteredBlock<'bldr>> { let mut builder = FilteredBlockBuilder::new(_fbb); if let Some(x) = args.proof { builder.add_proof(x); } if let Some(x) = args.transactions { builder.add_transactions(x); } if let Some(x) = args.header { builder.add_header(x); } builder.finish() } pub const VT_HEADER: flatbuffers::VOffsetT = 4; pub const VT_TRANSACTIONS: flatbuffers::VOffsetT = 6; pub const VT_PROOF: flatbuffers::VOffsetT = 8; #[inline] pub fn header(&self) -> Option<Header<'a>> { self._tab.get::<flatbuffers::ForwardsUOffset<Header<'a>>>(FilteredBlock::VT_HEADER, None) } #[inline] pub fn transactions(&self) -> Option<flatbuffers::Vector<flatbuffers::ForwardsUOffset<Transaction<'a>>>> { self._tab.get::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<flatbuffers::ForwardsUOffset<Transaction<'a>>>>>(FilteredBlock::VT_TRANSACTIONS, None) } #[inline] pub fn proof(&self) -> Option<MerkleProof<'a>> { self._tab.get::<flatbuffers::ForwardsUOffset<MerkleProof<'a>>>(FilteredBlock::VT_PROOF, None) 
} } pub struct FilteredBlockArgs<'a> { pub header: Option<flatbuffers::WIPOffset<Header<'a >>>, pub transactions: Option<flatbuffers::WIPOffset<flatbuffers::Vector<'a , flatbuffers::ForwardsUOffset<Transaction<'a >>>>>, pub proof: Option<flatbuffers::WIPOffset<MerkleProof<'a >>>, } impl<'a> Default for FilteredBlockArgs<'a> { #[inline] fn default() -> Self { FilteredBlockArgs { header: None, transactions: None, proof: None, } } } pub struct FilteredBlockBuilder<'a: 'b, 'b> { fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>, start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>, } impl<'a: 'b, 'b> FilteredBlockBuilder<'a, 'b> { #[inline] pub fn add_header(&mut self, header: flatbuffers::WIPOffset<Header<'b >>) { self.fbb_.push_slot_always::<flatbuffers::WIPOffset<Header>>(FilteredBlock::VT_HEADER, header); } #[inline] pub fn add_transactions(&mut self, transactions: flatbuffers::WIPOffset<flatbuffers::Vector<'b , flatbuffers::ForwardsUOffset<Transaction<'b >>>>) { self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(FilteredBlock::VT_TRANSACTIONS, transactions); } #[inline] pub fn add_proof(&mut self, proof: flatbuffers::WIPOffset<MerkleProof<'b >>) { self.fbb_.push_slot_always::<flatbuffers::WIPOffset<MerkleProof>>(FilteredBlock::VT_PROOF, proof); } #[inline] pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>) -> FilteredBlockBuilder<'a, 'b> { let start = _fbb.start_table(); FilteredBlockBuilder { fbb_: _fbb, start_: start, } } #[inline] pub fn finish(self) -> flatbuffers::WIPOffset<FilteredBlock<'a>> { let o = self.fbb_.end_table(self.start_); flatbuffers::WIPOffset::new(o.value()) } } pub enum MerkleProofOffset {} #[derive(Copy, Clone, Debug, PartialEq)] pub struct MerkleProof<'a> { pub _tab: flatbuffers::Table<'a>, } impl<'a> flatbuffers::Follow<'a> for MerkleProof<'a> { type Inner = MerkleProof<'a>; #[inline] fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { Self { _tab: flatbuffers::Table { buf: buf, loc: loc }, } } } impl<'a> 
MerkleProof<'a> { #[inline] pub fn init_from_table(table: flatbuffers::Table<'a>) -> Self { MerkleProof { _tab: table, } } #[allow(unused_mut)] pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>( _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>, args: &'args MerkleProofArgs<'args>) -> flatbuffers::WIPOffset<MerkleProof<'bldr>> { let mut builder = MerkleProofBuilder::new(_fbb); if let Some(x) = args.lemmas { builder.add_lemmas(x); } if let Some(x) = args.indices { builder.add_indices(x); } builder.finish() } pub const VT_INDICES: flatbuffers::VOffsetT = 4; pub const VT_LEMMAS: flatbuffers::VOffsetT = 6; #[inline] pub fn indices(&self) -> Option<flatbuffers::Vector<'a, u32>> { self._tab.get::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<'a, u32>>>(MerkleProof::VT_INDICES, None) } #[inline] pub fn lemmas(&self) -> Option<&'a [H256]> { self._tab.get::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<H256>>>(MerkleProof::VT_LEMMAS, None).map(|v| v.safe_slice() ) } } pub struct MerkleProofArgs<'a> { pub indices: Option<flatbuffers::WIPOffset<flatbuffers::Vector<'a , u32>>>, pub lemmas: Option<flatbuffers::WIPOffset<flatbuffers::Vector<'a , H256>>>, } impl<'a> Default for MerkleProofArgs<'a> { #[inline] fn default() -> Self { MerkleProofArgs { indices: None, lemmas: None, } } } pub struct MerkleProofBuilder<'a: 'b, 'b> { fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>, start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>, } impl<'a: 'b, 'b> MerkleProofBuilder<'a, 'b> { #[inline] pub fn add_indices(&mut self, indices: flatbuffers::WIPOffset<flatbuffers::Vector<'b , u32>>) { self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(MerkleProof::VT_INDICES, indices); } #[inline] pub fn add_lemmas(&mut self, lemmas: flatbuffers::WIPOffset<flatbuffers::Vector<'b , H256>>) { self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(MerkleProof::VT_LEMMAS, lemmas); } #[inline] pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>) -> 
MerkleProofBuilder<'a, 'b> { let start = _fbb.start_table(); MerkleProofBuilder { fbb_: _fbb, start_: start, } } #[inline] pub fn finish(self) -> flatbuffers::WIPOffset<MerkleProof<'a>> { let o = self.fbb_.end_table(self.start_); flatbuffers::WIPOffset::new(o.value()) } } pub enum TimeMessageOffset {} #[derive(Copy, Clone, Debug, PartialEq)] pub struct TimeMessage<'a> { pub _tab: flatbuffers::Table<'a>, } impl<'a> flatbuffers::Follow<'a> for TimeMessage<'a> { type Inner = TimeMessage<'a>; #[inline] fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { Self { _tab: flatbuffers::Table { buf: buf, loc: loc }, } } } impl<'a> TimeMessage<'a> { #[inline] pub fn init_from_table(table: flatbuffers::Table<'a>) -> Self { TimeMessage { _tab: table, } } #[allow(unused_mut)] pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>( _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>, args: &'args TimeMessageArgs<'args>) -> flatbuffers::WIPOffset<TimeMessage<'bldr>> { let mut builder = TimeMessageBuilder::new(_fbb); if let Some(x) = args.payload { builder.add_payload(x); } builder.finish() } pub const VT_PAYLOAD: flatbuffers::VOffsetT = 4; #[inline] pub fn payload(&self) -> Option<Time<'a>> { self._tab.get::<flatbuffers::ForwardsUOffset<Time<'a>>>(TimeMessage::VT_PAYLOAD, None) } } pub struct TimeMessageArgs<'a> { pub payload: Option<flatbuffers::WIPOffset<Time<'a >>>, } impl<'a> Default for TimeMessageArgs<'a> { #[inline] fn default() -> Self { TimeMessageArgs { payload: None, } } } pub struct TimeMessageBuilder<'a: 'b, 'b> { fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>, start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>, } impl<'a: 'b, 'b> TimeMessageBuilder<'a, 'b> { #[inline] pub fn add_payload(&mut self, payload: flatbuffers::WIPOffset<Time<'b >>) { self.fbb_.push_slot_always::<flatbuffers::WIPOffset<Time>>(TimeMessage::VT_PAYLOAD, payload); } #[inline] pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>) -> TimeMessageBuilder<'a, 'b> { 
let start = _fbb.start_table(); TimeMessageBuilder { fbb_: _fbb, start_: start, } } #[inline] pub fn finish(self) -> flatbuffers::WIPOffset<TimeMessage<'a>> { let o = self.fbb_.end_table(self.start_); flatbuffers::WIPOffset::new(o.value()) } } pub enum TimeOffset {} #[derive(Copy, Clone, Debug, PartialEq)] pub struct Time<'a> { pub _tab: flatbuffers::Table<'a>, } impl<'a> flatbuffers::Follow<'a> for Time<'a> { type Inner = Time<'a>; #[inline] fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { Self { _tab: flatbuffers::Table { buf: buf, loc: loc }, } } } impl<'a> Time<'a> { #[inline] pub fn init_from_table(table: flatbuffers::Table<'a>) -> Self { Time { _tab: table, } } #[allow(unused_mut)] pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>( _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>, args: &'args TimeArgs) -> flatbuffers::WIPOffset<Time<'bldr>> { let mut builder = TimeBuilder::new(_fbb); builder.add_timestamp(args.timestamp); builder.finish() } pub const VT_TIMESTAMP: flatbuffers::VOffsetT = 4; #[inline] pub fn timestamp(&self) -> u64 { self._tab.get::<u64>(Time::VT_TIMESTAMP, Some(0)).unwrap() } } pub struct TimeArgs { pub timestamp: u64, } impl<'a> Default for TimeArgs { #[inline] fn default() -> Self { TimeArgs { timestamp: 0, } } } pub struct TimeBuilder<'a: 'b, 'b> { fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>, start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>, } impl<'a: 'b, 'b> TimeBuilder<'a, 'b> { #[inline] pub fn add_timestamp(&mut self, timestamp: u64) { self.fbb_.push_slot::<u64>(Time::VT_TIMESTAMP, timestamp, 0); } #[inline] pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>) -> TimeBuilder<'a, 'b> { let start = _fbb.start_table(); TimeBuilder { fbb_: _fbb, start_: start, } } #[inline] pub fn finish(self) -> flatbuffers::WIPOffset<Time<'a>> { let o = self.fbb_.end_table(self.start_); flatbuffers::WIPOffset::new(o.value()) } } pub enum AlertMessageOffset {} #[derive(Copy, Clone, Debug, 
PartialEq)] pub struct AlertMessage<'a> { pub _tab: flatbuffers::Table<'a>, } impl<'a> flatbuffers::Follow<'a> for AlertMessage<'a> { type Inner = AlertMessage<'a>; #[inline] fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { Self { _tab: flatbuffers::Table { buf: buf, loc: loc }, } } } impl<'a> AlertMessage<'a> { #[inline] pub fn init_from_table(table: flatbuffers::Table<'a>) -> Self { AlertMessage { _tab: table, } } #[allow(unused_mut)] pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>( _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>, args: &'args AlertMessageArgs<'args>) -> flatbuffers::WIPOffset<AlertMessage<'bldr>> { let mut builder = AlertMessageBuilder::new(_fbb); if let Some(x) = args.payload { builder.add_payload(x); } builder.finish() } pub const VT_PAYLOAD: flatbuffers::VOffsetT = 4; #[inline] pub fn payload(&self) -> Option<Alert<'a>> { self._tab.get::<flatbuffers::ForwardsUOffset<Alert<'a>>>(AlertMessage::VT_PAYLOAD, None) } } pub struct AlertMessageArgs<'a> { pub payload: Option<flatbuffers::WIPOffset<Alert<'a >>>, } impl<'a> Default for AlertMessageArgs<'a> { #[inline] fn default() -> Self { AlertMessageArgs { payload: None, } } } pub struct AlertMessageBuilder<'a: 'b, 'b> { fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>, start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>, } impl<'a: 'b, 'b> AlertMessageBuilder<'a, 'b> { #[inline] pub fn add_payload(&mut self, payload: flatbuffers::WIPOffset<Alert<'b >>) { self.fbb_.push_slot_always::<flatbuffers::WIPOffset<Alert>>(AlertMessage::VT_PAYLOAD, payload); } #[inline] pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>) -> AlertMessageBuilder<'a, 'b> { let start = _fbb.start_table(); AlertMessageBuilder { fbb_: _fbb, start_: start, } } #[inline] pub fn finish(self) -> flatbuffers::WIPOffset<AlertMessage<'a>> { let o = self.fbb_.end_table(self.start_); flatbuffers::WIPOffset::new(o.value()) } } pub enum AlertOffset {} #[derive(Copy, Clone, Debug, PartialEq)] 
pub struct Alert<'a> { pub _tab: flatbuffers::Table<'a>, } impl<'a> flatbuffers::Follow<'a> for Alert<'a> { type Inner = Alert<'a>; #[inline] fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { Self { _tab: flatbuffers::Table { buf: buf, loc: loc }, } } } impl<'a> Alert<'a> { #[inline] pub fn init_from_table(table: flatbuffers::Table<'a>) -> Self { Alert { _tab: table, } } #[allow(unused_mut)] pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>( _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>, args: &'args AlertArgs<'args>) -> flatbuffers::WIPOffset<Alert<'bldr>> { let mut builder = AlertBuilder::new(_fbb); builder.add_notice_until(args.notice_until); if let Some(x) = args.message { builder.add_message(x); } if let Some(x) = args.signatures { builder.add_signatures(x); } builder.add_priority(args.priority); if let Some(x) = args.max_version { builder.add_max_version(x); } if let Some(x) = args.min_version { builder.add_min_version(x); } builder.add_cancel(args.cancel); builder.add_id(args.id); builder.finish() } pub const VT_ID: flatbuffers::VOffsetT = 4; pub const VT_CANCEL: flatbuffers::VOffsetT = 6; pub const VT_MIN_VERSION: flatbuffers::VOffsetT = 8; pub const VT_MAX_VERSION: flatbuffers::VOffsetT = 10; pub const VT_PRIORITY: flatbuffers::VOffsetT = 12; pub const VT_SIGNATURES: flatbuffers::VOffsetT = 14; pub const VT_NOTICE_UNTIL: flatbuffers::VOffsetT = 16; pub const VT_MESSAGE: flatbuffers::VOffsetT = 18; #[inline] pub fn id(&self) -> u32 { self._tab.get::<u32>(Alert::VT_ID, Some(0)).unwrap() } #[inline] pub fn cancel(&self) -> u32 { self._tab.get::<u32>(Alert::VT_CANCEL, Some(0)).unwrap() } #[inline] pub fn min_version(&self) -> Option<Bytes<'a>> { self._tab.get::<flatbuffers::ForwardsUOffset<Bytes<'a>>>(Alert::VT_MIN_VERSION, None) } #[inline] pub fn max_version(&self) -> Option<Bytes<'a>> { self._tab.get::<flatbuffers::ForwardsUOffset<Bytes<'a>>>(Alert::VT_MAX_VERSION, None) } #[inline] pub fn priority(&self) -> u32 { 
self._tab.get::<u32>(Alert::VT_PRIORITY, Some(0)).unwrap() } #[inline] pub fn signatures(&self) -> Option<flatbuffers::Vector<flatbuffers::ForwardsUOffset<Bytes<'a>>>> { self._tab.get::<flatbuffers::ForwardsUOffset<flatbuffers::Vector<flatbuffers::ForwardsUOffset<Bytes<'a>>>>>(Alert::VT_SIGNATURES, None) } #[inline] pub fn notice_until(&self) -> u64 { self._tab.get::<u64>(Alert::VT_NOTICE_UNTIL, Some(0)).unwrap() } #[inline] pub fn message(&self) -> Option<Bytes<'a>> { self._tab.get::<flatbuffers::ForwardsUOffset<Bytes<'a>>>(Alert::VT_MESSAGE, None) } } pub struct AlertArgs<'a> { pub id: u32, pub cancel: u32, pub min_version: Option<flatbuffers::WIPOffset<Bytes<'a >>>, pub max_version: Option<flatbuffers::WIPOffset<Bytes<'a >>>, pub priority: u32, pub signatures: Option<flatbuffers::WIPOffset<flatbuffers::Vector<'a , flatbuffers::ForwardsUOffset<Bytes<'a >>>>>, pub notice_until: u64, pub message: Option<flatbuffers::WIPOffset<Bytes<'a >>>, } impl<'a> Default for AlertArgs<'a> { #[inline] fn default() -> Self { AlertArgs { id: 0, cancel: 0, min_version: None, max_version: None, priority: 0, signatures: None, notice_until: 0, message: None, } } } pub struct AlertBuilder<'a: 'b, 'b> { fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>, start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>, } impl<'a: 'b, 'b> AlertBuilder<'a, 'b> { #[inline] pub fn add_id(&mut self, id: u32) { self.fbb_.push_slot::<u32>(Alert::VT_ID, id, 0); } #[inline] pub fn add_cancel(&mut self, cancel: u32) { self.fbb_.push_slot::<u32>(Alert::VT_CANCEL, cancel, 0); } #[inline] pub fn add_min_version(&mut self, min_version: flatbuffers::WIPOffset<Bytes<'b >>) { self.fbb_.push_slot_always::<flatbuffers::WIPOffset<Bytes>>(Alert::VT_MIN_VERSION, min_version); } #[inline] pub fn add_max_version(&mut self, max_version: flatbuffers::WIPOffset<Bytes<'b >>) { self.fbb_.push_slot_always::<flatbuffers::WIPOffset<Bytes>>(Alert::VT_MAX_VERSION, max_version); } #[inline] pub fn add_priority(&mut 
self, priority: u32) { self.fbb_.push_slot::<u32>(Alert::VT_PRIORITY, priority, 0); } #[inline] pub fn add_signatures(&mut self, signatures: flatbuffers::WIPOffset<flatbuffers::Vector<'b , flatbuffers::ForwardsUOffset<Bytes<'b >>>>) { self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(Alert::VT_SIGNATURES, signatures); } #[inline] pub fn add_notice_until(&mut self, notice_until: u64) { self.fbb_.push_slot::<u64>(Alert::VT_NOTICE_UNTIL, notice_until, 0); } #[inline] pub fn add_message(&mut self, message: flatbuffers::WIPOffset<Bytes<'b >>) { self.fbb_.push_slot_always::<flatbuffers::WIPOffset<Bytes>>(Alert::VT_MESSAGE, message); } #[inline] pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>) -> AlertBuilder<'a, 'b> { let start = _fbb.start_table(); AlertBuilder { fbb_: _fbb, start_: start, } } #[inline] pub fn finish(self) -> flatbuffers::WIPOffset<Alert<'a>> { let o = self.fbb_.end_table(self.start_); flatbuffers::WIPOffset::new(o.value()) } } pub enum IdentifyOffset {} #[derive(Copy, Clone, Debug, PartialEq)] pub struct Identify<'a> { pub _tab: flatbuffers::Table<'a>, } impl<'a> flatbuffers::Follow<'a> for Identify<'a> { type Inner = Identify<'a>; #[inline] fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { Self { _tab: flatbuffers::Table { buf: buf, loc: loc }, } } } impl<'a> Identify<'a> { #[inline] pub fn init_from_table(table: flatbuffers::Table<'a>) -> Self { Identify { _tab: table, } } #[allow(unused_mut)] pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>( _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>, args: &'args IdentifyArgs<'args>) -> flatbuffers::WIPOffset<Identify<'bldr>> { let mut builder = IdentifyBuilder::new(_fbb); builder.add_flag(args.flag); if let Some(x) = args.name { builder.add_name(x); } builder.finish() } pub const VT_NAME: flatbuffers::VOffsetT = 4; pub const VT_FLAG: flatbuffers::VOffsetT = 6; #[inline] pub fn name(&self) -> Option<&'a str> { 
self._tab.get::<flatbuffers::ForwardsUOffset<&str>>(Identify::VT_NAME, None) } #[inline] pub fn flag(&self) -> u64 { self._tab.get::<u64>(Identify::VT_FLAG, Some(0)).unwrap() } } pub struct IdentifyArgs<'a> { pub name: Option<flatbuffers::WIPOffset<&'a str>>, pub flag: u64, } impl<'a> Default for IdentifyArgs<'a> { #[inline] fn default() -> Self { IdentifyArgs { name: None, flag: 0, } } } pub struct IdentifyBuilder<'a: 'b, 'b> { fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>, start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>, } impl<'a: 'b, 'b> IdentifyBuilder<'a, 'b> { #[inline] pub fn add_name(&mut self, name: flatbuffers::WIPOffset<&'b str>) { self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(Identify::VT_NAME, name); } #[inline] pub fn add_flag(&mut self, flag: u64) { self.fbb_.push_slot::<u64>(Identify::VT_FLAG, flag, 0); } #[inline] pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>) -> IdentifyBuilder<'a, 'b> { let start = _fbb.start_table(); IdentifyBuilder { fbb_: _fbb, start_: start, } } #[inline] pub fn finish(self) -> flatbuffers::WIPOffset<Identify<'a>> { let o = self.fbb_.end_table(self.start_); flatbuffers::WIPOffset::new(o.value()) } } #[inline] pub fn get_root_as_sync_message<'a>(buf: &'a [u8]) -> SyncMessage<'a> { flatbuffers::get_root::<SyncMessage<'a>>(buf) } #[inline] pub fn get_size_prefixed_root_as_sync_message<'a>(buf: &'a [u8]) -> SyncMessage<'a> { flatbuffers::get_size_prefixed_root::<SyncMessage<'a>>(buf) } #[inline] pub fn finish_sync_message_buffer<'a, 'b>( fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>, root: flatbuffers::WIPOffset<SyncMessage<'a>>) { fbb.finish(root, None); } #[inline] pub fn finish_size_prefixed_sync_message_buffer<'a, 'b>(fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>, root: flatbuffers::WIPOffset<SyncMessage<'a>>) { fbb.finish_size_prefixed(root, None); } } // pub mod Protocol } // pub mod Ckb
31.543633
338
0.640018
fe2ba914b5d8a9b7df638a18c0b272669a2cb1bd
4,947
// Copyright (c) The Diem Core Contributors // SPDX-License-Identifier: Apache-2.0 #![forbid(unsafe_code)] use anyhow::{anyhow, bail, format_err, Result}; use diem_logger::{info, warn}; use rusoto_autoscaling::{ AutoScalingGroupNamesType, Autoscaling, AutoscalingClient, SetDesiredCapacityType, }; use rusoto_core::Region; use rusoto_sts::WebIdentityProvider; /// set_asg_size sets the size of the given autoscaling group #[allow(clippy::collapsible_if)] pub async fn set_asg_size( desired_capacity: i64, buffer_percent: f64, asg_name: &str, wait_for_completion: bool, scaling_down: bool, ) -> Result<()> { let buffer = if scaling_down { 0 } else { ((desired_capacity as f64 * buffer_percent) / 100_f64).ceil() as i64 }; info!( "Scaling to desired_capacity : {}, buffer: {}, asg_name: {}", desired_capacity, buffer, asg_name ); let set_desired_capacity_type = SetDesiredCapacityType { auto_scaling_group_name: asg_name.to_string(), desired_capacity: desired_capacity + buffer, honor_cooldown: Some(false), }; let credentials_provider = WebIdentityProvider::from_k8s_env(); let dispatcher = rusoto_core::HttpClient::new() .map_err(|e| anyhow!("Failed to create request dispatcher, met Error:{}", e))?; let asc = AutoscalingClient::new_with(dispatcher, credentials_provider, Region::UsWest2); diem_retrier::retry_async(diem_retrier::fixed_retry_strategy(10_000, 60), || { let asc = asc.clone(); let set_desired_capacity_type = set_desired_capacity_type.clone(); Box::pin(async move { asc.set_desired_capacity(set_desired_capacity_type) .await .map_err(|e| { warn!("set_desired_capacity failed: {}, retrying", e); format_err!("set_desired_capacity failed: {}", e) }) }) }) .await?; if !wait_for_completion { return Ok(()); } diem_retrier::retry_async(diem_retrier::fixed_retry_strategy(10_000, 60), || { let asc_clone = asc.clone(); Box::pin(async move { let mut total = 0; let mut current_token = None; loop { let current_token_clone = current_token.clone(); let auto_scaling_group_names_type = 
AutoScalingGroupNamesType { auto_scaling_group_names: Some(vec![asg_name.to_string()]), // https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_DescribeAutoScalingGroups.html // max value is 100 max_records: Some(100), next_token: current_token_clone, }; let asgs = asc_clone .describe_auto_scaling_groups(auto_scaling_group_names_type) .await?; if asgs.auto_scaling_groups.is_empty() { bail!("asgs.auto_scaling_groups.is_empty()"); } let asg = &asgs.auto_scaling_groups[0]; if scaling_down { total += asg .instances .clone() .ok_or_else(|| format_err!("instances not found for auto_scaling_group"))? .len() as i64; } else { total += asg .instances .clone() .ok_or_else(|| format_err!("instances not found for auto_scaling_group"))? .iter() .filter(|instance| instance.lifecycle_state == "InService") .count() as i64; } if asgs.next_token.is_none() { break; } current_token = asgs.next_token; } info!( "Waiting for scaling to complete. Current size: {}, Min Desired Size: {}", total, desired_capacity ); if scaling_down { if total > desired_capacity { bail!( "Waiting for scale-down to complete. Current size: {}, Min Desired Size: {}", total, desired_capacity ); } else { info!("Scale down completed"); Ok(()) } } else { if total < desired_capacity { bail!( "Waiting for scale-up to complete. Current size: {}, Min Desired Size: {}", total, desired_capacity ); } else { info!("Scale up completed"); Ok(()) } } }) }) .await }
38.053846
114
0.519507
26905f3a5bd0484af177c8357367a67d0477bce7
166
pub mod contract; pub mod msg; pub mod state; #[cfg(test)] mod testing; #[cfg(target_arch = "wasm32")] cosmwasm_std::create_entry_points_with_migration!(contract);
16.6
60
0.753012
f5a227577176d9815acc9fb05719911211154970
358
use aoc::Result; use aoc_2019_day_16::*; #[test] fn part_one_answer() -> Result<()> { let input = include_str!("../input/input.txt"); assert_eq!(part_one(input)?, input.len()); Ok(()) } #[test] fn part_two_answer() -> Result<()> { let input = include_str!("../input/input.txt"); assert_eq!(part_two(input)?, input.len()); Ok(()) }
19.888889
51
0.592179
4a810a0116e9b6a41cb6e830b035fd12d467691e
252
use rocket_contrib::json::JsonValue; use std::fs; pub fn get_projects() -> Option<JsonValue> { Some(JsonValue( serde_json::from_str( &fs::read_to_string("templates/projects.json").ok()?[..] ).ok()? )) }
22.909091
68
0.563492
8955c3cb93e971f85c3637bb4b89df09fe50c0a1
1,690
// Copyright (c) The Diem Core Contributors // SPDX-License-Identifier: Apache-2.0 use crate::{ access_path::AccessPath, account_config::constants::{xus_tag, ACCOUNT_MODULE_NAME, CORE_CODE_ADDRESS}, }; use move_core_types::{ language_storage::{StructTag, TypeTag}, move_resource::MoveResource, }; #[cfg(any(test, feature = "fuzzing"))] use proptest_derive::Arbitrary; use serde::{Deserialize, Serialize}; use std::vec::Vec; /// The balance resource held under an account. #[derive(Debug, Serialize, Deserialize)] #[cfg_attr(any(test, feature = "fuzzing"), derive(Arbitrary))] pub struct BalanceResource { coin: u64, } impl BalanceResource { pub fn new(coin: u64) -> Self { Self { coin } } pub fn coin(&self) -> u64 { self.coin } // TODO/XXX: remove this once the MoveResource trait allows type arguments to `struct_tag`. pub fn struct_tag_for_currency(currency_typetag: TypeTag) -> StructTag { StructTag { address: CORE_CODE_ADDRESS, name: BalanceResource::struct_identifier(), module: BalanceResource::module_identifier(), type_params: vec![currency_typetag], } } // TODO: remove this once the MoveResource trait allows type arguments to `resource_path`. pub fn access_path_for(currency_typetag: TypeTag) -> Vec<u8> { AccessPath::resource_access_vec(BalanceResource::struct_tag_for_currency(currency_typetag)) } } impl MoveResource for BalanceResource { const MODULE_NAME: &'static str = ACCOUNT_MODULE_NAME; const STRUCT_NAME: &'static str = "Balance"; fn type_params() -> Vec<TypeTag> { vec![xus_tag()] } }
29.649123
99
0.680473
2824da33676b778bf197edd9d7fa92692baf0477
14,007
use crate::{ container::ContainerProcessState, process::{args::ContainerArgs, channel, container_intermediate_process, fork}, rootless::Rootless, seccomp, utils, }; use anyhow::{Context, Result}; use nix::{ sys::{socket, uio}, unistd::{self, Pid}, }; use oci_spec::runtime; use std::path::Path; pub fn container_main_process(container_args: &ContainerArgs) -> Result<Pid> { // We use a set of channels to communicate between parent and child process. // Each channel is uni-directional. Because we will pass these channel to // forked process, we have to be deligent about closing any unused channel. // At minimum, we have to close down any unused senders. The corresponding // receivers will be cleaned up once the senders are closed down. let (main_sender, main_receiver) = &mut channel::main_channel()?; let (intermediate_sender, intermediate_receiver) = &mut channel::intermediate_channel()?; let (init_sender, init_receiver) = &mut channel::init_channel()?; let intermediate_pid = fork::container_fork(|| { container_intermediate_process::container_intermediate_process( container_args, intermediate_sender, intermediate_receiver, init_sender, init_receiver, main_sender, ) })?; // Close down unused fds. The corresponding fds are duplicated to the // child process during fork. main_sender .close() .context("failed to close unused sender")?; // If creating a rootless container, the intermediate process will ask // the main process to set up uid and gid mapping, once the intermediate // process enters into a new user namespace. if let Some(rootless) = &container_args.rootless { main_receiver.wait_for_mapping_request()?; setup_mapping(rootless, intermediate_pid)?; intermediate_sender.mapping_written()?; } // At this point, we don't need to send any message to intermediate process anymore, // so we want to close this sender at the earliest point. 
intermediate_sender .close() .context("failed to close unused intermediate sender")?; // The intermediate process will send the init pid once it forks the init // process. The intermediate process should exit after this point. let init_pid = main_receiver.wait_for_intermediate_ready()?; if let Some(linux) = container_args.spec.linux() { if let Some(seccomp) = linux.seccomp() { let state = ContainerProcessState { oci_version: container_args.spec.version().to_string(), // runc hardcode the `seccompFd` name for fds. fds: vec![String::from("seccompFd")], pid: init_pid.as_raw(), metadata: seccomp.listener_metadata().to_owned().unwrap_or_default(), state: container_args .container .as_ref() .context("container state is required")? .state .clone(), }; sync_seccomp(seccomp, &state, init_sender, main_receiver) .context("failed to sync seccomp with init")?; } } // We don't need to send anything to the init process after this point, so // close the sender. init_sender .close() .context("failed to close unused init sender")?; main_receiver .wait_for_init_ready() .context("failed to wait for init ready")?; log::debug!("init pid is {:?}", init_pid); Ok(init_pid) } fn sync_seccomp( seccomp: &runtime::LinuxSeccomp, state: &ContainerProcessState, init_sender: &mut channel::InitSender, main_receiver: &mut channel::MainReceiver, ) -> Result<()> { if seccomp::is_notify(seccomp) { log::debug!("main process waiting for sync seccomp"); let seccomp_fd = main_receiver.wait_for_seccomp_request()?; let listener_path = seccomp .listener_path() .as_ref() .context("notify will require seccomp listener path to be set")?; let encoded_state = serde_json::to_vec(state).context("failed to encode container process state")?; sync_seccomp_send_msg(listener_path, &encoded_state, seccomp_fd) .context("failed to send msg to seccomp listener")?; init_sender.seccomp_notify_done()?; // Once we sent the seccomp notify fd to the seccomp listener, we can // safely close the fd. 
The SCM_RIGHTS msg will duplicate the fd to the // process on the other end of the listener. let _ = unistd::close(seccomp_fd); } Ok(()) } fn sync_seccomp_send_msg(listener_path: &Path, msg: &[u8], fd: i32) -> Result<()> { // The seccomp listener has specific instructions on how to transmit the // information through seccomp listener. Therefore, we have to use // libc/nix APIs instead of Rust std lib APIs to maintain flexibility. let socket = socket::socket( socket::AddressFamily::Unix, socket::SockType::Stream, socket::SockFlag::empty(), None, ) .context("failed to create unix domain socket for seccomp listener")?; let unix_addr = socket::SockAddr::new_unix(listener_path).context("failed to create unix addr")?; socket::connect(socket, &unix_addr).with_context(|| { format!( "failed to connect to seccomp notify listerner path: {:?}", listener_path ) })?; // We have to use sendmsg here because the spec requires us to send seccomp notify fds through // SCM_RIGHTS message. // Ref: https://man7.org/linux/man-pages/man3/sendmsg.3p.html // Ref: https://man7.org/linux/man-pages/man3/cmsg.3.html let iov = [uio::IoVec::from_slice(msg)]; let fds = [fd]; let cmsgs = socket::ControlMessage::ScmRights(&fds); socket::sendmsg(socket, &iov, &[cmsgs], socket::MsgFlags::empty(), None) .context("failed to write container state to seccomp listener")?; // The spec requires the listener socket to be closed immediately after sending. let _ = unistd::close(socket); Ok(()) } fn setup_mapping(rootless: &Rootless, pid: Pid) -> Result<()> { log::debug!("write mapping for pid {:?}", pid); if !rootless.privileged { // The main process is running as an unprivileged user and cannot write the mapping // until "deny" has been written to setgroups. See CVE-2014-8989. 
utils::write_file(format!("/proc/{}/setgroups", pid), "deny")?; } rootless .write_uid_mapping(pid) .context(format!("failed to map uid of pid {}", pid))?; rootless .write_gid_mapping(pid) .context(format!("failed to map gid of pid {}", pid))?; Ok(()) } #[cfg(test)] mod tests { use super::*; use crate::process::channel::{intermediate_channel, main_channel}; use crate::rootless::{get_gid_path, get_uid_path}; use nix::{ sched::{unshare, CloneFlags}, unistd::{self, getgid, getuid}, }; use oci_spec::runtime::{ LinuxIdMappingBuilder, LinuxSeccompAction, LinuxSeccompBuilder, LinuxSyscallBuilder, }; use serial_test::serial; use std::fs; use crate::utils::TempDir; #[test] #[serial] fn setup_uid_mapping_should_succeed() -> Result<()> { let uid_mapping = LinuxIdMappingBuilder::default() .host_id(getuid()) .container_id(0u32) .size(1u32) .build()?; let uid_mappings = vec![uid_mapping]; let rootless = Rootless { uid_mappings: Some(&uid_mappings), privileged: true, ..Default::default() }; let (mut parent_sender, mut parent_receiver) = main_channel()?; let (mut child_sender, mut child_receiver) = intermediate_channel()?; match unsafe { unistd::fork()? 
} { unistd::ForkResult::Parent { child } => { parent_receiver.wait_for_mapping_request()?; parent_receiver.close()?; let tempdir = TempDir::new(get_uid_path(&child).parent().unwrap())?; let uid_map_path = tempdir.join("uid_map"); let _ = fs::File::create(&uid_map_path)?; let tempdir = TempDir::new(get_gid_path(&child).parent().unwrap())?; let gid_map_path = tempdir.join("gid_map"); let _ = fs::File::create(&gid_map_path)?; setup_mapping(&rootless, child)?; let line = fs::read_to_string(uid_map_path)?; let line_splited = line.split_whitespace(); for (act, expect) in line_splited.zip([ uid_mapping.container_id().to_string(), uid_mapping.host_id().to_string(), uid_mapping.size().to_string(), ]) { assert_eq!(act, expect); } child_sender.mapping_written()?; child_sender.close()?; } unistd::ForkResult::Child => { prctl::set_dumpable(true).unwrap(); unshare(CloneFlags::CLONE_NEWUSER)?; parent_sender.identifier_mapping_request()?; parent_sender.close()?; child_receiver.wait_for_mapping_ack()?; child_receiver.close()?; std::process::exit(0); } } Ok(()) } #[test] #[serial] fn setup_gid_mapping_should_successed() -> Result<()> { let gid_mapping = LinuxIdMappingBuilder::default() .host_id(getgid()) .container_id(0u32) .size(1u32) .build()?; let gid_mappings = vec![gid_mapping]; let rootless = Rootless { gid_mappings: Some(&gid_mappings), ..Default::default() }; let (mut parent_sender, mut parent_receiver) = main_channel()?; let (mut child_sender, mut child_receiver) = intermediate_channel()?; match unsafe { unistd::fork()? 
} { unistd::ForkResult::Parent { child } => { parent_receiver.wait_for_mapping_request()?; parent_receiver.close()?; let tempdir = TempDir::new(get_uid_path(&child).parent().unwrap())?; let uid_map_path = tempdir.join("uid_map"); let _ = fs::File::create(&uid_map_path)?; let tempdir = TempDir::new(get_gid_path(&child).parent().unwrap())?; let gid_map_path = tempdir.join("gid_map"); let _ = fs::File::create(&gid_map_path)?; setup_mapping(&rootless, child)?; let line = fs::read_to_string(gid_map_path)?; let line_splited = line.split_whitespace(); for (act, expect) in line_splited.zip([ gid_mapping.container_id().to_string(), gid_mapping.host_id().to_string(), gid_mapping.size().to_string(), ]) { assert_eq!(act, expect); } assert_eq!( fs::read_to_string(format!("/proc/{}/setgroups", child.as_raw()))?, "deny\n", ); child_sender.mapping_written()?; child_sender.close()?; } unistd::ForkResult::Child => { prctl::set_dumpable(true).unwrap(); unshare(CloneFlags::CLONE_NEWUSER)?; parent_sender.identifier_mapping_request()?; parent_sender.close()?; child_receiver.wait_for_mapping_ack()?; child_receiver.close()?; std::process::exit(0); } } Ok(()) } #[test] #[serial] fn test_sync_seccomp() -> Result<()> { use std::io::Read; use std::os::unix::io::IntoRawFd; use std::os::unix::net::UnixListener; use std::thread; use utils::create_temp_dir; let tmp_dir = create_temp_dir("test_sync_seccomp")?; let scmp_file = std::fs::OpenOptions::new() .write(true) .create(true) .open(tmp_dir.path().join("scmp_file"))?; std::fs::OpenOptions::new() .write(true) .create(true) .open(tmp_dir.path().join("socket_file.sock"))?; let (mut main_sender, mut main_receiver) = channel::main_channel()?; let (mut init_sender, mut init_receiver) = channel::init_channel()?; let socket_path = tmp_dir.path().join("socket_file.sock"); let socket_path_seccomp_th = socket_path.clone(); let state = ContainerProcessState::default(); let want = serde_json::to_string(&state)?; let th = thread::spawn(move || { 
sync_seccomp( &LinuxSeccompBuilder::default() .listener_path(socket_path_seccomp_th) .syscalls(vec![LinuxSyscallBuilder::default() .action(LinuxSeccompAction::ScmpActNotify) .build() .unwrap()]) .build() .unwrap(), &state, &mut init_sender, &mut main_receiver, ) .unwrap(); }); let fd = scmp_file.into_raw_fd(); assert!(main_sender.seccomp_notify_request(fd).is_ok()); fs::remove_file(socket_path.clone())?; let lis = UnixListener::bind(socket_path)?; let (mut socket, _) = lis.accept()?; let mut got = String::new(); socket.read_to_string(&mut got)?; assert!(init_receiver.wait_for_seccomp_request_done().is_ok()); assert_eq!(want, got); assert!(th.join().is_ok()); Ok(()) } }
38.69337
98
0.581852
8a0ed9125f8bbd46b654876f0df07a0971846cd2
1,595
use crate::{ tendermint::{types::*, Client}, ErrorKind, Result, }; use chain_core::state::ChainState; /// `Client` which returns `PermissionDenied` error for each function call. #[derive(Debug, Default, Clone, Copy)] pub struct UnauthorizedClient; impl Client for UnauthorizedClient { fn genesis(&self) -> Result<Genesis> { Err(ErrorKind::PermissionDenied.into()) } fn status(&self) -> Result<StatusResponse> { Err(ErrorKind::PermissionDenied.into()) } fn block(&self, _height: u64) -> Result<Block> { Err(ErrorKind::PermissionDenied.into()) } fn block_batch<'a, T: Iterator<Item = &'a u64>>(&self, _heights: T) -> Result<Vec<Block>> { Err(ErrorKind::PermissionDenied.into()) } fn block_results(&self, _height: u64) -> Result<BlockResultsResponse> { Err(ErrorKind::PermissionDenied.into()) } fn block_results_batch<'a, T: Iterator<Item = &'a u64>>( &self, _heights: T, ) -> Result<Vec<BlockResultsResponse>> { Err(ErrorKind::PermissionDenied.into()) } fn broadcast_transaction(&self, _transaction: &[u8]) -> Result<BroadcastTxResponse> { Err(ErrorKind::PermissionDenied.into()) } fn query( &self, _path: &str, _data: &[u8], _height: Option<Height>, _prove: bool, ) -> Result<AbciQuery> { Err(ErrorKind::PermissionDenied.into()) } fn query_state_batch<T: Iterator<Item = u64>>(&self, _heights: T) -> Result<Vec<ChainState>> { Err(ErrorKind::PermissionDenied.into()) } }
27.982456
98
0.62069
2942438b42c0ba7248a229936f4374bf354efc7a
4,065
pub mod bitpacker; mod bitset; mod composite_file; mod counting_writer; mod serialize; mod vint; pub use self::bitset::BitSet; pub(crate) use self::bitset::TinySet; pub(crate) use self::composite_file::{CompositeFile, CompositeWrite}; pub use self::counting_writer::CountingWriter; pub use self::serialize::{BinarySerializable, FixedSize}; pub use self::vint::VInt; pub use byteorder::LittleEndian as Endianness; use std::io; /// Computes the number of bits that will be used for bitpacking. /// /// In general the target is the minimum number of bits /// required to express the amplitude given in argument. /// /// e.g. If the amplitude is 10, we can store all ints on simply 4bits. /// /// The logic is slightly more convoluted here as for optimization /// reasons, we want to ensure that a value spawns over at most 8 bytes /// of aligns bytes. /// /// Spanning over 9 bytes is possible for instance, if we do /// bitpacking with an amplitude of 63 bits. /// In this case, the second int will start on bit /// 63 (which belongs to byte 7) and ends at byte 15; /// Hence 9 bytes (from byte 7 to byte 15 included). /// /// To avoid this, we force the number of bits to 64bits /// when the result is greater than `64-8 = 56 bits`. /// /// Note that this only affects rare use cases spawning over /// a very large range of values. Even in this case, it results /// in an extra cost of at most 12% compared to the optimal /// number of bits. pub(crate) fn compute_num_bits(n: u64) -> u8 { let amplitude = (64u32 - n.leading_zeros()) as u8; if amplitude <= 64 - 8 { amplitude } else { 64 } } pub(crate) fn is_power_of_2(n: usize) -> bool { (n > 0) && (n & (n - 1) == 0) } /// Create a default io error given a string. pub(crate) fn make_io_err(msg: String) -> io::Error { io::Error::new(io::ErrorKind::Other, msg) } /// Has length trait pub trait HasLen { /// Return length fn len(&self) -> usize; /// Returns true iff empty. 
fn is_empty(&self) -> bool { self.len() == 0 } } const HIGHEST_BIT: u64 = 1 << 63; /// Maps a `i64` to `u64` /// /// For simplicity, tantivy internally handles `i64` as `u64`. /// The mapping is defined by this function. /// /// Maps `i64` to `u64` so that /// `-2^63 .. 2^63-1` is mapped /// to /// `0 .. 2^64-1` /// in that order. /// /// This is more suited than simply casting (`val as u64`) /// because of bitpacking. /// /// Imagine a list of `i64` ranging from -10 to 10. /// When casting negative values, the negative values are projected /// to values over 2^63, and all values end up requiring 64 bits. /// /// # See also /// The [reverse mapping is `u64_to_i64`](./fn.u64_to_i64.html). #[inline(always)] pub fn i64_to_u64(val: i64) -> u64 { (val as u64) ^ HIGHEST_BIT } /// Reverse the mapping given by [`i64_to_u64`](./fn.i64_to_u64.html). #[inline(always)] pub fn u64_to_i64(val: u64) -> i64 { (val ^ HIGHEST_BIT) as i64 } #[cfg(test)] pub(crate) mod test { pub use super::serialize::test::fixed_size_test; use super::{compute_num_bits, i64_to_u64, u64_to_i64}; fn test_i64_converter_helper(val: i64) { assert_eq!(u64_to_i64(i64_to_u64(val)), val); } #[test] fn test_i64_converter() { assert_eq!(i64_to_u64(i64::min_value()), u64::min_value()); assert_eq!(i64_to_u64(i64::max_value()), u64::max_value()); test_i64_converter_helper(0i64); test_i64_converter_helper(i64::min_value()); test_i64_converter_helper(i64::max_value()); for i in -1000i64..1000i64 { test_i64_converter_helper(i); } } #[test] fn test_compute_num_bits() { assert_eq!(compute_num_bits(1), 1u8); assert_eq!(compute_num_bits(0), 0u8); assert_eq!(compute_num_bits(2), 2u8); assert_eq!(compute_num_bits(3), 2u8); assert_eq!(compute_num_bits(4), 3u8); assert_eq!(compute_num_bits(255), 8u8); assert_eq!(compute_num_bits(256), 9u8); assert_eq!(compute_num_bits(5_000_000_000), 33u8); } }
29.456522
71
0.654613
76744f112e606e0420e09b3e11184646db116b22
65,670
//! //! Take an AST and transform it into bytecode //! //! Inspirational code: //! https://github.com/python/cpython/blob/master/Python/compile.c //! https://github.com/micropython/micropython/blob/master/py/compile.c use crate::bytecode::{self, CallType, CodeObject, Instruction, Varargs}; use crate::error::{CompileError, CompileErrorType}; use crate::obj::objcode; use crate::obj::objcode::PyCodeRef; use crate::pyobject::PyValue; use crate::symboltable::{make_symbol_table, statements_to_symbol_table, SymbolRole, SymbolScope}; use crate::VirtualMachine; use num_complex::Complex64; use rustpython_parser::{ast, parser}; struct Compiler { code_object_stack: Vec<CodeObject>, scope_stack: Vec<SymbolScope>, nxt_label: usize, source_path: Option<String>, current_source_location: ast::Location, current_qualified_path: Option<String>, in_loop: bool, in_function_def: bool, } /// Compile a given sourcecode into a bytecode object. pub fn compile( vm: &VirtualMachine, source: &str, mode: &Mode, source_path: String, ) -> Result<PyCodeRef, CompileError> { let mut compiler = Compiler::new(); compiler.source_path = Some(source_path); compiler.push_new_code_object("<module>".to_string()); match mode { Mode::Exec => { let ast = parser::parse_program(source)?; let symbol_table = make_symbol_table(&ast)?; compiler.compile_program(&ast, symbol_table) } Mode::Eval => { let statement = parser::parse_statement(source)?; let symbol_table = statements_to_symbol_table(&statement)?; compiler.compile_statement_eval(&statement, symbol_table) } Mode::Single => { let ast = parser::parse_program(source)?; let symbol_table = make_symbol_table(&ast)?; compiler.compile_program_single(&ast, symbol_table) } }?; let code = compiler.pop_code_object(); trace!("Compilation completed: {:?}", code); Ok(objcode::PyCode::new(code).into_ref(vm)) } pub enum Mode { Exec, Eval, Single, } #[derive(Clone, Copy)] enum EvalContext { Statement, Expression, } type Label = usize; impl Compiler { fn new() -> Self { Compiler 
{
            // Fresh compiler: empty stacks, label counter at zero, no
            // source path yet, and not inside any loop or function.
            code_object_stack: Vec::new(),
            scope_stack: Vec::new(),
            nxt_label: 0,
            source_path: None,
            current_source_location: ast::Location::default(),
            current_qualified_path: None,
            in_loop: false,
            in_function_def: false,
        }
    }

    /// Push a fresh, parameterless code object named `obj_name` onto
    /// the stack; subsequent instructions are emitted into it.
    fn push_new_code_object(&mut self, obj_name: String) {
        let line_number = self.get_source_line_number();
        self.code_object_stack.push(CodeObject::new(
            Vec::new(),
            Varargs::None,
            Vec::new(),
            Varargs::None,
            // `source_path` must have been set by the caller (done in
            // `compile`) before any code object is created.
            self.source_path.clone().unwrap(),
            line_number,
            obj_name,
        ));
    }

    /// Pop and return the innermost finished code object.
    fn pop_code_object(&mut self) -> CodeObject {
        // self.scope_stack.pop().unwrap();
        self.code_object_stack.pop().unwrap()
    }

    /// Compile a whole program (Mode::Exec). The module implicitly
    /// returns `None` at the end.
    fn compile_program(
        &mut self,
        program: &ast::Program,
        symbol_scope: SymbolScope,
    ) -> Result<(), CompileError> {
        let size_before = self.code_object_stack.len();
        self.scope_stack.push(symbol_scope);
        self.compile_statements(&program.statements)?;
        // Sanity check: every pushed code object must have been popped.
        assert!(self.code_object_stack.len() == size_before);
        // Emit None at end:
        self.emit(Instruction::LoadConst {
            value: bytecode::Constant::None,
        });
        self.emit(Instruction::ReturnValue);
        Ok(())
    }

    /// Compile a program in interactive mode (Mode::Single): top-level
    /// expression statements print their result instead of being
    /// discarded.
    fn compile_program_single(
        &mut self,
        program: &ast::Program,
        symbol_scope: SymbolScope,
    ) -> Result<(), CompileError> {
        self.scope_stack.push(symbol_scope);
        for statement in &program.statements {
            if let ast::Statement::Expression { ref expression } = statement.node {
                self.compile_expression(expression)?;
                self.emit(Instruction::PrintExpr);
            } else {
                self.compile_statement(&statement)?;
            }
        }
        self.emit(Instruction::LoadConst {
            value: bytecode::Constant::None,
        });
        self.emit(Instruction::ReturnValue);
        Ok(())
    }

    // Compile statement in eval mode:
    // Only expression statements are allowed; anything else raises
    // `CompileErrorType::ExpectExpr`. The (last) expression's value is
    // returned.
    fn compile_statement_eval(
        &mut self,
        statements: &[ast::LocatedStatement],
        symbol_table: SymbolScope,
    ) -> Result<(), CompileError> {
        self.scope_stack.push(symbol_table);
        for statement in statements {
            if let ast::Statement::Expression { ref expression } = statement.node {
                self.compile_expression(expression)?;
            } else {
                return Err(CompileError {
                    error: CompileErrorType::ExpectExpr,
                    location:
statement.location.clone(), }); } } self.emit(Instruction::ReturnValue); Ok(()) } fn compile_statements( &mut self, statements: &[ast::LocatedStatement], ) -> Result<(), CompileError> { for statement in statements { self.compile_statement(statement)? } Ok(()) } fn scope_for_name(&self, name: &str) -> bytecode::NameScope { let role = self.lookup_name(name); match role { SymbolRole::Global => bytecode::NameScope::Global, SymbolRole::Nonlocal => bytecode::NameScope::NonLocal, _ => bytecode::NameScope::Local, } } fn load_name(&mut self, name: &str) { let scope = self.scope_for_name(name); self.emit(Instruction::LoadName { name: name.to_string(), scope, }); } fn store_name(&mut self, name: &str) { let scope = self.scope_for_name(name); self.emit(Instruction::StoreName { name: name.to_string(), scope, }); } fn compile_statement(&mut self, statement: &ast::LocatedStatement) -> Result<(), CompileError> { trace!("Compiling {:?}", statement); self.set_source_location(&statement.location); match &statement.node { ast::Statement::Import { import_parts } => { for ast::SingleImport { module, symbol, alias, } in import_parts { match symbol { Some(name) if name == "*" => { self.emit(Instruction::ImportStar { name: module.clone(), }); } _ => { self.emit(Instruction::Import { name: module.clone(), symbol: symbol.clone(), }); let name = match alias { Some(alias) => alias.clone(), None => match symbol { Some(symbol) => symbol.clone(), None => module.clone(), }, }; self.store_name(&name); } } } } ast::Statement::Expression { expression } => { self.compile_expression(expression)?; // Pop result of stack, since we not use it: self.emit(Instruction::Pop); } ast::Statement::Global { .. } | ast::Statement::Nonlocal { .. } => { // Handled during symbol table construction. 
} ast::Statement::If { test, body, orelse } => { let end_label = self.new_label(); match orelse { None => { // Only if: self.compile_test(test, None, Some(end_label), EvalContext::Statement)?; self.compile_statements(body)?; self.set_label(end_label); } Some(statements) => { // if - else: let else_label = self.new_label(); self.compile_test(test, None, Some(else_label), EvalContext::Statement)?; self.compile_statements(body)?; self.emit(Instruction::Jump { target: end_label }); // else: self.set_label(else_label); self.compile_statements(statements)?; } } self.set_label(end_label); } ast::Statement::While { test, body, orelse } => { let start_label = self.new_label(); let else_label = self.new_label(); let end_label = self.new_label(); self.emit(Instruction::SetupLoop { start: start_label, end: end_label, }); self.set_label(start_label); self.compile_test(test, None, Some(else_label), EvalContext::Statement)?; let was_in_loop = self.in_loop; self.in_loop = true; self.compile_statements(body)?; self.in_loop = was_in_loop; self.emit(Instruction::Jump { target: start_label, }); self.set_label(else_label); self.emit(Instruction::PopBlock); if let Some(orelse) = orelse { self.compile_statements(orelse)?; } self.set_label(end_label); } ast::Statement::With { items, body } => { let end_label = self.new_label(); for item in items { self.compile_expression(&item.context_expr)?; self.emit(Instruction::SetupWith { end: end_label }); match &item.optional_vars { Some(var) => { self.compile_store(var)?; } None => { self.emit(Instruction::Pop); } } } self.compile_statements(body)?; for _ in 0..items.len() { self.emit(Instruction::CleanupWith { end: end_label }); } self.set_label(end_label); } ast::Statement::For { target, iter, body, orelse, } => self.compile_for(target, iter, body, orelse)?, ast::Statement::Raise { exception, cause } => match exception { Some(value) => { self.compile_expression(value)?; match cause { Some(cause) => { self.compile_expression(cause)?; 
self.emit(Instruction::Raise { argc: 2 }); } None => { self.emit(Instruction::Raise { argc: 1 }); } } } None => { self.emit(Instruction::Raise { argc: 0 }); } }, ast::Statement::Try { body, handlers, orelse, finalbody, } => self.compile_try_statement(body, handlers, orelse, finalbody)?, ast::Statement::FunctionDef { name, args, body, decorator_list, returns, } => self.compile_function_def(name, args, body, decorator_list, returns)?, ast::Statement::ClassDef { name, body, bases, keywords, decorator_list, } => self.compile_class_def(name, body, bases, keywords, decorator_list)?, ast::Statement::Assert { test, msg } => { // TODO: if some flag, ignore all assert statements! let end_label = self.new_label(); self.compile_test(test, Some(end_label), None, EvalContext::Statement)?; self.emit(Instruction::LoadName { name: String::from("AssertionError"), scope: bytecode::NameScope::Local, }); match msg { Some(e) => { self.compile_expression(e)?; self.emit(Instruction::CallFunction { typ: CallType::Positional(1), }); } None => { self.emit(Instruction::CallFunction { typ: CallType::Positional(0), }); } } self.emit(Instruction::Raise { argc: 1 }); self.set_label(end_label); } ast::Statement::Break => { if !self.in_loop { return Err(CompileError { error: CompileErrorType::InvalidBreak, location: statement.location.clone(), }); } self.emit(Instruction::Break); } ast::Statement::Continue => { if !self.in_loop { return Err(CompileError { error: CompileErrorType::InvalidContinue, location: statement.location.clone(), }); } self.emit(Instruction::Continue); } ast::Statement::Return { value } => { if !self.in_function_def { return Err(CompileError { error: CompileErrorType::InvalidReturn, location: statement.location.clone(), }); } match value { Some(v) => { self.compile_expression(v)?; } None => { self.emit(Instruction::LoadConst { value: bytecode::Constant::None, }); } } self.emit(Instruction::ReturnValue); } ast::Statement::Assign { targets, value } => { 
self.compile_expression(value)?; for (i, target) in targets.iter().enumerate() { if i + 1 != targets.len() { self.emit(Instruction::Duplicate); } self.compile_store(target)?; } } ast::Statement::AugAssign { target, op, value } => { self.compile_expression(target)?; self.compile_expression(value)?; // Perform operation: self.compile_op(op, true); self.compile_store(target)?; } ast::Statement::Delete { targets } => { for target in targets { self.compile_delete(target)?; } } ast::Statement::Pass => { self.emit(Instruction::Pass); } } Ok(()) } fn compile_delete(&mut self, expression: &ast::Expression) -> Result<(), CompileError> { match expression { ast::Expression::Identifier { name } => { self.emit(Instruction::DeleteName { name: name.to_string(), }); } ast::Expression::Attribute { value, name } => { self.compile_expression(value)?; self.emit(Instruction::DeleteAttr { name: name.to_string(), }); } ast::Expression::Subscript { a, b } => { self.compile_expression(a)?; self.compile_expression(b)?; self.emit(Instruction::DeleteSubscript); } ast::Expression::Tuple { elements } => { for element in elements { self.compile_delete(element)?; } } _ => { return Err(CompileError { error: CompileErrorType::Delete(expression.name()), location: self.current_source_location.clone(), }); } } Ok(()) } fn enter_function( &mut self, name: &str, args: &ast::Parameters, ) -> Result<bytecode::FunctionOpArg, CompileError> { let have_defaults = !args.defaults.is_empty(); if have_defaults { // Construct a tuple: let size = args.defaults.len(); for element in &args.defaults { self.compile_expression(element)?; } self.emit(Instruction::BuildTuple { size, unpack: false, }); } let mut num_kw_only_defaults = 0; for (kw, default) in args.kwonlyargs.iter().zip(&args.kw_defaults) { if let Some(default) = default { self.emit(Instruction::LoadConst { value: bytecode::Constant::String { value: kw.arg.clone(), }, }); self.compile_expression(default)?; num_kw_only_defaults += 1; } } if 
num_kw_only_defaults > 0 { self.emit(Instruction::BuildMap { size: num_kw_only_defaults, unpack: false, }); } let line_number = self.get_source_line_number(); self.code_object_stack.push(CodeObject::new( args.args.iter().map(|a| a.arg.clone()).collect(), Varargs::from(&args.vararg), args.kwonlyargs.iter().map(|a| a.arg.clone()).collect(), Varargs::from(&args.kwarg), self.source_path.clone().unwrap(), line_number, name.to_string(), )); self.enter_scope(); let mut flags = bytecode::FunctionOpArg::empty(); if have_defaults { flags |= bytecode::FunctionOpArg::HAS_DEFAULTS; } if num_kw_only_defaults > 0 { flags |= bytecode::FunctionOpArg::HAS_KW_ONLY_DEFAULTS; } Ok(flags) } fn prepare_decorators( &mut self, decorator_list: &[ast::Expression], ) -> Result<(), CompileError> { for decorator in decorator_list { self.compile_expression(decorator)?; } Ok(()) } fn apply_decorators(&mut self, decorator_list: &[ast::Expression]) { // Apply decorators: for _ in decorator_list { self.emit(Instruction::CallFunction { typ: CallType::Positional(1), }); } } fn compile_try_statement( &mut self, body: &[ast::LocatedStatement], handlers: &[ast::ExceptHandler], orelse: &Option<Vec<ast::LocatedStatement>>, finalbody: &Option<Vec<ast::LocatedStatement>>, ) -> Result<(), CompileError> { let mut handler_label = self.new_label(); let finally_label = self.new_label(); let else_label = self.new_label(); // try: self.emit(Instruction::SetupExcept { handler: handler_label, }); self.compile_statements(body)?; self.emit(Instruction::PopBlock); self.emit(Instruction::Jump { target: else_label }); // except handlers: self.set_label(handler_label); // Exception is on top of stack now handler_label = self.new_label(); for handler in handlers { // If we gave a typ, // check if this handler can handle the exception: if let Some(exc_type) = &handler.typ { // Duplicate exception for test: self.emit(Instruction::Duplicate); // Check exception type: self.emit(Instruction::LoadName { name: 
String::from("isinstance"), scope: bytecode::NameScope::Local, }); self.emit(Instruction::Rotate { amount: 2 }); self.compile_expression(exc_type)?; self.emit(Instruction::CallFunction { typ: CallType::Positional(2), }); // We cannot handle this exception type: self.emit(Instruction::JumpIfFalse { target: handler_label, }); // We have a match, store in name (except x as y) if let Some(alias) = &handler.name { self.store_name(alias); } else { // Drop exception from top of stack: self.emit(Instruction::Pop); } } else { // Catch all! // Drop exception from top of stack: self.emit(Instruction::Pop); } // Handler code: self.compile_statements(&handler.body)?; self.emit(Instruction::PopException); self.emit(Instruction::Jump { target: finally_label, }); // Emit a new label for the next handler self.set_label(handler_label); handler_label = self.new_label(); } self.emit(Instruction::Jump { target: handler_label, }); self.set_label(handler_label); // If code flows here, we have an unhandled exception, // emit finally code and raise again! // Duplicate finally code here: // TODO: this bytecode is now duplicate, could this be // improved? if let Some(statements) = finalbody { self.compile_statements(statements)?; } self.emit(Instruction::Raise { argc: 0 }); // We successfully ran the try block: // else: self.set_label(else_label); if let Some(statements) = orelse { self.compile_statements(statements)?; } // finally: self.set_label(finally_label); if let Some(statements) = finalbody { self.compile_statements(statements)?; } // unimplemented!(); Ok(()) } fn compile_function_def( &mut self, name: &str, args: &ast::Parameters, body: &[ast::LocatedStatement], decorator_list: &[ast::Expression], returns: &Option<ast::Expression>, // TODO: use type hint somehow.. 
) -> Result<(), CompileError> { // Create bytecode for this function: // remember to restore self.in_loop to the original after the function is compiled let was_in_loop = self.in_loop; let was_in_function_def = self.in_function_def; self.in_loop = false; self.in_function_def = true; let old_qualified_path = self.current_qualified_path.clone(); let qualified_name = self.create_qualified_name(name, ""); self.current_qualified_path = Some(self.create_qualified_name(name, ".<locals>")); self.prepare_decorators(decorator_list)?; let mut flags = self.enter_function(name, args)?; let (new_body, doc_str) = get_doc(body); self.compile_statements(new_body)?; // Emit None at end: self.emit(Instruction::LoadConst { value: bytecode::Constant::None, }); self.emit(Instruction::ReturnValue); let code = self.pop_code_object(); self.leave_scope(); // Prepare type annotations: let mut num_annotations = 0; // Return annotation: if let Some(annotation) = returns { // key: self.emit(Instruction::LoadConst { value: bytecode::Constant::String { value: "return".to_string(), }, }); // value: self.compile_expression(annotation)?; num_annotations += 1; } for arg in args.args.iter() { if let Some(annotation) = &arg.annotation { self.emit(Instruction::LoadConst { value: bytecode::Constant::String { value: arg.arg.to_string(), }, }); self.compile_expression(&annotation)?; num_annotations += 1; } } if num_annotations > 0 { flags |= bytecode::FunctionOpArg::HAS_ANNOTATIONS; self.emit(Instruction::BuildMap { size: num_annotations, unpack: false, }); } self.emit(Instruction::LoadConst { value: bytecode::Constant::Code { code: Box::new(code), }, }); self.emit(Instruction::LoadConst { value: bytecode::Constant::String { value: qualified_name, }, }); // Turn code object into function object: self.emit(Instruction::MakeFunction { flags }); self.store_docstring(doc_str); self.apply_decorators(decorator_list); self.store_name(name); self.current_qualified_path = old_qualified_path; self.in_loop = 
was_in_loop; self.in_function_def = was_in_function_def; Ok(()) } fn compile_class_def( &mut self, name: &str, body: &[ast::LocatedStatement], bases: &[ast::Expression], keywords: &[ast::Keyword], decorator_list: &[ast::Expression], ) -> Result<(), CompileError> { let was_in_loop = self.in_loop; self.in_loop = false; let old_qualified_path = self.current_qualified_path.clone(); let qualified_name = self.create_qualified_name(name, ""); self.current_qualified_path = Some(qualified_name.clone()); self.prepare_decorators(decorator_list)?; self.emit(Instruction::LoadBuildClass); let line_number = self.get_source_line_number(); self.code_object_stack.push(CodeObject::new( vec![], Varargs::None, vec![], Varargs::None, self.source_path.clone().unwrap(), line_number, name.to_string(), )); self.enter_scope(); let (new_body, doc_str) = get_doc(body); self.emit(Instruction::LoadName { name: "__name__".to_string(), scope: bytecode::NameScope::Local, }); self.emit(Instruction::StoreName { name: "__module__".to_string(), scope: bytecode::NameScope::Local, }); self.compile_statements(new_body)?; self.emit(Instruction::LoadConst { value: bytecode::Constant::None, }); self.emit(Instruction::ReturnValue); let code = self.pop_code_object(); self.leave_scope(); self.emit(Instruction::LoadConst { value: bytecode::Constant::Code { code: Box::new(code), }, }); self.emit(Instruction::LoadConst { value: bytecode::Constant::String { value: name.to_string(), }, }); // Turn code object into function object: self.emit(Instruction::MakeFunction { flags: bytecode::FunctionOpArg::empty(), }); self.emit(Instruction::LoadConst { value: bytecode::Constant::String { value: qualified_name, }, }); for base in bases { self.compile_expression(base)?; } if !keywords.is_empty() { let mut kwarg_names = vec![]; for keyword in keywords { if let Some(name) = &keyword.name { kwarg_names.push(bytecode::Constant::String { value: name.to_string(), }); } else { // This means **kwargs! 
panic!("name must be set"); } self.compile_expression(&keyword.value)?; } self.emit(Instruction::LoadConst { value: bytecode::Constant::Tuple { elements: kwarg_names, }, }); self.emit(Instruction::CallFunction { typ: CallType::Keyword(2 + keywords.len() + bases.len()), }); } else { self.emit(Instruction::CallFunction { typ: CallType::Positional(2 + bases.len()), }); } self.store_docstring(doc_str); self.apply_decorators(decorator_list); self.store_name(name); self.current_qualified_path = old_qualified_path; self.in_loop = was_in_loop; Ok(()) } fn store_docstring(&mut self, doc_str: Option<String>) { if let Some(doc_string) = doc_str { // Duplicate top of stack (the function or class object) self.emit(Instruction::Duplicate); // Doc string value: self.emit(Instruction::LoadConst { value: bytecode::Constant::String { value: doc_string.to_string(), }, }); self.emit(Instruction::Rotate { amount: 2 }); self.emit(Instruction::StoreAttr { name: "__doc__".to_string(), }); } } fn compile_for( &mut self, target: &ast::Expression, iter: &ast::Expression, body: &[ast::LocatedStatement], orelse: &Option<Vec<ast::LocatedStatement>>, ) -> Result<(), CompileError> { // Start loop let start_label = self.new_label(); let else_label = self.new_label(); let end_label = self.new_label(); self.emit(Instruction::SetupLoop { start: start_label, end: end_label, }); // The thing iterated: self.compile_expression(iter)?; // Retrieve Iterator self.emit(Instruction::GetIter); self.set_label(start_label); self.emit(Instruction::ForIter { target: else_label }); // Start of loop iteration, set targets: self.compile_store(target)?; let was_in_loop = self.in_loop; self.in_loop = true; self.compile_statements(body)?; self.in_loop = was_in_loop; self.emit(Instruction::Jump { target: start_label, }); self.set_label(else_label); self.emit(Instruction::PopBlock); if let Some(orelse) = orelse { self.compile_statements(orelse)?; } self.set_label(end_label); Ok(()) } fn compile_chained_comparison( &mut 
self, vals: &[ast::Expression], ops: &[ast::Comparison], ) -> Result<(), CompileError> { assert!(!ops.is_empty()); assert_eq!(vals.len(), ops.len() + 1); let to_operator = |op: &ast::Comparison| match op { ast::Comparison::Equal => bytecode::ComparisonOperator::Equal, ast::Comparison::NotEqual => bytecode::ComparisonOperator::NotEqual, ast::Comparison::Less => bytecode::ComparisonOperator::Less, ast::Comparison::LessOrEqual => bytecode::ComparisonOperator::LessOrEqual, ast::Comparison::Greater => bytecode::ComparisonOperator::Greater, ast::Comparison::GreaterOrEqual => bytecode::ComparisonOperator::GreaterOrEqual, ast::Comparison::In => bytecode::ComparisonOperator::In, ast::Comparison::NotIn => bytecode::ComparisonOperator::NotIn, ast::Comparison::Is => bytecode::ComparisonOperator::Is, ast::Comparison::IsNot => bytecode::ComparisonOperator::IsNot, }; // a == b == c == d // compile into (pseudocode): // result = a == b // if result: // result = b == c // if result: // result = c == d // initialize lhs outside of loop self.compile_expression(&vals[0])?; let break_label = self.new_label(); let last_label = self.new_label(); // for all comparisons except the last (as the last one doesn't need a conditional jump) let ops_slice = &ops[0..ops.len()]; let vals_slice = &vals[1..ops.len()]; for (op, val) in ops_slice.iter().zip(vals_slice.iter()) { self.compile_expression(val)?; // store rhs for the next comparison in chain self.emit(Instruction::Duplicate); self.emit(Instruction::Rotate { amount: 3 }); self.emit(Instruction::CompareOperation { op: to_operator(op), }); // if comparison result is false, we break with this value; if true, try the next one. 
// (CPython compresses these three opcodes into JUMP_IF_FALSE_OR_POP) self.emit(Instruction::Duplicate); self.emit(Instruction::JumpIfFalse { target: break_label, }); self.emit(Instruction::Pop); } // handle the last comparison self.compile_expression(vals.last().unwrap())?; self.emit(Instruction::CompareOperation { op: to_operator(ops.last().unwrap()), }); self.emit(Instruction::Jump { target: last_label }); // early exit left us with stack: `rhs, comparison_result`. We need to clean up rhs. self.set_label(break_label); self.emit(Instruction::Rotate { amount: 2 }); self.emit(Instruction::Pop); self.set_label(last_label); Ok(()) } fn compile_store(&mut self, target: &ast::Expression) -> Result<(), CompileError> { match target { ast::Expression::Identifier { name } => { self.store_name(name); } ast::Expression::Subscript { a, b } => { self.compile_expression(a)?; self.compile_expression(b)?; self.emit(Instruction::StoreSubscript); } ast::Expression::Attribute { value, name } => { self.compile_expression(value)?; self.emit(Instruction::StoreAttr { name: name.to_string(), }); } ast::Expression::List { elements } | ast::Expression::Tuple { elements } => { let mut seen_star = false; // Scan for star args: for (i, element) in elements.iter().enumerate() { if let ast::Expression::Starred { .. 
} = element { if seen_star { return Err(CompileError { error: CompileErrorType::StarArgs, location: self.current_source_location.clone(), }); } else { seen_star = true; self.emit(Instruction::UnpackEx { before: i, after: elements.len() - i - 1, }); } } } if !seen_star { self.emit(Instruction::UnpackSequence { size: elements.len(), }); } for element in elements { if let ast::Expression::Starred { value } = element { self.compile_store(value)?; } else { self.compile_store(element)?; } } } _ => { return Err(CompileError { error: CompileErrorType::Assign(target.name()), location: self.current_source_location.clone(), }); } } Ok(()) } fn compile_op(&mut self, op: &ast::Operator, inplace: bool) { let i = match op { ast::Operator::Add => bytecode::BinaryOperator::Add, ast::Operator::Sub => bytecode::BinaryOperator::Subtract, ast::Operator::Mult => bytecode::BinaryOperator::Multiply, ast::Operator::MatMult => bytecode::BinaryOperator::MatrixMultiply, ast::Operator::Div => bytecode::BinaryOperator::Divide, ast::Operator::FloorDiv => bytecode::BinaryOperator::FloorDivide, ast::Operator::Mod => bytecode::BinaryOperator::Modulo, ast::Operator::Pow => bytecode::BinaryOperator::Power, ast::Operator::LShift => bytecode::BinaryOperator::Lshift, ast::Operator::RShift => bytecode::BinaryOperator::Rshift, ast::Operator::BitOr => bytecode::BinaryOperator::Or, ast::Operator::BitXor => bytecode::BinaryOperator::Xor, ast::Operator::BitAnd => bytecode::BinaryOperator::And, }; self.emit(Instruction::BinaryOperation { op: i, inplace }); } fn compile_test( &mut self, expression: &ast::Expression, true_label: Option<Label>, false_label: Option<Label>, context: EvalContext, ) -> Result<(), CompileError> { // Compile expression for test, and jump to label if false match expression { ast::Expression::BoolOp { a, op, b } => match op { ast::BooleanOperator::And => { let f = false_label.unwrap_or_else(|| self.new_label()); self.compile_test(a, None, Some(f), context)?; self.compile_test(b, 
true_label, false_label, context)?; if false_label.is_none() { self.set_label(f); } } ast::BooleanOperator::Or => { let t = true_label.unwrap_or_else(|| self.new_label()); self.compile_test(a, Some(t), None, context)?; self.compile_test(b, true_label, false_label, context)?; if true_label.is_none() { self.set_label(t); } } }, _ => { self.compile_expression(expression)?; match context { EvalContext::Statement => { if let Some(true_label) = true_label { self.emit(Instruction::JumpIf { target: true_label }); } if let Some(false_label) = false_label { self.emit(Instruction::JumpIfFalse { target: false_label, }); } } EvalContext::Expression => { if let Some(true_label) = true_label { self.emit(Instruction::Duplicate); self.emit(Instruction::JumpIf { target: true_label }); self.emit(Instruction::Pop); } if let Some(false_label) = false_label { self.emit(Instruction::Duplicate); self.emit(Instruction::JumpIfFalse { target: false_label, }); self.emit(Instruction::Pop); } } } } } Ok(()) } fn compile_expression(&mut self, expression: &ast::Expression) -> Result<(), CompileError> { trace!("Compiling {:?}", expression); match expression { ast::Expression::Call { function, args, keywords, } => self.compile_call(function, args, keywords)?, ast::Expression::BoolOp { .. } => { self.compile_test(expression, None, None, EvalContext::Expression)? 
} ast::Expression::Binop { a, op, b } => { self.compile_expression(a)?; self.compile_expression(b)?; // Perform operation: self.compile_op(op, false); } ast::Expression::Subscript { a, b } => { self.compile_expression(a)?; self.compile_expression(b)?; self.emit(Instruction::BinaryOperation { op: bytecode::BinaryOperator::Subscript, inplace: false, }); } ast::Expression::Unop { op, a } => { self.compile_expression(a)?; // Perform operation: let i = match op { ast::UnaryOperator::Pos => bytecode::UnaryOperator::Plus, ast::UnaryOperator::Neg => bytecode::UnaryOperator::Minus, ast::UnaryOperator::Not => bytecode::UnaryOperator::Not, ast::UnaryOperator::Inv => bytecode::UnaryOperator::Invert, }; let i = Instruction::UnaryOperation { op: i }; self.emit(i); } ast::Expression::Attribute { value, name } => { self.compile_expression(value)?; self.emit(Instruction::LoadAttr { name: name.to_string(), }); } ast::Expression::Compare { vals, ops } => { self.compile_chained_comparison(vals, ops)?; } ast::Expression::Number { value } => { let const_value = match value { ast::Number::Integer { value } => bytecode::Constant::Integer { value: value.clone(), }, ast::Number::Float { value } => bytecode::Constant::Float { value: *value }, ast::Number::Complex { real, imag } => bytecode::Constant::Complex { value: Complex64::new(*real, *imag), }, }; self.emit(Instruction::LoadConst { value: const_value }); } ast::Expression::List { elements } => { let size = elements.len(); let must_unpack = self.gather_elements(elements)?; self.emit(Instruction::BuildList { size, unpack: must_unpack, }); } ast::Expression::Tuple { elements } => { let size = elements.len(); let must_unpack = self.gather_elements(elements)?; self.emit(Instruction::BuildTuple { size, unpack: must_unpack, }); } ast::Expression::Set { elements } => { let size = elements.len(); let must_unpack = self.gather_elements(elements)?; self.emit(Instruction::BuildSet { size, unpack: must_unpack, }); } ast::Expression::Dict { elements 
} => { let size = elements.len(); for (key, value) in elements { self.compile_expression(key)?; self.compile_expression(value)?; } self.emit(Instruction::BuildMap { size, unpack: false, }); } ast::Expression::Slice { elements } => { let size = elements.len(); for element in elements { self.compile_expression(element)?; } self.emit(Instruction::BuildSlice { size }); } ast::Expression::Yield { value } => { if !self.in_function_def { return Err(CompileError { error: CompileErrorType::InvalidYield, location: self.current_source_location.clone(), }); } self.mark_generator(); match value { Some(expression) => self.compile_expression(expression)?, None => self.emit(Instruction::LoadConst { value: bytecode::Constant::None, }), }; self.emit(Instruction::YieldValue); } ast::Expression::YieldFrom { value } => { self.mark_generator(); self.compile_expression(value)?; self.emit(Instruction::GetIter); self.emit(Instruction::LoadConst { value: bytecode::Constant::None, }); self.emit(Instruction::YieldFrom); } ast::Expression::True => { self.emit(Instruction::LoadConst { value: bytecode::Constant::Boolean { value: true }, }); } ast::Expression::False => { self.emit(Instruction::LoadConst { value: bytecode::Constant::Boolean { value: false }, }); } ast::Expression::None => { self.emit(Instruction::LoadConst { value: bytecode::Constant::None, }); } ast::Expression::Ellipsis => { self.emit(Instruction::LoadConst { value: bytecode::Constant::Ellipsis, }); } ast::Expression::String { value } => { self.compile_string(value)?; } ast::Expression::Bytes { value } => { self.emit(Instruction::LoadConst { value: bytecode::Constant::Bytes { value: value.clone(), }, }); } ast::Expression::Identifier { name } => { self.load_name(name); } ast::Expression::Lambda { args, body } => { let name = "<lambda>".to_string(); // no need to worry about the self.loop_depth because there are no loops in lambda expressions let flags = self.enter_function(&name, args)?; self.compile_expression(body)?; 
self.emit(Instruction::ReturnValue); let code = self.pop_code_object(); self.leave_scope(); self.emit(Instruction::LoadConst { value: bytecode::Constant::Code { code: Box::new(code), }, }); self.emit(Instruction::LoadConst { value: bytecode::Constant::String { value: name }, }); // Turn code object into function object: self.emit(Instruction::MakeFunction { flags }); } ast::Expression::Comprehension { kind, generators } => { self.compile_comprehension(kind, generators)?; } ast::Expression::Starred { value } => { self.compile_expression(value)?; self.emit(Instruction::Unpack); panic!("We should not just unpack a starred args, since the size is unknown."); } ast::Expression::IfExpression { test, body, orelse } => { let no_label = self.new_label(); let end_label = self.new_label(); self.compile_test(test, None, Some(no_label), EvalContext::Expression)?; self.compile_expression(body)?; self.emit(Instruction::Jump { target: end_label }); self.set_label(no_label); self.compile_expression(orelse)?; self.set_label(end_label); } } Ok(()) } fn compile_call( &mut self, function: &ast::Expression, args: &[ast::Expression], keywords: &[ast::Keyword], ) -> Result<(), CompileError> { self.compile_expression(function)?; let count = args.len() + keywords.len(); // Normal arguments: let must_unpack = self.gather_elements(args)?; let has_double_star = keywords.iter().any(|k| k.name.is_none()); if must_unpack || has_double_star { // Create a tuple with positional args: self.emit(Instruction::BuildTuple { size: args.len(), unpack: must_unpack, }); // Create an optional map with kw-args: if !keywords.is_empty() { for keyword in keywords { if let Some(name) = &keyword.name { self.emit(Instruction::LoadConst { value: bytecode::Constant::String { value: name.to_string(), }, }); self.compile_expression(&keyword.value)?; if has_double_star { self.emit(Instruction::BuildMap { size: 1, unpack: false, }); } } else { // This means **kwargs! 
self.compile_expression(&keyword.value)?; } } self.emit(Instruction::BuildMap { size: keywords.len(), unpack: has_double_star, }); self.emit(Instruction::CallFunction { typ: CallType::Ex(true), }); } else { self.emit(Instruction::CallFunction { typ: CallType::Ex(false), }); } } else { // Keyword arguments: if !keywords.is_empty() { let mut kwarg_names = vec![]; for keyword in keywords { if let Some(name) = &keyword.name { kwarg_names.push(bytecode::Constant::String { value: name.to_string(), }); } else { // This means **kwargs! panic!("name must be set"); } self.compile_expression(&keyword.value)?; } self.emit(Instruction::LoadConst { value: bytecode::Constant::Tuple { elements: kwarg_names, }, }); self.emit(Instruction::CallFunction { typ: CallType::Keyword(count), }); } else { self.emit(Instruction::CallFunction { typ: CallType::Positional(count), }); } } Ok(()) } // Given a vector of expr / star expr generate code which gives either // a list of expressions on the stack, or a list of tuples. fn gather_elements(&mut self, elements: &[ast::Expression]) -> Result<bool, CompileError> { // First determine if we have starred elements: let has_stars = elements.iter().any(|e| { if let ast::Expression::Starred { .. } = e { true } else { false } }); for element in elements { if let ast::Expression::Starred { value } = element { self.compile_expression(value)?; } else { self.compile_expression(element)?; if has_stars { self.emit(Instruction::BuildTuple { size: 1, unpack: false, }); } } } Ok(has_stars) } fn compile_comprehension( &mut self, kind: &ast::ComprehensionKind, generators: &[ast::Comprehension], ) -> Result<(), CompileError> { // We must have at least one generator: assert!(!generators.is_empty()); let name = match kind { ast::ComprehensionKind::GeneratorExpression { .. } => "<genexpr>", ast::ComprehensionKind::List { .. } => "<listcomp>", ast::ComprehensionKind::Set { .. } => "<setcomp>", ast::ComprehensionKind::Dict { .. 
} => "<dictcomp>", } .to_string(); let line_number = self.get_source_line_number(); // Create magnificent function <listcomp>: self.code_object_stack.push(CodeObject::new( vec![".0".to_string()], Varargs::None, vec![], Varargs::None, self.source_path.clone().unwrap(), line_number, name.clone(), )); // Create empty object of proper type: match kind { ast::ComprehensionKind::GeneratorExpression { .. } => {} ast::ComprehensionKind::List { .. } => { self.emit(Instruction::BuildList { size: 0, unpack: false, }); } ast::ComprehensionKind::Set { .. } => { self.emit(Instruction::BuildSet { size: 0, unpack: false, }); } ast::ComprehensionKind::Dict { .. } => { self.emit(Instruction::BuildMap { size: 0, unpack: false, }); } } let mut loop_labels = vec![]; for generator in generators { if loop_labels.is_empty() { // Load iterator onto stack (passed as first argument): self.emit(Instruction::LoadName { name: String::from(".0"), scope: bytecode::NameScope::Local, }); } else { // Evaluate iterated item: self.compile_expression(&generator.iter)?; // Get iterator / turn item into an iterator self.emit(Instruction::GetIter); } // Setup for loop: let start_label = self.new_label(); let end_label = self.new_label(); loop_labels.push((start_label, end_label)); self.emit(Instruction::SetupLoop { start: start_label, end: end_label, }); self.set_label(start_label); self.emit(Instruction::ForIter { target: end_label }); self.compile_store(&generator.target)?; // Now evaluate the ifs: for if_condition in &generator.ifs { self.compile_test( if_condition, None, Some(start_label), EvalContext::Statement, )? 
} } match kind { ast::ComprehensionKind::GeneratorExpression { element } => { self.compile_expression(element)?; self.mark_generator(); self.emit(Instruction::YieldValue); self.emit(Instruction::Pop); } ast::ComprehensionKind::List { element } => { self.compile_expression(element)?; self.emit(Instruction::ListAppend { i: 1 + generators.len(), }); } ast::ComprehensionKind::Set { element } => { self.compile_expression(element)?; self.emit(Instruction::SetAdd { i: 1 + generators.len(), }); } ast::ComprehensionKind::Dict { key, value } => { self.compile_expression(value)?; self.compile_expression(key)?; self.emit(Instruction::MapAdd { i: 1 + generators.len(), }); } } for (start_label, end_label) in loop_labels.iter().rev() { // Repeat: self.emit(Instruction::Jump { target: *start_label, }); // End of for loop: self.set_label(*end_label); self.emit(Instruction::PopBlock); } // Return freshly filled list: self.emit(Instruction::ReturnValue); // Fetch code for listcomp function: let code = self.pop_code_object(); // List comprehension code: self.emit(Instruction::LoadConst { value: bytecode::Constant::Code { code: Box::new(code), }, }); // List comprehension function name: self.emit(Instruction::LoadConst { value: bytecode::Constant::String { value: name }, }); // Turn code object into function object: self.emit(Instruction::MakeFunction { flags: bytecode::FunctionOpArg::empty(), }); // Evaluate iterated item: self.compile_expression(&generators[0].iter)?; // Get iterator / turn item into an iterator self.emit(Instruction::GetIter); // Call just created <listcomp> function: self.emit(Instruction::CallFunction { typ: CallType::Positional(1), }); Ok(()) } fn compile_string(&mut self, string: &ast::StringGroup) -> Result<(), CompileError> { match string { ast::StringGroup::Joined { values } => { for value in values { self.compile_string(value)?; } self.emit(Instruction::BuildString { size: values.len() }) } ast::StringGroup::Constant { value } => { 
self.emit(Instruction::LoadConst { value: bytecode::Constant::String { value: value.to_string(), }, }); } ast::StringGroup::FormattedValue { value, conversion, spec, } => { self.compile_expression(value)?; self.emit(Instruction::FormatValue { conversion: *conversion, spec: spec.clone(), }); } } Ok(()) } // Scope helpers: fn enter_scope(&mut self) { // println!("Enter scope {:?}", self.scope_stack); // Enter first subscope! let scope = self.scope_stack.last_mut().unwrap().sub_scopes.remove(0); self.scope_stack.push(scope); } fn leave_scope(&mut self) { // println!("Leave scope {:?}", self.scope_stack); let scope = self.scope_stack.pop().unwrap(); assert!(scope.sub_scopes.is_empty()); } fn lookup_name(&self, name: &str) -> &SymbolRole { // println!("Looking up {:?}", name); let scope = self.scope_stack.last().unwrap(); scope.lookup(name).unwrap() } // Low level helper functions: fn emit(&mut self, instruction: Instruction) { let location = self.current_source_location.clone(); let cur_code_obj = self.current_code_object(); cur_code_obj.instructions.push(instruction); cur_code_obj.locations.push(location); // TODO: insert source filename } fn current_code_object(&mut self) -> &mut CodeObject { self.code_object_stack.last_mut().unwrap() } // Generate a new label fn new_label(&mut self) -> Label { let l = self.nxt_label; self.nxt_label += 1; l } // Assign current position the given label fn set_label(&mut self, label: Label) { let position = self.current_code_object().instructions.len(); // assert!(label not in self.label_map) self.current_code_object().label_map.insert(label, position); } fn set_source_location(&mut self, location: &ast::Location) { self.current_source_location = location.clone(); } fn get_source_line_number(&mut self) -> usize { self.current_source_location.get_row() } fn create_qualified_name(&self, name: &str, suffix: &str) -> String { if let Some(ref qualified_path) = self.current_qualified_path { format!("{}.{}{}", qualified_path, name, suffix) } 
else { format!("{}{}", name, suffix) } } fn mark_generator(&mut self) { self.current_code_object().is_generator = true; } } fn get_doc(body: &[ast::LocatedStatement]) -> (&[ast::LocatedStatement], Option<String>) { if let Some(val) = body.get(0) { if let ast::Statement::Expression { ref expression } = val.node { if let ast::Expression::String { ref value } = expression { if let ast::StringGroup::Constant { ref value } = value { if let Some((_, body_rest)) = body.split_first() { return (body_rest, Some(value.to_string())); } } } } } (body, None) } #[cfg(test)] mod tests { use super::Compiler; use crate::bytecode::CodeObject; use crate::bytecode::Constant::*; use crate::bytecode::Instruction::*; use crate::symboltable::make_symbol_table; use rustpython_parser::parser; fn compile_exec(source: &str) -> CodeObject { let mut compiler = Compiler::new(); compiler.source_path = Some("source_path".to_string()); compiler.push_new_code_object("<module>".to_string()); let ast = parser::parse_program(&source.to_string()).unwrap(); let symbol_scope = make_symbol_table(&ast).unwrap(); compiler.compile_program(&ast, symbol_scope).unwrap(); compiler.pop_code_object() } #[test] fn test_if_ors() { let code = compile_exec("if True or False or False:\n pass\n"); assert_eq!( vec![ LoadConst { value: Boolean { value: true } }, JumpIf { target: 1 }, LoadConst { value: Boolean { value: false } }, JumpIf { target: 1 }, LoadConst { value: Boolean { value: false } }, JumpIfFalse { target: 0 }, Pass, LoadConst { value: None }, ReturnValue ], code.instructions ); } #[test] fn test_if_ands() { let code = compile_exec("if True and False and False:\n pass\n"); assert_eq!( vec![ LoadConst { value: Boolean { value: true } }, JumpIfFalse { target: 0 }, LoadConst { value: Boolean { value: false } }, JumpIfFalse { target: 0 }, LoadConst { value: Boolean { value: false } }, JumpIfFalse { target: 0 }, Pass, LoadConst { value: None }, ReturnValue ], code.instructions ); } #[test] fn test_if_mixed() { let 
code = compile_exec("if (True and False) or (False and True):\n pass\n"); assert_eq!( vec![ LoadConst { value: Boolean { value: true } }, JumpIfFalse { target: 2 }, LoadConst { value: Boolean { value: false } }, JumpIf { target: 1 }, LoadConst { value: Boolean { value: false } }, JumpIfFalse { target: 0 }, LoadConst { value: Boolean { value: true } }, JumpIfFalse { target: 0 }, Pass, LoadConst { value: None }, ReturnValue ], code.instructions ); } }
35.574215
110
0.48095
dec09052041120e2fce56f83087f3d95ef29b55e
6,969
use super::{ dot::{Arrow, Directions, Dot, DotElement, DotShape, Style}, shared::{ElementDetails, LabeledElement, NoteProps}, }; use itertools::Itertools; use std::cell::RefCell; #[derive(Debug)] pub enum Element<'a> { StartTag, EndTag, Activity(ElementProps<'a>), Parallel(ElementProps<'a>), Decision(ElementProps<'a>), Arrow(ArrowProps<'a>), Note(NoteProps<'a>), } pub fn as_note<'a>(note: (&'a str, Option<&'a str>)) -> Element { let label = note.0; let attributes = note.1; Element::Note(NoteProps { label, attributes }) } impl<'a> Element<'a> { pub fn is_note(&self) -> bool { matches!(self, Element::Note(_)) } } impl<'a> LabeledElement for Element<'a> { fn label(&self) -> &'a str { match self { Element::StartTag => "start", Element::EndTag => "end", Element::Activity(props) | Element::Parallel(props) | Element::Decision(props) => props.label, Element::Arrow(details) => details.label.unwrap_or_default(), Element::Note(props) => props.label, } } fn is_connection(&self) -> bool { matches!(self, Element::Arrow(_)) } } #[derive(Debug)] pub struct ElementProps<'a> { pub label: &'a str, pub incoming_connections: RefCell<u8>, } #[derive(Debug)] pub struct ArrowProps<'a> { pub label: Option<&'a str>, pub target_connection_id: RefCell<u8>, pub dashed: RefCell<bool>, pub chart_direction: Directions, pub has_tail: bool, } impl<'a> ElementProps<'a> { pub fn new(label: &'a str) -> Self { Self { label, incoming_connections: RefCell::new(0), } } } impl<'a> ArrowProps<'a> { pub fn new(label: Option<&'a str>, chart_direction: &Directions, has_tail: bool) -> Self { Self { label, target_connection_id: RefCell::new(0), dashed: RefCell::new(false), chart_direction: *chart_direction, has_tail, } } } impl<'a> From<&ElementDetails<'a, Element<'a>>> for DotElement { fn from(e: &ElementDetails<'a, Element<'a>>) -> Self { match e.element { Element::StartTag | Element::EndTag => DotElement { dot: Dot::from(e.element), uid: format!("A{}", e.id.unwrap_or_default()), uid2: None, }, 
Element::Activity(_) | Element::Parallel(_) | Element::Decision(_) | Element::Note(_) => DotElement { dot: Dot::from(e.element), uid: format!("A{}", e.id.unwrap_or_default()), uid2: None, }, Element::Arrow(props) => { let target_connection_id = *(props.target_connection_id.borrow()); let (uid1, uid2) = if let Some(relation) = &e.relation { let uid1 = format!("A{}", relation.previous_id); let uid2 = if target_connection_id > 0 { format!( "A{}:f{}:{}", relation.next_id, target_connection_id, props.chart_direction.head_port() ) } else { format!("A{}", relation.next_id) }; (uid1, uid2) } else { ("A0".to_string(), "A0".to_string()) }; DotElement { dot: Dot::from(e.element), uid: uid1, uid2: Some(uid2), } } } } } impl<'a> From<&Element<'a>> for Dot { fn from(e: &Element<'a>) -> Self { match e { Element::StartTag => Dot { shape: DotShape::Circle, height: Some(0.3), width: Some(0.3), ..Dot::default() }, Element::EndTag => Dot { shape: DotShape::DoubleCircle, height: Some(0.3), width: Some(0.3), ..Dot::default() }, Element::Activity(props) => Dot { shape: DotShape::Rectangle, height: Some(0.5), margin: Some("0.20,0.05".to_string()), label: Some(props.label.to_string()), style: vec![Style::Rounded], fontsize: Some(10), ..Dot::default() }, Element::Parallel(props) => { let incoming_connections = *props.incoming_connections.borrow(); let label = (1..=incoming_connections).map(|i| format!("<f{}>", i)).join("|"); Dot { shape: DotShape::Record, height: Some(0.05), width: Some(0.5), penwidth: Some(4), label: Some(label), style: vec![Style::Filled], fontsize: Some(1), ..Dot::default() } } Element::Decision(props) => Dot { shape: DotShape::Diamond, height: Some(0.5), width: Some(0.5), label: Some(props.label.to_string()), fontsize: Some(0), ..Dot::default() }, Element::Arrow(props) => Dot { shape: DotShape::Edge, style: vec![Style::Solid], dir: Some("both".to_string()), arrowhead: if props.has_tail { Some(Arrow::Vee) } else { None }, fontsize: Some(10), labeldistance: Some(1), 
label: props.label.as_ref().map(|s| s.to_string()), ..Dot::default() }, // A1 [shape="note" , margin="0.20,0.05" , label="You can stick notes on diagrams too!\\{bg:cornsilk\\}" , style="filled" , fillcolor="cornsilk" , fontcolor="black" , arrowtail="none" , arrowhead="none" , height=0.5 , fontsize=10 , ] Element::Note(props) => { let (fillcolor, style) = if let Some(attr) = &props.attributes { if attr.starts_with("bg:") { (Some(attr.trim_start_matches("bg:").to_string()), vec![Style::Filled]) } else { (None, vec![]) } } else { (None, vec![]) }; Dot { shape: DotShape::Note, height: Some(0.5), margin: Some("0.20,0.05".to_string()), label: Some(props.label.to_string()), fontsize: Some(10), fillcolor, style, ..Dot::default() } } } } }
33.504808
245
0.454872
d703e9475905788b33f46f85261ce39397bc14ff
2,390
use crate::UserConfig; type Stream = tokio::net::TcpStream; /// Connect to Twitch without TLS. Using the provided [`UserConfig`][UserConfig]. /// /// This registers with the connection before returning it. /// /// To connect using TLS: /// /// enable one of: /// * `tokio_rustls` /// * `tokio_native_tls` /// /// and then use the respective: /// * [`twitchchat::native_tls::connect`][native_tls_connect] /// * [`twitchchat::rustls::connect`][rustls_tls_connect] /// /// [native_tls_connect]: ./native_tls/fn.connect.html /// [rustls_tls_connect]: ./rustls/fn.connect.html /// [UserConfig]: ./struct.UserConfig.html /// /// # Example /// ```rust,no_run /// # use twitchchat::*; /// # tokio::runtime::Runtime::new().unwrap().block_on(async move { /// let user_config = UserConfig::builder().anonymous().build()?; /// let mut stream = twitchchat::connect(&user_config).await?; /// # Ok::<_, Box<dyn std::error::Error>>(()) /// # }).unwrap(); /// ``` pub async fn connect(config: &UserConfig) -> std::io::Result<Stream> { let mut stream = tokio::net::TcpStream::connect(crate::TWITCH_IRC_ADDRESS_TLS).await?; crate::register(config, &mut stream).await?; Ok(stream) } /// Connect to Twitch without TLS. Using the provided `name`, `token`. /// /// This registers with the connection before returning it. 
/// /// To connect using TLS: /// /// enable one of: /// * `tokio_rustls` /// * `tokio_native_tls` /// /// and then use the respective: /// * [`twitchchat::native_tls::connect`][native_tls_connect] /// * [`twitchchat::rustls::connect`][rustls_tls_connect] /// /// [native_tls_connect]: ./native_tls/fn.connect.html /// [rustls_tls_connect]: ./rustls/fn.connect.html /// /// # Example /// ```rust,no_run /// # use twitchchat::*; /// # tokio::runtime::Runtime::new().unwrap().block_on(async move { /// let (name, token) = ANONYMOUS_LOGIN; /// let mut stream = twitchchat::connect_easy(&name, &token).await?; /// # Ok::<_, Box<dyn std::error::Error>>(()) /// # }).unwrap(); /// ``` pub async fn connect_easy(name: &str, token: &str) -> std::io::Result<Stream> { use std::io::{Error, ErrorKind}; let mut stream = tokio::net::TcpStream::connect(crate::TWITCH_IRC_ADDRESS_TLS).await?; let config = crate::simple_user_config(name, token) // .map_err(|err| Error::new(ErrorKind::Other, err))?; crate::register(&config, &mut stream).await?; Ok(stream) }
31.447368
90
0.651464
8ff4adda606e204b2e050827a933252a6242bd6e
80,959
use crate::middle::cstore::{ExternCrate, ExternCrateSource}; use crate::mir::interpret::{AllocId, ConstValue, GlobalAlloc, Pointer, Scalar}; use crate::ty::subst::{GenericArg, GenericArgKind, Subst}; use crate::ty::{self, ConstInt, DefIdTree, ParamConst, ScalarInt, Ty, TyCtxt, TypeFoldable}; use rustc_apfloat::ieee::{Double, Single}; use rustc_ast as ast; use rustc_data_structures::fx::FxHashMap; use rustc_hir as hir; use rustc_hir::def::{self, CtorKind, DefKind, Namespace}; use rustc_hir::def_id::{CrateNum, DefId, DefIdSet, CRATE_DEF_INDEX, LOCAL_CRATE}; use rustc_hir::definitions::{DefPathData, DefPathDataName, DisambiguatedDefPathData}; use rustc_hir::ItemKind; use rustc_session::config::TrimmedDefPaths; use rustc_span::symbol::{kw, Ident, Symbol}; use rustc_target::abi::Size; use rustc_target::spec::abi::Abi; use std::cell::Cell; use std::char; use std::collections::BTreeMap; use std::convert::TryFrom; use std::fmt::{self, Write as _}; use std::ops::{ControlFlow, Deref, DerefMut}; // `pretty` is a separate module only for organization. use super::*; macro_rules! p { (@$lit:literal) => { write!(scoped_cx!(), $lit)? }; (@write($($data:expr),+)) => { write!(scoped_cx!(), $($data),+)? }; (@print($x:expr)) => { scoped_cx!() = $x.print(scoped_cx!())? }; (@$method:ident($($arg:expr),*)) => { scoped_cx!() = scoped_cx!().$method($($arg),*)? }; ($($elem:tt $(($($args:tt)*))?),+) => {{ $(p!(@ $elem $(($($args)*))?);)+ }}; } macro_rules! define_scoped_cx { ($cx:ident) => { #[allow(unused_macros)] macro_rules! scoped_cx { () => { $cx }; } }; } thread_local! { static FORCE_IMPL_FILENAME_LINE: Cell<bool> = Cell::new(false); static SHOULD_PREFIX_WITH_CRATE: Cell<bool> = Cell::new(false); static NO_TRIMMED_PATH: Cell<bool> = Cell::new(false); static NO_QUERIES: Cell<bool> = Cell::new(false); } /// Avoids running any queries during any prints that occur /// during the closure. This may alter the appearance of some /// types (e.g. forcing verbose printing for opaque types). 
/// This method is used during some queries (e.g. `predicates_of` /// for opaque types), to ensure that any debug printing that /// occurs during the query computation does not end up recursively /// calling the same query. pub fn with_no_queries<F: FnOnce() -> R, R>(f: F) -> R { NO_QUERIES.with(|no_queries| { let old = no_queries.replace(true); let result = f(); no_queries.set(old); result }) } /// Force us to name impls with just the filename/line number. We /// normally try to use types. But at some points, notably while printing /// cycle errors, this can result in extra or suboptimal error output, /// so this variable disables that check. pub fn with_forced_impl_filename_line<F: FnOnce() -> R, R>(f: F) -> R { FORCE_IMPL_FILENAME_LINE.with(|force| { let old = force.replace(true); let result = f(); force.set(old); result }) } /// Adds the `crate::` prefix to paths where appropriate. pub fn with_crate_prefix<F: FnOnce() -> R, R>(f: F) -> R { SHOULD_PREFIX_WITH_CRATE.with(|flag| { let old = flag.replace(true); let result = f(); flag.set(old); result }) } /// Prevent path trimming if it is turned on. Path trimming affects `Display` impl /// of various rustc types, for example `std::vec::Vec` would be trimmed to `Vec`, /// if no other `Vec` is found. pub fn with_no_trimmed_paths<F: FnOnce() -> R, R>(f: F) -> R { NO_TRIMMED_PATH.with(|flag| { let old = flag.replace(true); let result = f(); flag.set(old); result }) } /// The "region highlights" are used to control region printing during /// specific error messages. When a "region highlight" is enabled, it /// gives an alternate way to print specific regions. For now, we /// always print those regions using a number, so something like "`'0`". /// /// Regions not selected by the region highlight mode are presently /// unaffected. #[derive(Copy, Clone, Default)] pub struct RegionHighlightMode { /// If enabled, when we see the selected region, use "`'N`" /// instead of the ordinary behavior. 
highlight_regions: [Option<(ty::RegionKind, usize)>; 3], /// If enabled, when printing a "free region" that originated from /// the given `ty::BoundRegion`, print it as "`'1`". Free regions that would ordinarily /// have names print as normal. /// /// This is used when you have a signature like `fn foo(x: &u32, /// y: &'a u32)` and we want to give a name to the region of the /// reference `x`. highlight_bound_region: Option<(ty::BoundRegion, usize)>, } impl RegionHighlightMode { /// If `region` and `number` are both `Some`, invokes /// `highlighting_region`. pub fn maybe_highlighting_region( &mut self, region: Option<ty::Region<'_>>, number: Option<usize>, ) { if let Some(k) = region { if let Some(n) = number { self.highlighting_region(k, n); } } } /// Highlights the region inference variable `vid` as `'N`. pub fn highlighting_region(&mut self, region: ty::Region<'_>, number: usize) { let num_slots = self.highlight_regions.len(); let first_avail_slot = self.highlight_regions.iter_mut().find(|s| s.is_none()).unwrap_or_else(|| { bug!("can only highlight {} placeholders at a time", num_slots,) }); *first_avail_slot = Some((*region, number)); } /// Convenience wrapper for `highlighting_region`. pub fn highlighting_region_vid(&mut self, vid: ty::RegionVid, number: usize) { self.highlighting_region(&ty::ReVar(vid), number) } /// Returns `Some(n)` with the number to use for the given region, if any. fn region_highlighted(&self, region: ty::Region<'_>) -> Option<usize> { self.highlight_regions.iter().find_map(|h| match h { Some((r, n)) if r == region => Some(*n), _ => None, }) } /// Highlight the given bound region. /// We can only highlight one bound region at a time. See /// the field `highlight_bound_region` for more detailed notes. 
pub fn highlighting_bound_region(&mut self, br: ty::BoundRegion, number: usize) { assert!(self.highlight_bound_region.is_none()); self.highlight_bound_region = Some((br, number)); } } /// Trait for printers that pretty-print using `fmt::Write` to the printer. pub trait PrettyPrinter<'tcx>: Printer< 'tcx, Error = fmt::Error, Path = Self, Region = Self, Type = Self, DynExistential = Self, Const = Self, > + fmt::Write { /// Like `print_def_path` but for value paths. fn print_value_path( self, def_id: DefId, substs: &'tcx [GenericArg<'tcx>], ) -> Result<Self::Path, Self::Error> { self.print_def_path(def_id, substs) } fn in_binder<T>(self, value: &ty::Binder<T>) -> Result<Self, Self::Error> where T: Print<'tcx, Self, Output = Self, Error = Self::Error> + TypeFoldable<'tcx>, { value.as_ref().skip_binder().print(self) } /// Prints comma-separated elements. fn comma_sep<T>(mut self, mut elems: impl Iterator<Item = T>) -> Result<Self, Self::Error> where T: Print<'tcx, Self, Output = Self, Error = Self::Error>, { if let Some(first) = elems.next() { self = first.print(self)?; for elem in elems { self.write_str(", ")?; self = elem.print(self)?; } } Ok(self) } /// Prints `{f: t}` or `{f as t}` depending on the `cast` argument fn typed_value( mut self, f: impl FnOnce(Self) -> Result<Self, Self::Error>, t: impl FnOnce(Self) -> Result<Self, Self::Error>, conversion: &str, ) -> Result<Self::Const, Self::Error> { self.write_str("{")?; self = f(self)?; self.write_str(conversion)?; self = t(self)?; self.write_str("}")?; Ok(self) } /// Prints `<...>` around what `f` prints. fn generic_delimiters( self, f: impl FnOnce(Self) -> Result<Self, Self::Error>, ) -> Result<Self, Self::Error>; /// Returns `true` if the region should be printed in /// optional positions, e.g., `&'a T` or `dyn Tr + 'b`. /// This is typically the case for all non-`'_` regions. 
    fn region_should_not_be_omitted(&self, region: ty::Region<'_>) -> bool;

    // Defaults (should not be overridden):

    /// If possible, this returns a global path resolving to `def_id` that is visible
    /// from at least one local module, and returns `true`. If the crate defining `def_id` is
    /// declared with an `extern crate`, the path is guaranteed to use the `extern crate`.
    fn try_print_visible_def_path(self, def_id: DefId) -> Result<(Self, bool), Self::Error> {
        let mut callers = Vec::new();
        self.try_print_visible_def_path_recur(def_id, &mut callers)
    }

    /// Try to see if this path can be trimmed to a unique symbol name.
    /// Returns `(self, true)` if a trimmed name was printed; `(self, false)`
    /// means nothing was printed and the caller should fall back.
    fn try_print_trimmed_def_path(
        mut self,
        def_id: DefId,
    ) -> Result<(Self::Path, bool), Self::Error> {
        // Trimming is opt-in (`-Z trim-diagnostic-paths`) and is disabled by
        // the `NO_TRIMMED_PATH` / `SHOULD_PREFIX_WITH_CRATE` thread-local flags.
        if !self.tcx().sess.opts.debugging_opts.trim_diagnostic_paths
            || matches!(self.tcx().sess.opts.trimmed_def_paths, TrimmedDefPaths::Never)
            || NO_TRIMMED_PATH.with(|flag| flag.get())
            || SHOULD_PREFIX_WITH_CRATE.with(|flag| flag.get())
        {
            return Ok((self, false));
        }

        match self.tcx().trimmed_def_paths(LOCAL_CRATE).get(&def_id) {
            None => Ok((self, false)),
            Some(symbol) => {
                self.write_str(&symbol.as_str())?;
                Ok((self, true))
            }
        }
    }

    /// Does the work of `try_print_visible_def_path`, building the
    /// full definition path recursively before attempting to
    /// post-process it into the valid and visible version that
    /// accounts for re-exports.
    ///
    /// This method should only be called by itself or
    /// `try_print_visible_def_path`.
    ///
    /// `callers` is a chain of visible_parent's leading to `def_id`,
    /// to support cycle detection during recursion.
    fn try_print_visible_def_path_recur(
        mut self,
        def_id: DefId,
        callers: &mut Vec<DefId>,
    ) -> Result<(Self, bool), Self::Error> {
        define_scoped_cx!(self);

        debug!("try_print_visible_def_path: def_id={:?}", def_id);

        // If `def_id` is a direct or injected extern crate, return the
        // path to the crate followed by the path to the item within the crate.
        if def_id.index == CRATE_DEF_INDEX {
            let cnum = def_id.krate;

            if cnum == LOCAL_CRATE {
                return Ok((self.path_crate(cnum)?, true));
            }

            // In local mode, when we encounter a crate other than
            // LOCAL_CRATE, execution proceeds in one of two ways:
            //
            // 1. For a direct dependency, where user added an
            //    `extern crate` manually, we put the `extern
            //    crate` as the parent. So you wind up with
            //    something relative to the current crate.
            // 2. For an extern inferred from a path or an indirect crate,
            //    where there is no explicit `extern crate`, we just prepend
            //    the crate name.
            match self.tcx().extern_crate(def_id) {
                Some(&ExternCrate { src, dependency_of, span, .. }) => match (src, dependency_of) {
                    (ExternCrateSource::Extern(def_id), LOCAL_CRATE) => {
                        debug!("try_print_visible_def_path: def_id={:?}", def_id);
                        return Ok((
                            // A dummy span means the `extern crate` was injected by
                            // the compiler; print just the crate name in that case.
                            if !span.is_dummy() {
                                self.print_def_path(def_id, &[])?
                            } else {
                                self.path_crate(cnum)?
                            },
                            true,
                        ));
                    }
                    (ExternCrateSource::Path, LOCAL_CRATE) => {
                        debug!("try_print_visible_def_path: def_id={:?}", def_id);
                        return Ok((self.path_crate(cnum)?, true));
                    }
                    _ => {}
                },
                None => {
                    return Ok((self.path_crate(cnum)?, true));
                }
            }
        }

        // Local items are not in `visible_parent_map`; tell the caller to use
        // the default definition-path printing instead.
        if def_id.is_local() {
            return Ok((self, false));
        }

        let visible_parent_map = self.tcx().visible_parent_map(LOCAL_CRATE);

        let mut cur_def_key = self.tcx().def_key(def_id);
        debug!("try_print_visible_def_path: cur_def_key={:?}", cur_def_key);

        // For a constructor, we want the name of its parent rather than <unnamed>.
        if let DefPathData::Ctor = cur_def_key.disambiguated_data.data {
            let parent = DefId {
                krate: def_id.krate,
                index: cur_def_key
                    .parent
                    .expect("`DefPathData::Ctor` / `VariantData` missing a parent"),
            };

            cur_def_key = self.tcx().def_key(parent);
        }

        let visible_parent = match visible_parent_map.get(&def_id).cloned() {
            Some(parent) => parent,
            None => return Ok((self, false)),
        };

        // Cycle detection: if this parent is already on the recursion chain,
        // bail out rather than recursing forever.
        if callers.contains(&visible_parent) {
            return Ok((self, false));
        }
        callers.push(visible_parent);
        // HACK(eddyb) this bypasses `path_append`'s prefix printing to avoid
        // knowing ahead of time whether the entire path will succeed or not.
        // To support printers that do not implement `PrettyPrinter`, a `Vec` or
        // linked list on the stack would need to be built, before any printing.
        match self.try_print_visible_def_path_recur(visible_parent, callers)? {
            (cx, false) => return Ok((cx, false)),
            (cx, true) => self = cx,
        }
        callers.pop();

        let actual_parent = self.tcx().parent(def_id);
        debug!(
            "try_print_visible_def_path: visible_parent={:?} actual_parent={:?}",
            visible_parent, actual_parent,
        );

        let mut data = cur_def_key.disambiguated_data.data;
        debug!(
            "try_print_visible_def_path: data={:?} visible_parent={:?} actual_parent={:?}",
            data, visible_parent, actual_parent,
        );

        match data {
            // In order to output a path that could actually be imported (valid and visible),
            // we need to handle re-exports correctly.
            //
            // For example, take `std::os::unix::process::CommandExt`, this trait is actually
            // defined at `std::sys::unix::ext::process::CommandExt` (at time of writing).
            //
            // `std::os::unix` rexports the contents of `std::sys::unix::ext`. `std::sys` is
            // private so the "true" path to `CommandExt` isn't accessible.
            //
            // In this case, the `visible_parent_map` will look something like this:
            //
            // (child) -> (parent)
            // `std::sys::unix::ext::process::CommandExt` -> `std::sys::unix::ext::process`
            // `std::sys::unix::ext::process` -> `std::sys::unix::ext`
            // `std::sys::unix::ext` -> `std::os`
            //
            // This is correct, as the visible parent of `std::sys::unix::ext` is in fact
            // `std::os`.
            //
            // When printing the path to `CommandExt` and looking at the `cur_def_key` that
            // corresponds to `std::sys::unix::ext`, we would normally print `ext` and then go
            // to the parent - resulting in a mangled path like
            // `std::os::ext::process::CommandExt`.
            //
            // Instead, we must detect that there was a re-export and instead print `unix`
            // (which is the name `std::sys::unix::ext` was re-exported as in `std::os`). To
            // do this, we compare the parent of `std::sys::unix::ext` (`std::sys::unix`) with
            // the visible parent (`std::os`). If these do not match, then we iterate over
            // the children of the visible parent (as was done when computing
            // `visible_parent_map`), looking for the specific child we currently have and then
            // have access to the re-exported name.
            DefPathData::TypeNs(ref mut name) if Some(visible_parent) != actual_parent => {
                let reexport = self
                    .tcx()
                    .item_children(visible_parent)
                    .iter()
                    .find(|child| child.res.opt_def_id() == Some(def_id))
                    .map(|child| child.ident.name);
                if let Some(reexport) = reexport {
                    *name = reexport;
                }
            }
            // Re-exported `extern crate` (#43189).
            DefPathData::CrateRoot => {
                data = DefPathData::TypeNs(self.tcx().original_crate_name(def_id.krate));
            }
            _ => {}
        }
        debug!("try_print_visible_def_path: data={:?}", data);

        Ok((self.path_append(Ok, &DisambiguatedDefPathData { data, disambiguator: 0 })?, true))
    }

    /// Prints a qualified path like `<T as Trait>::Item`, or just the self
    /// type when there is no trait and the self type is a simple path.
    fn pretty_path_qualified(
        self,
        self_ty: Ty<'tcx>,
        trait_ref: Option<ty::TraitRef<'tcx>>,
    ) -> Result<Self::Path, Self::Error> {
        if trait_ref.is_none() {
            // Inherent impls. Try to print `Foo::bar` for an inherent
            // impl on `Foo`, but fallback to `<Foo>::bar` if self-type is
            // anything other than a simple path.
            match self_ty.kind() {
                ty::Adt(..)
                | ty::Foreign(_)
                | ty::Bool
                | ty::Char
                | ty::Str
                | ty::Int(_)
                | ty::Uint(_)
                | ty::Float(_) => {
                    return self_ty.print(self);
                }
                _ => {}
            }
        }

        self.generic_delimiters(|mut cx| {
            define_scoped_cx!(cx);

            p!(print(self_ty));
            if let Some(trait_ref) = trait_ref {
                p!(" as ", print(trait_ref.print_only_trait_path()));
            }
            Ok(cx)
        })
    }

    /// Prints a path segment of the form `<impl Trait for SelfTy>` (or
    /// `<impl SelfTy>` for inherent impls), appended to `print_prefix`.
    fn pretty_path_append_impl(
        mut self,
        print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
        self_ty: Ty<'tcx>,
        trait_ref: Option<ty::TraitRef<'tcx>>,
    ) -> Result<Self::Path, Self::Error> {
        self = print_prefix(self)?;

        self.generic_delimiters(|mut cx| {
            define_scoped_cx!(cx);

            p!("impl ");
            if let Some(trait_ref) = trait_ref {
                p!(print(trait_ref.print_only_trait_path()), " for ");
            }
            p!(print(self_ty));

            Ok(cx)
        })
    }

    /// The main entry point for pretty-printing a type: one match arm per
    /// `TyKind` variant.
    fn pretty_print_type(mut self, ty: Ty<'tcx>) -> Result<Self::Type, Self::Error> {
        define_scoped_cx!(self);

        match *ty.kind() {
            ty::Bool => p!("bool"),
            ty::Char => p!("char"),
            ty::Int(t) => p!(write("{}", t.name_str())),
            ty::Uint(t) => p!(write("{}", t.name_str())),
            ty::Float(t) => p!(write("{}", t.name_str())),
            ty::RawPtr(ref tm) => {
                p!(write(
                    "*{} ",
                    match tm.mutbl {
                        hir::Mutability::Mut => "mut",
                        hir::Mutability::Not => "const",
                    }
                ));
                p!(print(tm.ty))
            }
            ty::Ref(r, ty, mutbl) => {
                p!("&");
                if self.region_should_not_be_omitted(r) {
                    p!(print(r), " ");
                }
                p!(print(ty::TypeAndMut { ty, mutbl }))
            }
            ty::Never => p!("!"),
            ty::Tuple(ref tys) => {
                p!("(", comma_sep(tys.iter()));
                // One-element tuples need a trailing comma: `(T,)`.
                if tys.len() == 1 {
                    p!(",");
                }
                p!(")")
            }
            ty::FnDef(def_id, substs) => {
                let sig = self.tcx().fn_sig(def_id).subst(self.tcx(), substs);
                p!(print(sig), " {{", print_value_path(def_id, substs), "}}");
            }
            ty::FnPtr(ref bare_fn) => p!(print(bare_fn)),
            ty::Infer(infer_ty) => {
                if let ty::TyVar(ty_vid) = infer_ty {
                    // Prefer a user-meaningful name for the inference variable
                    // when the printer can resolve one.
                    if let Some(name) = self.infer_ty_name(ty_vid) {
                        p!(write("{}", name))
                    } else {
                        p!(write("{}", infer_ty))
                    }
                } else {
                    p!(write("{}", infer_ty))
                }
            }
            ty::Error(_) => p!("[type error]"),
            ty::Param(ref param_ty) => p!(write("{}", param_ty)),
            ty::Bound(debruijn, bound_ty) => match bound_ty.kind {
                ty::BoundTyKind::Anon => self.pretty_print_bound_var(debruijn, bound_ty.var)?,
                ty::BoundTyKind::Param(p) => p!(write("{}", p)),
            },
            ty::Adt(def, substs) => {
                p!(print_def_path(def.did, substs));
            }
            ty::Dynamic(data, r) => {
                // Parenthesize `dyn Tr + 'r` when the region is printed,
                // i.e. `(dyn Tr + 'r)`.
                let print_r = self.region_should_not_be_omitted(r);
                if print_r {
                    p!("(");
                }
                p!("dyn ", print(data));
                if print_r {
                    p!(" + ", print(r), ")");
                }
            }
            ty::Foreign(def_id) => {
                p!(print_def_path(def_id, &[]));
            }
            ty::Projection(ref data) => p!(print(data)),
            ty::Placeholder(placeholder) => p!(write("Placeholder({:?})", placeholder)),
            ty::Opaque(def_id, substs) => {
                // FIXME(eddyb) print this with `print_def_path`.
                // We use verbose printing in 'NO_QUERIES' mode, to
                // avoid needing to call `predicates_of`. This should
                // only affect certain debug messages (e.g. messages printed
                // from `rustc_middle::ty` during the computation of `tcx.predicates_of`),
                // and should have no effect on any compiler output.
                if self.tcx().sess.verbose() || NO_QUERIES.with(|q| q.get()) {
                    p!(write("Opaque({:?}, {:?})", def_id, substs));
                    return Ok(self);
                }

                return Ok(with_no_queries(|| {
                    let def_key = self.tcx().def_key(def_id);
                    // If the opaque type has a name in the def key, print that
                    // name (plus any substs) instead of its bounds.
                    if let Some(name) = def_key.disambiguated_data.data.get_opt_name() {
                        p!(write("{}", name));
                        // FIXME(eddyb) print this with `print_def_path`.
                        if !substs.is_empty() {
                            p!("::");
                            p!(generic_delimiters(|cx| cx.comma_sep(substs.iter())));
                        }
                        return Ok(self);
                    }
                    // Grab the "TraitA + TraitB" from `impl TraitA + TraitB`,
                    // by looking up the projections associated with the def_id.
                    let bounds = self.tcx().explicit_item_bounds(def_id);

                    // `first` tracks whether we still need the space right
                    // after the `impl` keyword (vs. a `+` separator).
                    let mut first = true;
                    let mut is_sized = false;
                    p!("impl");
                    for (predicate, _) in bounds {
                        let predicate = predicate.subst(self.tcx(), substs);
                        // Note: We can't use `to_opt_poly_trait_ref` here as `predicate`
                        // may contain unbound variables. We therefore do this manually.
                        //
                        // FIXME(lcnr): Find out why exactly this is the case :)
                        let bound_predicate = predicate.bound_atom_with_opt_escaping(self.tcx());
                        if let ty::PredicateAtom::Trait(pred, _) = bound_predicate.skip_binder() {
                            let trait_ref = bound_predicate.rebind(pred.trait_ref);
                            // Don't print +Sized, but rather +?Sized if absent.
                            if Some(trait_ref.def_id()) == self.tcx().lang_items().sized_trait() {
                                is_sized = true;
                                continue;
                            }

                            p!(
                                write("{}", if first { " " } else { "+" }),
                                print(trait_ref.print_only_trait_path())
                            );
                            first = false;
                        }
                    }
                    if !is_sized {
                        p!(write("{}?Sized", if first { " " } else { "+" }));
                    } else if first {
                        p!(" Sized");
                    }
                    Ok(self)
                })?);
            }
            ty::Str => p!("str"),
            ty::Generator(did, substs, movability) => {
                p!(write("["));
                match movability {
                    hir::Movability::Movable => {}
                    hir::Movability::Static => p!("static "),
                }

                if !self.tcx().sess.verbose() {
                    p!("generator");
                    // FIXME(eddyb) should use `def_span`.
                    if let Some(did) = did.as_local() {
                        let hir_id = self.tcx().hir().local_def_id_to_hir_id(did);
                        let span = self.tcx().hir().span(hir_id);
                        p!(write("@{}", self.tcx().sess.source_map().span_to_string(span)));
                    } else {
                        p!(write("@"), print_def_path(did, substs));
                    }
                } else {
                    // Verbose mode: also dump the generator's upvar types.
                    p!(print_def_path(did, substs));
                    p!(" upvar_tys=(");
                    if !substs.as_generator().is_valid() {
                        p!("unavailable");
                    } else {
                        self = self.comma_sep(substs.as_generator().upvar_tys())?;
                    }
                    p!(")");
                }

                if substs.as_generator().is_valid() {
                    p!(" ", print(substs.as_generator().witness()));
                }

                p!("]")
            }
            ty::GeneratorWitness(types) => {
                p!(in_binder(&types));
            }
            ty::Closure(did, substs) => {
                p!(write("["));
                if !self.tcx().sess.verbose() {
                    p!(write("closure"));
                    // FIXME(eddyb) should use `def_span`.
                    if let Some(did) = did.as_local() {
                        let hir_id = self.tcx().hir().local_def_id_to_hir_id(did);
                        if self.tcx().sess.opts.debugging_opts.span_free_formats {
                            p!("@", print_def_path(did.to_def_id(), substs));
                        } else {
                            let span = self.tcx().hir().span(hir_id);
                            p!(write("@{}", self.tcx().sess.source_map().span_to_string(span)));
                        }
                    } else {
                        p!(write("@"), print_def_path(did, substs));
                    }
                } else {
                    // Verbose mode: dump the closure's kind, signature and upvars.
                    p!(print_def_path(did, substs));
                    if !substs.as_closure().is_valid() {
                        p!(" closure_substs=(unavailable)");
                    } else {
                        p!(" closure_kind_ty=", print(substs.as_closure().kind_ty()));
                        p!(
                            " closure_sig_as_fn_ptr_ty=",
                            print(substs.as_closure().sig_as_fn_ptr_ty())
                        );
                        p!(" upvar_tys=(");
                        self = self.comma_sep(substs.as_closure().upvar_tys())?;
                        p!(")");
                    }
                }
                p!("]");
            }
            ty::Array(ty, sz) => {
                p!("[", print(ty), "; ");
                if self.tcx().sess.verbose() {
                    p!(write("{:?}", sz));
                } else if let ty::ConstKind::Unevaluated(..) = sz.val {
                    // Do not try to evaluate unevaluated constants. If we are const evaluating an
                    // array length anon const, rustc will (with debug assertions) print the
                    // constant's path. Which will end up here again.
                    p!("_");
                } else if let Some(n) = sz.val.try_to_bits(self.tcx().data_layout.pointer_size) {
                    p!(write("{}", n));
                } else if let ty::ConstKind::Param(param) = sz.val {
                    p!(write("{}", param));
                } else {
                    p!("_");
                }
                p!("]")
            }
            ty::Slice(ty) => p!("[", print(ty), "]"),
        }

        Ok(self)
    }

    /// Prints an anonymous bound variable as `^var` for the innermost binder,
    /// or `^depth_var` when it belongs to an outer binder.
    fn pretty_print_bound_var(
        &mut self,
        debruijn: ty::DebruijnIndex,
        var: ty::BoundVar,
    ) -> Result<(), Self::Error> {
        if debruijn == ty::INNERMOST {
            write!(self, "^{}", var.index())
        } else {
            write!(self, "^{}_{}", debruijn.index(), var.index())
        }
    }

    /// Resolves a type inference variable to a user-visible name, if any.
    /// The default implementation has no name source.
    fn infer_ty_name(&self, _: ty::TyVid) -> Option<String> {
        None
    }

    fn pretty_print_dyn_existential(
        mut self,
        predicates: &'tcx ty::List<ty::ExistentialPredicate<'tcx>>,
    ) -> Result<Self::DynExistential, Self::Error> {
        define_scoped_cx!(self);

        // Generate the main trait ref, including associated types.
        let mut first = true;

        if let Some(principal) = predicates.principal() {
            p!(print_def_path(principal.def_id, &[]));

            let mut resugared = false;

            // Special-case `Fn(...) -> ...` and resugar it.
            let fn_trait_kind = self.tcx().fn_trait_kind_from_lang_item(principal.def_id);
            if !self.tcx().sess.verbose() && fn_trait_kind.is_some() {
                if let ty::Tuple(ref args) = principal.substs.type_at(0).kind() {
                    let mut projections = predicates.projection_bounds();
                    if let (Some(proj), None) = (projections.next(), projections.next()) {
                        let tys: Vec<_> = args.iter().map(|k| k.expect_ty()).collect();
                        p!(pretty_fn_sig(&tys, false, proj.ty));
                        resugared = true;
                    }
                }
            }

            // HACK(eddyb) this duplicates `FmtPrinter`'s `path_generic_args`,
            // in order to place the projections inside the `<...>`.
            if !resugared {
                // Use a type that can't appear in defaults of type parameters.
                let dummy_self = self.tcx().mk_ty_infer(ty::FreshTy(0));
                let principal = principal.with_self_ty(self.tcx(), dummy_self);

                let args = self.generic_args_to_print(
                    self.tcx().generics_of(principal.def_id),
                    principal.substs,
                );

                // Don't print `'_` if there's no unerased regions.
                let print_regions = args.iter().any(|arg| match arg.unpack() {
                    GenericArgKind::Lifetime(r) => *r != ty::ReErased,
                    _ => false,
                });
                let mut args = args.iter().cloned().filter(|arg| match arg.unpack() {
                    GenericArgKind::Lifetime(_) => print_regions,
                    _ => true,
                });
                let mut projections = predicates.projection_bounds();

                // Pull one element from each stream up front so we can tell
                // whether the `<...>` list is empty and whether a separating
                // comma between args and projections is needed.
                let arg0 = args.next();
                let projection0 = projections.next();
                if arg0.is_some() || projection0.is_some() {
                    let args = arg0.into_iter().chain(args);
                    let projections = projection0.into_iter().chain(projections);

                    p!(generic_delimiters(|mut cx| {
                        cx = cx.comma_sep(args)?;
                        if arg0.is_some() && projection0.is_some() {
                            write!(cx, ", ")?;
                        }
                        cx.comma_sep(projections)
                    }));
                }
            }
            first = false;
        }

        // Builtin bounds.
        // FIXME(eddyb) avoid printing twice (needed to ensure
        // that the auto traits are sorted *and* printed via cx).
        let mut auto_traits: Vec<_> =
            predicates.auto_traits().map(|did| (self.tcx().def_path_str(did), did)).collect();

        // The auto traits come ordered by `DefPathHash`. While
        // `DefPathHash` is *stable* in the sense that it depends on
        // neither the host nor the phase of the moon, it depends
        // "pseudorandomly" on the compiler version and the target.
        //
        // To avoid that causing instabilities in compiletest
        // output, sort the auto-traits alphabetically.
        auto_traits.sort();

        for (_, def_id) in auto_traits {
            if !first {
                p!(" + ");
            }
            first = false;

            p!(print_def_path(def_id, &[]));
        }

        Ok(self)
    }

    /// Prints a function signature like `(A, B) -> R`, with an optional
    /// trailing `...` for C-variadic functions; the return type is omitted
    /// when it is unit.
    fn pretty_fn_sig(
        mut self,
        inputs: &[Ty<'tcx>],
        c_variadic: bool,
        output: Ty<'tcx>,
    ) -> Result<Self, Self::Error> {
        define_scoped_cx!(self);

        p!("(", comma_sep(inputs.iter().copied()));
        if c_variadic {
            if !inputs.is_empty() {
                p!(", ");
            }
            p!("...");
        }
        p!(")");
        if !output.is_unit() {
            p!(" -> ", print(output));
        }

        Ok(self)
    }

    fn pretty_print_const(
        mut self,
        ct: &'tcx ty::Const<'tcx>,
        print_ty: bool,
    ) -> Result<Self::Const, Self::Error> {
        define_scoped_cx!(self);

        if self.tcx().sess.verbose() {
            p!(write("Const({:?}: {:?})", ct.val, ct.ty));
            return Ok(self);
        }

        // Prints `_` (optionally typed as `{_: ty}`) for constants whose
        // value cannot be shown.
        macro_rules! print_underscore {
            () => {{
                if print_ty {
                    self = self.typed_value(
                        |mut this| {
                            write!(this, "_")?;
                            Ok(this)
                        },
                        |this| this.print_type(ct.ty),
                        ": ",
                    )?;
                } else {
                    write!(self, "_")?;
                }
            }};
        }

        match ct.val {
            ty::ConstKind::Unevaluated(def, substs, promoted) => {
                if let Some(promoted) = promoted {
                    p!(print_value_path(def.did, substs));
                    p!(write("::{:?}", promoted));
                } else {
                    match self.tcx().def_kind(def.did) {
                        DefKind::Static | DefKind::Const | DefKind::AssocConst => {
                            p!(print_value_path(def.did, substs))
                        }
                        _ => {
                            // For anonymous consts, fall back to quoting the
                            // source snippet when one is available locally.
                            if def.is_local() {
                                let span = self.tcx().def_span(def.did);
                                if let Ok(snip) = self.tcx().sess.source_map().span_to_snippet(span) {
                                    p!(write("{}", snip))
                                } else {
                                    print_underscore!()
                                }
                            } else {
                                print_underscore!()
                            }
                        }
                    }
                }
            }
            ty::ConstKind::Infer(..) => print_underscore!(),
            ty::ConstKind::Param(ParamConst { name, .. }) => p!(write("{}", name)),
            ty::ConstKind::Value(value) => {
                return self.pretty_print_const_value(value, ct.ty, print_ty);
            }
            ty::ConstKind::Bound(debruijn, bound_var) => {
                self.pretty_print_bound_var(debruijn, bound_var)?
            }
            ty::ConstKind::Placeholder(placeholder) => p!(write("Placeholder({:?})", placeholder)),
            ty::ConstKind::Error(_) => p!("[const error]"),
        };

        Ok(self)
    }

    /// Prints a `Scalar` constant of type `ty`. When `print_ty` is set, the
    /// output is wrapped as `{value: ty}` (via `typed_value`).
    fn pretty_print_const_scalar(
        mut self,
        scalar: Scalar,
        ty: Ty<'tcx>,
        print_ty: bool,
    ) -> Result<Self::Const, Self::Error> {
        define_scoped_cx!(self);

        match (scalar, &ty.kind()) {
            // Byte strings (&[u8; N])
            (
                Scalar::Ptr(ptr),
                ty::Ref(
                    _,
                    ty::TyS {
                        kind:
                            ty::Array(
                                ty::TyS { kind: ty::Uint(ast::UintTy::U8), .. },
                                ty::Const {
                                    val: ty::ConstKind::Value(ConstValue::Scalar(int)), ..
                                },
                            ),
                        ..
                    },
                    _,
                ),
            ) => match self.tcx().get_global_alloc(ptr.alloc_id) {
                Some(GlobalAlloc::Memory(alloc)) => {
                    // `int` is the array length constant; read that many bytes
                    // out of the allocation and print them as `b"..."`.
                    let bytes = int.assert_bits(self.tcx().data_layout.pointer_size);
                    let size = Size::from_bytes(bytes);
                    if let Ok(byte_str) = alloc.get_bytes(&self.tcx(), ptr, size) {
                        p!(pretty_print_byte_str(byte_str))
                    } else {
                        p!("<too short allocation>")
                    }
                }
                // FIXME: for statics and functions, we could in principle print more detail.
                Some(GlobalAlloc::Static(def_id)) => p!(write("<static({:?})>", def_id)),
                Some(GlobalAlloc::Function(_)) => p!("<function>"),
                None => p!("<dangling pointer>"),
            },
            // Bool
            (Scalar::Int(int), ty::Bool) if int == ScalarInt::FALSE => p!("false"),
            (Scalar::Int(int), ty::Bool) if int == ScalarInt::TRUE => p!("true"),
            // Float
            (Scalar::Int(int), ty::Float(ast::FloatTy::F32)) => {
                p!(write("{}f32", Single::try_from(int).unwrap()))
            }
            (Scalar::Int(int), ty::Float(ast::FloatTy::F64)) => {
                p!(write("{}f64", Double::try_from(int).unwrap()))
            }
            // Int
            (Scalar::Int(int), ty::Uint(_) | ty::Int(_)) => {
                let int =
                    ConstInt::new(int, matches!(ty.kind(), ty::Int(_)), ty.is_ptr_sized_integral());
                if print_ty { p!(write("{:#?}", int)) } else { p!(write("{:?}", int)) }
            }
            // Char
            (Scalar::Int(int), ty::Char) if char::try_from(int).is_ok() => {
                p!(write("{:?}", char::try_from(int).unwrap()))
            }
            // Raw pointers
            (Scalar::Int(int), ty::RawPtr(_)) => {
                let data = int.assert_bits(self.tcx().data_layout.pointer_size);
                self = self.typed_value(
                    |mut this| {
                        write!(this, "0x{:x}", data)?;
                        Ok(this)
                    },
                    |this| this.print_type(ty),
                    " as ",
                )?;
            }
            (Scalar::Ptr(ptr), ty::FnPtr(_)) => {
                // FIXME: this can ICE when the ptr is dangling or points to a non-function.
                // We should probably have a helper method to share code with the "Byte strings"
                // printing above (which also has to handle pointers to all sorts of things).
                let instance = self.tcx().global_alloc(ptr.alloc_id).unwrap_fn();
                self = self.typed_value(
                    |this| this.print_value_path(instance.def_id(), instance.substs),
                    |this| this.print_type(ty),
                    " as ",
                )?;
            }
            // For function type zsts just printing the path is enough
            (Scalar::Int(int), ty::FnDef(d, s)) if int == ScalarInt::ZST => {
                p!(print_value_path(*d, s))
            }
            // Nontrivial types with scalar bit representation
            (Scalar::Int(int), _) => {
                let print = |mut this: Self| {
                    if int.size() == Size::ZERO {
                        write!(this, "transmute(())")?;
                    } else {
                        write!(this, "transmute(0x{:x})", int)?;
                    }
                    Ok(this)
                };
                self = if print_ty {
                    self.typed_value(print, |this| this.print_type(ty), ": ")?
                } else {
                    print(self)?
                };
            }
            // Any pointer values not covered by a branch above
            (Scalar::Ptr(p), _) => {
                self = self.pretty_print_const_pointer(p, ty, print_ty)?;
            }
        }
        Ok(self)
    }

    /// This is overridden for MIR printing because we only want to hide alloc ids from users, not
    /// from MIR where it is actually useful.
    fn pretty_print_const_pointer(
        mut self,
        _: Pointer,
        ty: Ty<'tcx>,
        print_ty: bool,
    ) -> Result<Self::Const, Self::Error> {
        // The pointer value itself is hidden from users: print `&_`
        // (optionally typed as `{&_: ty}`).
        if print_ty {
            self.typed_value(
                |mut this| {
                    this.write_str("&_")?;
                    Ok(this)
                },
                |this| this.print_type(ty),
                ": ",
            )
        } else {
            self.write_str("&_")?;
            Ok(self)
        }
    }

    /// Prints a byte slice as a `b"..."` literal, escaping each byte via
    /// `std::ascii::escape_default`.
    fn pretty_print_byte_str(mut self, byte_str: &'tcx [u8]) -> Result<Self::Const, Self::Error> {
        define_scoped_cx!(self);
        p!("b\"");
        for &c in byte_str {
            for e in std::ascii::escape_default(c) {
                self.write_char(e as char)?;
            }
        }
        p!("\"");
        Ok(self)
    }

    fn pretty_print_const_value(
        mut self,
        ct: ConstValue<'tcx>,
        ty: Ty<'tcx>,
        print_ty: bool,
    ) -> Result<Self::Const, Self::Error> {
        define_scoped_cx!(self);

        if self.tcx().sess.verbose() {
            p!(write("ConstValue({:?}: ", ct), print(ty), ")");
            return Ok(self);
        }

        let u8_type = self.tcx().types.u8;

        match (ct, ty.kind()) {
            // Byte/string slices, printed as (byte) string literals.
            (
                ConstValue::Slice { data, start, end },
                ty::Ref(_, ty::TyS { kind: ty::Slice(t), .. }, _),
            ) if *t == u8_type => {
                // The `inspect` here is okay since we checked the bounds, and there are
                // no relocations (we have an active slice reference here). We don't use
                // this result to affect interpreter execution.
                let byte_str = data.inspect_with_uninit_and_ptr_outside_interpreter(start..end);
                self.pretty_print_byte_str(byte_str)
            }
            (
                ConstValue::Slice { data, start, end },
                ty::Ref(_, ty::TyS { kind: ty::Str, .. }, _),
            ) => {
                // The `inspect` here is okay since we checked the bounds, and there are no
                // relocations (we have an active `str` reference here). We don't use this
                // result to affect interpreter execution.
                let slice = data.inspect_with_uninit_and_ptr_outside_interpreter(start..end);
                let s = std::str::from_utf8(slice).expect("non utf8 str from miri");
                p!(write("{:?}", s));
                Ok(self)
            }
            (ConstValue::ByRef { alloc, offset }, ty::Array(t, n)) if *t == u8_type => {
                let n = n.val.try_to_bits(self.tcx().data_layout.pointer_size).unwrap();
                // cast is ok because we already checked for pointer size (32 or 64 bit) above
                let n = Size::from_bytes(n);
                let ptr = Pointer::new(AllocId(0), offset);

                let byte_str = alloc.get_bytes(&self.tcx(), ptr, n).unwrap();
                p!("*");
                p!(pretty_print_byte_str(byte_str));
                Ok(self)
            }
            // Aggregates, printed as array/tuple/struct/variant construction syntax.
            //
            // NB: the `has_param_types_or_consts` check ensures that we can use
            // the `destructure_const` query with an empty `ty::ParamEnv` without
            // introducing ICEs (e.g. via `layout_of`) from missing bounds.
            // E.g. `transmute([0usize; 2]): (u8, *mut T)` needs to know `T: Sized`
            // to be able to destructure the tuple into `(0u8, *mut T)
            //
            // FIXME(eddyb) for `--emit=mir`/`-Z dump-mir`, we should provide the
            // correct `ty::ParamEnv` to allow printing *all* constant values.
            (_, ty::Array(..) | ty::Tuple(..) | ty::Adt(..)) if !ty.has_param_types_or_consts() => {
                let contents = self.tcx().destructure_const(
                    ty::ParamEnv::reveal_all()
                        .and(self.tcx().mk_const(ty::Const { val: ty::ConstKind::Value(ct), ty })),
                );
                let fields = contents.fields.iter().copied();

                match *ty.kind() {
                    ty::Array(..) => {
                        p!("[", comma_sep(fields), "]");
                    }
                    ty::Tuple(..) => {
                        p!("(", comma_sep(fields));
                        // One-element tuples need a trailing comma: `(v,)`.
                        if contents.fields.len() == 1 {
                            p!(",");
                        }
                        p!(")");
                    }
                    ty::Adt(def, substs) if def.variants.is_empty() => {
                        p!(print_value_path(def.did, substs));
                    }
                    ty::Adt(def, substs) => {
                        let variant_id =
                            contents.variant.expect("destructed const of adt without variant id");
                        let variant_def = &def.variants[variant_id];
                        p!(print_value_path(variant_def.def_id, substs));

                        match variant_def.ctor_kind {
                            CtorKind::Const => {}
                            CtorKind::Fn => {
                                p!("(", comma_sep(fields), ")");
                            }
                            CtorKind::Fictive => {
                                p!(" {{ ");
                                let mut first = true;
                                for (field_def, field) in variant_def.fields.iter().zip(fields) {
                                    if !first {
                                        p!(", ");
                                    }
                                    p!(write("{}: ", field_def.ident), print(field));
                                    first = false;
                                }
                                p!(" }}");
                            }
                        }
                    }
                    _ => unreachable!(),
                }

                Ok(self)
            }
            (ConstValue::Scalar(scalar), _) => self.pretty_print_const_scalar(scalar, ty, print_ty),
            // FIXME(oli-obk): also pretty print arrays and other aggregate constants by reading
            // their fields instead of just dumping the memory.
            _ => {
                // Fallback: debug-print the raw constant, optionally with its type.
                p!(write("{:?}", ct));
                if print_ty {
                    p!(": ", print(ty));
                }
                Ok(self)
            }
        }
    }
}

// HACK(eddyb) boxed to avoid moving around a large struct by-value.
pub struct FmtPrinter<'a, 'tcx, F>(Box<FmtPrinterData<'a, 'tcx, F>>);

/// Backing state for `FmtPrinter`; accessed through the `Deref`/`DerefMut`
/// impls below.
pub struct FmtPrinterData<'a, 'tcx, F> {
    tcx: TyCtxt<'tcx>,
    fmt: F,

    // `empty_path` tracks whether nothing has been printed yet for the
    // current path (used to decide whether a `::` separator is needed).
    empty_path: bool,
    // `in_value` tracks whether we are printing a value path (affects
    // `::<...>` turbofish placement).
    in_value: bool,
    pub print_alloc_ids: bool,

    used_region_names: FxHashSet<Symbol>,
    region_index: usize,
    binder_depth: usize,
    printed_type_count: usize,

    pub region_highlight_mode: RegionHighlightMode,

    // Optional callback used by `infer_ty_name` to resolve inference
    // variables to user-visible names.
    pub name_resolver: Option<Box<&'a dyn Fn(ty::sty::TyVid) -> Option<String>>>,
}

impl<F> Deref for FmtPrinter<'a, 'tcx, F> {
    type Target = FmtPrinterData<'a, 'tcx, F>;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl<F> DerefMut for FmtPrinter<'_, '_, F> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}

impl<F> FmtPrinter<'a, 'tcx, F> {
    /// Creates a printer that writes into `fmt`, resolving names in the
    /// given namespace `ns`.
    pub fn new(tcx: TyCtxt<'tcx>, fmt: F, ns: Namespace) -> Self {
        FmtPrinter(Box::new(FmtPrinterData {
            tcx,
            fmt,
            empty_path: false,
            in_value: ns == Namespace::ValueNS,
            print_alloc_ids: false,
            used_region_names: Default::default(),
            region_index: 0,
            binder_depth: 0,
            printed_type_count: 0,
            region_highlight_mode: RegionHighlightMode::default(),
            name_resolver: None,
        }))
    }
}

// HACK(eddyb) get rid of `def_path_str` and/or pass `Namespace` explicitly always
// (but also some things just print a `DefId` generally so maybe we need this?)
fn guess_def_namespace(tcx: TyCtxt<'_>, def_id: DefId) -> Namespace {
    match tcx.def_key(def_id).disambiguated_data.data {
        DefPathData::TypeNs(..) | DefPathData::CrateRoot | DefPathData::ImplTrait => {
            Namespace::TypeNS
        }

        DefPathData::ValueNs(..)
        | DefPathData::AnonConst
        | DefPathData::ClosureExpr
        | DefPathData::Ctor => Namespace::ValueNS,

        DefPathData::MacroNs(..) => Namespace::MacroNS,

        _ => Namespace::TypeNS,
    }
}

impl TyCtxt<'t> {
    /// Returns a string identifying this `DefId`. This string is
    /// suitable for user output.
    pub fn def_path_str(self, def_id: DefId) -> String {
        self.def_path_str_with_substs(def_id, &[])
    }

    /// Like `def_path_str`, but also prints the given generic `substs`.
    pub fn def_path_str_with_substs(self, def_id: DefId, substs: &'t [GenericArg<'t>]) -> String {
        let ns = guess_def_namespace(self, def_id);
        debug!("def_path_str: def_id={:?}, ns={:?}", def_id, ns);
        let mut s = String::new();
        let _ = FmtPrinter::new(self, &mut s, ns).print_def_path(def_id, substs);
        s
    }
}

impl<F: fmt::Write> fmt::Write for FmtPrinter<'_, '_, F> {
    fn write_str(&mut self, s: &str) -> fmt::Result {
        self.fmt.write_str(s)
    }
}

impl<F: fmt::Write> Printer<'tcx> for FmtPrinter<'_, 'tcx, F> {
    type Error = fmt::Error;

    type Path = Self;
    type Region = Self;
    type Type = Self;
    type DynExistential = Self;
    type Const = Self;

    fn tcx(&'a self) -> TyCtxt<'tcx> {
        self.tcx
    }

    fn print_def_path(
        mut self,
        def_id: DefId,
        substs: &'tcx [GenericArg<'tcx>],
    ) -> Result<Self::Path, Self::Error> {
        define_scoped_cx!(self);

        if substs.is_empty() {
            // Prefer a trimmed path, then a visible (importable) path; fall
            // through to the default printing if neither succeeds.
            match self.try_print_trimmed_def_path(def_id)? {
                (cx, true) => return Ok(cx),
                (cx, false) => self = cx,
            }

            match self.try_print_visible_def_path(def_id)? {
                (cx, true) => return Ok(cx),
                (cx, false) => self = cx,
            }
        }

        let key = self.tcx.def_key(def_id);
        if let DefPathData::Impl = key.disambiguated_data.data {
            // Always use types for non-local impls, where types are always
            // available, and filename/line-number is mostly uninteresting.
            let use_types = !def_id.is_local() || {
                // Otherwise, use filename/line-number if forced.
                let force_no_types = FORCE_IMPL_FILENAME_LINE.with(|f| f.get());
                !force_no_types
            };

            if !use_types {
                // If no type info is available, fall back to
                // pretty printing some span information. This should
                // only occur very early in the compiler pipeline.
                let parent_def_id = DefId { index: key.parent.unwrap(), ..def_id };
                let span = self.tcx.def_span(def_id);

                self = self.print_def_path(parent_def_id, &[])?;

                // HACK(eddyb) copy of `path_append` to avoid
                // constructing a `DisambiguatedDefPathData`.
                if !self.empty_path {
                    write!(self, "::")?;
                }
                write!(self, "<impl at {}>", self.tcx.sess.source_map().span_to_string(span))?;
                self.empty_path = false;

                return Ok(self);
            }
        }

        self.default_print_def_path(def_id, substs)
    }

    fn print_region(self, region: ty::Region<'_>) -> Result<Self::Region, Self::Error> {
        self.pretty_print_region(region)
    }

    fn print_type(mut self, ty: Ty<'tcx>) -> Result<Self::Type, Self::Error> {
        // Once the type-length limit is exceeded, print `...` instead of
        // recursing any further.
        if self.tcx.sess.type_length_limit().value_within_limit(self.printed_type_count) {
            self.printed_type_count += 1;
            self.pretty_print_type(ty)
        } else {
            write!(self, "...")?;
            Ok(self)
        }
    }

    fn print_dyn_existential(
        self,
        predicates: &'tcx ty::List<ty::ExistentialPredicate<'tcx>>,
    ) -> Result<Self::DynExistential, Self::Error> {
        self.pretty_print_dyn_existential(predicates)
    }

    fn print_const(self, ct: &'tcx ty::Const<'tcx>) -> Result<Self::Const, Self::Error> {
        self.pretty_print_const(ct, true)
    }

    fn path_crate(mut self, cnum: CrateNum) -> Result<Self::Path, Self::Error> {
        self.empty_path = true;
        if cnum == LOCAL_CRATE {
            if self.tcx.sess.rust_2018() {
                // We add the `crate::` keyword on Rust 2018, only when desired.
                if SHOULD_PREFIX_WITH_CRATE.with(|flag| flag.get()) {
                    write!(self, "{}", kw::Crate)?;
                    self.empty_path = false;
                }
            }
        } else {
            write!(self, "{}", self.tcx.crate_name(cnum))?;
            self.empty_path = false;
        }
        Ok(self)
    }

    fn path_qualified(
        mut self,
        self_ty: Ty<'tcx>,
        trait_ref: Option<ty::TraitRef<'tcx>>,
    ) -> Result<Self::Path, Self::Error> {
        self = self.pretty_path_qualified(self_ty, trait_ref)?;
        self.empty_path = false;
        Ok(self)
    }

    fn path_append_impl(
        mut self,
        print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
        _disambiguated_data: &DisambiguatedDefPathData,
        self_ty: Ty<'tcx>,
        trait_ref: Option<ty::TraitRef<'tcx>>,
    ) -> Result<Self::Path, Self::Error> {
        self = self.pretty_path_append_impl(
            |mut cx| {
                cx = print_prefix(cx)?;
                if !cx.empty_path {
                    write!(cx, "::")?;
                }
                Ok(cx)
            },
            self_ty,
            trait_ref,
        )?;
        self.empty_path = false;
        Ok(self)
    }

    fn path_append(
        mut self,
        print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
        disambiguated_data: &DisambiguatedDefPathData,
    ) -> Result<Self::Path, Self::Error> {
        self = print_prefix(self)?;

        // Skip `::{{constructor}}` on tuple/unit structs.
        if let DefPathData::Ctor = disambiguated_data.data {
            return Ok(self);
        }

        // FIXME(eddyb) `name` should never be empty, but it
        // currently is for `extern { ... }` "foreign modules".
        let name = disambiguated_data.data.name();
        // An empty name (`kw::Invalid`) prints nothing at all.
        if name != DefPathDataName::Named(kw::Invalid) {
            if !self.empty_path {
                write!(self, "::")?;
            }

            if let DefPathDataName::Named(name) = name {
                // Prefix raw-identifier names with `r#`.
                if Ident::with_dummy_span(name).is_raw_guess() {
                    write!(self, "r#")?;
                }
            }

            let verbose = self.tcx.sess.verbose();
            disambiguated_data.fmt_maybe_verbose(&mut self, verbose)?;

            self.empty_path = false;
        }

        Ok(self)
    }

    fn path_generic_args(
        mut self,
        print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
        args: &[GenericArg<'tcx>],
    ) -> Result<Self::Path, Self::Error> {
        self = print_prefix(self)?;

        // Don't print `'_` if there's no unerased regions.
let print_regions = args.iter().any(|arg| match arg.unpack() { GenericArgKind::Lifetime(r) => *r != ty::ReErased, _ => false, }); let args = args.iter().cloned().filter(|arg| match arg.unpack() { GenericArgKind::Lifetime(_) => print_regions, _ => true, }); if args.clone().next().is_some() { if self.in_value { write!(self, "::")?; } self.generic_delimiters(|cx| cx.comma_sep(args)) } else { Ok(self) } } } impl<F: fmt::Write> PrettyPrinter<'tcx> for FmtPrinter<'_, 'tcx, F> { fn infer_ty_name(&self, id: ty::TyVid) -> Option<String> { self.0.name_resolver.as_ref().and_then(|func| func(id)) } fn print_value_path( mut self, def_id: DefId, substs: &'tcx [GenericArg<'tcx>], ) -> Result<Self::Path, Self::Error> { let was_in_value = std::mem::replace(&mut self.in_value, true); self = self.print_def_path(def_id, substs)?; self.in_value = was_in_value; Ok(self) } fn in_binder<T>(self, value: &ty::Binder<T>) -> Result<Self, Self::Error> where T: Print<'tcx, Self, Output = Self, Error = Self::Error> + TypeFoldable<'tcx>, { self.pretty_in_binder(value) } fn typed_value( mut self, f: impl FnOnce(Self) -> Result<Self, Self::Error>, t: impl FnOnce(Self) -> Result<Self, Self::Error>, conversion: &str, ) -> Result<Self::Const, Self::Error> { self.write_str("{")?; self = f(self)?; self.write_str(conversion)?; let was_in_value = std::mem::replace(&mut self.in_value, false); self = t(self)?; self.in_value = was_in_value; self.write_str("}")?; Ok(self) } fn generic_delimiters( mut self, f: impl FnOnce(Self) -> Result<Self, Self::Error>, ) -> Result<Self, Self::Error> { write!(self, "<")?; let was_in_value = std::mem::replace(&mut self.in_value, false); let mut inner = f(self)?; inner.in_value = was_in_value; write!(inner, ">")?; Ok(inner) } fn region_should_not_be_omitted(&self, region: ty::Region<'_>) -> bool { let highlight = self.region_highlight_mode; if highlight.region_highlighted(region).is_some() { return true; } if self.tcx.sess.verbose() { return true; } let identify_regions = 
self.tcx.sess.opts.debugging_opts.identify_regions; match *region { ty::ReEarlyBound(ref data) => { data.name != kw::Invalid && data.name != kw::UnderscoreLifetime } ty::ReLateBound(_, br) | ty::ReFree(ty::FreeRegion { bound_region: br, .. }) | ty::RePlaceholder(ty::Placeholder { name: br, .. }) => { if let ty::BrNamed(_, name) = br { if name != kw::Invalid && name != kw::UnderscoreLifetime { return true; } } if let Some((region, _)) = highlight.highlight_bound_region { if br == region { return true; } } false } ty::ReVar(_) if identify_regions => true, ty::ReVar(_) | ty::ReErased => false, ty::ReStatic | ty::ReEmpty(_) => true, } } fn pretty_print_const_pointer( self, p: Pointer, ty: Ty<'tcx>, print_ty: bool, ) -> Result<Self::Const, Self::Error> { let print = |mut this: Self| { define_scoped_cx!(this); if this.print_alloc_ids { p!(write("{:?}", p)); } else { p!("&_"); } Ok(this) }; if print_ty { self.typed_value(print, |this| this.print_type(ty), ": ") } else { print(self) } } } // HACK(eddyb) limited to `FmtPrinter` because of `region_highlight_mode`. impl<F: fmt::Write> FmtPrinter<'_, '_, F> { pub fn pretty_print_region(mut self, region: ty::Region<'_>) -> Result<Self, fmt::Error> { define_scoped_cx!(self); // Watch out for region highlights. let highlight = self.region_highlight_mode; if let Some(n) = highlight.region_highlighted(region) { p!(write("'{}", n)); return Ok(self); } if self.tcx.sess.verbose() { p!(write("{:?}", region)); return Ok(self); } let identify_regions = self.tcx.sess.opts.debugging_opts.identify_regions; // These printouts are concise. They do not contain all the information // the user might want to diagnose an error, but there is basically no way // to fit that into a short string. Hence the recommendation to use // `explain_region()` or `note_and_explain_region()`. 
match *region { ty::ReEarlyBound(ref data) => { if data.name != kw::Invalid { p!(write("{}", data.name)); return Ok(self); } } ty::ReLateBound(_, br) | ty::ReFree(ty::FreeRegion { bound_region: br, .. }) | ty::RePlaceholder(ty::Placeholder { name: br, .. }) => { if let ty::BrNamed(_, name) = br { if name != kw::Invalid && name != kw::UnderscoreLifetime { p!(write("{}", name)); return Ok(self); } } if let Some((region, counter)) = highlight.highlight_bound_region { if br == region { p!(write("'{}", counter)); return Ok(self); } } } ty::ReVar(region_vid) if identify_regions => { p!(write("{:?}", region_vid)); return Ok(self); } ty::ReVar(_) => {} ty::ReErased => {} ty::ReStatic => { p!("'static"); return Ok(self); } ty::ReEmpty(ty::UniverseIndex::ROOT) => { p!("'<empty>"); return Ok(self); } ty::ReEmpty(ui) => { p!(write("'<empty:{:?}>", ui)); return Ok(self); } } p!("'_"); Ok(self) } } // HACK(eddyb) limited to `FmtPrinter` because of `binder_depth`, // `region_index` and `used_region_names`. impl<F: fmt::Write> FmtPrinter<'_, 'tcx, F> { pub fn name_all_regions<T>( mut self, value: &ty::Binder<T>, ) -> Result<(Self, (T, BTreeMap<ty::BoundRegion, ty::Region<'tcx>>)), fmt::Error> where T: Print<'tcx, Self, Output = Self, Error = fmt::Error> + TypeFoldable<'tcx>, { fn name_by_region_index(index: usize) -> Symbol { match index { 0 => Symbol::intern("'r"), 1 => Symbol::intern("'s"), i => Symbol::intern(&format!("'t{}", i - 2)), } } // Replace any anonymous late-bound regions with named // variants, using new unique identifiers, so that we can // clearly differentiate between named and unnamed regions in // the output. We'll probably want to tweak this over time to // decide just how much information to give. 
if self.binder_depth == 0 { self.prepare_late_bound_region_info(value); } let mut empty = true; let mut start_or_continue = |cx: &mut Self, start: &str, cont: &str| { write!( cx, "{}", if empty { empty = false; start } else { cont } ) }; define_scoped_cx!(self); let mut region_index = self.region_index; let new_value = self.tcx.replace_late_bound_regions(value, |br| { let _ = start_or_continue(&mut self, "for<", ", "); let br = match br { ty::BrNamed(_, name) => { let _ = write!(self, "{}", name); br } ty::BrAnon(_) | ty::BrEnv => { let name = loop { let name = name_by_region_index(region_index); region_index += 1; if !self.used_region_names.contains(&name) { break name; } }; let _ = write!(self, "{}", name); ty::BrNamed(DefId::local(CRATE_DEF_INDEX), name) } }; self.tcx.mk_region(ty::ReLateBound(ty::INNERMOST, br)) }); start_or_continue(&mut self, "", "> ")?; self.binder_depth += 1; self.region_index = region_index; Ok((self, new_value)) } pub fn pretty_in_binder<T>(self, value: &ty::Binder<T>) -> Result<Self, fmt::Error> where T: Print<'tcx, Self, Output = Self, Error = fmt::Error> + TypeFoldable<'tcx>, { let old_region_index = self.region_index; let (new, new_value) = self.name_all_regions(value)?; let mut inner = new_value.0.print(new)?; inner.region_index = old_region_index; inner.binder_depth -= 1; Ok(inner) } fn prepare_late_bound_region_info<T>(&mut self, value: &ty::Binder<T>) where T: TypeFoldable<'tcx>, { struct LateBoundRegionNameCollector<'a>(&'a mut FxHashSet<Symbol>); impl<'tcx> ty::fold::TypeVisitor<'tcx> for LateBoundRegionNameCollector<'_> { fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<()> { if let ty::ReLateBound(_, ty::BrNamed(_, name)) = *r { self.0.insert(name); } r.super_visit_with(self) } } self.used_region_names.clear(); let mut collector = LateBoundRegionNameCollector(&mut self.used_region_names); value.visit_with(&mut collector); self.region_index = 0; } } impl<'tcx, T, P: PrettyPrinter<'tcx>> Print<'tcx, P> for 
ty::Binder<T> where T: Print<'tcx, P, Output = P, Error = P::Error> + TypeFoldable<'tcx>, { type Output = P; type Error = P::Error; fn print(&self, cx: P) -> Result<Self::Output, Self::Error> { cx.in_binder(self) } } impl<'tcx, T, U, P: PrettyPrinter<'tcx>> Print<'tcx, P> for ty::OutlivesPredicate<T, U> where T: Print<'tcx, P, Output = P, Error = P::Error>, U: Print<'tcx, P, Output = P, Error = P::Error>, { type Output = P; type Error = P::Error; fn print(&self, mut cx: P) -> Result<Self::Output, Self::Error> { define_scoped_cx!(cx); p!(print(self.0), ": ", print(self.1)); Ok(cx) } } macro_rules! forward_display_to_print { ($($ty:ty),+) => { $(impl fmt::Display for $ty { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { ty::tls::with(|tcx| { tcx.lift(*self) .expect("could not lift for printing") .print(FmtPrinter::new(tcx, f, Namespace::TypeNS))?; Ok(()) }) } })+ }; } macro_rules! define_print_and_forward_display { (($self:ident, $cx:ident): $($ty:ty $print:block)+) => { $(impl<'tcx, P: PrettyPrinter<'tcx>> Print<'tcx, P> for $ty { type Output = P; type Error = fmt::Error; fn print(&$self, $cx: P) -> Result<Self::Output, Self::Error> { #[allow(unused_mut)] let mut $cx = $cx; define_scoped_cx!($cx); let _: () = $print; #[allow(unreachable_code)] Ok($cx) } })+ forward_display_to_print!($($ty),+); }; } // HACK(eddyb) this is separate because `ty::RegionKind` doesn't need lifting. impl fmt::Display for ty::RegionKind { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { ty::tls::with(|tcx| { self.print(FmtPrinter::new(tcx, f, Namespace::TypeNS))?; Ok(()) }) } } /// Wrapper type for `ty::TraitRef` which opts-in to pretty printing only /// the trait path. That is, it will print `Trait<U>` instead of /// `<T as Trait<U>>`. 
#[derive(Copy, Clone, TypeFoldable, Lift)] pub struct TraitRefPrintOnlyTraitPath<'tcx>(ty::TraitRef<'tcx>); impl fmt::Debug for TraitRefPrintOnlyTraitPath<'tcx> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Display::fmt(self, f) } } impl ty::TraitRef<'tcx> { pub fn print_only_trait_path(self) -> TraitRefPrintOnlyTraitPath<'tcx> { TraitRefPrintOnlyTraitPath(self) } } impl ty::Binder<ty::TraitRef<'tcx>> { pub fn print_only_trait_path(self) -> ty::Binder<TraitRefPrintOnlyTraitPath<'tcx>> { self.map_bound(|tr| tr.print_only_trait_path()) } } forward_display_to_print! { Ty<'tcx>, &'tcx ty::List<ty::ExistentialPredicate<'tcx>>, &'tcx ty::Const<'tcx>, // HACK(eddyb) these are exhaustive instead of generic, // because `for<'tcx>` isn't possible yet. ty::Binder<&'tcx ty::List<ty::ExistentialPredicate<'tcx>>>, ty::Binder<ty::TraitRef<'tcx>>, ty::Binder<TraitRefPrintOnlyTraitPath<'tcx>>, ty::Binder<ty::FnSig<'tcx>>, ty::Binder<ty::TraitPredicate<'tcx>>, ty::Binder<ty::SubtypePredicate<'tcx>>, ty::Binder<ty::ProjectionPredicate<'tcx>>, ty::Binder<ty::OutlivesPredicate<Ty<'tcx>, ty::Region<'tcx>>>, ty::Binder<ty::OutlivesPredicate<ty::Region<'tcx>, ty::Region<'tcx>>>, ty::OutlivesPredicate<Ty<'tcx>, ty::Region<'tcx>>, ty::OutlivesPredicate<ty::Region<'tcx>, ty::Region<'tcx>> } define_print_and_forward_display! { (self, cx): &'tcx ty::List<Ty<'tcx>> { p!("{{", comma_sep(self.iter()), "}}") } ty::TypeAndMut<'tcx> { p!(write("{}", self.mutbl.prefix_str()), print(self.ty)) } ty::ExistentialTraitRef<'tcx> { // Use a type that can't appear in defaults of type parameters. 
let dummy_self = cx.tcx().mk_ty_infer(ty::FreshTy(0)); let trait_ref = self.with_self_ty(cx.tcx(), dummy_self); p!(print(trait_ref.print_only_trait_path())) } ty::ExistentialProjection<'tcx> { let name = cx.tcx().associated_item(self.item_def_id).ident; p!(write("{} = ", name), print(self.ty)) } ty::ExistentialPredicate<'tcx> { match *self { ty::ExistentialPredicate::Trait(x) => p!(print(x)), ty::ExistentialPredicate::Projection(x) => p!(print(x)), ty::ExistentialPredicate::AutoTrait(def_id) => { p!(print_def_path(def_id, &[])); } } } ty::FnSig<'tcx> { p!(write("{}", self.unsafety.prefix_str())); if self.abi != Abi::Rust { p!(write("extern {} ", self.abi)); } p!("fn", pretty_fn_sig(self.inputs(), self.c_variadic, self.output())); } ty::InferTy { if cx.tcx().sess.verbose() { p!(write("{:?}", self)); return Ok(cx); } match *self { ty::TyVar(_) => p!("_"), ty::IntVar(_) => p!(write("{}", "{integer}")), ty::FloatVar(_) => p!(write("{}", "{float}")), ty::FreshTy(v) => p!(write("FreshTy({})", v)), ty::FreshIntTy(v) => p!(write("FreshIntTy({})", v)), ty::FreshFloatTy(v) => p!(write("FreshFloatTy({})", v)) } } ty::TraitRef<'tcx> { p!(write("<{} as {}>", self.self_ty(), self.print_only_trait_path())) } TraitRefPrintOnlyTraitPath<'tcx> { p!(print_def_path(self.0.def_id, self.0.substs)); } ty::ParamTy { p!(write("{}", self.name)) } ty::ParamConst { p!(write("{}", self.name)) } ty::SubtypePredicate<'tcx> { p!(print(self.a), " <: ", print(self.b)) } ty::TraitPredicate<'tcx> { p!(print(self.trait_ref.self_ty()), ": ", print(self.trait_ref.print_only_trait_path())) } ty::ProjectionPredicate<'tcx> { p!(print(self.projection_ty), " == ", print(self.ty)) } ty::ProjectionTy<'tcx> { p!(print_def_path(self.item_def_id, self.substs)); } ty::ClosureKind { match *self { ty::ClosureKind::Fn => p!("Fn"), ty::ClosureKind::FnMut => p!("FnMut"), ty::ClosureKind::FnOnce => p!("FnOnce"), } } ty::Predicate<'tcx> { match self.kind() { &ty::PredicateKind::Atom(atom) => p!(print(atom)), 
ty::PredicateKind::ForAll(binder) => p!(print(binder)), } } ty::PredicateAtom<'tcx> { match *self { ty::PredicateAtom::Trait(ref data, constness) => { if let hir::Constness::Const = constness { p!("const "); } p!(print(data)) } ty::PredicateAtom::Subtype(predicate) => p!(print(predicate)), ty::PredicateAtom::RegionOutlives(predicate) => p!(print(predicate)), ty::PredicateAtom::TypeOutlives(predicate) => p!(print(predicate)), ty::PredicateAtom::Projection(predicate) => p!(print(predicate)), ty::PredicateAtom::WellFormed(arg) => p!(print(arg), " well-formed"), ty::PredicateAtom::ObjectSafe(trait_def_id) => { p!("the trait `", print_def_path(trait_def_id, &[]), "` is object-safe") } ty::PredicateAtom::ClosureKind(closure_def_id, _closure_substs, kind) => { p!("the closure `", print_value_path(closure_def_id, &[]), write("` implements the trait `{}`", kind)) } ty::PredicateAtom::ConstEvaluatable(def, substs) => { p!("the constant `", print_value_path(def.did, substs), "` can be evaluated") } ty::PredicateAtom::ConstEquate(c1, c2) => { p!("the constant `", print(c1), "` equals `", print(c2), "`") } ty::PredicateAtom::TypeWellFormedFromEnv(ty) => { p!("the type `", print(ty), "` is found in the environment") } } } GenericArg<'tcx> { match self.unpack() { GenericArgKind::Lifetime(lt) => p!(print(lt)), GenericArgKind::Type(ty) => p!(print(ty)), GenericArgKind::Const(ct) => p!(print(ct)), } } } fn for_each_def(tcx: TyCtxt<'_>, mut collect_fn: impl for<'b> FnMut(&'b Ident, Namespace, DefId)) { // Iterate all local crate items no matter where they are defined. 
let hir = tcx.hir(); for item in hir.krate().items.values() { if item.ident.name.as_str().is_empty() || matches!(item.kind, ItemKind::Use(_, _)) { continue; } if let Some(local_def_id) = hir.definitions().opt_hir_id_to_local_def_id(item.hir_id) { let def_id = local_def_id.to_def_id(); let ns = tcx.def_kind(def_id).ns().unwrap_or(Namespace::TypeNS); collect_fn(&item.ident, ns, def_id); } } // Now take care of extern crate items. let queue = &mut Vec::new(); let mut seen_defs: DefIdSet = Default::default(); for &cnum in tcx.crates().iter() { let def_id = DefId { krate: cnum, index: CRATE_DEF_INDEX }; // Ignore crates that are not direct dependencies. match tcx.extern_crate(def_id) { None => continue, Some(extern_crate) => { if !extern_crate.is_direct() { continue; } } } queue.push(def_id); } // Iterate external crate defs but be mindful about visibility while let Some(def) = queue.pop() { for child in tcx.item_children(def).iter() { if child.vis != ty::Visibility::Public { continue; } match child.res { def::Res::Def(DefKind::AssocTy, _) => {} def::Res::Def(defkind, def_id) => { if let Some(ns) = defkind.ns() { collect_fn(&child.ident, ns, def_id); } if seen_defs.insert(def_id) { queue.push(def_id); } } _ => {} } } } } /// The purpose of this function is to collect public symbols names that are unique across all /// crates in the build. Later, when printing about types we can use those names instead of the /// full exported path to them. /// /// So essentially, if a symbol name can only be imported from one place for a type, and as /// long as it was not glob-imported anywhere in the current crate, we can trim its printed /// path and print only the name. /// /// This has wide implications on error messages with types, for example, shortening /// `std::vec::Vec` to just `Vec`, as long as there is no other `Vec` importable anywhere. /// /// The implementation uses similar import discovery logic to that of 'use' suggestions. 
fn trimmed_def_paths(tcx: TyCtxt<'_>, crate_num: CrateNum) -> FxHashMap<DefId, Symbol> {
    // Trimming is only defined from the local crate's point of view.
    assert_eq!(crate_num, LOCAL_CRATE);

    let mut map = FxHashMap::default();

    if let TrimmedDefPaths::GoodPath = tcx.sess.opts.trimmed_def_paths {
        // For good paths causing this bug, the `rustc_middle::ty::print::with_no_trimmed_paths`
        // wrapper can be used to suppress this query, in exchange for full paths being formatted.
        tcx.sess.delay_good_path_bug("trimmed_def_paths constructed");
    }

    // Maps (namespace, name) to the unique `DefId` reachable under that name,
    // or `None` once the name has been seen from more than one `DefId`
    // (i.e. the short name would be ambiguous).
    let unique_symbols_rev: &mut FxHashMap<(Namespace, Symbol), Option<DefId>> =
        &mut FxHashMap::default();

    // Names that were glob-imported anywhere in the crate are poisoned up front
    // in every namespace: a trimmed path for them could silently change meaning.
    for symbol_set in tcx.glob_map.values() {
        for symbol in symbol_set {
            unique_symbols_rev.insert((Namespace::TypeNS, *symbol), None);
            unique_symbols_rev.insert((Namespace::ValueNS, *symbol), None);
            unique_symbols_rev.insert((Namespace::MacroNS, *symbol), None);
        }
    }

    for_each_def(tcx, |ident, ns, def_id| {
        use std::collections::hash_map::Entry::{Occupied, Vacant};

        match unique_symbols_rev.entry((ns, ident.name)) {
            Occupied(mut v) => match v.get() {
                // Already poisoned (glob import or earlier collision): stays `None`.
                None => {}
                Some(existing) => {
                    // Same name reachable via a second, different `DefId`: poison it.
                    if *existing != def_id {
                        v.insert(None);
                    }
                }
            },
            Vacant(v) => {
                v.insert(Some(def_id));
            }
        }
    });

    // Keep only the names that stayed unique: those may be printed trimmed.
    for ((_, symbol), opt_def_id) in unique_symbols_rev.drain() {
        if let Some(def_id) = opt_def_id {
            map.insert(def_id, symbol);
        }
    }

    map
}

pub fn provide(providers: &mut ty::query::Providers) {
    // Register the `trimmed_def_paths` query implementation above.
    *providers = ty::query::Providers { trimmed_def_paths, ..*providers };
}
36.81628
100
0.493768
39bd87e9a25fb84b9668d906ed4341ea61e187ab
4,615
use console_error_panic_hook; use isomorphic_app; use isomorphic_app::Msg; use isomorphic_app::{App, Store}; use js_sys::Reflect; use log::Level; use percy_dom::prelude::*; use std::cell::RefCell; use std::rc::Rc; use wasm_bindgen; use wasm_bindgen::prelude::*; use wasm_bindgen::JsCast; use web_sys; use web_sys::Url; #[wasm_bindgen] pub struct Client { app: App, dom_updater: DomUpdater, } // Expose globals from JS for things such as request animation frame // that web sys doesn't seem to have yet // // TODO: Remove this and use RAF from Rust // https://rustwasm.github.io/wasm-bindgen/api/web_sys/struct.Window.html#method.request_animation_frame #[wasm_bindgen] extern "C" { pub type GlobalJS; pub static global_js: GlobalJS; #[wasm_bindgen(method)] pub fn update(this: &GlobalJS); } #[wasm_bindgen] impl Client { #[wasm_bindgen(constructor)] pub fn new(initial_state: &str) -> Client { // In a real app you'd typically uncomment this line // #[cfg(debug_assertions)] console_log::init_with_level(Level::Debug); console_error_panic_hook::set_once(); let app = App::from_state_json(initial_state); // TODO: Use request animation frame from web_sys // https://rustwasm.github.io/wasm-bindgen/api/web_sys/struct.Window.html#method.request_animation_frame app.store.borrow_mut().subscribe(Box::new(|| { web_sys::console::log_1(&"Updating state".into()); global_js.update(); })); app.store.borrow_mut().set_after_route(Box::new(|new_path| { history().push_state_with_url(&JsValue::null(), "Rust Web App", Some(new_path)); })); let store = Rc::clone(&app.store); let on_popstate = move |_: web_sys::Event| { let location = location(); let path = location.pathname().unwrap() + &location.search().unwrap(); store.borrow_mut().msg(&Msg::SetPath(path)) }; let on_popstate = Box::new(on_popstate) as Box<FnMut(_)>; let mut on_popstate = Closure::wrap(on_popstate); window().set_onpopstate(Some(on_popstate.as_ref().unchecked_ref())); on_popstate.forget(); let root_node = document() 
.get_element_by_id("isomorphic-rust-web-app") .unwrap(); let dom_updater = DomUpdater::new_replace_mount(app.render(), root_node); let store = Rc::clone(&app.store); intercept_relative_links(store); Client { app, dom_updater } } pub fn render(&mut self) { let vdom = self.app.render(); self.dom_updater.update(vdom); } } // Ensure that anytime a link such as `<a href="/foo" />` is clicked we re-render the page locally // instead of hitting the server to load a new page. fn intercept_relative_links(store: Rc<RefCell<Store>>) { let on_anchor_click = move |event: web_sys::Event| { // Get the tag name of the element that was clicked let target = event .target() .unwrap() .dyn_into::<web_sys::Element>() .unwrap(); let tag_name = target.tag_name(); let tag_name = tag_name.as_str(); // If the clicked element is an anchor tag, check if it points to the current website // (ex: '<a href="/some-page"></a>' if tag_name.to_lowercase() == "a" { let link = Reflect::get(&target, &"href".into()) .unwrap() .as_string() .unwrap(); let link_url = Url::new(link.as_str()).unwrap(); // If this was indeed a relative URL, let our single page application router // handle it if link_url.hostname() == hostname() && link_url.port() == port() { event.prevent_default(); let msg = &Msg::SetPath(link_url.pathname()); store.borrow_mut().msg(msg); } } }; let on_anchor_click = Closure::wrap(Box::new(on_anchor_click) as Box<FnMut(_)>); window() .add_event_listener_with_callback("click", on_anchor_click.as_ref().unchecked_ref()) .unwrap(); on_anchor_click.forget(); } fn window() -> web_sys::Window { web_sys::window().unwrap() } fn document() -> web_sys::Document { window().document().unwrap() } fn history() -> web_sys::History { window().history().unwrap() } fn location() -> web_sys::Location { document().location().unwrap() } fn hostname() -> String { location().hostname().unwrap() } fn port() -> String { location().port().unwrap() }
30.361842
112
0.613868
26823164b8553a0bce4d79841fc4e2a9c8ed8530
4,824
extern crate proc_macro;

use log::warn;
use proc_macro::TokenStream;
use std::fs::{self, File};
use std::io::Read;
use std::path::PathBuf;
use std::process::{Command, Output};
use std::thread::sleep;
use std::time::Duration;
use syn::parse_macro_input;

/// Dump a subprocess's stdout/stderr and panic if it exited with a non-zero
/// status. `command_name` is only used for the diagnostic messages.
fn check_output(output: &Output, command_name: &str) {
    if !output.status.success() {
        let out = String::from_utf8(output.stdout.clone()).expect("Failed to parse output");
        let err = String::from_utf8(output.stderr.clone()).expect("Failed to parse error output");
        eprintln!("=== {} output ===", command_name);
        eprintln!("{}", out);
        eprintln!("{}", err);
        panic!(
            "{} failed with error code: {}",
            command_name,
            output.status.code().unwrap()
        );
    }
}

/// Function-like proc macro: `flatc_gen!("schema.fbs")`.
///
/// Takes a path to a FlatBuffers schema, builds `flatc` from source in a
/// global cache directory (cloning google/flatbuffers and compiling it with
/// cmake on first use), runs it on the schema, and splices the generated
/// Rust code into the call site as a token stream.
///
/// Panics (i.e. emits a compile error) on any failure: missing schema file,
/// missing git/cmake, flatc errors, or unparseable generated code.
#[proc_macro]
pub fn flatc_gen(input: TokenStream) -> TokenStream {
    let input = parse_macro_input!(input as syn::LitStr);

    // Validate input file path
    let path = PathBuf::from(input.value());
    let path = if path.is_relative() {
        // Resolve relative paths against the file that invoked the macro.
        let src = input.span().source_file();
        // XXX This needs `RUSTFLAG=--cfg procmacro2_semver_exempt`
        // see https://docs.rs/proc-macro2/*/proc_macro2/#unstable-features
        if !src.is_real() {
            panic!("flatc_gen! with relative path is supported only from real file and nightly compiler");
        }
        let src = src.path();
        let basedir = src.parent().unwrap();
        basedir.join(path)
    } else {
        path
    };
    if !path.exists() {
        panic!("Flatbuffer file '{}' does not exist.", path.display());
    }
    // File stem names the generated output: `<stem>_generated.rs`.
    let stem = path
        .file_stem()
        .expect("Cannot get the stem portion of filename")
        .to_str()
        .expect("Cannot convert filename into UTF-8");

    // All work happens in a per-user cache dir shared across builds.
    let work_dir = dirs::cache_dir()
        .expect("Cannot get global cache directory")
        .join("flatc-gen");
    fs::create_dir_all(&work_dir).expect("Failed to create cache directory");
    let lock_file = work_dir.join("flatc-gen.lock");
    {
        // Ensure the lock file exists before trying to lock it.
        fs::File::create(&lock_file).expect("Cannot create lock file");
    }

    // inter-process exclusion (parallel cmake will cause problems)
    let mut count = 0;
    let _lock = loop {
        match file_lock::FileLock::lock(lock_file.to_str().unwrap(), true, true) {
            Ok(lock) => break lock,
            Err(err) => {
                count += 1;
                eprintln!("Waiting lock of {}, {:?}", lock_file.display(), err);
            }
        };
        // Try 30s to get lock
        if count > 30 {
            panic!("Cannot get lock of {} in 30s", lock_file.display());
        }
        sleep(Duration::from_secs(1));
    };
    // NOTE(review): `_lock` is held (RAII) until the end of this function,
    // serializing the clone/build/generate steps across processes.

    // Download flatbuffers
    //
    // FIXME use release version instead of HEAD
    let fbs_repo = work_dir.join("flatbuffers");
    if !fbs_repo.exists() {
        let st = Command::new("git")
            .args(&["clone", "http://github.com/google/flatbuffers"])
            .current_dir(&work_dir)
            .status()
            .expect("Git is not installed");
        if !st.success() {
            panic!("Git clone of google/flatbuffers failed");
        }
    }

    // Build flatbuffers
    let output = Command::new("cmake")
        .args(&["-Bbuild", "-H."])
        .current_dir(&fbs_repo)
        .output()
        .expect("cmake not found");
    check_output(&output, "cmake");
    let output = Command::new("cmake")
        .args(&["--build", "build", "--target", "flatc"])
        .current_dir(&fbs_repo)
        .output()
        .expect("cmake not found");
    check_output(&output, "cmake");
    let flatc = fbs_repo.join("build/flatc");

    // Generate Rust code from FlatBuffer definitions
    let st = Command::new(flatc)
        .args(&["-r", "-o"])
        .arg(&work_dir)
        .arg("-b")
        .arg(&path)
        .status()
        .expect("flatc command failed");
    if !st.success() {
        panic!("flatc failed: {}", st.code().expect("No error code"));
    }

    let generated = work_dir.join(format!("{}_generated.rs", stem));
    if !generated.exists() {
        panic!(
            "Generated Rust file '{}' does not found.",
            generated.display()
        );
    }

    // Optional: Format generated code
    match Command::new("rustfmt").arg(&generated).status() {
        Ok(st) => {
            if !st.success() {
                panic!("rustfmt failed: {}", st.code().expect("No error code"));
            }
        }
        // rustfmt being absent is non-fatal; emit unformatted code instead.
        Err(_) => warn!("rustfmt is not installed"),
    }

    // Read the generated source back and re-emit it as tokens at the call site.
    let mut f = File::open(&generated).unwrap();
    let mut code = String::new();
    f.read_to_string(&mut code)
        .expect("Failed to read generated file");
    let ts: proc_macro2::TokenStream = syn::parse_str(&code).unwrap();
    ts.into()
}
31.94702
113
0.556385
0aab8f65b6eedc98450796655e8c983954c8574e
14,656
//! A port of *Optionals from apimachinery/types.go
use crate::{Error, Result};
use serde::Serialize;

/// Common query parameters used in watch/list/delete calls on collections
#[derive(Clone, Debug)]
#[allow(missing_docs)]
pub struct ListParams {
    /// A selector to restrict the list of returned objects by their labels.
    ///
    /// Defaults to everything if `None`.
    pub label_selector: Option<String>,

    /// A selector to restrict the list of returned objects by their fields.
    ///
    /// Defaults to everything if `None`.
    pub field_selector: Option<String>,

    /// Timeout for the list/watch call.
    ///
    /// This limits the duration of the call, regardless of any activity or inactivity.
    /// If unset for a watch call, we will use 290s.
    /// We limit this to 295s due to [inherent watch limitations](https://github.com/kubernetes/kubernetes/issues/6513).
    pub timeout: Option<u32>,

    /// Enables watch events with type "BOOKMARK".
    ///
    /// Servers that do not implement bookmarks ignore this flag and
    /// bookmarks are sent at the server's discretion. Clients should not
    /// assume bookmarks are returned at any specific interval, nor may they
    /// assume the server will send any BOOKMARK event during a session.
    /// If this is not a watch, this field is ignored.
    /// If the feature gate WatchBookmarks is not enabled in apiserver,
    /// this field is ignored.
    pub bookmarks: bool,

    /// Limit the number of results.
    ///
    /// If there are more results, the server will respond with a continue token which can be used to fetch another page
    /// of results. See the [Kubernetes API docs](https://kubernetes.io/docs/reference/using-api/api-concepts/#retrieving-large-results-sets-in-chunks)
    /// for pagination details.
    pub limit: Option<u32>,

    /// Fetch a second page of results.
    ///
    /// After listing results with a limit, a continue token can be used to fetch another page of results.
    pub continue_token: Option<String>,
}

impl Default for ListParams {
    fn default() -> Self {
        Self {
            // bookmarks stable since 1.17, and backwards compatible
            bookmarks: true,
            label_selector: None,
            field_selector: None,
            timeout: None,
            limit: None,
            continue_token: None,
        }
    }
}

impl ListParams {
    // Rejects parameter values the API server would refuse anyway,
    // so the error surfaces before a request is ever sent.
    pub(crate) fn validate(&self) -> Result<()> {
        if let Some(to) = &self.timeout {
            // https://github.com/kubernetes/kubernetes/issues/6513
            if *to >= 295 {
                return Err(Error::RequestValidation(
                    "ListParams::timeout must be < 295s".into(),
                ));
            }
        }
        Ok(())
    }
}

/// Builder interface to ListParams
///
/// Usage:
/// ```
/// use kube::api::ListParams;
/// let lp = ListParams::default()
///     .timeout(60)
///     .labels("kubernetes.io/lifecycle=spot");
/// ```
impl ListParams {
    /// Configure the timeout for list/watch calls
    ///
    /// This limits the duration of the call, regardless of any activity or inactivity.
    /// Defaults to 290s
    pub fn timeout(mut self, timeout_secs: u32) -> Self {
        self.timeout = Some(timeout_secs);
        self
    }

    /// Configure the selector to restrict the list of returned objects by their fields.
    ///
    /// Defaults to everything.
    /// Supports `=`, `==`, `!=`, and can be comma separated: `key1=value1,key2=value2`.
    /// The server only supports a limited number of field queries per type.
    pub fn fields(mut self, field_selector: &str) -> Self {
        self.field_selector = Some(field_selector.to_string());
        self
    }

    /// Configure the selector to restrict the list of returned objects by their labels.
    ///
    /// Defaults to everything.
    /// Supports `=`, `==`, `!=`, and can be comma separated: `key1=value1,key2=value2`.
    pub fn labels(mut self, label_selector: &str) -> Self {
        self.label_selector = Some(label_selector.to_string());
        self
    }

    /// Disables watch bookmarks to simplify watch handling
    ///
    /// This is not recommended to use with production watchers as it can cause desyncs.
    /// See [#219](https://github.com/clux/kube-rs/issues/219) for details.
    pub fn disable_bookmarks(mut self) -> Self {
        self.bookmarks = false;
        self
    }

    /// Sets a result limit.
    pub fn limit(mut self, limit: u32) -> Self {
        self.limit = Some(limit);
        self
    }

    /// Sets a continue token.
    pub fn continue_token(mut self, token: &str) -> Self {
        self.continue_token = Some(token.to_string());
        self
    }
}

/// Common query parameters for put/post calls
#[derive(Default, Clone, Debug)]
pub struct PostParams {
    /// Whether to run this as a dry run
    pub dry_run: bool,
    /// fieldManager is a name of the actor that is making changes
    pub field_manager: Option<String>,
}

impl PostParams {
    // Mirrors the server-side length restriction on fieldManager so the
    // failure is reported locally.
    pub(crate) fn validate(&self) -> Result<()> {
        if let Some(field_manager) = &self.field_manager {
            // Implement the easy part of validation, in future this may be extended to provide validation as in go code
            // For now it's fine, because k8s API server will return an error
            if field_manager.len() > 128 {
                return Err(Error::RequestValidation(
                    "Failed to validate PostParams::field_manager!".into(),
                ));
            }
        }
        Ok(())
    }
}

/// Describes changes that should be applied to a resource
///
/// Takes arbitrary serializable data for all strategies except `Json`.
///
/// We recommend using ([server-side](https://kubernetes.io/blog/2020/04/01/kubernetes-1.18-feature-server-side-apply-beta-2)) `Apply` patches on new kubernetes releases.
///
/// See [kubernetes patch docs](https://kubernetes.io/docs/tasks/run-application/update-api-object-kubectl-patch/#use-a-json-merge-patch-to-update-a-deployment) for the older patch types.
///
/// Note that patches have different effects on different fields depending on their merge strategies.
/// These strategies are configurable when deriving your [`CustomResource`](kube_derive::CustomResource#customizing-schemas).
/// /// # Creating a patch via serde_json /// ``` /// use kube::api::Patch; /// let patch = serde_json::json!({ /// "apiVersion": "v1", /// "kind": "Pod", /// "metadata": { /// "name": "blog" /// }, /// "spec": { /// "activeDeadlineSeconds": 5 /// } /// }); /// let patch = Patch::Apply(&patch); /// ``` /// # Creating a patch from a type /// ``` /// use kube::api::Patch; /// use k8s_openapi::api::rbac::v1::Role; /// use k8s_openapi::apimachinery::pkg::apis::meta::v1::ObjectMeta; /// let r = Role { /// metadata: ObjectMeta { name: Some("user".into()), ..ObjectMeta::default() }, /// rules: Some(vec![]) /// }; /// let patch = Patch::Apply(&r); /// ``` #[non_exhaustive] #[derive(Debug)] pub enum Patch<T: Serialize> { /// [Server side apply](https://kubernetes.io/docs/reference/using-api/api-concepts/#server-side-apply) /// /// Requires kubernetes >= 1.16 Apply(T), /// [JSON patch](https://kubernetes.io/docs/tasks/run-application/update-api-object-kubectl-patch/#use-a-json-merge-patch-to-update-a-deployment) /// /// Using this variant will require you to explicitly provide a type for `T` at the moment. 
/// /// # Example /// /// ``` /// use kube::api::Patch; /// let json_patch = json_patch::Patch(vec![]); /// let patch = Patch::Json::<()>(json_patch); /// ``` #[cfg(feature = "jsonpatch")] #[cfg_attr(docsrs, doc(cfg(feature = "jsonpatch")))] Json(json_patch::Patch), /// [JSON Merge patch](https://kubernetes.io/docs/tasks/run-application/update-api-object-kubectl-patch/#use-a-json-merge-patch-to-update-a-deployment) Merge(T), /// [Strategic JSON Merge patch](https://kubernetes.io/docs/tasks/run-application/update-api-object-kubectl-patch/#use-a-strategic-merge-patch-to-update-a-deployment) Strategic(T), } impl<T: Serialize> Patch<T> { pub(crate) fn is_apply(&self) -> bool { matches!(self, Patch::Apply(_)) } pub(crate) fn content_type(&self) -> &'static str { match &self { Self::Apply(_) => "application/apply-patch+yaml", #[cfg(feature = "jsonpatch")] #[cfg_attr(docsrs, doc(cfg(feature = "jsonpatch")))] Self::Json(_) => "application/json-patch+json", Self::Merge(_) => "application/merge-patch+json", Self::Strategic(_) => "application/strategic-merge-patch+json", } } } impl<T: Serialize> Patch<T> { pub(crate) fn serialize(&self) -> Result<Vec<u8>> { match self { Self::Apply(p) => serde_json::to_vec(p), #[cfg(feature = "jsonpatch")] #[cfg_attr(docsrs, doc(cfg(feature = "jsonpatch")))] Self::Json(p) => serde_json::to_vec(p), Self::Strategic(p) => serde_json::to_vec(p), Self::Merge(p) => serde_json::to_vec(p), } .map_err(Into::into) } } /// Common query parameters for patch calls #[derive(Default, Clone, Debug)] pub struct PatchParams { /// Whether to run this as a dry run pub dry_run: bool, /// force Apply requests. Applicable only to [`Patch::Apply`]. pub force: bool, /// fieldManager is a name of the actor that is making changes. Required for [`Patch::Apply`] /// optional for everything else. 
pub field_manager: Option<String>, } impl PatchParams { pub(crate) fn validate<P: Serialize>(&self, patch: &Patch<P>) -> Result<()> { if let Some(field_manager) = &self.field_manager { // Implement the easy part of validation, in future this may be extended to provide validation as in go code // For now it's fine, because k8s API server will return an error if field_manager.len() > 128 { return Err(Error::RequestValidation( "Failed to validate PatchParams::field_manager!".into(), )); } } if self.force && !patch.is_apply() { warn!("PatchParams::force only works with Patch::Apply"); } Ok(()) } pub(crate) fn populate_qp(&self, qp: &mut url::form_urlencoded::Serializer<String>) { if self.dry_run { qp.append_pair("dryRun", "All"); } if self.force { qp.append_pair("force", "true"); } if let Some(ref field_manager) = self.field_manager { qp.append_pair("fieldManager", &field_manager); } } /// Construct `PatchParams` for server-side apply pub fn apply(manager: &str) -> Self { Self { field_manager: Some(manager.into()), ..Self::default() } } /// Force the result through on conflicts /// /// NB: Force is a concept restricted to the server-side [`Patch::Apply`]. pub fn force(mut self) -> Self { self.force = true; self } /// Perform a dryRun only pub fn dry_run(mut self) -> Self { self.dry_run = true; self } } /// Common query parameters for delete calls #[derive(Default, Clone, Serialize, Debug)] #[serde(rename_all = "camelCase")] pub struct DeleteParams { /// When present, indicates that modifications should not be persisted. #[serde( serialize_with = "dry_run_all_ser", skip_serializing_if = "std::ops::Not::not" )] pub dry_run: bool, /// The duration in seconds before the object should be deleted. /// /// Value must be non-negative integer. The value zero indicates delete immediately. /// If this value is `None`, the default grace period for the specified type will be used. /// Defaults to a per object value if not specified. Zero means delete immediately. 
#[serde(skip_serializing_if = "Option::is_none")] pub grace_period_seconds: Option<u32>, /// Whether or how garbage collection is performed. /// /// The default policy is decided by the existing finalizer set in /// `metadata.finalizers`, and the resource-specific default policy. #[serde(skip_serializing_if = "Option::is_none")] pub propagation_policy: Option<PropagationPolicy>, /// Condtions that must be fulfilled before a deletion is carried out /// /// If not possible, a `409 Conflict` status will be returned. #[serde(skip_serializing_if = "Option::is_none")] pub preconditions: Option<Preconditions>, } // dryRun serialization differ when used as body parameters and query strings: // query strings are either true/false // body params allow only: missing field, or ["All"] // The latter is a very awkward API causing users to do to // dp.dry_run = vec!["All".into()]; // just to turn on dry_run.. // so we hide this detail for now. fn dry_run_all_ser<S>(t: &bool, s: S) -> std::result::Result<S::Ok, S::Error> where S: serde::ser::Serializer, { use serde::ser::SerializeTuple; match t { true => { let mut map = s.serialize_tuple(1)?; map.serialize_element("All")?; map.end() } false => s.serialize_none(), } } #[cfg(test)] mod test { use super::DeleteParams; #[test] fn delete_param_serialize() { let mut dp = DeleteParams::default(); let emptyser = serde_json::to_string(&dp).unwrap(); //println!("emptyser is: {}", emptyser); assert_eq!(emptyser, "{}"); dp.dry_run = true; let ser = serde_json::to_string(&dp).unwrap(); //println!("ser is: {}", ser); assert_eq!(ser, "{\"dryRun\":[\"All\"]}"); } } /// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. 
#[derive(Default, Clone, Serialize, Debug)] #[serde(rename_all = "camelCase")] pub struct Preconditions { /// Specifies the target ResourceVersion #[serde(skip_serializing_if = "Option::is_none")] pub resource_version: Option<String>, /// Specifies the target UID #[serde(skip_serializing_if = "Option::is_none")] pub uid: Option<String>, } /// Propagation policy when deleting single objects #[derive(Clone, Debug, Serialize)] pub enum PropagationPolicy { /// Orphan dependents Orphan, /// Allow the garbage collector to delete the dependents in the background Background, /// A cascading policy that deletes all dependents in the foreground Foreground, }
35.146283
187
0.621316
c1980b89c0f86c236aacd208912bc846ad26ea8d
5,370
//! This module implements a [smoltcp][] device interface `phy::Device` for //! the STM32H7 series of microcontrollers. //! //! Multiple PHYs are supported: //! - SMSC LAN8742a //! - Micrel KSZ8081R //! /// Station Management Interface (SMI) on an ethernet PHY pub trait StationManagement { /// Read a register over SMI. fn smi_read(&mut self, reg: u8) -> u16; /// Write a register over SMI. fn smi_write(&mut self, reg: u8, val: u16); } /// Traits for an Ethernet PHY pub trait PHY { /// Reset PHY and wait for it to come out of reset. fn phy_reset(&mut self); /// PHY initialisation. fn phy_init(&mut self); } mod ksz8081r; mod lan8742a; /// Some common implementations of the [PHY trait](PHY) pub mod phy { pub use super::ksz8081r::*; pub use super::lan8742a::*; } mod eth; pub use eth::{enable_interrupt, interrupt_handler, new_unchecked}; pub use eth::{DesRing, EthernetDMA, EthernetMAC}; /// Marks a set of pins used to communciate to a PHY with a Reduced Media /// Independent Interface (RMII) pub trait PinsRMII {} impl<REF_CLK, MDIO, MDC, CRS_DV, RXD0, RXD1, TX_EN, TXD0, TXD1> PinsRMII for (REF_CLK, MDIO, MDC, CRS_DV, RXD0, RXD1, TX_EN, TXD0, TXD1) where REF_CLK: RefClk, MDIO: Mdio, MDC: Mdc, CRS_DV: CrsDv, RXD0: Rxd0, RXD1: Rxd1, TX_EN: TxEn, TXD0: Txd0, TXD1: Txd1, { // RMII } /// Marks a type as a REF_CLK pin pub trait RefClk {} /// Marks a type as a TX_CLK pin pub trait TxClk {} /// Marks a type as a MDIO pin pub trait Mdio {} /// Marks a type as a MDC pin pub trait Mdc {} /// Marks a type as a COL pin pub trait Col {} /// Marks a type as a CRS pin pub trait Crs {} /// Marks a type as a CRS_DV pin pub trait CrsDv {} /// Marks a type as a PPS_OUT pin pub trait PpsOut {} /// Marks a type as a RX_ER pin pub trait RxEr {} /// Marks a type as a TX_EN pin pub trait TxEn {} /// Marks a type as a RXD0 pin pub trait Rxd0 {} /// Marks a type as a RXD1 pin pub trait Rxd1 {} /// Marks a type as a RXD2 pin pub trait Rxd2 {} /// Marks a type as a RXD3 pin pub trait Rxd3 {} /// 
Marks a type as a TXD0 pin pub trait Txd0 {} /// Marks a type as a TXD1 pin pub trait Txd1 {} /// Marks a type as a TXD2 pin pub trait Txd2 {} /// Marks a type as a TXD3 pin pub trait Txd3 {} macro_rules! pins { (REF_CLK: [$($REF_CLK:ty),*] TX_CLK: [$($TX_CLK:ty),*] MDIO: [$($MDIO:ty),*] MDC: [$($MDC:ty),*] COL: [$($COL:ty),*] CRS: [$($CRS:ty),*] CRS_DV: [$($CRS_DV:ty),*] PPS_OUT: [$($PPS_OUT:ty),*] RX_ER: [$($RX_ER:ty),*] TX_EN: [$($TX_EN:ty),*] RXD0: [$($RXD0:ty),*] RXD1: [$($RXD1:ty),*] RXD2: [$($RXD2:ty),*] RXD3: [$($RXD3:ty),*] TXD0: [$($TXD0:ty),*] TXD1: [$($TXD1:ty),*] TXD2: [$($TXD2:ty),*] TXD3: [$($TXD3:ty),*]) => { $( impl RefClk for $REF_CLK {} )* $( impl TxClk for $TX_CLK {} )* $( impl Mdio for $MDIO {} )* $( impl Mdc for $MDC {} )* $( impl Col for $COL {} )* $( impl Crs for $CRS {} )* $( impl CrsDv for $CRS_DV {} )* $( impl PpsOut for $PPS_OUT {} )* $( impl RxEr for $RX_ER {} )* $( impl TxEn for $TX_EN {} )* $( impl Rxd0 for $RXD0 {} )* $( impl Rxd1 for $RXD1 {} )* $( impl Rxd2 for $RXD2 {} )* $( impl Rxd3 for $RXD3 {} )* $( impl Txd0 for $TXD0 {} )* $( impl Txd1 for $TXD1 {} )* $( impl Txd2 for $TXD2 {} )* $( impl Txd3 for $TXD3 {} )* } } use crate::gpio::gpioa::{PA0, PA1, PA2, PA3, PA7}; use crate::gpio::gpiob::{PB0, PB1, PB10, PB11, PB12, PB13, PB5, PB8}; use crate::gpio::gpioc::{PC1, PC2, PC3, PC4, PC5}; use crate::gpio::gpioe::PE2; use crate::gpio::gpiog::{PG11, PG12, PG13, PG14, PG8}; use crate::gpio::gpioh::{PH2, PH3, PH6, PH7}; use crate::gpio::gpioi::PI10; use crate::gpio::{Alternate, AF11}; pins! 
{ REF_CLK: [ PA1<Alternate<AF11>> ] TX_CLK: [ PC3<Alternate<AF11>> ] MDIO: [ PA2<Alternate<AF11>> ] MDC: [ PC1<Alternate<AF11>> ] COL: [ PA3<Alternate<AF11>>, PH3<Alternate<AF11>> ] CRS: [ PA0<Alternate<AF11>>, PH2<Alternate<AF11>> ] CRS_DV: [ PA7<Alternate<AF11>> ] PPS_OUT: [ PB5<Alternate<AF11>>, PG8<Alternate<AF11>> ] RX_ER: [ PB10<Alternate<AF11>>, PI10<Alternate<AF11>> ] TX_EN: [ PB11<Alternate<AF11>>, PG11<Alternate<AF11>> ] RXD0: [ PC4<Alternate<AF11>> ] RXD1: [ PC5<Alternate<AF11>> ] RXD2: [ PB0<Alternate<AF11>>, PH6<Alternate<AF11>> ] RXD3: [ PB1<Alternate<AF11>>, PH7<Alternate<AF11>> ] TXD0: [ PB12<Alternate<AF11>>, PG13<Alternate<AF11>> ] TXD1: [ PB13<Alternate<AF11>>, PG12<Alternate<AF11>>, PG14<Alternate<AF11>> ] TXD2: [ PC2<Alternate<AF11>> ] TXD3: [ PB8<Alternate<AF11>>, PE2<Alternate<AF11>> ] }
22.851064
98
0.520298
23e0a3d5a1ac492ece01acf93e60b421c1a4003c
3,238
// ignore-tidy-linelength // ignore-test FIXME swt_ignore // ^ trace recorder function interferes with gdb output. // Require LLVM with DW_TAG_variant_part and a gdb that can read it. // min-system-llvm-version: 8.0 // min-gdb-version: 8.2 // compile-flags:-g // === GDB TESTS =================================================================================== // gdb-command:run // gdb-command:print b // gdb-check:$1 = generator_objects::main::generator-0 {__0: 0x[...], <<variant>>: {__state: 0, 0: generator_objects::main::generator-0::Unresumed, 1: generator_objects::main::generator-0::Returned, 2: generator_objects::main::generator-0::Panicked, 3: generator_objects::main::generator-0::Suspend0 {[...]}, 4: generator_objects::main::generator-0::Suspend1 {[...]}}} // gdb-command:continue // gdb-command:print b // gdb-check:$2 = generator_objects::main::generator-0 {__0: 0x[...], <<variant>>: {__state: 3, 0: generator_objects::main::generator-0::Unresumed, 1: generator_objects::main::generator-0::Returned, 2: generator_objects::main::generator-0::Panicked, 3: generator_objects::main::generator-0::Suspend0 {c: 6, d: 7}, 4: generator_objects::main::generator-0::Suspend1 {[...]}}} // gdb-command:continue // gdb-command:print b // gdb-check:$3 = generator_objects::main::generator-0 {__0: 0x[...], <<variant>>: {__state: 4, 0: generator_objects::main::generator-0::Unresumed, 1: generator_objects::main::generator-0::Returned, 2: generator_objects::main::generator-0::Panicked, 3: generator_objects::main::generator-0::Suspend0 {[...]}, 4: generator_objects::main::generator-0::Suspend1 {c: 7, d: 8}}} // gdb-command:continue // gdb-command:print b // gdb-check:$4 = generator_objects::main::generator-0 {__0: 0x[...], <<variant>>: {__state: 1, 0: generator_objects::main::generator-0::Unresumed, 1: generator_objects::main::generator-0::Returned, 2: generator_objects::main::generator-0::Panicked, 3: generator_objects::main::generator-0::Suspend0 {[...]}, 4: 
generator_objects::main::generator-0::Suspend1 {[...]}}} // === LLDB TESTS ================================================================================== // lldb-command:run // lldb-command:print b // lldbg-check:(generator_objects::main::generator-0) $0 = generator-0(&0x[...]) // lldb-command:continue // lldb-command:print b // lldbg-check:(generator_objects::main::generator-0) $1 = generator-0(&0x[...]) // lldb-command:continue // lldb-command:print b // lldbg-check:(generator_objects::main::generator-0) $2 = generator-0(&0x[...]) // lldb-command:continue // lldb-command:print b // lldbg-check:(generator_objects::main::generator-0) $3 = generator-0(&0x[...]) #![feature(omit_gdb_pretty_printer_section, generators, generator_trait)] #![omit_gdb_pretty_printer_section] use std::ops::Generator; use std::pin::Pin; fn main() { let mut a = 5; let mut b = || { let mut c = 6; let mut d = 7; yield; a += 1; c += 1; d += 1; yield; println!("{} {} {}", a, c, d); }; _zzz(); // #break Pin::new(&mut b).resume(()); _zzz(); // #break Pin::new(&mut b).resume(()); _zzz(); // #break Pin::new(&mut b).resume(()); _zzz(); // #break } fn _zzz() {()}
45.605634
373
0.625386
acdd0c942fcf21bf07ca938b4dc167c7aea76dde
6,498
use super::VirtualMachine; use std::num::Wrapping; impl VirtualMachine { pub(super) fn jump_to_sys_routine(&mut self, _addr: usize) { // Do nothing } pub(super) fn clear_display(&mut self) { for px in self.display.iter_mut() { *px = 0; } } pub(super) fn ret_from_subroutine(&mut self) { self.pc = self.stack[self.sp as usize]; self.sp -= 1; } pub(super) fn jump_addr(&mut self, addr: u16) { self.pc = addr; } pub(super) fn call_subroutine(&mut self, addr: u16) { self.sp += 1; self.stack[self.sp as usize] = self.pc; self.pc = addr; } pub(super) fn skip_next_vx_eq(&mut self, x: usize, to: u8) { if self.v[x].0 == to { self.pc += 2; } } pub(super) fn skip_next_vx_ne(&mut self, x: usize, to: u8) { if self.v[x].0 != to { self.pc += 2; } } pub(super) fn skip_next_vx_eq_vy(&mut self, x: usize, y: usize) { if self.v[x] == self.v[y] { self.pc += 2; } } pub(super) fn set_vx_byte(&mut self, x: usize, byte: u8) { self.v[x].0 = byte; } pub(super) fn add_vx_byte(&mut self, x: usize, byte: u8) { self.v[x] += Wrapping(byte); } pub(super) fn set_vx_to_vy(&mut self, x: usize, y: usize) { self.v[x] = self.v[y]; } pub(super) fn set_vx_to_vx_or_vy(&mut self, x: usize, y: usize) { self.v[x] |= self.v[y]; } pub(super) fn set_vx_to_vx_and_vy(&mut self, x: usize, y: usize) { self.v[x] &= self.v[y]; } pub(super) fn set_vx_to_vx_xor_vy(&mut self, x: usize, y: usize) { self.v[x] ^= self.v[y]; } pub(super) fn add_vx_vy(&mut self, x: usize, y: usize) { self.v[0xF].0 = if u16::from(self.v[x].0) + u16::from(self.v[y].0) > 255 { 1 } else { 0 }; self.v[x] += self.v[y]; } pub(super) fn sub_vx_vy(&mut self, x: usize, y: usize) { self.v[0xF].0 = if self.v[x] > self.v[y] { 1 } else { 0 }; self.v[x] -= self.v[y]; } pub(super) fn subn_vx_vy(&mut self, x: usize, y: usize) { self.v[0xF].0 = if self.v[y] > self.v[x] { 1 } else { 0 }; self.v[x] = self.v[y] - self.v[x]; } pub(super) fn set_vx_to_vy_shr_1(&mut self, x: usize, y: usize) { self.v[0xF].0 = nth_bit(self.v[y].0, 7); self.v[x] = self.v[y] >> 1; } 
pub(super) fn set_vx_to_vy_shl_1(&mut self, x: usize, y: usize) { self.v[0xF].0 = nth_bit(self.v[y].0, 0); self.v[x] = self.v[y] << 1; } pub(super) fn skip_next_vx_ne_vy(&mut self, x: usize, y: usize) { if self.v[x] != self.v[y] { self.pc += 2; } } pub(super) fn set_i(&mut self, to: u16) { self.i = to; } pub(super) fn set_vx_rand_and(&mut self, x: usize, to: u8) { use rand::Rng; let mut rgen = rand::thread_rng(); self.v[x].0 = rgen.gen::<u8>() & to; } pub(super) fn display_sprite(&mut self, vx: usize, vy: usize, n: usize) { use super::{DISPLAY_HEIGHT, DISPLAY_WIDTH}; self.v[0xF].0 = 0; for y in 0..n { let b = self.ram[self.i as usize + y]; for x in 0..8 { let xx = x + self.v[vx].0 as usize; let yy = y + self.v[vy].0 as usize; if xx < DISPLAY_WIDTH && yy < DISPLAY_HEIGHT { let idx = yy * DISPLAY_WIDTH + xx; if b & (0b1000_0000 >> x) != 0 { if self.display[idx] == 1 { self.v[0xF].0 = 1; } self.display[idx] ^= 1; } } } } self.display_updated = true; } pub(super) fn skip_next_key_vx_not_pressed(&mut self, x: usize) { if !self.keys[self.v[x].0 as usize] { self.pc += 2; } } pub(super) fn skip_next_key_vx_pressed(&mut self, x: usize) { if self.keys[self.v[x].0 as usize] { self.pc += 2; } } pub(super) fn set_vx_to_delay_timer(&mut self, x: usize) { self.v[x].0 = self.delay_timer; } pub(super) fn wait_for_keypress_store_in_vx(&mut self, x: usize) { self.keypress_wait.wait = true; self.keypress_wait.vx = x; } pub(super) fn set_delay_timer(&mut self, x: usize) { self.delay_timer = self.v[x].0; } pub(super) fn set_sound_timer(&mut self, x: usize) { self.sound_timer = self.v[x].0; } pub(super) fn add_vx_to_i(&mut self, x: usize) { self.i += u16::from(self.v[x].0); } pub(super) fn set_i_to_loc_of_digit_vx(&mut self, x: usize) { self.i = u16::from((self.v[x] * Wrapping(5)).0); } pub(super) fn store_bcd_of_vx_to_i(&mut self, x: usize) { let num = self.v[x].0; // TODO: Should probably be wrapping let h = num / 100; let t = (num - h * 100) / 10; let o = num - h * 100 - t * 10; 
self.ram[self.i as usize] = h; self.ram[self.i as usize + 1] = t; self.ram[self.i as usize + 2] = o; } pub(super) fn copy_v0_through_vx_to_mem(&mut self, x: u16) { for pos in 0..=x { self.ram[(self.i + pos) as usize] = self.v[pos as usize].0; } self.i += x + 1; } pub(super) fn read_v0_through_vx_from_mem(&mut self, x: u16) { for pos in 0..=x { self.v[pos as usize].0 = self.ram[(self.i + pos) as usize]; } self.i += x + 1; } } fn nth_bit(byte: u8, pos: usize) -> u8 { use bit_utils::BitInformation; if byte.has_x_bit(7 - pos) { 1 } else { 0 } } #[test] fn test_nth_bit() { assert_eq!(nth_bit(0b10000000, 0), 1); for i in 1..8 { assert_eq!(nth_bit(0b10000000, i), 0); } assert_eq!(nth_bit(0b01000000, 1), 1); assert_eq!(nth_bit(0b00100000, 2), 1); assert_eq!(nth_bit(0b00010000, 3), 1); assert_eq!(nth_bit(0b00001000, 4), 1); assert_eq!(nth_bit(0b00000100, 5), 1); assert_eq!(nth_bit(0b00000010, 6), 1); assert_eq!(nth_bit(0b00000001, 7), 1); } #[test] fn test_strore_bcd_of_vx_to_i() { let mut vm = VirtualMachine::new(); vm.v[0].0 = 146; vm.i = 0; vm.store_bcd_of_vx_to_i(0); assert!(vm.ram[0] == 1); assert!(vm.ram[1] == 4); assert!(vm.ram[2] == 6); }
27.302521
82
0.505232
210e764d078df2b57821fd6e84c88a858dc88b50
48,691
#[doc = "Register `CC12` reader"] pub struct R(crate::R<CC12_SPEC>); impl core::ops::Deref for R { type Target = crate::R<CC12_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl core::convert::From<crate::R<CC12_SPEC>> for R { fn from(reader: crate::R<CC12_SPEC>) -> Self { R(reader) } } #[doc = "Register `CC12` writer"] pub struct W(crate::W<CC12_SPEC>); impl core::ops::Deref for W { type Target = crate::W<CC12_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl core::ops::DerefMut for W { #[inline(always)] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl core::convert::From<crate::W<CC12_SPEC>> for W { fn from(writer: crate::W<CC12_SPEC>) -> Self { W(writer) } } #[doc = "Channel x Transfer Type\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum TYPE_A { #[doc = "0: Self triggered mode (Memory to Memory Transfer)."] MEM_TRAN = 0, #[doc = "1: Synchronized mode (Peripheral to Memory or Memory to Peripheral Transfer)."] PER_TRAN = 1, } impl From<TYPE_A> for bool { #[inline(always)] fn from(variant: TYPE_A) -> Self { variant as u8 != 0 } } #[doc = "Field `TYPE` reader - Channel x Transfer Type"] pub struct TYPE_R(crate::FieldReader<bool, TYPE_A>); impl TYPE_R { pub(crate) fn new(bits: bool) -> Self { TYPE_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> TYPE_A { match self.bits { false => TYPE_A::MEM_TRAN, true => TYPE_A::PER_TRAN, } } #[doc = "Checks if the value of the field is `MEM_TRAN`"] #[inline(always)] pub fn is_mem_tran(&self) -> bool { **self == TYPE_A::MEM_TRAN } #[doc = "Checks if the value of the field is `PER_TRAN`"] #[inline(always)] pub fn is_per_tran(&self) -> bool { **self == TYPE_A::PER_TRAN } } impl core::ops::Deref for TYPE_R { type Target = crate::FieldReader<bool, TYPE_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `TYPE` writer - Channel x Transfer 
Type"] pub struct TYPE_W<'a> { w: &'a mut W, } impl<'a> TYPE_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: TYPE_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Self triggered mode (Memory to Memory Transfer)."] #[inline(always)] pub fn mem_tran(self) -> &'a mut W { self.variant(TYPE_A::MEM_TRAN) } #[doc = "Synchronized mode (Peripheral to Memory or Memory to Peripheral Transfer)."] #[inline(always)] pub fn per_tran(self) -> &'a mut W { self.variant(TYPE_A::PER_TRAN) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !0x01) | (value as u32 & 0x01); self.w } } #[doc = "Channel x Memory Burst Size\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] #[repr(u8)] pub enum MBSIZE_A { #[doc = "0: The memory burst size is set to one."] SINGLE = 0, #[doc = "1: The memory burst size is set to four."] FOUR = 1, #[doc = "2: The memory burst size is set to eight."] EIGHT = 2, #[doc = "3: The memory burst size is set to sixteen."] SIXTEEN = 3, } impl From<MBSIZE_A> for u8 { #[inline(always)] fn from(variant: MBSIZE_A) -> Self { variant as _ } } #[doc = "Field `MBSIZE` reader - Channel x Memory Burst Size"] pub struct MBSIZE_R(crate::FieldReader<u8, MBSIZE_A>); impl MBSIZE_R { pub(crate) fn new(bits: u8) -> Self { MBSIZE_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> MBSIZE_A { match self.bits { 0 => MBSIZE_A::SINGLE, 1 => MBSIZE_A::FOUR, 2 => MBSIZE_A::EIGHT, 3 => MBSIZE_A::SIXTEEN, _ => unreachable!(), } } #[doc = "Checks if the value of the field is `SINGLE`"] #[inline(always)] pub fn is_single(&self) -> bool { **self == MBSIZE_A::SINGLE } #[doc = 
"Checks if the value of the field is `FOUR`"] #[inline(always)] pub fn is_four(&self) -> bool { **self == MBSIZE_A::FOUR } #[doc = "Checks if the value of the field is `EIGHT`"] #[inline(always)] pub fn is_eight(&self) -> bool { **self == MBSIZE_A::EIGHT } #[doc = "Checks if the value of the field is `SIXTEEN`"] #[inline(always)] pub fn is_sixteen(&self) -> bool { **self == MBSIZE_A::SIXTEEN } } impl core::ops::Deref for MBSIZE_R { type Target = crate::FieldReader<u8, MBSIZE_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `MBSIZE` writer - Channel x Memory Burst Size"] pub struct MBSIZE_W<'a> { w: &'a mut W, } impl<'a> MBSIZE_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: MBSIZE_A) -> &'a mut W { self.bits(variant.into()) } #[doc = "The memory burst size is set to one."] #[inline(always)] pub fn single(self) -> &'a mut W { self.variant(MBSIZE_A::SINGLE) } #[doc = "The memory burst size is set to four."] #[inline(always)] pub fn four(self) -> &'a mut W { self.variant(MBSIZE_A::FOUR) } #[doc = "The memory burst size is set to eight."] #[inline(always)] pub fn eight(self) -> &'a mut W { self.variant(MBSIZE_A::EIGHT) } #[doc = "The memory burst size is set to sixteen."] #[inline(always)] pub fn sixteen(self) -> &'a mut W { self.variant(MBSIZE_A::SIXTEEN) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0x03 << 1)) | ((value as u32 & 0x03) << 1); self.w } } #[doc = "Channel x Synchronization\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum DSYNC_A { #[doc = "0: Peripheral to Memory transfer"] PER2MEM = 0, #[doc = "1: Memory to Peripheral transfer"] MEM2PER = 1, } impl From<DSYNC_A> for bool { #[inline(always)] fn from(variant: DSYNC_A) -> Self { variant as u8 != 0 } } #[doc = "Field `DSYNC` reader - Channel x Synchronization"] pub struct DSYNC_R(crate::FieldReader<bool, 
DSYNC_A>); impl DSYNC_R { pub(crate) fn new(bits: bool) -> Self { DSYNC_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> DSYNC_A { match self.bits { false => DSYNC_A::PER2MEM, true => DSYNC_A::MEM2PER, } } #[doc = "Checks if the value of the field is `PER2MEM`"] #[inline(always)] pub fn is_per2mem(&self) -> bool { **self == DSYNC_A::PER2MEM } #[doc = "Checks if the value of the field is `MEM2PER`"] #[inline(always)] pub fn is_mem2per(&self) -> bool { **self == DSYNC_A::MEM2PER } } impl core::ops::Deref for DSYNC_R { type Target = crate::FieldReader<bool, DSYNC_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `DSYNC` writer - Channel x Synchronization"] pub struct DSYNC_W<'a> { w: &'a mut W, } impl<'a> DSYNC_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: DSYNC_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Peripheral to Memory transfer"] #[inline(always)] pub fn per2mem(self) -> &'a mut W { self.variant(DSYNC_A::PER2MEM) } #[doc = "Memory to Peripheral transfer"] #[inline(always)] pub fn mem2per(self) -> &'a mut W { self.variant(DSYNC_A::MEM2PER) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 4)) | ((value as u32 & 0x01) << 4); self.w } } #[doc = "Channel x Protection\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum PROT_A { #[doc = "0: Channel is secured"] SEC = 0, #[doc = "1: Channel is unsecured"] UNSEC = 1, } impl From<PROT_A> for bool { #[inline(always)] fn from(variant: PROT_A) -> Self { variant as u8 != 0 } } #[doc = "Field `PROT` reader - Channel x Protection"] pub struct 
PROT_R(crate::FieldReader<bool, PROT_A>); impl PROT_R { pub(crate) fn new(bits: bool) -> Self { PROT_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> PROT_A { match self.bits { false => PROT_A::SEC, true => PROT_A::UNSEC, } } #[doc = "Checks if the value of the field is `SEC`"] #[inline(always)] pub fn is_sec(&self) -> bool { **self == PROT_A::SEC } #[doc = "Checks if the value of the field is `UNSEC`"] #[inline(always)] pub fn is_unsec(&self) -> bool { **self == PROT_A::UNSEC } } impl core::ops::Deref for PROT_R { type Target = crate::FieldReader<bool, PROT_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `PROT` writer - Channel x Protection"] pub struct PROT_W<'a> { w: &'a mut W, } impl<'a> PROT_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: PROT_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Channel is secured"] #[inline(always)] pub fn sec(self) -> &'a mut W { self.variant(PROT_A::SEC) } #[doc = "Channel is unsecured"] #[inline(always)] pub fn unsec(self) -> &'a mut W { self.variant(PROT_A::UNSEC) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 5)) | ((value as u32 & 0x01) << 5); self.w } } #[doc = "Channel x Software Request Trigger\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum SWREQ_A { #[doc = "0: Hardware request line is connected to the peripheral request line."] HWR_CONNECTED = 0, #[doc = "1: Software request is connected to the peripheral request line."] SWR_CONNECTED = 1, } impl From<SWREQ_A> for bool { #[inline(always)] fn from(variant: SWREQ_A) -> Self { variant 
as u8 != 0 } } #[doc = "Field `SWREQ` reader - Channel x Software Request Trigger"] pub struct SWREQ_R(crate::FieldReader<bool, SWREQ_A>); impl SWREQ_R { pub(crate) fn new(bits: bool) -> Self { SWREQ_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> SWREQ_A { match self.bits { false => SWREQ_A::HWR_CONNECTED, true => SWREQ_A::SWR_CONNECTED, } } #[doc = "Checks if the value of the field is `HWR_CONNECTED`"] #[inline(always)] pub fn is_hwr_connected(&self) -> bool { **self == SWREQ_A::HWR_CONNECTED } #[doc = "Checks if the value of the field is `SWR_CONNECTED`"] #[inline(always)] pub fn is_swr_connected(&self) -> bool { **self == SWREQ_A::SWR_CONNECTED } } impl core::ops::Deref for SWREQ_R { type Target = crate::FieldReader<bool, SWREQ_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `SWREQ` writer - Channel x Software Request Trigger"] pub struct SWREQ_W<'a> { w: &'a mut W, } impl<'a> SWREQ_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: SWREQ_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Hardware request line is connected to the peripheral request line."] #[inline(always)] pub fn hwr_connected(self) -> &'a mut W { self.variant(SWREQ_A::HWR_CONNECTED) } #[doc = "Software request is connected to the peripheral request line."] #[inline(always)] pub fn swr_connected(self) -> &'a mut W { self.variant(SWREQ_A::SWR_CONNECTED) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 6)) | ((value as u32 & 0x01) << 6); self.w } } #[doc = "Channel x Fill Block of memory\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, 
PartialEq)] pub enum MEMSET_A { #[doc = "0: Memset is not activated"] NORMAL_MODE = 0, #[doc = "1: Sets the block of memory pointed by DA field to the specified value. This operation is performed on 8, 16 or 32 bits basis."] HW_MODE = 1, } impl From<MEMSET_A> for bool { #[inline(always)] fn from(variant: MEMSET_A) -> Self { variant as u8 != 0 } } #[doc = "Field `MEMSET` reader - Channel x Fill Block of memory"] pub struct MEMSET_R(crate::FieldReader<bool, MEMSET_A>); impl MEMSET_R { pub(crate) fn new(bits: bool) -> Self { MEMSET_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> MEMSET_A { match self.bits { false => MEMSET_A::NORMAL_MODE, true => MEMSET_A::HW_MODE, } } #[doc = "Checks if the value of the field is `NORMAL_MODE`"] #[inline(always)] pub fn is_normal_mode(&self) -> bool { **self == MEMSET_A::NORMAL_MODE } #[doc = "Checks if the value of the field is `HW_MODE`"] #[inline(always)] pub fn is_hw_mode(&self) -> bool { **self == MEMSET_A::HW_MODE } } impl core::ops::Deref for MEMSET_R { type Target = crate::FieldReader<bool, MEMSET_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `MEMSET` writer - Channel x Fill Block of memory"] pub struct MEMSET_W<'a> { w: &'a mut W, } impl<'a> MEMSET_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: MEMSET_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Memset is not activated"] #[inline(always)] pub fn normal_mode(self) -> &'a mut W { self.variant(MEMSET_A::NORMAL_MODE) } #[doc = "Sets the block of memory pointed by DA field to the specified value. 
This operation is performed on 8, 16 or 32 bits basis."] #[inline(always)] pub fn hw_mode(self) -> &'a mut W { self.variant(MEMSET_A::HW_MODE) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 7)) | ((value as u32 & 0x01) << 7); self.w } } #[doc = "Channel x Chunk Size\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] #[repr(u8)] pub enum CSIZE_A { #[doc = "0: 1 data transferred"] CHK_1 = 0, #[doc = "1: 2 data transferred"] CHK_2 = 1, #[doc = "2: 4 data transferred"] CHK_4 = 2, #[doc = "3: 8 data transferred"] CHK_8 = 3, #[doc = "4: 16 data transferred"] CHK_16 = 4, } impl From<CSIZE_A> for u8 { #[inline(always)] fn from(variant: CSIZE_A) -> Self { variant as _ } } #[doc = "Field `CSIZE` reader - Channel x Chunk Size"] pub struct CSIZE_R(crate::FieldReader<u8, CSIZE_A>); impl CSIZE_R { pub(crate) fn new(bits: u8) -> Self { CSIZE_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> Option<CSIZE_A> { match self.bits { 0 => Some(CSIZE_A::CHK_1), 1 => Some(CSIZE_A::CHK_2), 2 => Some(CSIZE_A::CHK_4), 3 => Some(CSIZE_A::CHK_8), 4 => Some(CSIZE_A::CHK_16), _ => None, } } #[doc = "Checks if the value of the field is `CHK_1`"] #[inline(always)] pub fn is_chk_1(&self) -> bool { **self == CSIZE_A::CHK_1 } #[doc = "Checks if the value of the field is `CHK_2`"] #[inline(always)] pub fn is_chk_2(&self) -> bool { **self == CSIZE_A::CHK_2 } #[doc = "Checks if the value of the field is `CHK_4`"] #[inline(always)] pub fn is_chk_4(&self) -> bool { **self == CSIZE_A::CHK_4 } #[doc = "Checks if the value of the field is `CHK_8`"] #[inline(always)] pub fn is_chk_8(&self) -> bool { **self == CSIZE_A::CHK_8 } 
#[doc = "Checks if the value of the field is `CHK_16`"] #[inline(always)] pub fn is_chk_16(&self) -> bool { **self == CSIZE_A::CHK_16 } } impl core::ops::Deref for CSIZE_R { type Target = crate::FieldReader<u8, CSIZE_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `CSIZE` writer - Channel x Chunk Size"] pub struct CSIZE_W<'a> { w: &'a mut W, } impl<'a> CSIZE_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: CSIZE_A) -> &'a mut W { unsafe { self.bits(variant.into()) } } #[doc = "1 data transferred"] #[inline(always)] pub fn chk_1(self) -> &'a mut W { self.variant(CSIZE_A::CHK_1) } #[doc = "2 data transferred"] #[inline(always)] pub fn chk_2(self) -> &'a mut W { self.variant(CSIZE_A::CHK_2) } #[doc = "4 data transferred"] #[inline(always)] pub fn chk_4(self) -> &'a mut W { self.variant(CSIZE_A::CHK_4) } #[doc = "8 data transferred"] #[inline(always)] pub fn chk_8(self) -> &'a mut W { self.variant(CSIZE_A::CHK_8) } #[doc = "16 data transferred"] #[inline(always)] pub fn chk_16(self) -> &'a mut W { self.variant(CSIZE_A::CHK_16) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0x07 << 8)) | ((value as u32 & 0x07) << 8); self.w } } #[doc = "Channel x Data Width\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] #[repr(u8)] pub enum DWIDTH_A { #[doc = "0: The data size is set to 8 bits"] BYTE = 0, #[doc = "1: The data size is set to 16 bits"] HALFWORD = 1, #[doc = "2: The data size is set to 32 bits"] WORD = 2, } impl From<DWIDTH_A> for u8 { #[inline(always)] fn from(variant: DWIDTH_A) -> Self { variant as _ } } #[doc = "Field `DWIDTH` reader - Channel x Data Width"] pub struct DWIDTH_R(crate::FieldReader<u8, DWIDTH_A>); impl DWIDTH_R { pub(crate) fn new(bits: u8) -> Self { DWIDTH_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn 
variant(&self) -> Option<DWIDTH_A> { match self.bits { 0 => Some(DWIDTH_A::BYTE), 1 => Some(DWIDTH_A::HALFWORD), 2 => Some(DWIDTH_A::WORD), _ => None, } } #[doc = "Checks if the value of the field is `BYTE`"] #[inline(always)] pub fn is_byte(&self) -> bool { **self == DWIDTH_A::BYTE } #[doc = "Checks if the value of the field is `HALFWORD`"] #[inline(always)] pub fn is_halfword(&self) -> bool { **self == DWIDTH_A::HALFWORD } #[doc = "Checks if the value of the field is `WORD`"] #[inline(always)] pub fn is_word(&self) -> bool { **self == DWIDTH_A::WORD } } impl core::ops::Deref for DWIDTH_R { type Target = crate::FieldReader<u8, DWIDTH_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `DWIDTH` writer - Channel x Data Width"] pub struct DWIDTH_W<'a> { w: &'a mut W, } impl<'a> DWIDTH_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: DWIDTH_A) -> &'a mut W { unsafe { self.bits(variant.into()) } } #[doc = "The data size is set to 8 bits"] #[inline(always)] pub fn byte(self) -> &'a mut W { self.variant(DWIDTH_A::BYTE) } #[doc = "The data size is set to 16 bits"] #[inline(always)] pub fn halfword(self) -> &'a mut W { self.variant(DWIDTH_A::HALFWORD) } #[doc = "The data size is set to 32 bits"] #[inline(always)] pub fn word(self) -> &'a mut W { self.variant(DWIDTH_A::WORD) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0x03 << 11)) | ((value as u32 & 0x03) << 11); self.w } } #[doc = "Channel x Source Interface Identifier\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum SIF_A { #[doc = "0: The data is read through the system bus interface 0"] AHB_IF0 = 0, #[doc = "1: The data is read through the system bus interface 1"] AHB_IF1 = 1, } impl From<SIF_A> for bool { #[inline(always)] fn from(variant: SIF_A) -> Self { variant as u8 != 0 } } #[doc = "Field `SIF` reader - Channel x 
Source Interface Identifier"] pub struct SIF_R(crate::FieldReader<bool, SIF_A>); impl SIF_R { pub(crate) fn new(bits: bool) -> Self { SIF_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> SIF_A { match self.bits { false => SIF_A::AHB_IF0, true => SIF_A::AHB_IF1, } } #[doc = "Checks if the value of the field is `AHB_IF0`"] #[inline(always)] pub fn is_ahb_if0(&self) -> bool { **self == SIF_A::AHB_IF0 } #[doc = "Checks if the value of the field is `AHB_IF1`"] #[inline(always)] pub fn is_ahb_if1(&self) -> bool { **self == SIF_A::AHB_IF1 } } impl core::ops::Deref for SIF_R { type Target = crate::FieldReader<bool, SIF_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `SIF` writer - Channel x Source Interface Identifier"] pub struct SIF_W<'a> { w: &'a mut W, } impl<'a> SIF_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: SIF_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "The data is read through the system bus interface 0"] #[inline(always)] pub fn ahb_if0(self) -> &'a mut W { self.variant(SIF_A::AHB_IF0) } #[doc = "The data is read through the system bus interface 1"] #[inline(always)] pub fn ahb_if1(self) -> &'a mut W { self.variant(SIF_A::AHB_IF1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 13)) | ((value as u32 & 0x01) << 13); self.w } } #[doc = "Channel x Destination Interface Identifier\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum DIF_A { #[doc = "0: The data is written through the system bus interface 0"] AHB_IF0 = 0, #[doc = "1: The data is written though the system bus 
interface 1"] AHB_IF1 = 1, } impl From<DIF_A> for bool { #[inline(always)] fn from(variant: DIF_A) -> Self { variant as u8 != 0 } } #[doc = "Field `DIF` reader - Channel x Destination Interface Identifier"] pub struct DIF_R(crate::FieldReader<bool, DIF_A>); impl DIF_R { pub(crate) fn new(bits: bool) -> Self { DIF_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> DIF_A { match self.bits { false => DIF_A::AHB_IF0, true => DIF_A::AHB_IF1, } } #[doc = "Checks if the value of the field is `AHB_IF0`"] #[inline(always)] pub fn is_ahb_if0(&self) -> bool { **self == DIF_A::AHB_IF0 } #[doc = "Checks if the value of the field is `AHB_IF1`"] #[inline(always)] pub fn is_ahb_if1(&self) -> bool { **self == DIF_A::AHB_IF1 } } impl core::ops::Deref for DIF_R { type Target = crate::FieldReader<bool, DIF_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `DIF` writer - Channel x Destination Interface Identifier"] pub struct DIF_W<'a> { w: &'a mut W, } impl<'a> DIF_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: DIF_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "The data is written through the system bus interface 0"] #[inline(always)] pub fn ahb_if0(self) -> &'a mut W { self.variant(DIF_A::AHB_IF0) } #[doc = "The data is written though the system bus interface 1"] #[inline(always)] pub fn ahb_if1(self) -> &'a mut W { self.variant(DIF_A::AHB_IF1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 14)) | ((value as u32 & 0x01) << 14); self.w } } #[doc = "Channel x Source Addressing Mode\n\nValue on reset: 0"] #[derive(Clone, Copy, 
Debug, PartialEq)] #[repr(u8)] pub enum SAM_A { #[doc = "0: The address remains unchanged."] FIXED_AM = 0, #[doc = "1: The addressing mode is incremented (the increment size is set to the data size)."] INCREMENTED_AM = 1, #[doc = "2: The microblock stride is added at the microblock boundary."] UBS_AM = 2, #[doc = "3: The microblock stride is added at the microblock boundary, the data stride is added at the data boundary."] UBS_DS_AM = 3, } impl From<SAM_A> for u8 { #[inline(always)] fn from(variant: SAM_A) -> Self { variant as _ } } #[doc = "Field `SAM` reader - Channel x Source Addressing Mode"] pub struct SAM_R(crate::FieldReader<u8, SAM_A>); impl SAM_R { pub(crate) fn new(bits: u8) -> Self { SAM_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> SAM_A { match self.bits { 0 => SAM_A::FIXED_AM, 1 => SAM_A::INCREMENTED_AM, 2 => SAM_A::UBS_AM, 3 => SAM_A::UBS_DS_AM, _ => unreachable!(), } } #[doc = "Checks if the value of the field is `FIXED_AM`"] #[inline(always)] pub fn is_fixed_am(&self) -> bool { **self == SAM_A::FIXED_AM } #[doc = "Checks if the value of the field is `INCREMENTED_AM`"] #[inline(always)] pub fn is_incremented_am(&self) -> bool { **self == SAM_A::INCREMENTED_AM } #[doc = "Checks if the value of the field is `UBS_AM`"] #[inline(always)] pub fn is_ubs_am(&self) -> bool { **self == SAM_A::UBS_AM } #[doc = "Checks if the value of the field is `UBS_DS_AM`"] #[inline(always)] pub fn is_ubs_ds_am(&self) -> bool { **self == SAM_A::UBS_DS_AM } } impl core::ops::Deref for SAM_R { type Target = crate::FieldReader<u8, SAM_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `SAM` writer - Channel x Source Addressing Mode"] pub struct SAM_W<'a> { w: &'a mut W, } impl<'a> SAM_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: SAM_A) -> &'a mut W { self.bits(variant.into()) } #[doc = "The address remains unchanged."] 
#[inline(always)] pub fn fixed_am(self) -> &'a mut W { self.variant(SAM_A::FIXED_AM) } #[doc = "The addressing mode is incremented (the increment size is set to the data size)."] #[inline(always)] pub fn incremented_am(self) -> &'a mut W { self.variant(SAM_A::INCREMENTED_AM) } #[doc = "The microblock stride is added at the microblock boundary."] #[inline(always)] pub fn ubs_am(self) -> &'a mut W { self.variant(SAM_A::UBS_AM) } #[doc = "The microblock stride is added at the microblock boundary, the data stride is added at the data boundary."] #[inline(always)] pub fn ubs_ds_am(self) -> &'a mut W { self.variant(SAM_A::UBS_DS_AM) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0x03 << 16)) | ((value as u32 & 0x03) << 16); self.w } } #[doc = "Channel x Destination Addressing Mode\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] #[repr(u8)] pub enum DAM_A { #[doc = "0: The address remains unchanged."] FIXED_AM = 0, #[doc = "1: The addressing mode is incremented (the increment size is set to the data size)."] INCREMENTED_AM = 1, #[doc = "2: The microblock stride is added at the microblock boundary."] UBS_AM = 2, #[doc = "3: The microblock stride is added at the microblock boundary, the data stride is added at the data boundary."] UBS_DS_AM = 3, } impl From<DAM_A> for u8 { #[inline(always)] fn from(variant: DAM_A) -> Self { variant as _ } } #[doc = "Field `DAM` reader - Channel x Destination Addressing Mode"] pub struct DAM_R(crate::FieldReader<u8, DAM_A>); impl DAM_R { pub(crate) fn new(bits: u8) -> Self { DAM_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> DAM_A { match self.bits { 0 => DAM_A::FIXED_AM, 1 => DAM_A::INCREMENTED_AM, 2 => DAM_A::UBS_AM, 3 => DAM_A::UBS_DS_AM, _ => unreachable!(), } } #[doc = "Checks if the value of the field is `FIXED_AM`"] #[inline(always)] pub fn is_fixed_am(&self) 
-> bool { **self == DAM_A::FIXED_AM } #[doc = "Checks if the value of the field is `INCREMENTED_AM`"] #[inline(always)] pub fn is_incremented_am(&self) -> bool { **self == DAM_A::INCREMENTED_AM } #[doc = "Checks if the value of the field is `UBS_AM`"] #[inline(always)] pub fn is_ubs_am(&self) -> bool { **self == DAM_A::UBS_AM } #[doc = "Checks if the value of the field is `UBS_DS_AM`"] #[inline(always)] pub fn is_ubs_ds_am(&self) -> bool { **self == DAM_A::UBS_DS_AM } } impl core::ops::Deref for DAM_R { type Target = crate::FieldReader<u8, DAM_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `DAM` writer - Channel x Destination Addressing Mode"] pub struct DAM_W<'a> { w: &'a mut W, } impl<'a> DAM_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: DAM_A) -> &'a mut W { self.bits(variant.into()) } #[doc = "The address remains unchanged."] #[inline(always)] pub fn fixed_am(self) -> &'a mut W { self.variant(DAM_A::FIXED_AM) } #[doc = "The addressing mode is incremented (the increment size is set to the data size)."] #[inline(always)] pub fn incremented_am(self) -> &'a mut W { self.variant(DAM_A::INCREMENTED_AM) } #[doc = "The microblock stride is added at the microblock boundary."] #[inline(always)] pub fn ubs_am(self) -> &'a mut W { self.variant(DAM_A::UBS_AM) } #[doc = "The microblock stride is added at the microblock boundary, the data stride is added at the data boundary."] #[inline(always)] pub fn ubs_ds_am(self) -> &'a mut W { self.variant(DAM_A::UBS_DS_AM) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0x03 << 18)) | ((value as u32 & 0x03) << 18); self.w } } #[doc = "Channel Initialization Terminated (this bit is read-only)\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum INITD_A { #[doc = "0: Channel initialization is in progress."] TERMINATED = 0, #[doc = "1: Channel 
initialization is completed."] IN_PROGRESS = 1, } impl From<INITD_A> for bool { #[inline(always)] fn from(variant: INITD_A) -> Self { variant as u8 != 0 } } #[doc = "Field `INITD` reader - Channel Initialization Terminated (this bit is read-only)"] pub struct INITD_R(crate::FieldReader<bool, INITD_A>); impl INITD_R { pub(crate) fn new(bits: bool) -> Self { INITD_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> INITD_A { match self.bits { false => INITD_A::TERMINATED, true => INITD_A::IN_PROGRESS, } } #[doc = "Checks if the value of the field is `TERMINATED`"] #[inline(always)] pub fn is_terminated(&self) -> bool { **self == INITD_A::TERMINATED } #[doc = "Checks if the value of the field is `IN_PROGRESS`"] #[inline(always)] pub fn is_in_progress(&self) -> bool { **self == INITD_A::IN_PROGRESS } } impl core::ops::Deref for INITD_R { type Target = crate::FieldReader<bool, INITD_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `INITD` writer - Channel Initialization Terminated (this bit is read-only)"] pub struct INITD_W<'a> { w: &'a mut W, } impl<'a> INITD_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: INITD_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Channel initialization is in progress."] #[inline(always)] pub fn terminated(self) -> &'a mut W { self.variant(INITD_A::TERMINATED) } #[doc = "Channel initialization is completed."] #[inline(always)] pub fn in_progress(self) -> &'a mut W { self.variant(INITD_A::IN_PROGRESS) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 21)) | ((value as u32 & 0x01) << 
21); self.w } } #[doc = "Read in Progress (this bit is read-only)\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum RDIP_A { #[doc = "0: No Active read transaction on the bus."] DONE = 0, #[doc = "1: A read transaction is in progress."] IN_PROGRESS = 1, } impl From<RDIP_A> for bool { #[inline(always)] fn from(variant: RDIP_A) -> Self { variant as u8 != 0 } } #[doc = "Field `RDIP` reader - Read in Progress (this bit is read-only)"] pub struct RDIP_R(crate::FieldReader<bool, RDIP_A>); impl RDIP_R { pub(crate) fn new(bits: bool) -> Self { RDIP_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> RDIP_A { match self.bits { false => RDIP_A::DONE, true => RDIP_A::IN_PROGRESS, } } #[doc = "Checks if the value of the field is `DONE`"] #[inline(always)] pub fn is_done(&self) -> bool { **self == RDIP_A::DONE } #[doc = "Checks if the value of the field is `IN_PROGRESS`"] #[inline(always)] pub fn is_in_progress(&self) -> bool { **self == RDIP_A::IN_PROGRESS } } impl core::ops::Deref for RDIP_R { type Target = crate::FieldReader<bool, RDIP_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `RDIP` writer - Read in Progress (this bit is read-only)"] pub struct RDIP_W<'a> { w: &'a mut W, } impl<'a> RDIP_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: RDIP_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "No Active read transaction on the bus."] #[inline(always)] pub fn done(self) -> &'a mut W { self.variant(RDIP_A::DONE) } #[doc = "A read transaction is in progress."] #[inline(always)] pub fn in_progress(self) -> &'a mut W { self.variant(RDIP_A::IN_PROGRESS) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] 
#[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 22)) | ((value as u32 & 0x01) << 22); self.w } } #[doc = "Write in Progress (this bit is read-only)\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum WRIP_A { #[doc = "0: No Active write transaction on the bus."] DONE = 0, #[doc = "1: A Write transaction is in progress."] IN_PROGRESS = 1, } impl From<WRIP_A> for bool { #[inline(always)] fn from(variant: WRIP_A) -> Self { variant as u8 != 0 } } #[doc = "Field `WRIP` reader - Write in Progress (this bit is read-only)"] pub struct WRIP_R(crate::FieldReader<bool, WRIP_A>); impl WRIP_R { pub(crate) fn new(bits: bool) -> Self { WRIP_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> WRIP_A { match self.bits { false => WRIP_A::DONE, true => WRIP_A::IN_PROGRESS, } } #[doc = "Checks if the value of the field is `DONE`"] #[inline(always)] pub fn is_done(&self) -> bool { **self == WRIP_A::DONE } #[doc = "Checks if the value of the field is `IN_PROGRESS`"] #[inline(always)] pub fn is_in_progress(&self) -> bool { **self == WRIP_A::IN_PROGRESS } } impl core::ops::Deref for WRIP_R { type Target = crate::FieldReader<bool, WRIP_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `WRIP` writer - Write in Progress (this bit is read-only)"] pub struct WRIP_W<'a> { w: &'a mut W, } impl<'a> WRIP_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: WRIP_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "No Active write transaction on the bus."] #[inline(always)] pub fn done(self) -> &'a mut W { self.variant(WRIP_A::DONE) } #[doc = "A Write transaction is in progress."] #[inline(always)] pub fn in_progress(self) -> &'a mut W { self.variant(WRIP_A::IN_PROGRESS) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = 
r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 23)) | ((value as u32 & 0x01) << 23); self.w } } #[doc = "Field `PERID` reader - Channel x Peripheral Identifier"] pub struct PERID_R(crate::FieldReader<u8, u8>); impl PERID_R { pub(crate) fn new(bits: u8) -> Self { PERID_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for PERID_R { type Target = crate::FieldReader<u8, u8>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `PERID` writer - Channel x Peripheral Identifier"] pub struct PERID_W<'a> { w: &'a mut W, } impl<'a> PERID_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0x7f << 24)) | ((value as u32 & 0x7f) << 24); self.w } } impl R { #[doc = "Bit 0 - Channel x Transfer Type"] #[inline(always)] pub fn type_(&self) -> TYPE_R { TYPE_R::new((self.bits & 0x01) != 0) } #[doc = "Bits 1:2 - Channel x Memory Burst Size"] #[inline(always)] pub fn mbsize(&self) -> MBSIZE_R { MBSIZE_R::new(((self.bits >> 1) & 0x03) as u8) } #[doc = "Bit 4 - Channel x Synchronization"] #[inline(always)] pub fn dsync(&self) -> DSYNC_R { DSYNC_R::new(((self.bits >> 4) & 0x01) != 0) } #[doc = "Bit 5 - Channel x Protection"] #[inline(always)] pub fn prot(&self) -> PROT_R { PROT_R::new(((self.bits >> 5) & 0x01) != 0) } #[doc = "Bit 6 - Channel x Software Request Trigger"] #[inline(always)] pub fn swreq(&self) -> SWREQ_R { SWREQ_R::new(((self.bits >> 6) & 0x01) != 0) } #[doc = "Bit 7 - Channel x Fill Block of memory"] #[inline(always)] pub fn memset(&self) -> MEMSET_R { MEMSET_R::new(((self.bits >> 7) & 0x01) != 0) } #[doc = "Bits 8:10 - Channel x Chunk Size"] #[inline(always)] pub fn csize(&self) -> CSIZE_R { CSIZE_R::new(((self.bits >> 8) & 0x07) as u8) } #[doc = 
"Bits 11:12 - Channel x Data Width"] #[inline(always)] pub fn dwidth(&self) -> DWIDTH_R { DWIDTH_R::new(((self.bits >> 11) & 0x03) as u8) } #[doc = "Bit 13 - Channel x Source Interface Identifier"] #[inline(always)] pub fn sif(&self) -> SIF_R { SIF_R::new(((self.bits >> 13) & 0x01) != 0) } #[doc = "Bit 14 - Channel x Destination Interface Identifier"] #[inline(always)] pub fn dif(&self) -> DIF_R { DIF_R::new(((self.bits >> 14) & 0x01) != 0) } #[doc = "Bits 16:17 - Channel x Source Addressing Mode"] #[inline(always)] pub fn sam(&self) -> SAM_R { SAM_R::new(((self.bits >> 16) & 0x03) as u8) } #[doc = "Bits 18:19 - Channel x Destination Addressing Mode"] #[inline(always)] pub fn dam(&self) -> DAM_R { DAM_R::new(((self.bits >> 18) & 0x03) as u8) } #[doc = "Bit 21 - Channel Initialization Terminated (this bit is read-only)"] #[inline(always)] pub fn initd(&self) -> INITD_R { INITD_R::new(((self.bits >> 21) & 0x01) != 0) } #[doc = "Bit 22 - Read in Progress (this bit is read-only)"] #[inline(always)] pub fn rdip(&self) -> RDIP_R { RDIP_R::new(((self.bits >> 22) & 0x01) != 0) } #[doc = "Bit 23 - Write in Progress (this bit is read-only)"] #[inline(always)] pub fn wrip(&self) -> WRIP_R { WRIP_R::new(((self.bits >> 23) & 0x01) != 0) } #[doc = "Bits 24:30 - Channel x Peripheral Identifier"] #[inline(always)] pub fn perid(&self) -> PERID_R { PERID_R::new(((self.bits >> 24) & 0x7f) as u8) } } impl W { #[doc = "Bit 0 - Channel x Transfer Type"] #[inline(always)] pub fn type_(&mut self) -> TYPE_W { TYPE_W { w: self } } #[doc = "Bits 1:2 - Channel x Memory Burst Size"] #[inline(always)] pub fn mbsize(&mut self) -> MBSIZE_W { MBSIZE_W { w: self } } #[doc = "Bit 4 - Channel x Synchronization"] #[inline(always)] pub fn dsync(&mut self) -> DSYNC_W { DSYNC_W { w: self } } #[doc = "Bit 5 - Channel x Protection"] #[inline(always)] pub fn prot(&mut self) -> PROT_W { PROT_W { w: self } } #[doc = "Bit 6 - Channel x Software Request Trigger"] #[inline(always)] pub fn swreq(&mut self) -> 
SWREQ_W { SWREQ_W { w: self } } #[doc = "Bit 7 - Channel x Fill Block of memory"] #[inline(always)] pub fn memset(&mut self) -> MEMSET_W { MEMSET_W { w: self } } #[doc = "Bits 8:10 - Channel x Chunk Size"] #[inline(always)] pub fn csize(&mut self) -> CSIZE_W { CSIZE_W { w: self } } #[doc = "Bits 11:12 - Channel x Data Width"] #[inline(always)] pub fn dwidth(&mut self) -> DWIDTH_W { DWIDTH_W { w: self } } #[doc = "Bit 13 - Channel x Source Interface Identifier"] #[inline(always)] pub fn sif(&mut self) -> SIF_W { SIF_W { w: self } } #[doc = "Bit 14 - Channel x Destination Interface Identifier"] #[inline(always)] pub fn dif(&mut self) -> DIF_W { DIF_W { w: self } } #[doc = "Bits 16:17 - Channel x Source Addressing Mode"] #[inline(always)] pub fn sam(&mut self) -> SAM_W { SAM_W { w: self } } #[doc = "Bits 18:19 - Channel x Destination Addressing Mode"] #[inline(always)] pub fn dam(&mut self) -> DAM_W { DAM_W { w: self } } #[doc = "Bit 21 - Channel Initialization Terminated (this bit is read-only)"] #[inline(always)] pub fn initd(&mut self) -> INITD_W { INITD_W { w: self } } #[doc = "Bit 22 - Read in Progress (this bit is read-only)"] #[inline(always)] pub fn rdip(&mut self) -> RDIP_W { RDIP_W { w: self } } #[doc = "Bit 23 - Write in Progress (this bit is read-only)"] #[inline(always)] pub fn wrip(&mut self) -> WRIP_W { WRIP_W { w: self } } #[doc = "Bits 24:30 - Channel x Peripheral Identifier"] #[inline(always)] pub fn perid(&mut self) -> PERID_W { PERID_W { w: self } } #[doc = "Writes raw bits to the register."] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.0.bits(bits); self } } #[doc = "Channel Configuration Register (chid = 12)\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). 
See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [cc12](index.html) module"] pub struct CC12_SPEC; impl crate::RegisterSpec for CC12_SPEC { type Ux = u32; } #[doc = "`read()` method returns [cc12::R](R) reader structure"] impl crate::Readable for CC12_SPEC { type Reader = R; } #[doc = "`write(|w| ..)` method takes [cc12::W](W) writer structure"] impl crate::Writable for CC12_SPEC { type Writer = W; } #[doc = "`reset()` method sets CC12 to value 0"] impl crate::Resettable for CC12_SPEC { #[inline(always)] fn reset_value() -> Self::Ux { 0 } }
30.719874
427
0.567928
2822bad42ee5362e58757867962f919afbbf9645
213
use aoc::Result; pub const YEAR: u32 = 2016; pub const DAY: u32 = 21; pub fn part_one(input: &str) -> Result<usize> { Ok(input.len()) } pub fn part_two(input: &str) -> Result<usize> { Ok(input.len()) }
16.384615
47
0.615023
56a2d8a3ebecf3ff196e73644d7ec11660296a7d
32,257
// This file is a part of the mori - Material Orientation Library in Rust // Copyright 2018 Robert Carson // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use super::*; use std::cmp; ///A structure that holds an array of Rodrigues vectors #[derive(Clone, Debug)] pub struct RodVec{ ori: Array2<f64>, } impl RodVec{ ///Creates an array of zeros for the initial Rodrigues vector parameterization when data is not fed into it pub fn new(size: usize) -> RodVec{ assert!(size > 0, "Size inputted: {}, was not greater than 0", size); let mut ori = Array2::<f64>::zeros((4, size).f()); #[cfg(feature = "parallel")] par_azip!((mut rod_vec in ori.axis_iter_mut(Axis(1))) {rod_vec[2] = 1.0_f64}); #[cfg(not(feature = "parallel"))] azip!((mut rod_vec in ori.axis_iter_mut(Axis(1))) {rod_vec[2] = 1.0_f64}); RodVec{ ori, } }//End of new ///Creates a Rodrigues vector parameterization type with the supplied data as long as the supplied data is in the following format ///shape (4, nelems), memory order = fortran/column major. ///If it doesn't fit those standards it will fail. pub fn new_init(ori: Array2<f64>) -> RodVec{ let nrow = ori.nrows(); assert!(nrow == 4, "Number of rows of array was: {}, which is not equal to 4", nrow); //We need to deal with a borrowing of ori here, so we need to have strides dropped at one point. 
{ let strides = ori.strides(); assert!(strides[0] == 1, "The memory stride is not column major (f order)"); } RodVec{ ori, } }//End of new_init ///Return a ndarray view of the orientation data pub fn ori_view(&self) -> ArrayView2<f64>{ self.ori.view() } ///Return a ndarray mutable view of the orientation data pub fn ori_view_mut(&mut self) -> ArrayViewMut2<f64>{ self.ori.view_mut() } ///Returns a new RodVec that is equal to the equivalent of transposing a rotation matrix. ///It turns out this is simply the negative of the normal vector due to the vector being formed ///from an axial vector of the rotation matrix --> Rmat\^T = -Rx where Rx is the axial vector. pub fn transpose(&self) -> RodVec{ let nelems = self.ori.len_of(Axis(1)); let mut ori = Array2::<f64>::zeros((4, nelems).f()); let f = |mut rod_vec_t: ArrayViewMut1::<f64>, ref rod_vec: ArrayView1::<f64>| { rod_vec_t[0] = -1.0_f64 * rod_vec[0]; rod_vec_t[1] = -1.0_f64 * rod_vec[1]; rod_vec_t[2] = -1.0_f64 * rod_vec[2]; rod_vec_t[3] = rod_vec[3]; }; #[cfg(feature = "parallel")] par_azip!((rod_vec_t in ori.axis_iter_mut(Axis(1)), rod_vec in self.ori.axis_iter(Axis(1))) { f(rod_vec_t, rod_vec); }); #[cfg(not(feature = "parallel"))] azip!((rod_vec_t in ori.axis_iter_mut(Axis(1)), rod_vec in self.ori.axis_iter(Axis(1))) { f(rod_vec_t, rod_vec); }); RodVec::new_init(ori) } ///Performs the equivalent of transposing a rotation matrix on the internal orientations. ///It turns out this is simply the negative of the normal vector due to the vector being formed ///from an axial vector of the rotation matrix --> Rmat\^T = -Rx where Rx is the axial vector. 
pub fn transpose_inplace(&mut self){ let f = |mut rod_vec_t: ArrayViewMut1::<f64>| { rod_vec_t[0] *= -1.0_f64; rod_vec_t[1] *= -1.0_f64; rod_vec_t[2] *= -1.0_f64; }; #[cfg(feature = "parallel")] par_azip!((rod_vec_t in self.ori.axis_iter_mut(Axis(1))) { f(rod_vec_t); }); #[cfg(not(feature = "parallel"))] azip!((rod_vec_t in self.ori.axis_iter_mut(Axis(1))) { f(rod_vec_t); }); } }//End of Impl of RodVec ///The orientation conversions of a series of Rodrigues vectors to a number of varying different orientation ///representations commonly used in material orientation processing. impl OriConv for RodVec{ ///Converts the Rodrigues vector representation over to Bunge angles which has the following properties ///shape (3, nelems), memory order = fortran/column major. fn to_bunge(&self) -> Bunge{ let rmat = self.to_rmat(); //When a pure conversion doesn't exist we just use the already existing ones in other orientation //representations rmat.to_bunge() }//End of to_bunge ///Converts the Rodrigues vector representation over to a rotation matrix which has the following properties ///shape (3, 3, nelems), memory order = fortran/column major. 
fn to_rmat(&self) -> RMat{ let nelems = self.ori.len_of(Axis(1)); let mut ori = Array3::<f64>::zeros((3, 3, nelems).f()); let f = |mut rmat: ArrayViewMut2::<f64>, ref rod_vec: ArrayView1::<f64>| { let phi = rod_vec[3].atan() * 2.0_f64; let c = phi.cos(); let s = phi.sin(); rmat[[0, 0]] = c + (1.0_f64 - c) * (rod_vec[0] * rod_vec[0]); rmat[[1, 0]] = (1.0_f64 - c) * (rod_vec[0] * rod_vec[1]) + s * rod_vec[2]; rmat[[2, 0]] = (1.0_f64 - c) * (rod_vec[0] * rod_vec[2]) - s * rod_vec[1]; rmat[[0, 1]] = (1.0_f64 - c) * (rod_vec[0] * rod_vec[1]) - s * rod_vec[2]; rmat[[1, 1]] = c + (1.0_f64 - c) * (rod_vec[1] * rod_vec[1]); rmat[[2, 1]] = (1.0_f64 - c) * (rod_vec[1] * rod_vec[2]) + s * rod_vec[0]; rmat[[0, 2]] = (1.0_f64 - c) * (rod_vec[0] * rod_vec[2]) + s * rod_vec[1]; rmat[[1, 2]] = (1.0_f64 - c) * (rod_vec[1] * rod_vec[2]) - s * rod_vec[0]; rmat[[2, 2]] = c + (1.0_f64 - c) * (rod_vec[2] * rod_vec[2]); }; #[cfg(feature = "parallel")] par_azip!((rmat in ori.axis_iter_mut(Axis(2)), rod_vec in self.ori.axis_iter(Axis(1))) { f(rmat, rod_vec); }); #[cfg(not(feature = "parallel"))] azip!((rmat in ori.axis_iter_mut(Axis(2)), rod_vec in self.ori.axis_iter(Axis(1))) { f(rmat, rod_vec); }); RMat::new_init(ori) }//End of to_rmat ///Converts the Rodrigues vector representation over to axis-angle representation which has the following properties ///shape (4, nelems), memory order = fortran/column major. 
fn to_ang_axis(&self) -> AngAxis{ let nelems = self.ori.len_of(Axis(1)); let mut ori = Array2::<f64>::zeros((4, nelems).f()); let f = |mut ang_axis: ArrayViewMut1::<f64>, ref rod_vec: ArrayView1::<f64>| { ang_axis[0] = rod_vec[0]; ang_axis[1] = rod_vec[1]; ang_axis[2] = rod_vec[2]; ang_axis[3] = 2.0_f64 * rod_vec[3].atan(); }; #[cfg(feature = "parallel")] par_azip!((ang_axis in ori.axis_iter_mut(Axis(1)), rod_vec in self.ori.axis_iter(Axis(1))) { f(ang_axis, rod_vec); }); #[cfg(not(feature = "parallel"))] azip!((ang_axis in ori.axis_iter_mut(Axis(1)), rod_vec in self.ori.axis_iter(Axis(1))) { f(ang_axis, rod_vec); }); AngAxis::new_init(ori) }//End of to_ang_axis ///Converts the Rodrigues vector representation over to a compact axial vector representation which has the following properties ///shape (3, nelems), memory order = fortran/column major. fn to_ang_axis_comp(&self) -> AngAxisComp{ let nelems = self.ori.len_of(Axis(1)); let mut ori = Array2::<f64>::zeros((3, nelems).f()); let f = |mut ang_axis: ArrayViewMut1::<f64>, ref rod_vec: ArrayView1::<f64>| { let phi = 2.0_f64 * rod_vec[3].atan(); ang_axis[0] = rod_vec[0] * phi; ang_axis[1] = rod_vec[1] * phi; ang_axis[2] = rod_vec[2] * phi; }; #[cfg(feature = "parallel")] par_azip!((ang_axis in ori.axis_iter_mut(Axis(1)), rod_vec in self.ori.axis_iter(Axis(1))) { f(ang_axis, rod_vec); }); #[cfg(not(feature = "parallel"))] azip!((ang_axis in ori.axis_iter_mut(Axis(1)), rod_vec in self.ori.axis_iter(Axis(1))) { f(ang_axis, rod_vec); }); AngAxisComp::new_init(ori) }//End of to_ang_axis_comp ///Returns a clone of the Rodrigues vector. fn to_rod_vec(&self) -> RodVec{ self.clone() }//End of to_rod_vec ///Converts the Rodrigues vector representation over to a compact Rodrigues which has the following properties ///shape (3, nelems), memory order = fortran/column major. 
fn to_rod_vec_comp(&self) -> RodVecComp{ let nelems = self.ori.len_of(Axis(1)); let mut ori = Array2::<f64>::zeros((3, nelems).f()); let f = |mut rod_vec_comp: ArrayViewMut1::<f64>, ref rod_vec: ArrayView1::<f64>| { rod_vec_comp[0] = rod_vec[0] * rod_vec[3]; rod_vec_comp[1] = rod_vec[1] * rod_vec[3]; rod_vec_comp[2] = rod_vec[2] * rod_vec[3]; }; #[cfg(feature = "parallel")] par_azip!((rod_vec_comp in ori.axis_iter_mut(Axis(1)), rod_vec in self.ori.axis_iter(Axis(1))) { f(rod_vec_comp, rod_vec); }); #[cfg(not(feature = "parallel"))] azip!((rod_vec_comp in ori.axis_iter_mut(Axis(1)), rod_vec in self.ori.axis_iter(Axis(1))) { f(rod_vec_comp, rod_vec); }); RodVecComp::new_init(ori) }//End of to_rod_vec_comp ///Converts the Rodrigues vector representation over to a unit quaternion which has the following properties ///shape (4, nelems), memory order = fortran/column major. fn to_quat(&self) -> Quat{ let nelems = self.ori.len_of(Axis(1)); let mut ori = Array2::<f64>::zeros((4, nelems).f()); let f = |mut quat: ArrayViewMut1::<f64>, ref rod_vec: ArrayView1::<f64>| { let phi = rod_vec[3].atan(); let s = f64::sin(phi); quat[0] = f64::cos(phi); quat[1] = s * rod_vec[0]; quat[2] = s * rod_vec[1]; quat[3] = s * rod_vec[2]; }; #[cfg(feature = "parallel")] par_azip!((quat in ori.axis_iter_mut(Axis(1)), rod_vec in self.ori.axis_iter(Axis(1))) { f(quat, rod_vec); }); #[cfg(not(feature = "parallel"))] azip!((quat in ori.axis_iter_mut(Axis(1)), rod_vec in self.ori.axis_iter(Axis(1))) { f(quat, rod_vec); }); Quat::new_init(ori) }//End of to_quat ///Converts the Rodrigues vector representation over to a homochoric representation which has the following properties ///shape (4, nelems), memory order = fortran/column major. 
    fn to_homochoric(&self) -> Homochoric{
        // No direct conversion; route through the axis-angle representation.
        let ang_axis = self.to_ang_axis();
        ang_axis.to_homochoric()
    }//End of to_homochoric

    ///Converts the Rodrigues vector representation over to Bunge angles which has the following properties
    ///shape (3, nelems), memory order = fortran/column major.
    ///This operation is done inplace and does not create a new structure
    fn to_bunge_inplace(&self, bunge: &mut Bunge){
        // Route through the rotation-matrix form as in to_bunge().
        let rmat = self.to_rmat();
        rmat.to_bunge_inplace(bunge);
    }

    ///Converts the Rodrigues vector representation over to a rotation matrix which has the following properties
    ///shape (3, 3, nelems), memory order = fortran/column major.
    ///This operation is done inplace and does not create a new structure
    fn to_rmat_inplace(&self, rmat: &mut RMat){
        let mut ori = rmat.ori_view_mut();

        // The destination must already hold the same number of orientations.
        let new_nelem = ori.len_of(Axis(1));
        let nelem = self.ori.len_of(Axis(1));
        assert!(new_nelem == nelem, "The number of elements in the original ori field do no match up with the new field.
        The old field had {} elements, and the new field has {} elements", nelem, new_nelem);

        // Same kernel as to_rmat(): rv[3] holds tan(phi/2), recover phi and fill in
        // the axis-angle rotation-matrix form.
        let f = |mut rmat: ArrayViewMut2::<f64>, ref rod_vec: ArrayView1::<f64>| {
            let phi = rod_vec[3].atan() * 2.0_f64;
            let c = phi.cos();
            let s = phi.sin();

            rmat[[0, 0]] = c + (1.0_f64 - c) * (rod_vec[0] * rod_vec[0]);
            rmat[[1, 0]] = (1.0_f64 - c) * (rod_vec[0] * rod_vec[1]) + s * rod_vec[2];
            rmat[[2, 0]] = (1.0_f64 - c) * (rod_vec[0] * rod_vec[2]) - s * rod_vec[1];

            rmat[[0, 1]] = (1.0_f64 - c) * (rod_vec[0] * rod_vec[1]) - s * rod_vec[2];
            rmat[[1, 1]] = c + (1.0_f64 - c) * (rod_vec[1] * rod_vec[1]);
            rmat[[2, 1]] = (1.0_f64 - c) * (rod_vec[1] * rod_vec[2]) + s * rod_vec[0];

            rmat[[0, 2]] = (1.0_f64 - c) * (rod_vec[0] * rod_vec[2]) + s * rod_vec[1];
            rmat[[1, 2]] = (1.0_f64 - c) * (rod_vec[1] * rod_vec[2]) - s * rod_vec[0];
            rmat[[2, 2]] = c + (1.0_f64 - c) * (rod_vec[2] * rod_vec[2]);
        };

        #[cfg(feature = "parallel")]
        par_azip!((rmat in ori.axis_iter_mut(Axis(2)), rod_vec in self.ori.axis_iter(Axis(1))) {
            f(rmat, rod_vec);
        });

        #[cfg(not(feature = "parallel"))]
        azip!((rmat in ori.axis_iter_mut(Axis(2)), rod_vec in self.ori.axis_iter(Axis(1))) {
            f(rmat, rod_vec);
        });
    }

    ///Converts the Rodrigues vector representation over to an axial vector representation which has the following properties
    ///shape (4, nelems), memory order = fortran/column major.
    ///This operation is done inplace and does not create a new structure
    fn to_ang_axis_inplace(&self, ang_axis: &mut AngAxis){
        let mut ori = ang_axis.ori_view_mut();

        // The destination must already hold the same number of orientations.
        let new_nelem = ori.len_of(Axis(1));
        let nelem = self.ori.len_of(Axis(1));
        assert!(new_nelem == nelem, "The number of elements in the original ori field do no match up with the new field.
        The old field had {} elements, and the new field has {} elements", nelem, new_nelem);

        // Copy the axis; convert tan(phi/2) (index 3) back into the angle phi.
        let f = |mut ang_axis: ArrayViewMut1::<f64>, ref rod_vec: ArrayView1::<f64>| {
            ang_axis[0] = rod_vec[0];
            ang_axis[1] = rod_vec[1];
            ang_axis[2] = rod_vec[2];
            ang_axis[3] = 2.0_f64 * rod_vec[3].atan();
        };

        #[cfg(feature = "parallel")]
        par_azip!((ang_axis in ori.axis_iter_mut(Axis(1)), rod_vec in self.ori.axis_iter(Axis(1))) {
            f(ang_axis, rod_vec);
        });

        #[cfg(not(feature = "parallel"))]
        azip!((ang_axis in ori.axis_iter_mut(Axis(1)), rod_vec in self.ori.axis_iter(Axis(1))) {
            f(ang_axis, rod_vec);
        });
    }

    ///Converts the Rodrigues vector representation over to a compact axial vector representation which has the following properties
    ///shape (3, nelems), memory order = fortran/column major.
    ///This operation is done inplace and does not create a new structure
    fn to_ang_axis_comp_inplace(&self, ang_axis_comp: &mut AngAxisComp){
        let mut ori = ang_axis_comp.ori_view_mut();

        // The destination must already hold the same number of orientations.
        let new_nelem = ori.len_of(Axis(1));
        let nelem = self.ori.len_of(Axis(1));
        assert!(new_nelem == nelem, "The number of elements in the original ori field do no match up with the new field.
        The old field had {} elements, and the new field has {} elements", nelem, new_nelem);

        // Compact form: unit axis scaled by the rotation angle.
        let f = |mut ang_axis: ArrayViewMut1::<f64>, ref rod_vec: ArrayView1::<f64>| {
            let phi = 2.0_f64 * rod_vec[3].atan();
            ang_axis[0] = rod_vec[0] * phi;
            ang_axis[1] = rod_vec[1] * phi;
            ang_axis[2] = rod_vec[2] * phi;
        };

        #[cfg(feature = "parallel")]
        par_azip!((ang_axis in ori.axis_iter_mut(Axis(1)), rod_vec in self.ori.axis_iter(Axis(1))) {
            f(ang_axis, rod_vec);
        });

        #[cfg(not(feature = "parallel"))]
        azip!((ang_axis in ori.axis_iter_mut(Axis(1)), rod_vec in self.ori.axis_iter(Axis(1))) {
            f(ang_axis, rod_vec);
        });
    }

    ///Returns a clone of the Rodrigues vector.
    ///This operation is done inplace and does not create a new structure
    fn to_rod_vec_inplace(&self, rod_vec: &mut RodVec){
        let mut ori = rod_vec.ori_view_mut();

        // The destination must already hold the same number of orientations.
        let new_nelem = ori.len_of(Axis(1));
        let nelem = self.ori.len_of(Axis(1));
        assert!(new_nelem == nelem, "The number of elements in the original ori field do no match up with the new field.
        The old field had {} elements, and the new field has {} elements", nelem, new_nelem);

        // Straight element-wise copy of the orientation data.
        ori.assign(&self.ori);
    }

    ///Converts the Rodrigues vector representation over to a compact Rodrigues which has the following properties
    ///shape (3, nelems), memory order = fortran/column major.
    ///This operation is done inplace and does not create a new structure
    fn to_rod_vec_comp_inplace(&self, rod_vec_comp: &mut RodVecComp){
        let mut ori = rod_vec_comp.ori_view_mut();

        // The destination must already hold the same number of orientations.
        let new_nelem = ori.len_of(Axis(1));
        let nelem = self.ori.len_of(Axis(1));
        assert!(new_nelem == nelem, "The number of elements in the original ori field do no match up with the new field.
        The old field had {} elements, and the new field has {} elements", nelem, new_nelem);

        // Fold the scalar magnitude (index 3) into the axis components.
        let f = |mut rod_vec_comp: ArrayViewMut1::<f64>, ref rod_vec: ArrayView1::<f64>| {
            rod_vec_comp[0] = rod_vec[0] * rod_vec[3];
            rod_vec_comp[1] = rod_vec[1] * rod_vec[3];
            rod_vec_comp[2] = rod_vec[2] * rod_vec[3];
        };

        #[cfg(feature = "parallel")]
        par_azip!((rod_vec_comp in ori.axis_iter_mut(Axis(1)), rod_vec in self.ori.axis_iter(Axis(1))) {
            f(rod_vec_comp, rod_vec);
        });

        #[cfg(not(feature = "parallel"))]
        azip!((rod_vec_comp in ori.axis_iter_mut(Axis(1)), rod_vec in self.ori.axis_iter(Axis(1))) {
            f(rod_vec_comp, rod_vec);
        });
    }

    ///Converts the Rodrigues vector representation over to a unit quaternion which has the following properties
    ///shape (4, nelems), memory order = fortran/column major.
    ///This operation is done inplace and does not create a new structure
    fn to_quat_inplace(&self, quat: &mut Quat){
        let mut ori = quat.ori_view_mut();

        // The destination must already hold the same number of orientations.
        let new_nelem = ori.len_of(Axis(1));
        let nelem = self.ori.len_of(Axis(1));
        assert!(new_nelem == nelem, "The number of elements in the original ori field do no match up with the new field.
        The old field had {} elements, and the new field has {} elements", nelem, new_nelem);

        // atan(tan(phi/2)) yields the half angle; the quaternion is
        // (cos(phi/2), sin(phi/2) * axis).
        let f = |mut quat: ArrayViewMut1::<f64>, ref rod_vec: ArrayView1::<f64>| {
            let phi = rod_vec[3].atan();
            let s = f64::sin(phi);

            quat[0] = f64::cos(phi);
            quat[1] = s * rod_vec[0];
            quat[2] = s * rod_vec[1];
            quat[3] = s * rod_vec[2];
        };

        #[cfg(feature = "parallel")]
        par_azip!((quat in ori.axis_iter_mut(Axis(1)), rod_vec in self.ori.axis_iter(Axis(1))) {
            f(quat, rod_vec);
        });

        #[cfg(not(feature = "parallel"))]
        azip!((quat in ori.axis_iter_mut(Axis(1)), rod_vec in self.ori.axis_iter(Axis(1))) {
            f(quat, rod_vec);
        });
    }

    ///Converts the Rodrigues vector representation over to a homochoric representation which has the following properties
    ///shape (4, nelems), memory order = fortran/column major.
    ///This operation is done inplace and does not create a new structure
    fn to_homochoric_inplace(&self, homochoric: &mut Homochoric){
        // No direct conversion; route through the axis-angle representation.
        let ang_axis = self.to_ang_axis();
        ang_axis.to_homochoric_inplace(homochoric);
    }
}//End of impl Ori_Conv of Rodrigues Vector

///A series of commonly used operations to rotate vector data by a given rotation
impl RotVector for RodVec{

    ///rot_vector takes in a 2D array view of a series of vectors. It then rotates these vectors using the
    ///given Rodrigues vectors. The newly rotated vectors are then returned. This function requires the
    ///number of elements in the Rodrigues vectors to be either 1 or nelems.
    ///The unrotated vector might also contain either 1 or nelems number of elements.
    ///If this condition is not met the function will error out.
    ///vec - the vector to be rotated must have dimensions 3xnelems or 3x1.
    ///Output - the rotated vector and has dimensions 3xnelems.
    fn rot_vector(&self, vec: ArrayView2<f64>) -> Array2<f64>{
        let nelems = vec.len_of(Axis(1));
        let rnelems = self.ori.len_of(Axis(1));

        let rows = vec.len_of(Axis(0));
        assert!((rows == 3), "The number of rows must be 3. The number of rows provided is {}", rows);

        // Broadcasting rule: either counts match, or one side has exactly one element.
        assert!( (nelems == rnelems) | (rnelems == 1) | (nelems == 1),
        "The number of elements in the vector field must be equal to the number of elements in the
        Rodrigues vector structure, or their must only be one element in Rodrigues vector. The final case is
        that there must only be one element in the vector field.
        There are currently {} elements in vector and {} elements in Rodrigues vector",
        nelems, rnelems);

        // Output is sized by whichever side supplied more than one element.
        let mnelems = cmp::max(rnelems, nelems);
        let mut rvec = Array2::<f64>::zeros((3, mnelems).f());

        //We need to see if we have more than one rotation matrix that we're multiplying by
        if rnelems == nelems {
            //The rotations here can be given by the following set of equations as found on Wikipedia:
            //https://en.wikipedia.org/wiki/Rodrigues%27_rotation_formula#Statement
            #[cfg(feature = "parallel")]
            par_azip!((rvec in rvec.axis_iter_mut(Axis(1)), ref vec in vec.axis_iter(Axis(1)),
            ref rod_vec in self.ori.axis_iter(Axis(1))) {
                rod_vec_rot_vec(&rod_vec, &vec, rvec);
            });

            #[cfg(not(feature = "parallel"))]
            azip!((rvec in rvec.axis_iter_mut(Axis(1)), ref vec in vec.axis_iter(Axis(1)),
            ref rod_vec in self.ori.axis_iter(Axis(1))) {
                rod_vec_rot_vec(&rod_vec, &vec, rvec);
            });
        } else if rnelems == 1{
            //We just have one Rodrigues vector so perform pretty much the above to get all of our values
            let rod_vec = self.ori.index_axis(Axis(1), 0);

            #[cfg(feature = "parallel")]
            par_azip!((rvec in rvec.axis_iter_mut(Axis(1)), ref vec in vec.axis_iter(Axis(1))) {
                rod_vec_rot_vec(&rod_vec, &vec, rvec);
            });

            #[cfg(not(feature = "parallel"))]
            azip!((rvec in rvec.axis_iter_mut(Axis(1)), ref vec in vec.axis_iter(Axis(1))) {
                rod_vec_rot_vec(&rod_vec, &vec, rvec);
            });
        }else{
            //We just have one vector so rotate it by every Rodrigues vector
            let vec = vec.index_axis(Axis(1), 0);

            #[cfg(feature = "parallel")]
            par_azip!((rvec in rvec.axis_iter_mut(Axis(1)), ref rod_vec in self.ori.axis_iter(Axis(1))) {
                rod_vec_rot_vec(&rod_vec, &vec, rvec);
            });

            #[cfg(not(feature = "parallel"))]
            azip!((rvec in rvec.axis_iter_mut(Axis(1)), ref rod_vec in self.ori.axis_iter(Axis(1))) {
                rod_vec_rot_vec(&rod_vec, &vec, rvec);
            });
        }//End if-else

        //Now we just need to return the rvec value
        rvec
    }//End of rot_vector

    ///rot_vector_mut takes in a 2D array view of a series of vectors and a mutable 2D ArrayView of the
    ///rotated vector. It then rotates these vectors using the given Rodrigues vector. The newly rotated
    /// vectors are assigned to the supplied rotated vector, rvec. This function requires the
    ///number of elements in the Rodrigues vector to be either 1 or nelems.
    ///The unrotated vector might also contain either 1 or nelems number of elements.
    ///It also requires the number of elements in rvec and vec to be equal.
    ///If these conditions are not met the function will error out.
    ///vec - the vector to be rotated must have dimensions 3xnelems or 3x1.
    ///rvec - the rotated vector and has dimensions 3xnelems.
    fn rot_vector_mut(&self, vec: ArrayView2<f64>, mut rvec: ArrayViewMut2<f64>) {
        let nelems = vec.len_of(Axis(1));
        let rvnelems = rvec.len_of(Axis(1));
        let rnelems = self.ori.len_of(Axis(1));
        let mnelems = cmp::max(rnelems, nelems);

        let rows = vec.len_of(Axis(0));
        assert!((rows == 3), "The number of rows must be 3. The number of rows provided is {}", rows);

        // The caller-supplied output must be sized for the broadcasted result.
        assert!((mnelems == rvnelems),
        "The number of elements in the unrotated vector or Rodrigues vector field must be equal to the number of
        elements in the supplied rotated vector field. There are currently {} elements in the unrotated vector or
        Rodrigues vector field and {} elements in the rotated vector field",
        mnelems, rvnelems);

        // Broadcasting rule: either counts match, or one side has exactly one element.
        assert!( (nelems == rnelems) | (rnelems == 1) | (nelems == 1),
        "The number of elements in the vector field must be equal to the number of elements in the
        Rodrigues vector structure, or their must only be one element in Rodrigues vector. The final case is
        that there must only be one element in the vector field.
        There are currently {} elements in vector and {} elements in Rodrigues vector",
        nelems, rnelems);

        //We need to see if we have more than one Rodrigues vector that we're multiplying by
        if rnelems == nelems {
            //The rotations here can be given by the following set of equations as found on Wikipedia:
            //https://en.wikipedia.org/wiki/Rodrigues%27_rotation_formula#Statement
            #[cfg(feature = "parallel")]
            par_azip!((rvec in rvec.axis_iter_mut(Axis(1)), ref vec in vec.axis_iter(Axis(1)),
            ref rod_vec in self.ori.axis_iter(Axis(1))) {
                rod_vec_rot_vec(&rod_vec, &vec, rvec);
            });

            #[cfg(not(feature = "parallel"))]
            azip!((rvec in rvec.axis_iter_mut(Axis(1)), ref vec in vec.axis_iter(Axis(1)),
            ref rod_vec in self.ori.axis_iter(Axis(1))) {
                rod_vec_rot_vec(&rod_vec, &vec, rvec);
            });
        } else if rnelems == 1{
            //We just have one Rodrigues vector so perform pretty much the above to get all of our values
            let rod_vec = self.ori.index_axis(Axis(1), 0);

            #[cfg(feature = "parallel")]
            par_azip!((rvec in rvec.axis_iter_mut(Axis(1)), ref vec in vec.axis_iter(Axis(1))) {
                rod_vec_rot_vec(&rod_vec, &vec, rvec);
            });

            #[cfg(not(feature = "parallel"))]
            azip!((rvec in rvec.axis_iter_mut(Axis(1)), ref vec in vec.axis_iter(Axis(1))) {
                rod_vec_rot_vec(&rod_vec, &vec, rvec);
            });
        }else{
            //We just have one vector so rotate it by every Rodrigues vector
            let vec = vec.index_axis(Axis(1), 0);

            #[cfg(feature = "parallel")]
            par_azip!((rvec in rvec.axis_iter_mut(Axis(1)), ref rod_vec in self.ori.axis_iter(Axis(1))) {
                rod_vec_rot_vec(&rod_vec, &vec, rvec);
            });

            #[cfg(not(feature = "parallel"))]
            azip!((rvec in rvec.axis_iter_mut(Axis(1)), ref rod_vec in self.ori.axis_iter(Axis(1))) {
                rod_vec_rot_vec(&rod_vec, &vec, rvec);
            });
        }//End if-else
    }//End of rot_vector_mut

    ///rot_vector_inplace takes in a mutable 2D array view of a series of vectors. It then rotates these vectors using the
    ///given Rodrigues vectors. The newly rotated vectors are assigned to original vector. This function requires the
    ///number of elements in the Rodrigues vector to be either 1 or nelems where vec has nelems in it.
    ///If this condition is not met the function will error out.
    ///vec - the vector to be rotated must have dimensions 3xnelems.
    fn rot_vector_inplace(&self, mut vec: ArrayViewMut2<f64>){
        let nelems = vec.len_of(Axis(1));
        let rnelems = self.ori.len_of(Axis(1));

        let rows = vec.len_of(Axis(0));
        assert!((rows == 3), "The number of rows must be 3. The number of rows provided is {}", rows);

        // In-place rotation cannot broadcast the vector side, so only the Rodrigues
        // count may be 1 or nelems.
        assert!( (nelems == rnelems) | (rnelems == 1),
        "The number of elements in the vector field must be equal to the number of elements in the
        Rodrigues vector structure, or their must only be one element in Rodrigues vector.
        There are currently {} elements in vector and {} elements in Rodrigues vector",
        nelems, rnelems);

        //We need to see if we have more than one Rodrigues vector that we're multiplying by
        if rnelems == nelems {
            //The rotations here can be given by the following set of equations as found on Wikipedia:
            //https://en.wikipedia.org/wiki/Rodrigues%27_rotation_formula#Statement
            // A scratch buffer per column is required because the rotated value is
            // written back into the same column that is being read.
            #[cfg(feature = "parallel")]
            par_azip!((mut vec in vec.axis_iter_mut(Axis(1)), ref rod_vec in self.ori.axis_iter(Axis(1))) {
                let mut rvec = Array1::<f64>::zeros((3).f());
                rod_vec_rot_vec(&rod_vec, &vec.view(), rvec.view_mut());
                vec.assign(&rvec);
            });

            #[cfg(not(feature = "parallel"))]
            azip!((mut vec in vec.axis_iter_mut(Axis(1)), ref rod_vec in self.ori.axis_iter(Axis(1))) {
                let mut rvec = Array1::<f64>::zeros((3).f());
                rod_vec_rot_vec(&rod_vec, &vec.view(), rvec.view_mut());
                vec.assign(&rvec);
            });
        } else{
            //We just have one Rodrigues vector so perform pretty much the above to get all of our values
            let rod_vec = self.ori.index_axis(Axis(1), 0);

            #[cfg(feature = "parallel")]
            par_azip!((mut vec in vec.axis_iter_mut(Axis(1))) {
                let mut rvec = Array1::<f64>::zeros((3).f());
                rod_vec_rot_vec(&rod_vec, &vec.view(), rvec.view_mut());
                vec.assign(&rvec);
            });

            #[cfg(not(feature = "parallel"))]
            azip!((mut vec in vec.axis_iter_mut(Axis(1))) {
                let mut rvec = Array1::<f64>::zeros((3).f());
                rod_vec_rot_vec(&rod_vec, &vec.view(), rvec.view_mut());
                vec.assign(&rvec);
            });
        }//End if-else
    }//End of rot_vector_inplace
}//End of Impl RotVector

///All of the Rodrigues vector vector rotation operations can be described by using the below series of functions.
///This also reduces the amount of repetitive code that existed earlier within rot_vector.
fn rod_vec_rot_vec(rod_vec: &ArrayView1<f64>, vec: &ArrayView1<f64>, mut rvec: ArrayViewMut1<f64>) {
    // Rodrigues rotation formula:
    //   v' = v cos(theta) + (k x v) sin(theta) + k (k . v)(1 - cos(theta))
    // where theta = 2 * atan(rod_vec[3]) since index 3 stores tan(theta/2).
    let (sin_theta, cos_theta) = f64::sin_cos(2.0_f64 * rod_vec[3].atan());
    let min_cos = 1.0_f64 - cos_theta;
    // (k . v) scaled by (1 - cos(theta))
    let dp_mcos = min_cos * (rod_vec[0] * vec[0] + rod_vec[1] * vec[1] + rod_vec[2] * vec[2]);
    // k x v
    let mut cross_prod = Array1::<f64>::zeros((3).f());

    cross_prod[0] = -rod_vec[2] * vec[1] + rod_vec[1] * vec[2];
    cross_prod[1] = rod_vec[2] * vec[0] - rod_vec[0] * vec[2];
    cross_prod[2] = -rod_vec[1] * vec[0] + rod_vec[0] * vec[1];

    rvec[0] = vec[0] * cos_theta + cross_prod[0] * sin_theta + rod_vec[0] * dp_mcos;
    rvec[1] = vec[1] * cos_theta + cross_prod[1] * sin_theta + rod_vec[1] * dp_mcos;
    rvec[2] = vec[2] * cos_theta + cross_prod[2] * sin_theta + rod_vec[2] * dp_mcos;
}
43.649526
135
0.598165
fcc3fa073f3979ce3976e9a13130b74e9bb6dd08
4,106
//! Program state processor

use crate::{
    state::{
        enums::GovernanceAccountType,
        governance::{
            assert_valid_create_governance_args, get_token_governance_address_seeds, Governance,
            GovernanceConfig,
        },
        realm::get_realm_data,
        token_owner_record::get_token_owner_record_data_for_realm,
    },
    tools::spl_token::{assert_spl_token_owner_is_signer, set_spl_token_account_authority},
};
use solana_program::{
    account_info::{next_account_info, AccountInfo},
    entrypoint::ProgramResult,
    program_pack::Pack,
    pubkey::Pubkey,
    rent::Rent,
    sysvar::Sysvar,
};
use spl_governance_tools::account::create_and_serialize_account_signed;
use spl_token::{instruction::AuthorityType, state::Account};

/// Processes CreateTokenGovernance instruction
///
/// Creates a Governance account (PDA derived from the realm and the governed
/// token account) after verifying that the signer's token-owner record carries
/// enough voting weight to create governances in the realm. When
/// `transfer_account_authorities` is set, the governed token account's owner
/// (and close authority, if present) is transferred to the new governance PDA;
/// otherwise the current owner only has to prove ownership by signing.
///
/// NOTE: the account order below is part of the instruction's ABI — accounts
/// must be supplied in exactly this order. `resolve_voter_weight` may consume
/// additional optional accounts from the same iterator.
pub fn process_create_token_governance(
    program_id: &Pubkey,
    accounts: &[AccountInfo],
    config: GovernanceConfig,
    transfer_account_authorities: bool,
) -> ProgramResult {
    let account_info_iter = &mut accounts.iter();

    let realm_info = next_account_info(account_info_iter)?; // 0
    let token_governance_info = next_account_info(account_info_iter)?; // 1

    let governed_token_info = next_account_info(account_info_iter)?; // 2
    let governed_token_owner_info = next_account_info(account_info_iter)?; // 3

    let token_owner_record_info = next_account_info(account_info_iter)?; // 4

    let payer_info = next_account_info(account_info_iter)?; // 5
    let spl_token_info = next_account_info(account_info_iter)?; // 6

    let system_info = next_account_info(account_info_iter)?; // 7

    let rent_sysvar_info = next_account_info(account_info_iter)?; // 8
    let rent = &Rent::from_account_info(rent_sysvar_info)?;

    let governance_authority_info = next_account_info(account_info_iter)?; // 9

    // Validate the config and that the governance belongs to the given realm.
    assert_valid_create_governance_args(program_id, &config, realm_info)?;

    let realm_data = get_realm_data(program_id, realm_info)?;

    let token_owner_record_data =
        get_token_owner_record_data_for_realm(program_id, token_owner_record_info, realm_info.key)?;

    // The creator must be the token owner or its governance delegate.
    token_owner_record_data.assert_token_owner_or_delegate_is_signer(governance_authority_info)?;

    // May pull an optional voter-weight record account from the iterator.
    let voter_weight = token_owner_record_data.resolve_voter_weight(
        program_id,
        account_info_iter,
        realm_info.key,
        &realm_data,
    )?;

    token_owner_record_data.assert_can_create_governance(&realm_data, voter_weight)?;

    let token_governance_data = Governance {
        account_type: GovernanceAccountType::TokenGovernance,
        realm: *realm_info.key,
        governed_account: *governed_token_info.key,
        config,
        proposals_count: 0,
        reserved: [0; 8],
    };

    // Create the governance account at its PDA; fails if the derived address
    // does not match token_governance_info.
    create_and_serialize_account_signed::<Governance>(
        payer_info,
        token_governance_info,
        &token_governance_data,
        &get_token_governance_address_seeds(realm_info.key, governed_token_info.key),
        program_id,
        system_info,
        rent,
    )?;

    if transfer_account_authorities {
        // Hand the token account's owner authority to the governance PDA.
        set_spl_token_account_authority(
            governed_token_info,
            governed_token_owner_info,
            token_governance_info.key,
            AuthorityType::AccountOwner,
            spl_token_info,
        )?;

        // If the token account has close_authority then transfer it as well
        let token_account_data = Account::unpack(&governed_token_info.data.borrow())?;
        // Note: The code assumes owner==close_authority
        // If this is not the case then the caller should set close_authority accordingly before making the transfer
        if token_account_data.close_authority.is_some() {
            set_spl_token_account_authority(
                governed_token_info,
                governed_token_owner_info,
                token_governance_info.key,
                AuthorityType::CloseAccount,
                spl_token_info,
            )?;
        }
    } else {
        // Authorities stay with the current owner, who must at least sign.
        assert_spl_token_owner_is_signer(governed_token_info, governed_token_owner_info)?;
    }

    Ok(())
}
35.094017
122
0.704092
5bc469e9e0c9e4125855a3d9a266932b2b49a15f
3,533
use crate::path;
use anyhow::Result;
use cargo_toml::{self, Dependency, DependencyDetail, Edition, Product};
use std::{
    fs::{self, File},
    io::Write,
    path::{Path, PathBuf},
};

/// Reads the manifest template at `template_path`, rewrites it so it can be
/// built from a temporary directory (absolute dependency paths, `lib.path`
/// pointing at `source_path`), and writes the result to a fresh temp dir.
///
/// Returns `(path_to_generated_Cargo.toml, temp_dir)`.
///
/// Panics if the template cannot be read/parsed or the output file cannot be
/// created; I/O errors while writing are propagated via `Result`.
pub fn prepare_manifest_file(
    template_path: impl AsRef<Path>,
    source_path: impl AsRef<Path>,
) -> Result<(PathBuf, PathBuf)> {
    // Prepare temporary directory
    let dir = path::create_temp_dir();
    let temp_manifest_path = dir.join("Cargo.toml");
    // Relative dependency paths in the template are resolved against the
    // template's own directory.
    let manifest_dir = template_path.as_ref().parent().expect("should have parent");

    // Generate manifest base on template
    let template = fs::read_to_string(&template_path).unwrap_or_else(|err| {
        panic!(
            "Failed to read manifest: {:?}\n Error: {:?}",
            template_path.as_ref(),
            err
        );
    });
    let content = generate_manifest(&template, source_path, |rel_path| {
        path::canonicalize(&manifest_dir, &rel_path)
    });
    let mut temp_manifest = File::create(&temp_manifest_path).unwrap_or_else(|err| {
        panic!(
            "Failed to create manifest file: {:?}\n Error: {:?}",
            temp_manifest_path, err
        );
    });
    temp_manifest.write_all(content.as_bytes())?;
    temp_manifest.flush()?;
    Ok((temp_manifest_path, dir))
}

/// Parses `template` as a Cargo manifest and returns the serialized result of:
/// - converting every path dependency to an absolute path via `rel_to_abs`, and
/// - setting `[lib]` to build `source_path` with edition 2018.
///
/// `rel_to_abs` maps a relative dependency path (as written in the template)
/// to the absolute path to use in the generated manifest.
///
/// Panics if the template cannot be parsed or the result cannot be serialized.
fn generate_manifest<P, F>(template: &str, source_path: P, rel_to_abs: F) -> String
where
    P: AsRef<Path>,
    F: Fn(&str) -> PathBuf,
{
    let mut manifest =
        cargo_toml::Manifest::from_slice(template.as_bytes()).unwrap_or_else(|err| {
            panic!("Failed to parse manifest\n Error: {:?}", err);
        });

    // Apply modifications: deps, lib
    manifest.dependencies = manifest
        .dependencies
        .into_iter()
        .map(|(crate_name, dep)| match dep.clone() {
            // Only detailed dependencies can carry a `path`; simple version
            // strings are passed through untouched.
            Dependency::Detailed(detail) => {
                if let Some(rel_path) = detail.clone().path {
                    let crate_dir = rel_to_abs(&rel_path);
                    let detail = DependencyDetail {
                        path: Some(String::from(crate_dir.to_str().unwrap())),
                        ..detail
                    };
                    (crate_name, Dependency::Detailed(detail))
                } else {
                    (crate_name, dep)
                }
            }
            _ => (crate_name, dep),
        })
        .collect();

    let source_path = String::from(source_path.as_ref().to_str().unwrap());
    manifest.lib = Some(Product {
        path: Some(source_path),
        edition: Some(Edition::E2018),
        ..Default::default()
    });

    toml::to_string(&manifest).unwrap_or_else(|err| {
        panic!("Failed to serialize manifest\n Error: {:?}", err);
    })
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::diff::print;
    use std::str::FromStr;

    #[test]
    fn test_generate_manifest() {
        let source_path =
            PathBuf::from_str("/home/macro-harness/projects/awesome/tests/test_macro.rs").unwrap();
        // Compares the generated manifest against the checked-in fixture;
        // `diff::print` is expected to surface any mismatch.
        print(
            include_str!("../fixtures/Cargo.expected.toml"),
            generate_manifest(
                include_str!("../fixtures/Cargo.template.toml"),
                source_path,
                |rel_path| {
                    let rel = String::from(rel_path);
                    let name = rel.split('/').last().unwrap();
                    PathBuf::from_str(format!("/home/macro-harness/projects/{}", name).as_str())
                        .unwrap()
                },
            )
            .as_str(),
        );
    }
}
31.828829
99
0.546844
624ef9ed0d70fcb3df754a8b29d6067ed552c4d0
4,746
//! This crate aims to provide a comprehensive toolkit //! for working with [RDF] and [Linked Data] in Rust. //! //! RDF is a data model //! designed to exchange knowledge on the Web //! in an interoperable way. //! Each piece of knowledge in RDF (a *statement*) //! is represented by a [triple], made of three [term]s. //! A set of [triple]s forms an RDF [graph]. //! Finally, several [graph]s can be grouped in a collection //! called a [dataset], where each [graph] is identified by a unique name. //! //! [RDF]: https://www.w3.org/TR/rdf-primer/ //! [Linked Data]: http://linkeddata.org/ //! //! # Generalized vs. Strict RDF model //! //! The data model supported by this crate is in fact //! a superset of the RDF data model as defined by the W3C. //! When the distinction matters, //! they will be called, respectively, //! the *generalized* RDF model, and the *strict* RDF model. //! //! # Getting Started //! //! Following a short example how to build a graph, mutate it and serialize it //! back. //! //! ``` //! use sophia::graph::{*, inmem::FastGraph}; //! use sophia::ns::Namespace; //! use sophia::parser::turtle; //! use sophia::serializer::*; //! use sophia::serializer::nt::NtSerializer; //! use sophia::triple::stream::TripleSource; //! //! let example = r#" //! @prefix : <http://example.org/>. //! @prefix foaf: <http://xmlns.com/foaf/0.1/>. //! //! :alice foaf:name "Alice"; //! foaf:mbox <mailto:[email protected]> . //! //! :bob foaf:name "Bob". //! "#; //! let mut graph: FastGraph = turtle::parse_str(example).collect_triples()?; //! //! let ex = Namespace::new("http://example.org/")?; //! let foaf = Namespace::new("http://xmlns.com/foaf/0.1/")?; //! graph.insert( //! &ex.get("bob")?, //! &foaf.get("knows")?, //! &ex.get("alice")?, //! )?; //! //! let mut nt_stringifier = NtSerializer::new_stringifier(); //! let example2 = nt_stringifier.serialize_graph(&mut graph)?.as_str(); //! println!("The resulting graph\n{}", example2); //! 
# Ok::<(), Box<dyn std::error::Error>>(()) //! ``` #![deny(missing_docs)] pub mod query; /// This module re-exports symbols from /// [`sophia_api::dataset`], [`sophia_indexed::dataset`] and [`sophia_inmem::dataset`]. pub mod dataset { pub use sophia_api::dataset::*; pub use sophia_indexed::dataset as indexed; pub use sophia_inmem::dataset as inmem; } /// This module re-exports symbols from /// [`sophia_iri`]. pub mod iri { pub use sophia_iri::*; } /// This module re-exports symbols from /// [`sophia_api::graph`], [`sophia_indexed::graph`] and [`sophia_inmem::graph`]. pub mod graph { pub use sophia_api::graph::*; pub use sophia_indexed::graph as indexed; pub use sophia_inmem::graph as inmem; } /// This module re-exports symbols from /// [`sophia_api::ns`]. pub mod ns { pub use sophia_api::ns::*; } /// This module re-exports symbols from /// [`sophia_api::parser`] and [`sophia_turtle::parser`]. /// If the `xml` feature is enabled, it also re-exports /// [`sophia_xml::parser`] as [`xml`](crate::parser::xml). pub mod parser { pub use sophia_api::parser::*; pub use sophia_turtle::parser::gtrig; pub use sophia_turtle::parser::nq; pub use sophia_turtle::parser::nt; pub use sophia_turtle::parser::trig; pub use sophia_turtle::parser::turtle; #[cfg(feature = "xml")] pub use sophia_xml::parser as xml; #[deprecated(since = "0.7.0", note = "please use `sophia_xml` instead")] #[cfg(feature = "xml")] pub mod xml_legacy; } /// This module re-exports symbols from /// [`sophia_api::prefix`]. pub mod prefix { pub use sophia_api::prefix::*; } /// This module re-exports symbols from /// [`sophia_api::quad`]. pub mod quad { pub use sophia_api::quad::*; } /// This module re-exports symbols from /// [`sophia_api::serializer`] and [`sophia_turtle::serializer`]. /// If the `xml` feature is enabled, it also re-exports /// [`sophia_xml::serializer`] as [`xml`](crate::serializer::xml). 
pub mod serializer { pub use sophia_api::serializer::*; pub use sophia_turtle::serializer::nq; pub use sophia_turtle::serializer::nt; pub use sophia_turtle::serializer::trig; pub use sophia_turtle::serializer::turtle; #[cfg(feature = "xml")] pub use sophia_xml::serializer as xml; } /// This module re-exports symbols from /// [`sophia_api::sparql`]. pub mod sparql { pub use sophia_api::sparql::*; } /// This module re-exports symbols from /// This module re-exports symbols from /// [`sophia_api::term`] /// and /// [`sophia_term`]. pub mod term { pub use sophia_api::term::*; pub use sophia_term::*; } /// This module re-exports symbols from /// [`sophia_api::triple`]. pub mod triple { pub use sophia_api::triple::*; }
31.852349
87
0.647914
fe74db9ea0c5542b2dc9cec66e5f55e8cc3f3c52
102
// Top-level module declarations for this crate; the module bodies live in
// sibling source files not visible here.
// NOTE(review): the names suggest an airdrop/auction/lockdrop-style protocol
// crate — confirm against the actual repository layout.
pub mod airdrop;
pub mod auction;
pub mod helpers;
pub mod lockdrop;
pub mod lp_staking;
pub mod tax;
14.571429
19
0.764706
1c07c7d5464a0336eb26923930edd53e2e96190e
8,170
#![allow(unused_imports)] use std::env; use std::fs; use std::path::{Path, PathBuf}; #[cfg(target_os = "linux")] fn find_julia() -> Option<String> { if let Ok(path) = env::var("JULIA_DIR") { return Some(path); } if Path::new("/usr/include/julia/julia.h").exists() { return Some("/usr".to_string()); } None } #[cfg(target_os = "windows")] fn flags() -> Vec<String> { let julia_dir = env::var("JULIA_DIR").expect("Julia cannot be found. You can specify the Julia installation path with the JULIA_DIR environment variable."); let cygwin_path = env::var("CYGWIN_DIR").expect("Cygwin cannot be found. You can specify the Cygwin installation path with the CYGWIN_DIR environment variable."); let jl_include_path = format!("-I{}/include/julia/", julia_dir); let cygwin_include_path = format!("-I{}/usr/include", cygwin_path); let w32api_include_path = format!("-I{}/usr/include/w32api", cygwin_path); let jl_lib_path = format!("-L{}/bin/", julia_dir); println!("cargo:rustc-flags={}", &jl_lib_path); println!("cargo:rustc-link-lib=julia"); vec![ jl_include_path, cygwin_include_path, w32api_include_path, jl_lib_path, ] } #[cfg(target_os = "linux")] fn flags() -> Vec<String> { let flags = match find_julia() { Some(julia_dir) => { let jl_include_path = format!("-I{}/include/julia/", julia_dir); let jl_lib_path = format!("-L{}/lib/", julia_dir); println!("cargo:rustc-flags={}", &jl_lib_path); vec![jl_include_path, jl_lib_path] } None => Vec::new(), }; println!("cargo:rustc-link-lib=julia"); flags } fn main() { let mut out_path = PathBuf::from(env::var("OUT_DIR").unwrap()); out_path.push("bindings.rs"); println!("cargo:rerun-if-changed=wrapper.h"); println!("cargo:rerun-if-env-changed=JULIA_DIR"); println!("cargo:rerun-if-env-changed=CYGWIN_DIR"); if env::var("CARGO_FEATURE_DOCS_RS").is_ok() { fs::copy("dummy-bindings.rs", &out_path) .expect("Couldn't create bindings from dummy bindings."); return; } let flags = flags(); // Only generate bindings if it's used by Jlrs let bindings = 
bindgen::Builder::default() .clang_args(&flags) .header("wrapper.h") .size_t_is_usize(true) // Initializing and stopping .whitelist_function("jl_init__threading") .whitelist_function("jl_is_initialized") .whitelist_function("jl_atexit_hook") // Gc .whitelist_function("jl_pgcstack") .whitelist_function("jl_get_ptls_states") .whitelist_function("jl_gc_queue_root") // boxing and unboxing primitives .whitelist_function("jl_box_bool") .whitelist_function("jl_box_char") .whitelist_function("jl_box_int8") .whitelist_function("jl_unbox_int8") .whitelist_function("jl_box_int16") .whitelist_function("jl_unbox_int16") .whitelist_function("jl_box_int32") .whitelist_function("jl_unbox_int32") .whitelist_function("jl_box_int64") .whitelist_function("jl_unbox_int64") .whitelist_function("jl_box_uint8") .whitelist_function("jl_unbox_uint8") .whitelist_function("jl_box_uint16") .whitelist_function("jl_unbox_uint16") .whitelist_function("jl_box_uint32") .whitelist_function("jl_unbox_uint32") .whitelist_function("jl_box_uint64") .whitelist_function("jl_unbox_uint64") .whitelist_function("jl_box_float32") .whitelist_function("jl_unbox_float32") .whitelist_function("jl_box_float64") .whitelist_function("jl_unbox_float64") // call functions .whitelist_function("jl_call0") .whitelist_function("jl_call1") .whitelist_function("jl_call2") .whitelist_function("jl_call3") .whitelist_function("jl_call") .whitelist_function("jl_exception_occurred") .whitelist_function("jl_eval_string") // symbols and globals .whitelist_function("jl_symbol_n") .whitelist_function("jl_get_global") .whitelist_function("jl_set_global") // structs .whitelist_function("jl_field_index") .whitelist_function("jl_get_nth_field") .whitelist_function("jl_get_nth_field_noalloc") .whitelist_function("jl_get_field") .whitelist_function("jl_field_isdefined") .whitelist_function("jl_compute_fieldtypes") // tuples .whitelist_function("jl_apply_type") .whitelist_function("jl_new_structv") .whitelist_function("jl_tupletype_fill") 
.whitelist_function("jl_apply_tuple_type_v") .whitelist_function("jl_new_struct_uninit") // n-dimensional arrays .whitelist_function("jl_apply_array_type") .whitelist_function("jl_array_eltype") .whitelist_function("jl_alloc_array_1d") .whitelist_function("jl_alloc_array_2d") .whitelist_function("jl_alloc_array_3d") .whitelist_function("jl_new_array") .whitelist_function("jl_ptr_to_array_1d") .whitelist_function("jl_ptr_to_array") // strings .whitelist_function("jl_pchar_to_string") .whitelist_function("jl_typeof_str") .whitelist_function("jl_typename_str") // modules .whitelist_var("jl_base_module") .whitelist_var("jl_core_module") .whitelist_var("jl_main_module") // types .whitelist_type("jl_value_t") .whitelist_type("jl_taggedvalue_t") .whitelist_type("jl_datatype_t") .whitelist_var("jl_nothing") .whitelist_var("jl_bool_type") .whitelist_var("jl_char_type") .whitelist_var("jl_int8_type") .whitelist_var("jl_int16_type") .whitelist_var("jl_int32_type") .whitelist_var("jl_int64_type") .whitelist_var("jl_uint8_type") .whitelist_var("jl_uint16_type") .whitelist_var("jl_uint32_type") .whitelist_var("jl_uint64_type") .whitelist_var("jl_float32_type") .whitelist_var("jl_float64_type") .whitelist_var("jl_ssavalue_type") .whitelist_var("jl_slotnumber_type") .whitelist_var("jl_typedslot_type") .whitelist_var("jl_expr_type") .whitelist_var("jl_globalref_type") .whitelist_var("jl_gotonode_type") .whitelist_var("jl_pinode_type") .whitelist_var("jl_phinode_type") .whitelist_var("jl_phicnode_type") .whitelist_var("jl_upsilonnode_type") .whitelist_var("jl_quotenode_type") .whitelist_var("jl_newvarnode_type") .whitelist_var("jl_linenumbernode_type") .whitelist_var("jl_method_instance_type") .whitelist_var("jl_code_instance_type") .whitelist_var("jl_code_info_type") .whitelist_var("jl_method_type") .whitelist_var("jl_methtable_type") .whitelist_var("jl_string_type") .whitelist_var("jl_datatype_type") .whitelist_var("jl_array_typename") .whitelist_var("jl_module_type") 
.whitelist_var("jl_nothing") .whitelist_var("jl_tuple_typename") .whitelist_var("jl_namedtuple_typename") .whitelist_var("jl_simplevector_type") .whitelist_var("jl_uniontype_type") .whitelist_var("jl_tvar_type") .whitelist_var("jl_unionall_type") .whitelist_var("jl_typename_type") .whitelist_var("jl_symbol_type") .whitelist_var("jl_task_type") .whitelist_var("jl_typeofbottom_type") .whitelist_var("jl_addrspace_pointer_typename") .whitelist_var("jl_pointer_type") .whitelist_var("jl_vecelement_typename") .whitelist_var("jl_ref_type") .whitelist_var("jl_type_type") .whitelist_var("jl_intrinsic_type") .rustfmt_bindings(true) .generate() .expect("Unable to generate bindings"); // Write the bindings to the $OUT_DIR/bindings.rs file. bindings .write_to_file(&out_path) .expect("Couldn't write bindings!"); }
37.824074
166
0.657405
28ea8473b08f44f0a44b64cd80e72dfffadd2e87
1,198
use crate::workflow_step::*; use crate::game_wrapper::GameWrapper; use crate::basic_types::Termination; #[derive(Debug)] pub struct CheckmateFilter { input_vec_name: String, output_vec_name: String, discard_vec_name: String, flag_name: String, } /// chess_analytics_build::register_step_builder "CheckmateFilter" CheckmateFilter impl CheckmateFilter { pub fn try_new(configuration: Vec<String>) -> Result<Box<dyn Step>, String> { let matches = load_step_config!("CheckmateFilter", "step_arg_configs/checkmate_filter.yaml", configuration); Ok(Box::new(CheckmateFilter { input_vec_name: matches.value_of("input").unwrap().to_string(), output_vec_name: matches.value_of("output").unwrap().to_string(), discard_vec_name: matches.value_of("discard").unwrap().to_string(), flag_name: matches.value_of("finish_flag").unwrap().to_string() })) } pub fn filter(game: GameWrapper, _filter: &CheckmateFilter) -> bool { game.termination == Termination::Normal && game.moves.last().unwrap().mates } } impl<'a> Step for CheckmateFilter { filter_template!(&CheckmateFilter::filter); }
35.235294
116
0.691987