hexsha
stringlengths
40
40
size
int64
4
1.05M
content
stringlengths
4
1.05M
avg_line_length
float64
1.33
100
max_line_length
int64
1
1k
alphanum_fraction
float64
0.25
1
1d7084a4d241b0da35692ff4a62d745bd1d2a2cf
1,751
//! RSA-related algorithms use super::{oaep, pkcs1, pss}; use crate::algorithm; /// RSA algorithms (signing and encryption) #[derive(Copy, Clone, Debug, Eq, PartialEq)] #[repr(u8)] pub enum Algorithm { /// RSA encryption with Optimal Asymmetric Encryption Padding (OAEP) Oaep(oaep::Algorithm), /// RSA PKCS#1v1.5: legacy signature and encryption algorithms Pkcs1(pkcs1::Algorithm), /// RSASSA-PSS: Probabilistic Signature Scheme Pss(pss::Algorithm), } impl Algorithm { /// Convert an unsigned byte tag into an `Algorithm` (if valid) pub fn from_u8(tag: u8) -> Result<Self, algorithm::Error> { Ok(match tag { 0x01..=0x04 => Algorithm::Pkcs1(pkcs1::Algorithm::from_u8(tag)?), 0x05..=0x08 => Algorithm::Pss(pss::Algorithm::from_u8(tag)?), 0x19..=0x1c => Algorithm::Oaep(oaep::Algorithm::from_u8(tag)?), _ => fail!( algorithm::ErrorKind::TagInvalid, "unknown RSA algorithm ID: 0x{:02x}", tag ), }) } /// Serialize algorithm ID as a byte pub fn to_u8(self) -> u8 { match self { Algorithm::Oaep(alg) => alg.to_u8(), Algorithm::Pkcs1(alg) => alg.to_u8(), Algorithm::Pss(alg) => alg.to_u8(), } } } impl_algorithm_serializers!(Algorithm); impl From<oaep::Algorithm> for Algorithm { fn from(alg: oaep::Algorithm) -> Algorithm { Algorithm::Oaep(alg) } } impl From<pkcs1::Algorithm> for Algorithm { fn from(alg: pkcs1::Algorithm) -> Algorithm { Algorithm::Pkcs1(alg) } } impl From<pss::Algorithm> for Algorithm { fn from(alg: pss::Algorithm) -> Algorithm { Algorithm::Pss(alg) } }
27.359375
77
0.592804
039d57c225c65b79f76f16a1032898f3566357bd
28,387
// Copyright 2019 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. mod boringssl; mod ecc; mod frame; mod state; use { anyhow::{bail, Error}, boringssl::{Bignum, EcGroupId}, fidl_fuchsia_wlan_mlme::AuthenticateResultCodes as ResultCode, frame::{AntiCloggingTokenMsg, CommitMsg, ConfirmMsg}, log::warn, mundane::{hash::Digest, hmac}, wlan_common::ie::rsn::akm::{self, Akm}, wlan_common::mac::MacAddr, }; /// Maximum number of incorrect frames sent before SAE fails. const DOT11_RSNA_SAE_SYNC: u16 = 30; /// A shared key computed by an SAE handshake. #[derive(Clone, PartialEq, Debug)] pub struct Key { pub pmk: Vec<u8>, pub pmkid: Vec<u8>, } /// Types of timeout that are used by SAE handshakes. Duration and scheduling of these timeouts /// is left to the user of this library. #[derive(Debug)] pub enum Timeout { /// Timeout before the most recent message(s) should be resent. Retransmission, /// Timeout before the PMK produced by a successful handshake is considered invalid. KeyExpiration, } #[derive(Debug)] pub enum RejectReason { /// We experienced a failure that was unrelated to data received from the peer. This likely /// means we are not in a good state. InternalError(Error), /// Data received from the peer failed validation, and we cannot generate a PMK. AuthFailed, /// The peer has failed to respond or sent incorrect responses too many times. TooManyRetries, } impl From<Error> for RejectReason { fn from(e: Error) -> Self { Self::InternalError(e) } } #[derive(Debug)] pub struct AuthFrameRx<'a> { seq: u16, status_code: ResultCode, body: &'a [u8], } #[derive(Debug, Clone, Eq, PartialEq)] pub struct AuthFrameTx { seq: u16, status_code: ResultCode, body: Vec<u8>, } /// An update generated to progress an SAE handshake. These updates should generally be converted /// into a frame and sent to the SAE peer. #[derive(Debug)] pub enum SaeUpdate { /// Send an auth frame to the peer. 
SendFrame(AuthFrameTx), /// Indicates the handshake is complete. The handshake should *not* be deleted at this point. Complete(Key), /// Indicates that the handshake has failed and must be aborted or restarted. Reject(RejectReason), /// Request the user of the library to set or reset a timeout. If this timeout expires, it /// should be passed to SaeHandshake::handle_timeout. ResetTimeout(Timeout), /// Request the user of the library to cancel a timeout that was previously set. CancelTimeout(Timeout), } pub type SaeUpdateSink = Vec<SaeUpdate>; /// IEEE 802.11-2016 12.4: Simultaneous Authentication of Equals (SAE) /// /// An SAE handshake with a peer is a symmetric handshake that may be used in place of open /// authentication as the AKM. A full handshake consists of both peers sending a Commit and Confirm /// frame, at which point they have both derived a shared key that is unique to those peers and that /// session. /// /// Structs implementing this trait are responsible for handling both a successful SAE handshake, /// various failure modes, and edge cases such as retries and timeouts. /// /// None of the functions in this trait return errors. Instead, non-fatal errors are logged, and /// fatal errors push an SaeUpdate::Reject to the update sink. Once an SaeUpdate::Reject is pushed, /// all further operations are no-ops. pub trait SaeHandshake { /// Initiate SAE by sending the first commit message. If the peer STA sends the first commit /// message, handle_commit should be called first and initiate_sae should never be called. 
fn initiate_sae(&mut self, sink: &mut SaeUpdateSink); fn handle_commit(&mut self, sink: &mut SaeUpdateSink, commit_msg: &CommitMsg); fn handle_confirm(&mut self, sink: &mut SaeUpdateSink, confirm_msg: &ConfirmMsg); fn handle_timeout(&mut self, sink: &mut SaeUpdateSink, timeout: Timeout); fn handle_frame(&mut self, sink: &mut SaeUpdateSink, frame: &AuthFrameRx) { match frame::parse(frame) { Ok(parse) => match parse { frame::ParseSuccess::Commit(commit) => self.handle_commit(sink, &commit), frame::ParseSuccess::Confirm(confirm) => self.handle_confirm(sink, &confirm), frame::ParseSuccess::AntiCloggingToken(_act) => { warn!("Anti-clogging tokens not yet supported"); } }, Err(e) => warn!("Failed to parse SAE auth frame: {}", e), } } } /// Creates a new SAE handshake for the given group ID and authentication parameters. pub fn new_sae_handshake( group_id: u16, akm: Akm, password: Vec<u8>, mac: MacAddr, peer_mac: MacAddr, ) -> Result<Box<dyn SaeHandshake>, Error> { let (h, cn) = match akm.suite_type { akm::SAE | akm::FT_SAE => (h, cn), _ => bail!("Cannot construct SAE handshake with AKM {:?}", akm), }; let params = internal::SaeParameters { h, cn, password, sta_a_mac: mac, sta_b_mac: peer_mac }; match group_id { 19 => { // Elliptic curve group 19 is the default supported group. let ec_group = Box::new(ecc::Group::new(EcGroupId::P256)?); Ok(Box::new(state::SaeHandshakeImpl::new(ec_group, params)?)) } _ => bail!("Unsupported SAE group id: {}", group_id), } } /// Creates a new SAE handshake in response to a first message from a peer, using the FCG indiated /// by the peer if possible. In a successful handshake, this will immediately push a Commit and /// Confirm to the given update sink. 
pub fn join_sae_handshake( sink: &mut SaeUpdateSink, first_frame: &AuthFrameRx, akm: Akm, password: Vec<u8>, mac: MacAddr, peer_mac: MacAddr, ) -> Result<Box<dyn SaeHandshake>, Error> { let parsed_frame = frame::parse(first_frame)?; match parsed_frame { frame::ParseSuccess::Commit(commit) => { let mut handshake = new_sae_handshake(commit.group_id, akm, password, mac, peer_mac)?; handshake.handle_commit(sink, &commit); Ok(handshake) } _ => bail!("Recieved incorrect first frame of SAE handshake"), } } // Internal mod for structs with mod-public visibility. mod internal { use super::*; /// IEEE 802.11-2016 12.4.4 /// SAE may use many different finite cyclic groups (FCGs) to compute the various values used /// during the handshake. This trait allows our SAE implementation to seamlessly handle /// different classes of FCG. IEEE 802.11-2016 defines support for both elliptic curve groups /// and finite field cryptography groups. /// /// All functions provided by this trait will only return an Error when something internal has /// gone wrong. pub trait FiniteCyclicGroup { /// Different classes of FCG have different Element types, but scalars can always be /// represented by a Bignum. type Element; fn group_id(&self) -> u16; /// IEEE 802.11-2016 12.4.3 /// Generates a new password element, a secret value shared by the two peers in SAE. fn generate_pwe(&self, params: &SaeParameters) -> Result<Self::Element, Error>; /// IEEE 12.4.4.1 /// These three operators are used to manipulate FCG elements for the purposes of the /// Diffie-Hellman key exchange used by SAE. fn scalar_op( &self, scalar: &Bignum, element: &Self::Element, ) -> Result<Self::Element, Error>; fn elem_op( &self, element1: &Self::Element, element2: &Self::Element, ) -> Result<Self::Element, Error>; fn inverse_op(&self, element: Self::Element) -> Result<Self::Element, Error>; /// Returns the prime order of the FCG. 
fn order(&self) -> Result<Bignum, Error>; /// IEEE 802.11-2016 12.4.5.4 /// Maps the given secret element to the shared secret value. Returns None if this is the /// identity element for this FCG, indicating that we have in invalid secret element. fn map_to_secret_value(&self, element: &Self::Element) -> Result<Option<Bignum>, Error>; /// IEEE 802.11-2016 12.4.2: The FCG Element must convert into an octet string such /// that it may be included in the confirmation hash when completing SAE. fn element_to_octets(&self, element: &Self::Element) -> Result<Vec<u8>, Error>; /// Convert octets into an element. Returns None if the given octet string does not /// contain a valid element for this group. fn element_from_octets(&self, octets: &[u8]) -> Result<Option<Self::Element>, Error>; /// Return the expected size of scalar and element values when serialized into a frame. fn scalar_size(&self) -> Result<usize, Error> { self.order().map(|order| order.len()) } fn element_size(&self) -> Result<usize, Error>; } #[derive(Clone)] pub struct SaeParameters { // IEEE 802.11-2016 12.4.2: SAE theoretically supports arbitrary H and CN functions, // although the standard only uses HMAC-SHA-256. pub h: fn(salt: &[u8], ikm: &[u8]) -> Vec<u8>, #[allow(unused)] pub cn: fn(key: &[u8], counter: u16, data: Vec<&[u8]>) -> Vec<u8>, // IEEE 802.11-2016 12.4.3 pub password: Vec<u8>, // IEEE 802.11-2016 12.4.4.2.2: The two MacAddrs are needed for generating a password seed. 
pub sta_a_mac: MacAddr, pub sta_b_mac: MacAddr, } impl SaeParameters { pub fn pwd_seed(&self, counter: u8) -> Vec<u8> { let (big_mac, little_mac) = match self.sta_a_mac.cmp(&self.sta_b_mac) { std::cmp::Ordering::Less => (self.sta_b_mac, self.sta_a_mac), _ => (self.sta_a_mac, self.sta_b_mac), }; let mut salt = vec![]; salt.extend_from_slice(&big_mac[..]); salt.extend_from_slice(&little_mac[..]); let mut ikm = self.password.clone(); ikm.push(counter); (self.h)(&salt[..], &ikm[..]) } } } fn h(salt: &[u8], ikm: &[u8]) -> Vec<u8> { let mut hasher = hmac::HmacSha256::new(salt); hasher.update(ikm); hasher.finish().bytes().to_vec() } fn cn(key: &[u8], counter: u16, data: Vec<&[u8]>) -> Vec<u8> { let mut hasher = hmac::HmacSha256::new(key); hasher.update(&counter.to_le_bytes()[..]); for data_part in data { hasher.update(data_part); } hasher.finish().bytes().to_vec() } #[cfg(test)] mod tests { use { super::{internal::*, *}, hex::FromHex, wlan_common::assert_variant, }; // IEEE 802.11-2016 Annex J.10 SAE test vector const TEST_PWD: &'static str = "thisisreallysecret"; const TEST_STA_A: MacAddr = [0x7b, 0x88, 0x56, 0x20, 0x2d, 0x8d]; const TEST_STA_B: MacAddr = [0xe2, 0x47, 0x1c, 0x0a, 0x5a, 0xcb]; const TEST_PWD_SEED: [u8; 32] = [ 0x69, 0xf6, 0x90, 0x99, 0x83, 0x67, 0x53, 0x92, 0xd0, 0xa3, 0xa8, 0x82, 0x47, 0xff, 0xef, 0x20, 0x41, 0x3e, 0xe9, 0x72, 0x15, 0x87, 0x29, 0x42, 0x44, 0x15, 0xe1, 0x39, 0x46, 0xec, 0xc2, 0x06, ]; #[test] fn pwd_seed() { let params = SaeParameters { h, cn, password: Vec::from(TEST_PWD), sta_a_mac: TEST_STA_A, sta_b_mac: TEST_STA_B, }; let seed = params.pwd_seed(1); assert_eq!(&seed[..], &TEST_PWD_SEED[..]); } #[test] fn symmetric_pwd_seed() { let params = SaeParameters { h, cn, password: Vec::from(TEST_PWD), // The password seed should not change depending on the order of mac addresses. 
sta_a_mac: TEST_STA_B, sta_b_mac: TEST_STA_A, }; let seed = params.pwd_seed(1); assert_eq!(&seed[..], &TEST_PWD_SEED[..]); } #[test] fn bad_akm() { let akm = Akm::new_dot11(akm::PSK); let res = new_sae_handshake(19, akm, Vec::from(TEST_PWD), TEST_STA_A, TEST_STA_B); assert!(res.is_err()); assert!(format!("{}", res.err().unwrap()) .contains("Cannot construct SAE handshake with AKM 00-0F-AC:2")); } #[test] fn bad_fcg() { let akm = Akm::new_dot11(akm::SAE); let res = new_sae_handshake(200, akm, Vec::from(TEST_PWD), TEST_STA_A, TEST_STA_B); assert!(res.is_err()); assert!(format!("{}", res.err().unwrap()).contains("Unsupported SAE group id: 200")); } struct TestHandshake { sta1: Box<dyn SaeHandshake>, sta2: Box<dyn SaeHandshake>, } // Helper structs for differentiating Commit/Confirm messages once they've been converted into // generic auth frames. #[derive(Clone, Eq, PartialEq, Debug)] struct CommitTx(AuthFrameTx); #[derive(Clone, Eq, PartialEq, Debug)] struct ConfirmTx(AuthFrameTx); struct CommitRx<'a>(AuthFrameRx<'a>); struct ConfirmRx<'a>(AuthFrameRx<'a>); fn to_rx(frame: &AuthFrameTx) -> AuthFrameRx { AuthFrameRx { seq: frame.seq, status_code: frame.status_code, body: &frame.body[..] 
} } impl CommitTx { fn to_rx(&self) -> CommitRx { CommitRx(to_rx(&self.0)) } } impl ConfirmTx { fn to_rx(&self) -> ConfirmRx { ConfirmRx(to_rx(&self.0)) } } impl<'a> CommitRx<'a> { fn msg(&'a self) -> CommitMsg<'a> { assert_variant!(frame::parse(&self.0), Ok(frame::ParseSuccess::Commit(commit)) => commit) } } impl<'a> ConfirmRx<'a> { fn msg(&'a self) -> ConfirmMsg<'a> { assert_variant!(frame::parse(&self.0), Ok(frame::ParseSuccess::Confirm(confirm)) => confirm) } } fn expect_commit(sink: &mut Vec<SaeUpdate>) -> CommitTx { let mut commit = assert_variant!(sink.remove(0), SaeUpdate::SendFrame(frame) => frame); assert_variant!(frame::parse(&to_rx(&commit)), Ok(frame::ParseSuccess::Commit(msg))); CommitTx(commit) } fn expect_confirm(sink: &mut Vec<SaeUpdate>) -> ConfirmTx { let mut confirm = assert_variant!(sink.remove(0), SaeUpdate::SendFrame(frame) => frame); assert_variant!(frame::parse(&to_rx(&confirm)), Ok(frame::ParseSuccess::Confirm(msg))); ConfirmTx(confirm) } // Test helper to advance through successful steps of an SAE handshake. 
impl TestHandshake { fn new() -> Self { let akm = Akm::new_dot11(akm::SAE); let mut sta1 = new_sae_handshake(19, akm.clone(), Vec::from(TEST_PWD), TEST_STA_A, TEST_STA_B) .unwrap(); let mut sta2 = new_sae_handshake(19, akm, Vec::from(TEST_PWD), TEST_STA_B, TEST_STA_A).unwrap(); Self { sta1, sta2 } } fn sta1_init(&mut self) -> CommitTx { let mut sink = vec![]; self.sta1.initiate_sae(&mut sink); assert_eq!(sink.len(), 2); let commit = expect_commit(&mut sink); assert_variant!(sink.remove(0), SaeUpdate::ResetTimeout(Timeout::Retransmission)); commit } fn sta2_handle_commit(&mut self, commit1: CommitRx) -> (CommitTx, ConfirmTx) { let mut sink = vec![]; self.sta2.handle_commit(&mut sink, &commit1.msg()); assert_eq!(sink.len(), 3); let commit2 = expect_commit(&mut sink); let confirm2 = expect_confirm(&mut sink); assert_variant!(sink.remove(0), SaeUpdate::ResetTimeout(Timeout::Retransmission)); (commit2, confirm2) } fn sta1_handle_commit(&mut self, commit2: CommitRx) -> ConfirmTx { let mut sink = vec![]; self.sta1.handle_commit(&mut sink, &commit2.msg()); assert_eq!(sink.len(), 2); let confirm1 = expect_confirm(&mut sink); assert_variant!(sink.remove(0), SaeUpdate::ResetTimeout(Timeout::Retransmission)); confirm1 } fn sta1_handle_confirm(&mut self, confirm2: ConfirmRx) -> Key { Self::__internal_handle_confirm(&mut self.sta1, confirm2.msg()) } fn sta2_handle_confirm(&mut self, confirm1: ConfirmRx) -> Key { Self::__internal_handle_confirm(&mut self.sta2, confirm1.msg()) } fn __internal_handle_confirm(sta: &mut Box<dyn SaeHandshake>, confirm: ConfirmMsg) -> Key { let mut sink = vec![]; sta.handle_confirm(&mut sink, &confirm); assert_eq!(sink.len(), 3); assert_variant!(sink.remove(0), SaeUpdate::CancelTimeout(Timeout::Retransmission)); assert_variant!(sink.remove(0), SaeUpdate::ResetTimeout(Timeout::KeyExpiration)); assert_variant!(sink.remove(0), SaeUpdate::Complete(key) => key) } } #[test] fn sae_handshake_success() { let mut handshake = TestHandshake::new(); let commit1 = 
handshake.sta1_init(); let (commit2, confirm2) = handshake.sta2_handle_commit(commit1.to_rx()); let confirm1 = handshake.sta1_handle_commit(commit2.to_rx()); let key1 = handshake.sta1_handle_confirm(confirm2.to_rx()); let key2 = handshake.sta2_handle_confirm(confirm1.to_rx()); assert_eq!(key1, key2); } #[test] fn password_mismatch() { let akm = Akm::new_dot11(akm::SAE); let mut sta1 = new_sae_handshake(19, akm.clone(), Vec::from(TEST_PWD), TEST_STA_A, TEST_STA_B) .unwrap(); let mut sta2 = new_sae_handshake(19, akm, Vec::from("other_pwd"), TEST_STA_B, TEST_STA_A).unwrap(); let mut handshake = TestHandshake { sta1, sta2 }; let commit1 = handshake.sta1_init(); let (commit2, confirm2) = handshake.sta2_handle_commit(commit1.to_rx()); let confirm1 = handshake.sta1_handle_commit(commit2.to_rx()); let mut sink1 = vec![]; handshake.sta1.handle_confirm(&mut sink1, &confirm2.to_rx().msg()); let mut sink2 = vec![]; handshake.sta2.handle_confirm(&mut sink2, &confirm1.to_rx().msg()); // The confirm is dropped both ways. assert_eq!(sink1.len(), 0); assert_eq!(sink2.len(), 0); } #[test] fn retry_commit_on_unexpected_confirm() { let mut handshake = TestHandshake::new(); let commit1 = handshake.sta1_init(); let (commit2, confirm2) = handshake.sta2_handle_commit(commit1.clone().to_rx()); let mut sink = vec![]; handshake.sta1.handle_confirm(&mut sink, &confirm2.to_rx().msg()); assert_eq!(sink.len(), 2); let commit1_retry = expect_commit(&mut sink); assert_variant!(sink.remove(0), SaeUpdate::ResetTimeout(Timeout::Retransmission)); // We retransmit the same commit in response to a faulty confirm. 
assert_eq!(commit1, commit1_retry); } #[test] fn ignore_wrong_confirm() { let mut handshake = TestHandshake::new(); let commit1 = handshake.sta1_init(); let (commit2, confirm2) = handshake.sta2_handle_commit(commit1.to_rx()); let confirm1 = handshake.sta1_handle_commit(commit2.to_rx()); let mut sink = vec![]; let mut confirm2_wrong = ConfirmTx(frame::write_confirm(1, &[1; 32][..])); handshake.sta1.handle_confirm(&mut sink, &confirm2_wrong.to_rx().msg()); assert_eq!(sink.len(), 0); // Ignored. // STA1 should still be able to handle a subsequent correct confirm. handshake.sta1_handle_confirm(confirm2.to_rx()); } #[test] fn handle_resent_commit() { let mut handshake = TestHandshake::new(); let commit1 = handshake.sta1_init(); let (commit2, confirm2) = handshake.sta2_handle_commit(commit1.clone().to_rx()); let (commit2_retry, confirm2_retry) = handshake.sta2_handle_commit(commit1.to_rx()); // The resent commit message should be unchanged, but the resent confirm should increment // sc and produce a different value. assert_eq!(commit2, commit2_retry); assert_eq!(confirm2.to_rx().msg().send_confirm, 1); assert_eq!(confirm2_retry.to_rx().msg().send_confirm, 2); assert!(confirm2.to_rx().msg().confirm != confirm2_retry.to_rx().msg().confirm); // Now complete the handshake. let confirm1 = handshake.sta1_handle_commit(commit2_retry.to_rx()); let key1 = handshake.sta1_handle_confirm(confirm2_retry.to_rx()); let key2 = handshake.sta2_handle_confirm(confirm1.to_rx()); assert_eq!(key1, key2); } #[test] fn completed_handshake_handles_resent_confirm() { let mut handshake = TestHandshake::new(); let commit1 = handshake.sta1_init(); let (commit2, confirm2) = handshake.sta2_handle_commit(commit1.clone().to_rx()); let (commit2_retry, confirm2_retry) = handshake.sta2_handle_commit(commit1.to_rx()); // Send STA1 the second confirm message first. 
let confirm1 = handshake.sta1_handle_commit(commit2.to_rx()); let key1 = handshake.sta1_handle_confirm(confirm2.clone().to_rx()); // STA1 should respond to the second confirm with its own confirm. let mut sink = vec![]; handshake.sta1.handle_confirm(&mut sink, &confirm2_retry.to_rx().msg()); assert_eq!(sink.len(), 1); let confirm1_retry = expect_confirm(&mut sink); assert!(confirm1.to_rx().msg().confirm != confirm1_retry.to_rx().msg().confirm); assert_eq!(confirm1_retry.to_rx().msg().send_confirm, u16::max_value()); // STA2 should complete the handshake with the resent confirm. let key2 = handshake.sta2_handle_confirm(confirm1_retry.to_rx()); assert_eq!(key1, key2); // STA1 should silently drop either of our confirm frames now. handshake.sta1.handle_confirm(&mut sink, &confirm2_retry.to_rx().msg()); assert!(sink.is_empty()); handshake.sta1.handle_confirm(&mut sink, &confirm2.to_rx().msg()); assert!(sink.is_empty()); // STA1 should also silently drop an incorrect confirm, even if send_confirm is incremented. let confirm2_wrong = ConfirmMsg { send_confirm: 10, confirm: &[0xab; 32][..] }; handshake.sta1.handle_confirm(&mut sink, &confirm2_wrong); assert!(sink.is_empty()); } #[test] fn completed_handshake_ignores_commit() { let mut handshake = TestHandshake::new(); let commit1 = handshake.sta1_init(); let (commit2, confirm2) = handshake.sta2_handle_commit(commit1.to_rx()); handshake.sta1_handle_commit(commit2.to_rx()); handshake.sta1_handle_confirm(confirm2.clone().to_rx()); // STA1 has completed it's side of the handshake. 
let mut sink = vec![]; handshake.sta1.handle_confirm(&mut sink, &confirm2.to_rx().msg()); assert!(sink.is_empty()); } #[test] fn bad_first_commit_rejects_auth() { let mut handshake = TestHandshake::new(); let commit1_wrong = CommitMsg { group_id: 19, scalar: &[0xab; 32][..], element: &[0xcd; 64][..], token: None, }; let mut sink = vec![]; handshake.sta1.handle_commit(&mut sink, &commit1_wrong); assert_eq!(sink.len(), 1); assert_variant!(sink.remove(0), SaeUpdate::Reject(RejectReason::AuthFailed)); } #[test] fn bad_second_commit_ignored() { let mut handshake = TestHandshake::new(); let mut commit1 = handshake.sta1_init(); let (_commit1, _confirm2) = handshake.sta2_handle_commit(commit1.to_rx()); let commit2_wrong = CommitMsg { group_id: 19, scalar: &[0xab; 32][..], element: &[0xcd; 64][..], token: None, }; let mut sink = vec![]; handshake.sta1.handle_commit(&mut sink, &commit2_wrong); assert_eq!(sink.len(), 0); } #[test] fn reflected_commit_discarded() { let mut handshake = TestHandshake::new(); let mut commit1 = handshake.sta1_init(); let mut sink = vec![]; handshake.sta1.handle_commit(&mut sink, &commit1.to_rx().msg()); assert_eq!(sink.len(), 1); assert_variant!(sink.remove(0), SaeUpdate::ResetTimeout(Timeout::Retransmission)); } #[test] fn maximum_commit_retries() { let mut handshake = TestHandshake::new(); let mut commit1 = handshake.sta1_init(); let (commit2, confirm2) = handshake.sta2_handle_commit(commit1.clone().to_rx()); // STA2 should allow DOT11_RSNA_SAE_SYNC retry operations before giving up. for i in 0..DOT11_RSNA_SAE_SYNC { let (commit2_retry, confirm2_retry) = handshake.sta2_handle_commit(commit1.clone().to_rx()); assert_eq!(commit2, commit2_retry); assert_eq!(confirm2_retry.to_rx().msg().send_confirm, i + 2); } // The last straw! 
let mut sink = vec![]; handshake.sta2.handle_commit(&mut sink, &commit1.to_rx().msg()); assert_eq!(sink.len(), 1); assert_variant!(sink.remove(0), SaeUpdate::Reject(RejectReason::TooManyRetries)); } #[test] fn completed_exchange_fails_after_retries() { let mut handshake = TestHandshake::new(); let mut commit1 = handshake.sta1_init(); let (commit2, confirm2) = handshake.sta2_handle_commit(commit1.clone().to_rx()); // STA2 should allow DOT11_RSNA_SAE_SYNC retry operations before giving up. We subtract 1 // here for the reason explained in the note below. for i in 0..(DOT11_RSNA_SAE_SYNC - 1) { let (commit2_retry, confirm2_retry) = handshake.sta2_handle_commit(commit1.clone().to_rx()); assert_eq!(commit2, commit2_retry); assert_eq!(confirm2_retry.to_rx().msg().send_confirm, i + 2); } let mut sink = vec![]; // Generate 3 different confirm messages for our testing... let confirm1_sc1 = handshake.sta1_handle_commit(commit2.clone().to_rx()); handshake.sta1.handle_commit(&mut sink, &commit2.to_rx().msg()); assert_eq!(sink.len(), 3); sink.remove(0); let confirm1_sc2 = expect_confirm(&mut sink); sink.clear(); let confirm1_invalid = ConfirmMsg { send_confirm: 3, confirm: &[0xab; 32][..] }; // STA2 completes the handshake. However, one more indication that STA1 is misbehaving will // immediately kill the authentication. handshake.sta2_handle_confirm(confirm1_sc1.clone().to_rx()); // NOTE: We run all of the operations here two times. This is because of a quirk in the SAE // state machine: while only certain operations *increment* sync, all invalid operations // will *check* sync. We can test whether sync is being incremented by running twice to see // if this pushes us over the DOT11_RSNA_SAE_SYNC threshold. // STA2 ignores commits. handshake.sta2.handle_commit(&mut sink, &commit1.to_rx().msg()); handshake.sta2.handle_commit(&mut sink, &commit1.to_rx().msg()); assert_eq!(sink.len(), 0); // STA2 ignores invalid confirm. 
handshake.sta2.handle_confirm(&mut sink, &confirm1_invalid); handshake.sta2.handle_confirm(&mut sink, &confirm1_invalid); assert_eq!(sink.len(), 0); // STA2 ignores old confirm. handshake.sta2.handle_confirm(&mut sink, &confirm1_sc1.to_rx().msg()); handshake.sta2.handle_confirm(&mut sink, &confirm1_sc1.to_rx().msg()); assert_eq!(sink.len(), 0); // But another valid confirm increments sync! handshake.sta2.handle_confirm(&mut sink, &confirm1_sc2.to_rx().msg()); assert_eq!(sink.len(), 1); expect_confirm(&mut sink); handshake.sta2.handle_confirm(&mut sink, &confirm1_sc2.to_rx().msg()); assert_eq!(sink.len(), 1); assert_variant!(sink.remove(0), SaeUpdate::Reject(RejectReason::TooManyRetries)); } }
39.98169
100
0.625216
ddfe338a76435cdb974d73be95dfc7bdb7410c48
2,488
use super::datasource_provider::DatasourceProvider; use crate::common::provider_names::*; use datamodel_connector::Connector; use mongodb_datamodel_connector::MongoDbDatamodelConnector; use sql_datamodel_connector::SqlDatamodelConnectors; pub struct SqliteDatasourceProvider; impl DatasourceProvider for SqliteDatasourceProvider { fn is_provider(&self, provider: &str) -> bool { provider == SQLITE_SOURCE_NAME } fn canonical_name(&self) -> &str { SQLITE_SOURCE_NAME } fn connector(&self) -> &'static dyn Connector { SqlDatamodelConnectors::SQLITE } } pub struct CockroachDbDatasourceProvider; impl DatasourceProvider for CockroachDbDatasourceProvider { fn is_provider(&self, provider: &str) -> bool { provider == COCKROACHDB_SOURCE_NAME } fn canonical_name(&self) -> &str { COCKROACHDB_SOURCE_NAME } fn connector(&self) -> &'static dyn Connector { SqlDatamodelConnectors::POSTGRES } } pub struct PostgresDatasourceProvider; impl DatasourceProvider for PostgresDatasourceProvider { fn is_provider(&self, provider: &str) -> bool { provider == POSTGRES_SOURCE_NAME || provider == POSTGRES_SOURCE_NAME_HEROKU } fn canonical_name(&self) -> &str { POSTGRES_SOURCE_NAME } fn connector(&self) -> &'static dyn Connector { SqlDatamodelConnectors::POSTGRES } } pub struct MySqlDatasourceProvider; impl DatasourceProvider for MySqlDatasourceProvider { fn is_provider(&self, provider: &str) -> bool { provider == MYSQL_SOURCE_NAME } fn canonical_name(&self) -> &str { MYSQL_SOURCE_NAME } fn connector(&self) -> &'static dyn Connector { SqlDatamodelConnectors::MYSQL } } pub struct MsSqlDatasourceProvider; impl DatasourceProvider for MsSqlDatasourceProvider { fn is_provider(&self, provider: &str) -> bool { provider == MSSQL_SOURCE_NAME } fn canonical_name(&self) -> &str { MSSQL_SOURCE_NAME } fn connector(&self) -> &'static dyn Connector { SqlDatamodelConnectors::MSSQL } } pub struct MongoDbDatasourceProvider; impl DatasourceProvider for MongoDbDatasourceProvider { fn is_provider(&self, provider: &str) -> 
bool { provider == MONGODB_SOURCE_NAME } fn canonical_name(&self) -> &str { MONGODB_SOURCE_NAME } fn connector(&self) -> &'static dyn Connector { &MongoDbDatamodelConnector } }
24.392157
83
0.690113
0ac3ea6ebf8881d0a06fddc6716fb5930835960c
471
// !!! THIS IS A GENERATED FILE !!! // ANY MANUAL EDITS MAY BE OVERWRITTEN AT ANY TIME // Files autogenerated with cargo build (build/wasitests.rs). #[test] fn test_fd_append() { assert_wasi_output!( "../../wasitests/fd_append.wasm", "fd_append", vec![], vec![( ".".to_string(), ::std::path::PathBuf::from("wasitests/test_fs/temp") ),], vec![], "../../wasitests/fd_append.out" ); }
24.789474
64
0.537155
917411d4c79a8490c88961bd974043fa069fc42e
14,928
#![feature(proc_macro_hygiene, decl_macro)] #![cfg_attr(test, deny(warnings))] #[macro_use] extern crate lazy_static; extern crate rand; extern crate reqwest; extern crate serde_json; #[macro_use] extern crate rocket; extern crate rust_team_data; extern crate sass_rs; extern crate siphasher; extern crate toml; extern crate rocket_contrib; #[macro_use] extern crate serde; extern crate fluent_bundle; extern crate regex; extern crate handlebars; mod cache; mod caching; mod category; mod headers; mod i18n; mod production; mod redirect; mod rust_version; mod sponsors; mod teams; use production::User; use teams::encode_zulip_stream; use std::collections::hash_map::DefaultHasher; use std::env; use std::fs; use std::hash::Hasher; use std::path::{Path, PathBuf}; use rand::seq::SliceRandom; use rocket::{ http::{RawStr, Status}, request::{FromParam, Request}, response::{NamedFile, Redirect}, }; use rocket_contrib::templates::Template; use sass_rs::{compile_file, Options}; use category::Category; use caching::{Cached, Caching}; use handlebars_fluent::{loader::Loader, FluentHelper}; use i18n::{create_loader, LocaleInfo, SupportedLocale, TeamHelper, EXPLICIT_LOCALE_INFO}; use rocket::http::hyper::header::CacheDirective; const ZULIP_DOMAIN: &str = "https://rust-lang.zulipchat.com"; lazy_static! 
{ static ref ASSETS: AssetFiles = { let app_css_file = compile_sass("app"); let fonts_css_file = compile_sass("fonts"); let vendor_css_file = concat_vendor_css(vec!["tachyons"]); let app_js_file = concat_app_js(vec!["tools-install"]); AssetFiles { css: CSSFiles { app: app_css_file, fonts: fonts_css_file, vendor: vendor_css_file, }, js: JSFiles { app: app_js_file }, } }; static ref PONTOON_ENABLED: bool = env::var("RUST_WWW_PONTOON").is_ok(); } #[derive(Serialize)] struct Context<T: ::serde::Serialize> { page: String, title: String, parent: &'static str, is_landing: bool, data: T, lang: String, baseurl: String, pontoon_enabled: bool, assets: &'static AssetFiles, locales: &'static [LocaleInfo], is_translation: bool, } impl<T: ::serde::Serialize> Context<T> { fn new(page: String, title_id: &str, is_landing: bool, data: T, lang: String) -> Self { let helper = create_loader(); let title = if title_id.is_empty() { "".into() } else { helper.lookup(&lang, title_id, None) }; Self { page, title, parent: LAYOUT, is_landing, data, baseurl: baseurl(&lang), is_translation: lang != "en-US", lang, pontoon_enabled: *PONTOON_ENABLED, assets: &ASSETS, locales: EXPLICIT_LOCALE_INFO, } } } #[derive(Clone, Serialize)] struct CSSFiles { app: String, fonts: String, vendor: String, } #[derive(Clone, Serialize)] struct JSFiles { app: String, } #[derive(Clone, Serialize)] struct AssetFiles { css: CSSFiles, js: JSFiles, } static LAYOUT: &str = "components/layout"; static ENGLISH: &str = "en-US"; fn baseurl(lang: &str) -> String { if lang == "en-US" { String::new() } else { format!("/{}", lang) } } #[get("/components/<_file..>", rank = 1)] fn components(_file: PathBuf) -> Template { not_found_locale(ENGLISH.into()) } #[get("/<locale>/components/<_file..>", rank = 11)] fn components_locale(locale: SupportedLocale, _file: PathBuf) -> Template { not_found_locale(locale.0) } #[get("/logos/<file..>", rank = 1)] fn logos(file: PathBuf) -> Option<Cached<NamedFile>> { 
NamedFile::open(Path::new("static/logos").join(file)) .ok() .map(|file| file.cached(vec![CacheDirective::MaxAge(3600)])) } #[get("/static/<file..>", rank = 1)] fn files(file: PathBuf) -> Option<Cached<NamedFile>> { NamedFile::open(Path::new("static/").join(file)) .ok() .map(|file| file.cached(vec![CacheDirective::MaxAge(3600)])) } #[get("/")] fn index() -> Template { render_index(ENGLISH.into()) } #[get("/<locale>", rank = 3)] fn index_locale(locale: SupportedLocale) -> Template { render_index(locale.0) } #[get("/<category>")] fn category(category: Category) -> Template { render_category(category, ENGLISH.into()) } #[get("/<locale>/<category>", rank = 11)] fn category_locale(category: Category, locale: SupportedLocale) -> Template { render_category(category, locale.0) } #[get("/governance")] fn governance() -> Result<Template, Status> { render_governance(ENGLISH.into()) } #[get("/governance/<section>/<team>", rank = 2)] fn team(section: String, team: String) -> Result<Template, Result<Redirect, Status>> { render_team(section, team, ENGLISH.into()) } #[get("/<locale>/governance", rank = 10)] fn governance_locale(locale: SupportedLocale) -> Result<Template, Status> { render_governance(locale.0) } #[get("/<locale>/governance/<section>/<team>", rank = 12)] fn team_locale( section: String, team: String, locale: SupportedLocale, ) -> Result<Template, Result<Redirect, Status>> { render_team(section, team, locale.0) } #[get("/production/users")] fn production() -> Template { render_production(ENGLISH.into()) } #[get("/<locale>/production/users", rank = 10)] fn production_locale(locale: SupportedLocale) -> Template { render_production(locale.0) } #[get("/sponsors")] fn sponsors() -> Template { render_sponsors(ENGLISH.into()) } #[get("/<locale>/sponsors", rank = 10)] fn sponsors_locale(locale: SupportedLocale) -> Template { render_sponsors(locale.0) } #[get("/<category>/<subject>", rank = 4)] fn subject(category: Category, subject: String) -> Result<Template, Status> { 
render_subject(category, subject, ENGLISH.into()) } #[get("/<locale>/<category>/<subject>", rank = 14)] fn subject_locale( category: Category, subject: String, locale: SupportedLocale, ) -> Result<Template, Status> { render_subject(category, subject, locale.0) } fn load_users_data() -> Vec<Vec<User>> { let mut rng = rand::thread_rng(); let mut users = production::get_info().expect("couldn't get production users data"); users.shuffle(&mut rng); users.chunks(3).map(|s| s.to_owned()).collect() } #[get("/<dest>", rank = 19)] fn redirect(dest: redirect::Destination) -> Redirect { Redirect::permanent(dest.uri) } #[get("/pdfs/<dest>")] fn redirect_pdfs(dest: redirect::Destination) -> Redirect { Redirect::permanent("/static/pdfs/".to_owned() + dest.uri) } #[get("/en-US", rank = 1)] fn redirect_bare_en_us() -> Redirect { Redirect::permanent("/") } #[get("/<_locale>", rank = 20)] fn redirect_bare_locale(_locale: redirect::Locale) -> Redirect { Redirect::temporary("/") } #[get("/en-US/<dest>", rank = 1)] fn redirect_en_us(dest: redirect::Destination) -> Redirect { Redirect::permanent(dest.uri) } #[get("/<_locale>/<dest>", rank = 20)] fn redirect_locale(_locale: redirect::Locale, dest: redirect::Destination) -> Redirect { // Temporary until locale support is restored. 
Redirect::temporary(dest.uri) } #[catch(404)] fn not_found(req: &Request) -> Template { let lang = if let Some(next) = req.uri().segments().next() { if let Ok(lang) = SupportedLocale::from_param(RawStr::from_str(next)) { lang.0 } else { ENGLISH.into() } } else { ENGLISH.into() }; not_found_locale(lang) } fn not_found_locale(lang: String) -> Template { let page = "404"; let context = Context::new("404".into(), "error404-page-title", false, (), lang); Template::render(page, &context) } #[catch(500)] fn catch_error() -> Template { not_found_locale(ENGLISH.into()) } fn hash_css(css: &str) -> String { let mut hasher = DefaultHasher::new(); hasher.write(css.as_bytes()); hasher.finish().to_string() } fn compile_sass(filename: &str) -> String { let scss_file = format!("./src/styles/{}.scss", filename); let css = compile_file(&scss_file, Options::default()) .unwrap_or_else(|_| panic!("couldn't compile sass: {}", &scss_file)); let css_sha = format!("{}_{}", filename, hash_css(&css)); let css_file = format!("./static/styles/{}.css", css_sha); fs::write(&css_file, css.into_bytes()) .unwrap_or_else(|_| panic!("couldn't write css file: {}", &css_file)); String::from(&css_file[1..]) } fn concat_vendor_css(files: Vec<&str>) -> String { let mut concatted = String::new(); for filestem in files { let vendor_path = format!("./static/styles/{}.css", filestem); let contents = fs::read_to_string(vendor_path).expect("couldn't read vendor css"); concatted.push_str(&contents); } let css_sha = format!("vendor_{}", hash_css(&concatted)); let css_path = format!("./static/styles/{}.css", &css_sha); fs::write(&css_path, &concatted).expect("couldn't write vendor css"); String::from(&css_path[1..]) } fn concat_app_js(files: Vec<&str>) -> String { let mut concatted = String::new(); for filestem in files { let vendor_path = format!("./static/scripts/{}.js", filestem); let contents = fs::read_to_string(vendor_path).expect("couldn't read app js"); concatted.push_str(&contents); } let js_sha = 
format!("app_{}", hash_css(&concatted)); let js_path = format!("./static/scripts/{}.js", &js_sha); fs::write(&js_path, &concatted).expect("couldn't write app js"); String::from(&js_path[1..]) } fn render_index(lang: String) -> Template { #[derive(Serialize)] struct IndexData { rust_version: String, rust_release_post: String, } let page = "index".to_string(); let data = IndexData { rust_version: rust_version::rust_version().unwrap_or_else(String::new), rust_release_post: rust_version::rust_release_post() .map_or_else(String::new, |v| format!("https://blog.rust-lang.org/{}", v)), }; let context = Context::new(page.clone(), "", true, data, lang); Template::render(page, &context) } fn render_category(category: Category, lang: String) -> Template { let page = category.index(); let title_id = format!("{}-page-title", category.name()); let context = Context::new(category.name().to_string(), &title_id, false, (), lang); Template::render(page, &context) } fn render_production(lang: String) -> Template { let page = "production/users".to_string(); let context = Context::new( page.clone(), "production-users-page-title", false, load_users_data(), lang, ); Template::render(page, &context) } fn render_sponsors(lang: String) -> Template { let page = "sponsors/index".to_string(); let context = Context::new( page.clone(), "sponsors-page-title", false, sponsors::render_data(&lang), lang, ); Template::render(page, &context) } fn render_governance(lang: String) -> Result<Template, Status> { match teams::index_data() { Ok(data) => { let page = "governance/index".to_string(); let context = Context::new(page.clone(), "governance-page-title", false, data, lang); Ok(Template::render(page, &context)) } Err(err) => { eprintln!("error while loading the governance page: {}", err); Err(Status::InternalServerError) } } } fn render_team( section: String, team: String, lang: String, ) -> Result<Template, Result<Redirect, Status>> { match teams::page_data(&section, &team) { Ok(data) => { let page = 
"governance/group".to_string(); let name = format!("governance-team-{}-name", data.team.name); let context = Context::new(page.clone(), &name, false, data, lang); Ok(Template::render(page, &context)) } Err(err) => { if err.is::<teams::TeamNotFound>() { match (section.as_str(), team.as_str()) { // Old teams URLs ("teams", "language-and-compiler") | ("teams", "operations") => { Err(Ok(Redirect::temporary("/governance"))) } _ => Err(Err(Status::NotFound)), } } else { eprintln!("error while loading the team page: {}", err); Err(Err(Status::InternalServerError)) } } } } fn render_subject(category: Category, subject: String, lang: String) -> Result<Template, Status> { // Rocket's Template::render method is not really designed to accept arbitrary templates: if a // template is missing, it just returns a Status::InternalServerError, without a way to // distinguish it from a syntax error in the template itself. // // To work around the problem we check whether the template exists beforehand. let path = Path::new("templates") .join(category.name()) .join(format!("{}.hbs", subject)); if !path.is_file() { return Err(Status::NotFound); } let page = format!("{}/{}", category.name(), subject.as_str()); let title_id = format!("{}-{}-page-title", category.name(), subject); let context = Context::new(subject, &title_id, false, (), lang); Ok(Template::render(page, &context)) } fn main() { let templating = Template::custom(|engine| { engine .handlebars .register_helper("fluent", Box::new(FluentHelper::new(create_loader()))); engine .handlebars .register_helper("team-text", Box::new(TeamHelper::new())); engine .handlebars .register_helper("encode-zulip-stream", Box::new(encode_zulip_stream)); }); rocket::ignite() .attach(templating) .attach(headers::InjectHeaders) .mount( "/", routes![ index, category, governance, team, production, sponsors, subject, files, logos, components, index_locale, category_locale, governance_locale, team_locale, production_locale, sponsors_locale, 
subject_locale, components_locale, redirect, redirect_pdfs, redirect_bare_en_us, redirect_bare_locale, redirect_en_us, redirect_locale ], ) .register(catchers![not_found, catch_error]) .launch(); }
28.06015
98
0.605506
f49c18302fbcedfc29cfb57f861360257657be89
2,367
fn main() { println!("Hello, world!"); let words = vec![String::from("time"), String::from("time"), String::from("time"), String::from("time")]; let a = Solution::minimum_length_encoding(words); println!("{}",a); } pub struct Solution{} pub struct Solution1{} impl Solution { pub fn minimum_length_encoding(words: Vec<String>) -> i32 { let mut words_index: Vec<Vec<u8>> = Vec::new(); let mut all = 0; let mut same = 0; for i in words { let mut index = vec![]; let temp = i.chars(); for j in temp { index.push(j as u8); } index.reverse(); all += index.len()+1; words_index.push(index); } words_index.sort_unstable_by(|a,b| a.len().partial_cmp(&b.len()).unwrap()); 'outer: for i in 0..words_index.len()-1 { 'middle: for k in 0..(words_index.len()-i-1) { 'inner: for j in 0..words_index[i].len() { if words_index[i][j] != words_index[i+1+k][j] { continue 'middle; } } same += words_index[i].len()+1; continue 'outer; } } (all-same) as i32 } } impl Solution1 { pub fn minimum_length_encoding(words: Vec<String>) -> i32 { let mut words_index: Vec<Vec<char>> = Vec::new(); let mut all = 0; let mut same = 0; for i in words { let mut index = vec![]; let temp = i.chars(); for j in temp { index.push(j); } index.reverse(); all += index.len()+1; words_index.push(index); } words_index.sort_unstable_by(|a,b| a.len().partial_cmp(&b.len()).unwrap()); 'outer: for i in 0..words_index.len()-1 { 'middle: for k in 0..(words_index.len()-i-1) { 'inner: for j in 0..words_index[i].len() { if words_index[i][j] != words_index[i+1+k][j] { continue 'middle; } } same += words_index[i].len()+1; continue 'outer; } } (all-same) as i32 } }
30.74026
109
0.445289
f4caf6af7b48c07cb12a6bfde4edcd58fdd77369
2,344
use std::env; use std::error::Error; use std::fs; pub struct Config { pub query: String, pub filename: String, pub case_sensitive: bool, } impl Config { pub fn new(mut args: env::Args) -> Result<Config, &'static str> { // parse and return configuration arguments // discard first arg since it returns name of program which is not useful args.next(); let query = match args.next() { Some(arg) => arg, None => return Err("Didn't get a query string"), }; let filename = match args.next() { Some(arg) => arg, None => return Err("Didn't get a file name"), }; let case_sensitive = env::var("CASE_INSENSITIVE").is_err(); Ok(Config { query, filename, case_sensitive, }) } } pub fn run(config: Config) -> Result<(), Box<dyn Error>> { // Read file. let contents = fs::read_to_string(config.filename)?; let results = if config.case_sensitive { search(&config.query, &contents) } else { search_case_insensitive(&config.query, &contents) }; for line in results { println!("{}", line); } Ok(()) } pub fn search<'a>(query: &str, contents: &'a str) -> Vec<&'a str> { // handle case sensitive character search. contents .lines() .filter(|line| line.contains(query)) .collect() } pub fn search_case_insensitive<'a>(query: &str, contents: &'a str) -> Vec<&'a str> { // perform case insensitive search. let query = query.to_lowercase(); contents .lines() .filter(|line| line.to_lowercase().contains(&query)) .collect() } #[cfg(test)] mod tests { use super::*; #[test] fn case_sensitive() { let query = "duct"; let contents = "\ Rust: safe, fast, productive. Pick three. Duct tape."; assert_eq!(vec!["safe, fast, productive."], search(query, contents)); assert_ne!(vec!["Pick three."], search(query, contents)) } #[test] fn case_insensitive() { let query = "rUsT"; let contents = "\ Rust: safe, fast, productive. Pick three. Trust me."; assert_eq!( vec!["Rust:", "Trust me."], search_case_insensitive(query, contents) ) } }
22.757282
84
0.556741
cc63cb5bd233fb5b373b5b894e44e2efbe2ef21d
4,368
use std::fs; use std::path::PathBuf; use anyhow::{Context, Result}; use dialects::DialectName; use clap::{App, Arg}; use move_executor::compile_and_execute_script; use shared::errors::ExecCompilerError; use shared::results::ExecutionError; use std::str::FromStr; use utils::{io, leaked_fpath, FilesSourceText, MoveFilePath}; fn get_files_for_error_reporting( script: (MoveFilePath, String), deps: Vec<(MoveFilePath, String)>, ) -> FilesSourceText { let mut mapping = FilesSourceText::with_capacity(deps.len() + 1); for (fpath, text) in vec![script].into_iter().chain(deps.into_iter()) { mapping.insert(fpath, text); } mapping } fn main() -> Result<()> { let cli_arguments = App::new("Move Executor") .version("0.1.0") .arg( Arg::with_name("SCRIPT") .required(true) .help("Path to script to execute"), ) .arg( Arg::from_usage("-d --dialect=[DIALECT]") .possible_values(&["libra", "dfinance"]) .default_value("libra") .help("Move language dialect"), ) .arg( Arg::from_usage("-s --sender [SENDER_ADDRESS]") .required(true) .help("Address of the current user"), ) .arg( Arg::from_usage("-m --modules [MODULE_PATHS]") .multiple(true) .number_of_values(1) .help("Path to module file / modules folder to use as dependency. \nCould be used more than once: '-m ./stdlib -m ./modules'"), ) .arg(Arg::from_usage("--genesis [GENESIS_CONTENTS]").help("JSON-based genesis contents")) .arg( Arg::from_usage("--args [SCRIPT_ARGS]") .help(r#"Number of script main() function arguments in quotes, e.g. 
"10 20 30""#), ) .get_matches(); let script_fpath = leaked_fpath(cli_arguments.value_of("SCRIPT").unwrap()); let script_source_text = fs::read_to_string(script_fpath) .with_context(|| format!("Cannot open {:?}", script_fpath))?; let modules_fpaths = cli_arguments .values_of("modules") .unwrap_or_default() .map(|path| path.into()) .collect::<Vec<PathBuf>>(); let deps = io::load_move_module_files(modules_fpaths)?; let genesis_json_contents = match cli_arguments.value_of("genesis") { Some(contents) => { serde_json::from_str(contents).context("JSON passed to --genesis is invalid")? } None => serde_json::json!([]), }; let dialect = cli_arguments.value_of("dialect").unwrap(); let sender_address = cli_arguments.value_of("sender").unwrap(); let args: Vec<String> = cli_arguments .value_of("args") .unwrap_or_default() .split_ascii_whitespace() .map(String::from) .collect(); let res = compile_and_execute_script( (script_fpath, script_source_text.clone()), &deps, dialect, sender_address, genesis_json_contents, args, ); match res { Ok(changes) => { let out = serde_json::to_string_pretty(&changes).expect("Should always be serializable"); print!("{}", out); Ok(()) } Err(error) => { let error = match error.downcast::<ExecCompilerError>() { Ok(compiler_error) => { let files_mapping = get_files_for_error_reporting((script_fpath, script_source_text), deps); let dialect = DialectName::from_str(&dialect).unwrap().get_dialect(); let transformed_errors = compiler_error.transform_with_source_map(); dialect.print_compiler_errors_and_exit(files_mapping, transformed_errors); } Err(error) => error, }; let error = match error.downcast::<ExecutionError>() { Ok(exec_error) => { let out = serde_json::to_string_pretty(&exec_error) .expect("Should always be serializable"); print!("{}", out); std::process::exit(1) } Err(error) => error, }; Err(error) } } }
35.225806
143
0.558379
f815958dab24b21d1a7a1f2be3038a57b45f94f2
32,040
// (c) 2016 Productize SPRL <[email protected]> // extension: .lib // format: old-style use std::fmt; use std::result; use std::str::FromStr; use std::path::PathBuf; // get from parent use util::read_file; use parse_split_quote_aware; use schematic; use str_error; use checkfix::{self, CheckFix, CheckFixData, Config}; use KicadError; /// a Kicad symbolic file #[derive(Debug, Default)] pub struct SymbolLib { /// the symbols pub symbols: Vec<Symbol>, } // DEF name reference unused text_offset draw_pinnumber draw_pinname unit_count units_locked option_flag /// a symbol #[derive(Debug, Clone)] pub struct Symbol { /// name pub name: String, /// reference pub reference: String, /// text offset pub text_offset: f64, /// draw pinnumber pub draw_pinnumber: bool, /// draw pinname pub draw_pinname: bool, /// unit count pub unit_count: i64, /// is the unit locked pub unit_locked: bool, /// is it a power symbol pub is_power: bool, /// fields pub fields: Vec<Field>, /// aliases pub aliases: Vec<String>, /// draw pub draw: Vec<Draw>, } // F n “text” posx posy dimension orientation visibility hjustify vjustify/italic/bold “name” // F0 "#PWR" 0 0 30 H I C CNN /// a field #[derive(Debug, Clone)] pub struct Field { /// field number pub i: i64, /// value pub value: String, /// X coordinate pub x: f64, /// Y coordinate pub y: f64, /// dimension pub dimension: i64, /// orientation pub orientation: schematic::Orientation, /// if the field is visible pub visible: bool, /// horizontal justification pub hjustify: schematic::Justify, /// vertical justification pub vjustify: schematic::Justify, /// italic pub italic: bool, /// bold pub bold: bool, /// name of the field pub name: String, } /// a drawing #[derive(Debug, Clone)] pub enum Draw { /// a pin Pin(Pin), /// a rectangle Rectangle(Rectangle), /// a non-parsed drawing part Other(String), } // U (up) D (down) R (right) L (left). 
/// pin orientation #[derive(Debug, Clone, PartialEq)] pub enum PinOrientation { /// up Up, // U /// down Down, // D /// left Left, // L /// right Right, // R } /// pin type #[derive(Debug, Clone, PartialEq)] pub enum PinType { /// input Input, // I /// output Output, // O /// bidi Bidi, // B /// tristate Tristate, // T /// passive Passive, // P /// unspecified Unspecified, // U /// power output PowerInput, // W /// power input PowerOutput, // w /// open collector OpenCollector, // C /// open emitter OpenEmitter, // E /// not connected NotConnected, // N } /// pin shape #[derive(Debug, Clone, PartialEq)] pub enum PinShape { /// line Line, // None (default) /// inverted Inverted, // I /// clock Clock, // C /// inverted clock InvertedClock, // CI /// input low InputLow, // L /// clock low ClockLow, // CL /// output low OutputLow, // V /// falling edge clock FallingEdgeClock, // F /// non logic NonLogic, // X } // X name number posx posy length orientation Snum Snom unit convert Etype [shape]. // X P1 1 -200 200 150 R 50 50 1 1 P // X +3.3V 1 0 0 0 U 30 30 0 0 W N /// draw a pin #[derive(Debug, Clone, Default)] pub struct Pin { /// name of the pin pub name: String, /// number of the pin, which doesn't have to be an actual number pub number: String, /// x position of the pin pub x: i64, /// y position of the pin pub y: i64, /// length of the pin pub len: i64, /// orientation of the pin pub orientation: PinOrientation, /// pin number text size pub num_size: i64, /// pin name text size pub name_size: i64, /// unit ?? pub unit: i64, /// convert ?? pub convert: i64, /// pin type pub pin_type: PinType, /// pin visible pub pin_visible: bool, /// pin shape pub pin_shape: PinShape, } // S -800 1200 800 -1200 0 1 10 f // S startx starty endx endy unit convert thickness cc // cc = N F or F ( F = filled Rectangle,; f = . 
filled Rectangle, N = transparent background) /// draw a rectangle #[derive(Debug, Clone, Default)] pub struct Rectangle { /// x-coordinate of first corner of rectangle pub x1: i64, /// y-coordinate of first corner of rectangle pub y1: i64, /// x-coordinate of second corner of rectangle pub x2: i64, /// y-coordinate of second corner of rectangle pub y2: i64, /// unit ?? pub unit: i64, /// convert ??? pub convert: i64, /// thickness of the line pub thickness: i64, /// `Fill` of the rectangle pub fill: Fill, } #[derive(Debug, Clone, PartialEq)] /// fill for a rectangle pub enum Fill { /// filled with foreground color FilledForeground, /// filled with background color FilledBackground, /// not filled Transparent, } impl Default for Fill { fn default() -> Fill { Fill::Transparent } } impl Fill { fn make(s: &str) -> Result<Fill, KicadError> { match s { "F" => Ok(Fill::FilledForeground), "f" => Ok(Fill::FilledBackground), "N" => Ok(Fill::Transparent), _ => Err(format!("unknown fill type {}", s).into()), } } } impl SymbolLib { /// find a symbol in a symbol lib pub fn find<F>(&self, filter: F) -> Option<&Symbol> where F: Fn(&Symbol) -> bool, { for symbol in &self.symbols { if filter(symbol) { return Some(symbol); } } None } } impl fmt::Display for SymbolLib { fn fmt(&self, f: &mut fmt::Formatter) -> result::Result<(), fmt::Error> { writeln!(f, "EESchema-LIBRARY Version 2.3")?; writeln!(f, "#encoding utf-8")?; writeln!(f, "#")?; for i in &self.symbols { write!(f, "{}", i)? 
} writeln!(f, "#End Library") } } impl Symbol { /// create a new symbol pub fn new(name: String, reference: String) -> Symbol { Symbol { name: name, reference: reference, text_offset: 0.0, draw_pinnumber: false, draw_pinname: false, unit_count: 1, unit_locked: false, is_power: false, fields: vec![], aliases: vec![], draw: vec![], } } /// set the name of the symbol pub fn set_name(&mut self, name: &str) { if char_at(&self.name, 0) == '~' { self.name = format!("~{}", name) } else { self.name = name.to_string() } let field = &mut self.fields[1]; field.value = name.to_string() } /// get the list of pins on the symbol pub fn pins(&self) -> Vec<&Pin> { let mut v: Vec<&Pin> = vec![]; for d in &self.draw { if let Draw::Pin(ref pin) = *d { v.push(pin) } } v } /// is a symbol a power symbol? pub fn is_power(&self) -> bool { self.reference.as_str() == "#PWR" && self.pins().len() == 1 } /// is a symbol a graphics item? pub fn is_graphics(&self) -> bool { self.reference.starts_with('#') && self.pins().is_empty() } /// is a symbol a basic symbol? pub fn is_basic(&self) -> bool { match self.name.as_str() { "L" | "R" | "C" | "D" => true, _ => false, } } } impl fmt::Display for Symbol { fn fmt(&self, f: &mut fmt::Formatter) -> result::Result<(), fmt::Error> { writeln!(f, "# {}", &&self.fields[1].value)?; writeln!(f, "#")?; writeln!( f, "DEF {} {} 0 {} {} {} {} {} {}", self.name, self.reference, self.text_offset, if self.draw_pinnumber { "Y" } else { "N" }, if self.draw_pinname { "Y" } else { "N" }, self.unit_count, if self.unit_locked { "L" } else { "F" }, if self.is_power { "P" } else { "N" }, )?; for field in &self.fields { writeln!(f, "{}", field)? } if !self.aliases.is_empty() { write!(f, "ALIAS")?; for alias in &self.aliases { write!(f, " ")?; write!(f, "{}", alias)?; } writeln!(f, "")?; } writeln!(f, "DRAW")?; for draw in &self.draw { writeln!(f, "{}", draw)? 
} writeln!(f, "ENDDRAW")?; writeln!(f, "ENDDEF")?; writeln!(f, "#") } } impl Default for Field { fn default() -> Field { Field { i: 0, value: String::from(""), x: 0.0, y: 0.0, dimension: 0, orientation: schematic::Orientation::Horizontal, visible: false, hjustify: schematic::Justify::Center, vjustify: schematic::Justify::Center, italic: false, bold: false, name: String::from(""), } } } impl fmt::Display for Field { fn fmt(&self, f: &mut fmt::Formatter) -> result::Result<(), fmt::Error> { write!( f, "F{} \"{}\" {} {} {} {} {} {} {}{}{}", self.i, self.value, self.x, self.y, self.dimension, self.orientation, if self.visible { "V" } else { "I" }, self.hjustify, self.vjustify, if self.italic { "I" } else { "N" }, if self.bold { "I" } else { "N" }, )?; if self.i > 3 { write!(f, " \"{}\"", self.name)? }; Ok(()) } } impl fmt::Display for Draw { fn fmt(&self, f: &mut fmt::Formatter) -> result::Result<(), fmt::Error> { match *self { Draw::Other(ref s) => write!(f, "{}", s), Draw::Pin(ref p) => write!(f, "{}", p), Draw::Rectangle(ref p) => write!(f, "{}", p), } } } impl fmt::Display for Rectangle { fn fmt(&self, f: &mut fmt::Formatter) -> result::Result<(), fmt::Error> { write!(f, "S {} {} ", self.x1, self.y1)?; write!(f, "{} {} ", self.x2, self.y2)?; write!(f, "{} {} ", self.unit, self.convert)?; write!(f, "{} {}", self.thickness, self.fill) } } impl fmt::Display for Fill { fn fmt(&self, f: &mut fmt::Formatter) -> result::Result<(), fmt::Error> { match *self { Fill::FilledForeground => write!(f, "F"), Fill::FilledBackground => write!(f, "f"), Fill::Transparent => write!(f, "N"), } } } impl fmt::Display for Pin { fn fmt(&self, f: &mut fmt::Formatter) -> result::Result<(), fmt::Error> { write!(f, "X {} {} ", self.name, self.number)?; write!(f, "{} {} {} ", self.x, self.y, self.len)?; write!(f, "{} ", self.orientation)?; write!(f, "{} {} ", self.num_size, self.name_size)?; write!(f, "{} {} ", self.unit, self.convert)?; write!(f, "{}", self.pin_type)?; if self.pin_visible { if 
self.pin_shape != PinShape::Line { write!(f, " {}", self.pin_shape) } else { Ok(()) } } else { write!(f, " N{}", self.pin_shape) } } } impl fmt::Display for PinOrientation { fn fmt(&self, f: &mut fmt::Formatter) -> result::Result<(), fmt::Error> { match *self { PinOrientation::Up => write!(f, "{}", 'U'), PinOrientation::Down => write!(f, "{}", 'D'), PinOrientation::Left => write!(f, "{}", 'L'), PinOrientation::Right => write!(f, "{}", 'R'), } } } impl Default for PinOrientation { fn default() -> PinOrientation { PinOrientation::Up } } impl PinOrientation { fn make(s: &str) -> Result<PinOrientation, KicadError> { match s { "U" => Ok(PinOrientation::Up), "D" => Ok(PinOrientation::Down), "L" => Ok(PinOrientation::Left), "R" => Ok(PinOrientation::Right), _ => Err(format!("unknown pin orientation {}", s).into()), } } } impl fmt::Display for PinType { fn fmt(&self, f: &mut fmt::Formatter) -> result::Result<(), fmt::Error> { match *self { PinType::Input => write!(f, "{}", 'I'), PinType::Output => write!(f, "{}", 'O'), PinType::Bidi => write!(f, "{}", 'B'), PinType::Tristate => write!(f, "{}", 'T'), PinType::Passive => write!(f, "{}", 'P'), PinType::Unspecified => write!(f, "{}", 'U'), PinType::PowerInput => write!(f, "{}", 'W'), PinType::PowerOutput => write!(f, "{}", 'w'), PinType::OpenCollector => write!(f, "{}", 'C'), PinType::OpenEmitter => write!(f, "{}", 'E'), PinType::NotConnected => write!(f, "{}", 'N'), } } } impl Default for PinType { fn default() -> PinType { PinType::Input } } impl PinType { fn make(s: &str) -> Result<PinType, KicadError> { match s { "I" => Ok(PinType::Input), "O" => Ok(PinType::Output), "B" => Ok(PinType::Bidi), "T" => Ok(PinType::Tristate), "P" => Ok(PinType::Passive), "U" => Ok(PinType::Unspecified), "W" => Ok(PinType::PowerInput), "w" => Ok(PinType::PowerOutput), "C" => Ok(PinType::OpenCollector), "E" => Ok(PinType::OpenEmitter), "N" => Ok(PinType::NotConnected), _ => Err(format!("unknown pin type {}", s).into()), } } } impl fmt::Display 
for PinShape { fn fmt(&self, f: &mut fmt::Formatter) -> result::Result<(), fmt::Error> { match *self { PinShape::Line => Ok(()), PinShape::Inverted => write!(f, "{}", 'I'), PinShape::Clock => write!(f, "{}", 'C'), PinShape::InvertedClock => write!(f, "{}", "CI"), PinShape::InputLow => write!(f, "{}", 'L'), PinShape::ClockLow => write!(f, "{}", "CL"), PinShape::OutputLow => write!(f, "{}", "V"), PinShape::FallingEdgeClock => write!(f, "{}", "F"), PinShape::NonLogic => write!(f, "{}", "X"), } } } impl Default for PinShape { fn default() -> PinShape { PinShape::Line } } impl PinShape { fn make(s: &str) -> Result<PinShape, KicadError> { if s.is_empty() { Ok(PinShape::Line) } else { let s = if s.starts_with('N') { &s[1..] } else { &s[..] }; match s { "I" => Ok(PinShape::Inverted), "C" => Ok(PinShape::Clock), "CI" => Ok(PinShape::InvertedClock), "L" => Ok(PinShape::InputLow), "CL" => Ok(PinShape::ClockLow), "V" => Ok(PinShape::OutputLow), "F" => Ok(PinShape::FallingEdgeClock), "X" => Ok(PinShape::NonLogic), "" => Ok(PinShape::Line), _ => Err(format!("unknown pinshape {}", s).into()), } } } fn visible_from_str(s: &str) -> bool { if s.is_empty() { false } else { !s.starts_with('N') } } } macro_rules! 
assume_line { ($s:expr, $exp:expr) => ( if $s.eof() { return str_error("end of file reached".to_string()) } if $s.here() != $exp { return str_error(format!("expected '{}', got '{}'", $exp, $s.here())) } $s.i += 1; ) } #[derive(Debug)] struct ParseState { i: usize, v: Vec<String>, } impl ParseState { fn new(v2: Vec<&str>) -> ParseState { ParseState { i: 0, v: v2.iter().map(|x| String::from(*x)).collect(), } } fn here(&self) -> String { (self.v[self.i]).clone() } fn next(&mut self) { self.i += 1; } fn eof(&self) -> bool { self.i >= self.v.len() } } fn assume_string(e: &'static str, s: &str) -> Result<(), KicadError> { if *e != *s { return str_error(format!("expecting: {}, actually: {}", e, s)); } Ok(()) } fn i64_from_string(p: &ParseState, s: &str) -> Result<i64, KicadError> { match i64::from_str(s) { Ok(i) => Ok(i), _ => str_error(format!("int parse error in {}; line: {}", s, p.here())), } } fn f64_from_string(p: &ParseState, s: &str) -> Result<f64, KicadError> { match f64::from_str(s) { Ok(i) => Ok(i), _ => str_error(format!("float parse error in {}; line: {}", s, p.here())), } } fn bool_from_string(s: &str, t: &'static str, f: &'static str) -> Result<bool, KicadError> { if &s[..] == t { return Ok(true); } if &s[..] 
== f { return Ok(false); } str_error(format!("unknown boolean {}, expected {} or {}", s, t, f)) } fn char_at(s: &str, p: usize) -> char { let v: Vec<char> = s.chars().collect(); v[..][p] } fn parse_symbol(p: &mut ParseState) -> Result<Symbol, KicadError> { p.next(); // skip line like # name assume_line!(p, "#"); let s = p.here(); let v = &parse_split_quote_aware(&s)?; if v.len() != 10 { return str_error(format!("unexpected elements in {}", s)); } assume_string("DEF", &v[0])?; let mut s = Symbol::new(v[1].clone(), v[2].clone()); s.text_offset = f64_from_string(p, &v[4])?; s.draw_pinnumber = bool_from_string(&v[5], "Y", "N")?; s.draw_pinname = bool_from_string(&v[6], "Y", "N")?; s.unit_count = i64_from_string(p, &v[7])?; s.unit_locked = bool_from_string(&v[8], "L", "F")?; s.is_power = bool_from_string(&v[9], "P", "N")?; p.next(); loop { let s2 = p.here(); if char_at(&s2, 0) == 'F' { let f = parse_field(p, &s2)?; s.fields.push(f); p.next(); } else { break; } } if &p.here() == "$FPLIST" { p.next(); // skip FPLIST for now while !p.eof() { if &p.here() == "$ENDFPLIST" { p.next(); break; } p.next() } } if p.here().starts_with("ALIAS") { let v = parse_split_quote_aware(&p.here())?; for alias in v.into_iter().skip(1) { s.aliases.push(alias) } p.next(); } // TODO draw assume_line!(p, "DRAW"); while !p.eof() { let s2 = p.here(); if &s2 == "ENDDRAW" { p.next(); break; } if s2.starts_with("X ") { let pin = parse_pin(p, &s2)?; s.draw.push(Draw::Pin(pin)); } else if s2.starts_with("S ") { let rect = parse_rect(p, &s2)?; s.draw.push(Draw::Rectangle(rect)); } else { s.draw.push(Draw::Other(s2.clone())); } p.next() } assume_line!(p, "ENDDEF"); assume_line!(p, "#"); Ok(s) } #[cfg_attr(feature = "cargo-clippy", allow(needless_pass_by_value))] fn bool_from<T: PartialEq + fmt::Display>(i: T, t: T, f: T) -> Result<bool, KicadError> { if i == t { return Ok(true); } if i == f { return Ok(false); } str_error(format!("unknown boolean {}, expected {} or {}", i, t, f)) } // F0 "L" 0 50 40 H V 
C CNN fn parse_field(p: &mut ParseState, line: &str) -> Result<Field, KicadError> { let mut f = Field::default(); let v = &parse_split_quote_aware(line)?; if v.len() != 9 && v.len() != 10 { return str_error(format!("unexpected elements in {}", line)); } f.i = i64_from_string(p, &String::from(&v[0][1..]))?; let name = if v.len() == 10 { v[9].clone() } else { match f.i { 0 => String::from("Reference"), 1 => String::from("Value"), 2 => String::from("Footprint"), 3 => String::from("UserDocLink"), _ => return str_error("expecting name for componentfield > 3".to_string()), } }; f.value = v[1].clone(); f.x = f64_from_string(p, &v[2])?; f.y = f64_from_string(p, &v[3])?; f.dimension = i64_from_string(p, &v[4])?; f.orientation = schematic::Orientation::new(char_at(&v[5], 0))?; f.visible = bool_from_string(&v[6], "V", "I")?; f.hjustify = schematic::Justify::new(char_at(&v[7], 0))?; f.vjustify = schematic::Justify::new(char_at(&v[8], 0))?; f.italic = bool_from(char_at(&v[8], 1), 'I', 'N')?; f.bold = bool_from(char_at(&v[8], 2), 'B', 'N')?; f.name = name; Ok(f) } // X +3.3V 1 0 0 0 U 30 30 0 0 W N fn parse_pin(p: &mut ParseState, line: &str) -> Result<Pin, KicadError> { let mut pin = Pin::default(); let v = &parse_split_quote_aware(line)?; if v.len() != 12 && v.len() != 13 { return str_error(format!("unexpected elements in {}", line)); } pin.name = v[1].clone(); pin.number = v[2].clone(); pin.x = i64_from_string(p, &v[3])?; pin.y = i64_from_string(p, &v[4])?; pin.len = i64_from_string(p, &v[5])?; pin.orientation = PinOrientation::make(&v[6])?; pin.num_size = i64_from_string(p, &v[7])?; pin.name_size = i64_from_string(p, &v[8])?; pin.unit = i64_from_string(p, &v[9])?; pin.convert = i64_from_string(p, &v[10])?; pin.pin_type = PinType::make(&v[11])?; pin.pin_visible = true; if v.len() == 13 { pin.pin_visible = PinShape::visible_from_str(&v[12]); pin.pin_shape = PinShape::make(&v[12])?; } Ok(pin) } // S -800 1200 800 -1200 0 1 10 f fn parse_rect(p: &mut ParseState, line: &str) -> 
Result<Rectangle, KicadError> { let mut rect = Rectangle::default(); let v = &parse_split_quote_aware(line)?; if v.len() != 9 { return str_error(format!("unexpected elements in {}", line)); } rect.x1 = i64_from_string(p, &v[1])?; rect.y1 = i64_from_string(p, &v[2])?; rect.x2 = i64_from_string(p, &v[3])?; rect.y2 = i64_from_string(p, &v[4])?; rect.unit = i64_from_string(p, &v[5])?; rect.convert = i64_from_string(p, &v[6])?; rect.thickness = i64_from_string(p, &v[7])?; rect.fill = Fill::make(&v[8])?; Ok(rect) } fn parse(s: &str) -> Result<SymbolLib, KicadError> { let mut lib = SymbolLib::default(); let v: Vec<&str> = s.lines().collect(); let p = &mut ParseState::new(v); assume_line!(p, "EESchema-LIBRARY Version 2.3"); assume_line!(p, "#encoding utf-8"); assume_line!(p, "#"); while !p.eof() { // println!("here: {}", &p.here()); if &p.here() == "#End Library" { break; } let s = parse_symbol(p)?; // println!("new symbol: {}", &s); lib.symbols.push(s) } Ok(lib) } /// parse a &str to a symbol lib pub fn parse_str(s: &str) -> Result<SymbolLib, KicadError> { parse(s) } /// parse a file to a symbol lib pub fn parse_file(filename: &PathBuf) -> Result<SymbolLib, KicadError> { let name = filename.to_str().unwrap(); let s = read_file(name)?; parse(&s[..]) } struct SymbolField<'a> { symbol: &'a Symbol, field: &'a Field, } impl<'a> CheckFix for SymbolField<'a> { fn check(&self, _: &Config) -> Vec<CheckFixData> { let symbol = self.symbol; let field = self.field; let mut v = vec![]; // 4.8 All text fields use a common size of 50mils (1.27mm) if field.dimension != 50 { v.push(CheckFixData::new(4, 8, field, "field text is not 50mil")); } if field.i == 0 { // 4.9 The Reference field contains the appropriate Reference Designator if symbol.is_graphics() { if field.visible { v.push(CheckFixData::new( 4, 9, symbol.name.clone(), "reference field should be invisible for graphics", )); } } else { if !field.visible && !symbol.is_power() { v.push(CheckFixData::new( 4, 9, symbol.name.clone(), 
"reference field should be visible for normal symbols", )); } } } else if field.i == 1 { // 4.9 The Value field contains the name of the symbol and is visible. For power and graphical symbols, the value field must be invisible if symbol.is_graphics() { if field.visible { v.push(CheckFixData::new( 4, 9, symbol.name.clone(), "value field should be invisible for graphics", )); } } else if symbol.is_power() { if field.visible { v.push(CheckFixData::new( 4, 9, symbol.name.clone(), "value field should be invisible for power", )); } } else { if !field.visible { v.push(CheckFixData::new( 4, 9, symbol.name.clone(), "value field should be visible for normal symbols", )); } } } else if field.i == 2 { // 4.9 The Footprint field is filled according to rule 4.12 (below) and is invisible if field.visible { v.push(CheckFixData::new( 4, 9, symbol.name.clone(), "Footprint field should be invisible", )); } } else if field.i == 3 { // 4.9 The Datasheet field is left blank and is invisible if field.visible { v.push(CheckFixData::new( 4, 9, symbol.name.clone(), "Datasheet field should be invisible", )); } } v } } impl CheckFix for Pin { fn check(&self, _: &Config) -> Vec<CheckFixData> { let mut v = vec![]; let name = format!("{}:{}", self.name, self.number); // 4.1 Using a 100mil grid, pin origin must lie on grid nodes (IEC-60617) if (self.x % 10) != 0 { v.push(CheckFixData::new( 4, 1, name.clone(), "pin x not on 100mil grid", )); } if (self.y % 10) != 0 { v.push(CheckFixData::new( 4, 1, name.clone(), "pin y not on 100mil grid", )); } // 4.1 Pin length can be incremented in steps of 50mils (1.27mm) if required e.g. 
for long pin numbers if (self.len % 5) != 0 { v.push(CheckFixData::new( 4, 1, name.clone(), "pin length not on 50mil grid", )); } // 4.1 Pins should have a length of at least 100mils (2.54mm) if self.len < 100 { v.push(CheckFixData::info( 4, 1, name.clone(), "pin length < 100mil", )); } // 4.1 Pin length should not be more than 300mils (7.62mm) if self.len > 300 { v.push(CheckFixData::info( 4, 1, name.clone(), "pin length > 300mil", )); } // 4.7 NC pins should be of type NC if self.name.to_lowercase().contains("nc") { if self.pin_type != PinType::NotConnected { v.push(CheckFixData::new( 4, 7, name.clone(), "Pin should be of type Not Connected", )) } } // 4.7 NC pins should be invisible, others should be visible if self.pin_type == PinType::NotConnected { if self.pin_visible { v.push(CheckFixData::new( 4, 7, name.clone(), "Pin should be invisible", )) } } else { if !self.pin_visible { v.push(CheckFixData::new( 4, 7, name.clone(), "Pin should be visible", )) } } // 4.8 All text fields use a common size of 50mils (1.27mm) if self.num_size != 50 { v.push(CheckFixData::new( 4, 8, name.clone(), "Pin Number should be 50mil", )) } // 4.8 All text fields use a common size of 50mils (1.27mm) if self.name_size != 50 { v.push(CheckFixData::new( 4, 8, name.clone(), "Pin Name should be 50mil", )) } v } } impl CheckFix for Rectangle { fn check(&self, _: &Config) -> Vec<CheckFixData> { let mut v = vec![]; // 4.2 Fill style of symbol body is set to Fill background if self.fill != Fill::FilledBackground { v.push(CheckFixData::new(4, 2, self, "Rectangle is not filled")) } // 4.2 Symbol body has a line width of 10mils (0.254mm) if self.thickness != 10 { v.push(CheckFixData::new( 4, 2, self, "Rectangle is not using a 10mil line", )) } // TODO 4.2 Origin is placed in the middle of symbol // TODO 4.2 IEC-style symbols are used whenever possibl v } } impl CheckFix for Draw { fn check(&self, config: &Config) -> Vec<CheckFixData> { let mut v = vec![]; match *self { Draw::Pin(ref pin) => { 
let p = pin.check(config); for i in p { v.push(i) } } Draw::Rectangle(ref rect) => { let p = rect.check(config); for i in p { v.push(i) } } Draw::Other(_) => (), } v } } impl CheckFix for Symbol { fn check(&self, config: &Config) -> Vec<CheckFixData> { let mut v = vec![]; // 1.7 valid name let name = if self.name.starts_with('~') { self.name.chars().skip(1).collect::<String>() } else { self.name.clone() }; let allowed_1_7 = checkfix::allowed_1_7_items(&name); if !allowed_1_7.is_empty() { v.push(CheckFixData::More(allowed_1_7).flatter()) } for field in &self.fields { let f = SymbolField { symbol: self, field: field, }; let f = f.check(config); if !f.is_empty() { v.push(CheckFixData::More(f).flatter()) } } if !(self.is_power() || self.is_graphics()) { for draw in &self.draw { let f = draw.check(config); if !f.is_empty() { v.push(CheckFixData::More(f).flatter()) } } } v } }
28.658318
149
0.475562
119bbd25f525c92ba309157b8a397cc11f0ab908
3,645
//! Interatomic potential functions. use crate::internal::Float; use crate::potentials::Potential; /// [Buckingham](https://lammps.sandia.gov/doc/pair_buck.html#description) potential. #[derive(Clone, Copy, Debug)] pub struct Buckingham { /// Energy units. pub a: Float, /// Distance units. pub rho: Float, /// Energy units. pub c: Float, } impl Buckingham { /// Returns a new [`Buckingham`] potential. pub fn new(a: Float, rho: Float, c: Float) -> Buckingham { Buckingham { a, rho, c } } } impl Potential for Buckingham {} /// [Damped Shifted Force](https://lammps.sandia.gov/doc/pair_coul.html#description) potential. #[derive(Clone, Copy, Debug)] pub struct DampedShiftedForce { /// Damping parameter. pub alpha: Float, /// Cutoff radius pub cutoff: Float, } impl DampedShiftedForce { /// Returns a new [`DampedShiftedForce`] potential. pub fn new(alpha: Float, cutoff: Float) -> DampedShiftedForce { DampedShiftedForce {alpha, cutoff} } } impl Potential for DampedShiftedForce {} /// [Harmonic](https://lammps.sandia.gov/doc/bond_harmonic.html#description) oscillator potential. #[derive(Clone, Copy, Debug)] pub struct Harmonic { /// Spring constant. pub k: Float, /// Equilibrium displacement distance. pub x0: Float, } impl Harmonic { /// Returns a new [`Harmonic`] potential. pub fn new(k: Float, x0: Float) -> Harmonic { Harmonic { k, x0 } } } impl Potential for Harmonic {} /// [Lennard-Jones](https://lammps.sandia.gov/doc/pair_lj.html#description) 12/6 potential. #[derive(Clone, Copy, Debug)] pub struct LennardJones { /// Depth of the potential well. pub epsilon: Float, /// Distance at which the pair potential energy is zero. pub sigma: Float, } impl LennardJones { /// Returns a new [`Lennard-Jones`] potential. pub fn new(epsilon: Float, sigma: Float) -> LennardJones { LennardJones { epsilon, sigma } } } impl Potential for LennardJones {} /// [Mie](https://lammps.sandia.gov/doc/pair_mie.html#description) potential. 
#[derive(Clone, Copy, Debug)] pub struct Mie { /// Depth of the potential well. pub epsilon: Float, /// Distance at which the pair potential energy is zero. pub sigma: Float, /// Exponent on the attractive term. pub gamma_a: Float, /// Exponent on the repulsize term. pub gamma_r: Float, } impl Mie { /// Returns a new [`Mie`] potential. pub fn new(epsilon: Float, sigma: Float, gamma_a: Float, gamma_r: Float) -> Mie { Mie { epsilon, sigma, gamma_a, gamma_r, } } } impl Potential for Mie {} /// [Morse](https://lammps.sandia.gov/doc/pair_morse.html#description) potential. #[derive(Clone, Copy, Debug)] pub struct Morse { /// Width of the potential well. pub a: Float, /// Depth of the potential well. pub d_e: Float, /// Equilibrium bond distance. pub r_e: Float, } impl Morse { /// Returns a new [`Morse`] potential. pub fn new(a: Float, d_e: Float, r_e: Float) -> Morse { Morse { a, d_e, r_e } } } impl Potential for Morse {} /// Standard [Coulombic](https://lammps.sandia.gov/doc/pair_coul.html#description) potential. #[derive(Clone, Copy, Debug)] pub struct StandardCoulombic { /// Dielectric constant (unitless). pub dielectric: Float, } impl StandardCoulombic { /// Returns a new [`StandardCoulombic`] potential. pub fn new(dielectric: Float) -> StandardCoulombic { StandardCoulombic { dielectric } } } impl Potential for StandardCoulombic {}
25.3125
98
0.648834
39945a993bace9dac8ec5565d35a10b6926d90d9
1,820
//! Plugin to work with locales. //! It handles flutter/localization type message. use log::debug; use super::prelude::*; pub const PLUGIN_NAME: &str = module_path!(); pub const CHANNEL_NAME: &str = "flutter/lifecycle"; pub struct LifecyclePlugin { channel: Weak<BasicMessageChannel>, handler: Arc<RwLock<Handler>>, } impl Default for LifecyclePlugin { fn default() -> Self { Self { channel: Weak::new(), handler: Arc::new(RwLock::new(Handler)), } } } impl Plugin for LifecyclePlugin { fn plugin_name() -> &'static str { PLUGIN_NAME } fn init_channels(&mut self, registrar: &mut ChannelRegistrar) { let handler = Arc::downgrade(&self.handler); self.channel = registrar.register_channel(BasicMessageChannel::new( CHANNEL_NAME, handler, &string_codec::CODEC, )); } } impl LifecyclePlugin { pub fn send_app_is_inactive(&self) { debug!("Sending app is inactive"); if let Some(channel) = self.channel.upgrade() { channel.send(&Value::String("AppLifecycleState.inactive".to_owned())); } } pub fn send_app_is_resumed(&self) { debug!("Sending app is resumed"); if let Some(channel) = self.channel.upgrade() { channel.send(&Value::String("AppLifecycleState.resumed".to_owned())); } } pub fn send_app_is_paused(&self) { debug!("Sending app is paused"); if let Some(channel) = self.channel.upgrade() { channel.send(&Value::String("AppLifecycleState.paused".to_owned())); } } } struct Handler; impl MessageHandler for Handler { fn on_message(&mut self, _: Value, _: RuntimeData) -> Result<Value, MessageError> { Ok(Value::Null) } }
26
87
0.612637
69cb658cce8d7840d596e5e7f04dc82e7af995c4
703
/* * Copyright 2019 Cargill Incorporated * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ----------------------------------------------------------------------------- */ pub mod database;
39.055556
80
0.657183
394499ab43ab2d17b2f16f929bb9fcb59e1e398e
13,398
// This file is generated by rust-protobuf 2.27.1. Do not edit // @generated // https://github.com/rust-lang/rust-clippy/issues/702 #![allow(unknown_lints)] #![allow(clippy::all)] #![allow(unused_attributes)] #![cfg_attr(rustfmt, rustfmt::skip)] #![allow(box_pointers)] #![allow(dead_code)] #![allow(missing_docs)] #![allow(non_camel_case_types)] #![allow(non_snake_case)] #![allow(non_upper_case_globals)] #![allow(trivial_casts)] #![allow(unused_imports)] #![allow(unused_results)] //! Generated file from `tensorflow/core/protobuf/cluster.proto` /// Generated files are compatible only with the same version /// of protobuf runtime. // const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_27_1; #[derive(PartialEq,Clone,Default)] pub struct JobDef { // message fields pub name: ::std::string::String, pub tasks: ::std::collections::HashMap<i32, ::std::string::String>, // special fields pub unknown_fields: ::protobuf::UnknownFields, pub cached_size: ::protobuf::CachedSize, } impl<'a> ::std::default::Default for &'a JobDef { fn default() -> &'a JobDef { <JobDef as ::protobuf::Message>::default_instance() } } impl JobDef { pub fn new() -> JobDef { ::std::default::Default::default() } // string name = 1; pub fn get_name(&self) -> &str { &self.name } pub fn clear_name(&mut self) { self.name.clear(); } // Param is passed by value, moved pub fn set_name(&mut self, v: ::std::string::String) { self.name = v; } // Mutable pointer to the field. // If field is not initialized, it is initialized with default value first. 
pub fn mut_name(&mut self) -> &mut ::std::string::String { &mut self.name } // Take field pub fn take_name(&mut self) -> ::std::string::String { ::std::mem::replace(&mut self.name, ::std::string::String::new()) } // repeated .tensorflow.JobDef.TasksEntry tasks = 2; pub fn get_tasks(&self) -> &::std::collections::HashMap<i32, ::std::string::String> { &self.tasks } pub fn clear_tasks(&mut self) { self.tasks.clear(); } // Param is passed by value, moved pub fn set_tasks(&mut self, v: ::std::collections::HashMap<i32, ::std::string::String>) { self.tasks = v; } // Mutable pointer to the field. pub fn mut_tasks(&mut self) -> &mut ::std::collections::HashMap<i32, ::std::string::String> { &mut self.tasks } // Take field pub fn take_tasks(&mut self) -> ::std::collections::HashMap<i32, ::std::string::String> { ::std::mem::replace(&mut self.tasks, ::std::collections::HashMap::new()) } } impl ::protobuf::Message for JobDef { fn is_initialized(&self) -> bool { true } fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> { while !is.eof()? 
{ let (field_number, wire_type) = is.read_tag_unpack()?; match field_number { 1 => { ::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.name)?; }, 2 => { ::protobuf::rt::read_map_into::<::protobuf::types::ProtobufTypeInt32, ::protobuf::types::ProtobufTypeString>(wire_type, is, &mut self.tasks)?; }, _ => { ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?; }, }; } ::std::result::Result::Ok(()) } // Compute sizes of nested messages #[allow(unused_variables)] fn compute_size(&self) -> u32 { let mut my_size = 0; if !self.name.is_empty() { my_size += ::protobuf::rt::string_size(1, &self.name); } my_size += ::protobuf::rt::compute_map_size::<::protobuf::types::ProtobufTypeInt32, ::protobuf::types::ProtobufTypeString>(2, &self.tasks); my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); self.cached_size.set(my_size); my_size } fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> { if !self.name.is_empty() { os.write_string(1, &self.name)?; } ::protobuf::rt::write_map_with_cached_sizes::<::protobuf::types::ProtobufTypeInt32, ::protobuf::types::ProtobufTypeString>(2, &self.tasks, os)?; os.write_unknown_fields(self.get_unknown_fields())?; ::std::result::Result::Ok(()) } fn get_cached_size(&self) -> u32 { self.cached_size.get() } fn get_unknown_fields(&self) -> &::protobuf::UnknownFields { &self.unknown_fields } fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields { &mut self.unknown_fields } fn as_any(&self) -> &dyn (::std::any::Any) { self as &dyn (::std::any::Any) } fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) { self as &mut dyn (::std::any::Any) } fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> { self } fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor { Self::descriptor_static() } fn new() -> JobDef { JobDef::new() } fn 
descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor { static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT; descriptor.get(|| { let mut fields = ::std::vec::Vec::new(); fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>( "name", |m: &JobDef| { &m.name }, |m: &mut JobDef| { &mut m.name }, )); fields.push(::protobuf::reflect::accessor::make_map_accessor::<_, ::protobuf::types::ProtobufTypeInt32, ::protobuf::types::ProtobufTypeString>( "tasks", |m: &JobDef| { &m.tasks }, |m: &mut JobDef| { &mut m.tasks }, )); ::protobuf::reflect::MessageDescriptor::new_pb_name::<JobDef>( "JobDef", fields, file_descriptor_proto() ) }) } fn default_instance() -> &'static JobDef { static instance: ::protobuf::rt::LazyV2<JobDef> = ::protobuf::rt::LazyV2::INIT; instance.get(JobDef::new) } } impl ::protobuf::Clear for JobDef { fn clear(&mut self) { self.name.clear(); self.tasks.clear(); self.unknown_fields.clear(); } } impl ::std::fmt::Debug for JobDef { fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { ::protobuf::text_format::fmt(self, f) } } impl ::protobuf::reflect::ProtobufValue for JobDef { fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef { ::protobuf::reflect::ReflectValueRef::Message(self) } } #[derive(PartialEq,Clone,Default)] pub struct ClusterDef { // message fields pub job: ::protobuf::RepeatedField<JobDef>, // special fields pub unknown_fields: ::protobuf::UnknownFields, pub cached_size: ::protobuf::CachedSize, } impl<'a> ::std::default::Default for &'a ClusterDef { fn default() -> &'a ClusterDef { <ClusterDef as ::protobuf::Message>::default_instance() } } impl ClusterDef { pub fn new() -> ClusterDef { ::std::default::Default::default() } // repeated .tensorflow.JobDef job = 1; pub fn get_job(&self) -> &[JobDef] { &self.job } pub fn clear_job(&mut self) { self.job.clear(); } // Param is passed by value, 
moved pub fn set_job(&mut self, v: ::protobuf::RepeatedField<JobDef>) { self.job = v; } // Mutable pointer to the field. pub fn mut_job(&mut self) -> &mut ::protobuf::RepeatedField<JobDef> { &mut self.job } // Take field pub fn take_job(&mut self) -> ::protobuf::RepeatedField<JobDef> { ::std::mem::replace(&mut self.job, ::protobuf::RepeatedField::new()) } } impl ::protobuf::Message for ClusterDef { fn is_initialized(&self) -> bool { for v in &self.job { if !v.is_initialized() { return false; } }; true } fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> { while !is.eof()? { let (field_number, wire_type) = is.read_tag_unpack()?; match field_number { 1 => { ::protobuf::rt::read_repeated_message_into(wire_type, is, &mut self.job)?; }, _ => { ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?; }, }; } ::std::result::Result::Ok(()) } // Compute sizes of nested messages #[allow(unused_variables)] fn compute_size(&self) -> u32 { let mut my_size = 0; for value in &self.job { let len = value.compute_size(); my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len; }; my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); self.cached_size.set(my_size); my_size } fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> { for v in &self.job { os.write_tag(1, ::protobuf::wire_format::WireTypeLengthDelimited)?; os.write_raw_varint32(v.get_cached_size())?; v.write_to_with_cached_sizes(os)?; }; os.write_unknown_fields(self.get_unknown_fields())?; ::std::result::Result::Ok(()) } fn get_cached_size(&self) -> u32 { self.cached_size.get() } fn get_unknown_fields(&self) -> &::protobuf::UnknownFields { &self.unknown_fields } fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields { &mut self.unknown_fields } fn as_any(&self) -> &dyn (::std::any::Any) { self as &dyn (::std::any::Any) } fn 
as_any_mut(&mut self) -> &mut dyn (::std::any::Any) { self as &mut dyn (::std::any::Any) } fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> { self } fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor { Self::descriptor_static() } fn new() -> ClusterDef { ClusterDef::new() } fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor { static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT; descriptor.get(|| { let mut fields = ::std::vec::Vec::new(); fields.push(::protobuf::reflect::accessor::make_repeated_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<JobDef>>( "job", |m: &ClusterDef| { &m.job }, |m: &mut ClusterDef| { &mut m.job }, )); ::protobuf::reflect::MessageDescriptor::new_pb_name::<ClusterDef>( "ClusterDef", fields, file_descriptor_proto() ) }) } fn default_instance() -> &'static ClusterDef { static instance: ::protobuf::rt::LazyV2<ClusterDef> = ::protobuf::rt::LazyV2::INIT; instance.get(ClusterDef::new) } } impl ::protobuf::Clear for ClusterDef { fn clear(&mut self) { self.job.clear(); self.unknown_fields.clear(); } } impl ::std::fmt::Debug for ClusterDef { fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { ::protobuf::text_format::fmt(self, f) } } impl ::protobuf::reflect::ProtobufValue for ClusterDef { fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef { ::protobuf::reflect::ReflectValueRef::Message(self) } } static file_descriptor_proto_data: &'static [u8] = b"\ \n&tensorflow/core/protobuf/cluster.proto\x12\ntensorflow\"\x8b\x01\n\ \x06JobDef\x12\x12\n\x04name\x18\x01\x20\x01(\tR\x04name\x123\n\x05tasks\ \x18\x02\x20\x03(\x0b2\x1d.tensorflow.JobDef.TasksEntryR\x05tasks\x1a8\n\ \nTasksEntry\x12\x10\n\x03key\x18\x01\x20\x01(\x05R\x03key\x12\x14\n\x05\ value\x18\x02\x20\x01(\tR\x05value:\x028\x01\"2\n\nClusterDef\x12$\n\x03\ 
job\x18\x01\x20\x03(\x0b2\x12.tensorflow.JobDefR\x03jobB\x87\x01\n\x1aor\ g.tensorflow.distruntimeB\rClusterProtosP\x01ZUgithub.com/tensorflow/ten\ sorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto\xf8\x01\x01\ b\x06proto3\ "; static file_descriptor_proto_lazy: ::protobuf::rt::LazyV2<::protobuf::descriptor::FileDescriptorProto> = ::protobuf::rt::LazyV2::INIT; fn parse_descriptor_proto() -> ::protobuf::descriptor::FileDescriptorProto { ::protobuf::Message::parse_from_bytes(file_descriptor_proto_data).unwrap() } pub fn file_descriptor_proto() -> &'static ::protobuf::descriptor::FileDescriptorProto { file_descriptor_proto_lazy.get(|| { parse_descriptor_proto() }) }
32.59854
162
0.58979
e952c2b0967d5a058c382f2b57333c7979cb15fd
3,546
use alloc::boxed::Box; use alloc::format; use alloc::string::{String, ToString}; use alloc::vec::Vec; use core::any::Any; use crate::cop0::cause_extract_exception; use crate::exception_handler::drain_seen_exception; use crate::tests::traps::Immediate; mod address_error_exception; mod cart_memory; mod cop0; mod exception_instructions; mod overflow_exception; mod startup; mod soft_asserts; mod testlist; mod tlb; mod traps; pub enum Level { // Very basic functionality - if this is broken, expect things to go bad BasicFunctionality, // Basic functionality that is rarely used RarelyUsed, // Some weird hardware quirk - this probably won't matter too much Weird, } pub trait Test { fn name(&self) -> &str; fn level(&self) -> Level; /// Returns a set of values to run the test with. /// Tests that don't support multiple values can return an empty Vec and will still /// get called once, in which case the value argument should be ignored fn values(&self) -> Vec<Box<dyn Any>>; fn run(&self, value: &Box<dyn Any>) -> Result<(), String>; } pub fn run() { let mut succeeded = 0; let mut failed = 0; fn test_value(test: &Box<dyn Test>, value: &Box::<dyn Any>, failed: &mut u32, succeeded: &mut u32) { fn value_desc(value: &Box<dyn Any>) -> String { match (*value).downcast_ref::<u32>() { Some(v) => return format!("{:?}", v), None => {}, } match (*value).downcast_ref::<(bool, i64, i64)>() { Some(v) => return format!("{:?}", v), None => {}, } match (*value).downcast_ref::<(bool, u64, u64)>() { Some(v) => return format!("{:?}", v), None => {} } match (*value).downcast_ref::<(bool, u64, Immediate)>() { Some(v) => return format!("{:?}", v), None => {} } return "(value)".to_string(); } let test_result = test.run(&value); match drain_seen_exception() { Some(exception) => { // If the test caused an exception, don't even bother looking at the result. 
Just count it as failed crate::println!("Test \"{:?}\' with '{:?}' failed with exception: {:?}", test.name(), value_desc(value), cause_extract_exception(exception.cause)); *failed += 1; } None => { match test_result { Ok(_) => { *succeeded += 1 } Err(error) => { crate::println!("Test \"{:?}\' with '{:?}' failed: {}", test.name(), value_desc(value), error); *failed += 1; } } } } } let dummy_test_value: Box<dyn Any> = Box::new(0u32); for test in testlist::tests() { let values = test.values(); if values.len() == 0 { test_value(&test, &dummy_test_value, &mut failed, &mut succeeded); } else { for value in values { test_value(&test, &value, &mut failed, &mut succeeded); } } } crate::println!(); if (failed + succeeded) == 0 { crate::println!("Done, but no tests were executed"); } else { crate::println!("Done! Tests: {}. Failed: {}. Success rate: {}%", failed + succeeded, failed, succeeded * 100 / (failed + succeeded)); } }
31.945946
163
0.523125
8f3065580177c775b8b0c5d11e434dc551e4c13b
3,451
//! integration of concrete business logic and abstract interfaces use std::sync::Mutex; use async_trait::async_trait; use serde::Serialize; use dyn_conn::{ BizPoolFunctionality, ConnGeneratorFunctionality, ConnInfo, ConnInfoFunctionality, ConnMember, ConnStore, ConnUtil, Driver, }; use ua_persistence::ConnectionInformation; use ua_service::{DaoMY, DaoOptions, DaoPG}; use crate::error::ServiceError; #[derive(Serialize, Clone)] pub struct CI(ConnectionInformation); impl From<ConnectionInformation> for CI { fn from(ci: ConnectionInformation) -> Self { CI(ci) } } impl CI { pub fn new(conn_info: ConnInfo) -> Self { let drv = if conn_info.driver == Driver::Postgres { "postgres" } else { "mysql" }; CI(ConnectionInformation { id: None, name: "".to_owned(), description: None, driver: drv.to_owned(), username: conn_info.username, password: conn_info.password, host: conn_info.host, port: conn_info.port, database: conn_info.database, }) } pub fn ci(&self) -> ConnectionInformation { self.0.clone() } } impl ConnInfoFunctionality for CI { fn to_conn_info(&self) -> ConnInfo { let drv = if self.0.driver == "postgres" { Driver::Postgres } else { Driver::Mysql }; ConnInfo { driver: drv, username: self.0.username.clone(), password: self.0.password.clone(), host: self.0.host.clone(), port: self.0.port.clone(), database: self.0.database.clone(), } } } pub struct UaConn(DaoOptions); impl UaConn { pub fn dao(&self) -> &DaoOptions { &self.0 } } #[async_trait] impl BizPoolFunctionality for UaConn { async fn disconnect(&self) { match &self.0 { DaoOptions::PG(p) => { p.pool.close().await; } DaoOptions::MY(p) => { p.pool.close().await; } } } } #[async_trait] impl ConnGeneratorFunctionality<CI, UaConn> for UaConn { type ErrorType = ServiceError; async fn check_connection(conn_info: &ConnInfo) -> Result<bool, Self::ErrorType> { match conn_info.driver { Driver::Postgres => Ok(DaoPG::connectable(&conn_info.to_string()).await), Driver::Mysql => Ok(DaoMY::connectable(&conn_info.to_string()).await), 
} } async fn conn_establish( conn_info: &ConnInfo, ) -> Result<ConnMember<CI, UaConn>, Self::ErrorType> { let uri = &conn_info.to_string(); match conn_info.driver { Driver::Postgres => { let dao = DaoOptions::PG(DaoPG::new(uri, 10).await?); Ok(ConnMember { info: CI::new(conn_info.clone()), biz_pool: UaConn(dao), }) } Driver::Mysql => { let dao = DaoOptions::MY(DaoMY::new(uri, 10).await?); Ok(ConnMember { info: CI::new(conn_info.clone()), biz_pool: UaConn(dao), }) } } } } pub type UaStore = ConnStore<CI, UaConn>; pub type MutexUaStore = Mutex<UaStore>; pub type UaConnInfo = ConnectionInformation; pub type UaUtil = ConnUtil;
26.751938
98
0.554332
f4bc597304097c85b91ca0d17a807e042da92391
15,023
// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use prelude::v1::*; use os::unix::prelude::*; use collections::HashMap; use env; use ffi::{OsString, OsStr, CString, CStr}; use fmt; use io::{self, Error, ErrorKind}; use libc::{self, pid_t, c_void, c_int, gid_t, uid_t}; use ptr; use sys::pipe::AnonPipe; use sys::{self, c, cvt, cvt_r}; use sys::fs::{File, OpenOptions}; //////////////////////////////////////////////////////////////////////////////// // Command //////////////////////////////////////////////////////////////////////////////// #[derive(Clone)] pub struct Command { pub program: CString, pub args: Vec<CString>, pub env: Option<HashMap<OsString, OsString>>, pub cwd: Option<CString>, pub uid: Option<uid_t>, pub gid: Option<gid_t>, pub detach: bool, // not currently exposed in std::process } impl Command { pub fn new(program: &OsStr) -> Command { Command { program: program.to_cstring().unwrap(), args: Vec::new(), env: None, cwd: None, uid: None, gid: None, detach: false, } } pub fn arg(&mut self, arg: &OsStr) { self.args.push(arg.to_cstring().unwrap()) } pub fn args<'a, I: Iterator<Item = &'a OsStr>>(&mut self, args: I) { self.args.extend(args.map(|s| s.to_cstring().unwrap())) } fn init_env_map(&mut self) { if self.env.is_none() { self.env = Some(env::vars_os().collect()); } } pub fn env(&mut self, key: &OsStr, val: &OsStr) { self.init_env_map(); self.env.as_mut().unwrap().insert(key.to_os_string(), val.to_os_string()); } pub fn env_remove(&mut self, key: &OsStr) { self.init_env_map(); self.env.as_mut().unwrap().remove(&key.to_os_string()); } pub 
fn env_clear(&mut self) { self.env = Some(HashMap::new()) } pub fn cwd(&mut self, dir: &OsStr) { self.cwd = Some(dir.to_cstring().unwrap()) } } //////////////////////////////////////////////////////////////////////////////// // Processes //////////////////////////////////////////////////////////////////////////////// /// Unix exit statuses #[derive(PartialEq, Eq, Clone, Copy, Debug)] pub enum ExitStatus { /// Normal termination with an exit code. Code(i32), /// Termination by signal, with the signal number. /// /// Never generated on Windows. Signal(i32), } impl ExitStatus { pub fn success(&self) -> bool { *self == ExitStatus::Code(0) } pub fn code(&self) -> Option<i32> { match *self { ExitStatus::Code(c) => Some(c), _ => None } } } impl fmt::Display for ExitStatus { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { ExitStatus::Code(code) => write!(f, "exit code: {}", code), ExitStatus::Signal(code) => write!(f, "signal: {}", code), } } } /// The unique id of the process (this should never be negative). 
pub struct Process { pid: pid_t } pub enum Stdio { Inherit, Piped(AnonPipe), None, } const CLOEXEC_MSG_FOOTER: &'static [u8] = b"NOEX"; impl Process { pub unsafe fn kill(&self) -> io::Result<()> { try!(cvt(libc::funcs::posix88::signal::kill(self.pid, libc::SIGKILL))); Ok(()) } pub fn spawn(cfg: &Command, in_fd: Stdio, out_fd: Stdio, err_fd: Stdio) -> io::Result<Process> { let dirp = cfg.cwd.as_ref().map(|c| c.as_ptr()).unwrap_or(ptr::null()); let (envp, _a, _b) = make_envp(cfg.env.as_ref()); let (argv, _a) = make_argv(&cfg.program, &cfg.args); let (input, output) = try!(sys::pipe::anon_pipe()); let pid = unsafe { match libc::fork() { 0 => { drop(input); Process::child_after_fork(cfg, output, argv, envp, dirp, in_fd, out_fd, err_fd) } n if n < 0 => return Err(Error::last_os_error()), n => n, } }; let p = Process{ pid: pid }; drop(output); let mut bytes = [0; 8]; // loop to handle EINTR loop { match input.read(&mut bytes) { Ok(0) => return Ok(p), Ok(8) => { assert!(combine(CLOEXEC_MSG_FOOTER) == combine(&bytes[4.. 8]), "Validation on the CLOEXEC pipe failed: {:?}", bytes); let errno = combine(&bytes[0.. 4]); assert!(p.wait().is_ok(), "wait() should either return Ok or panic"); return Err(Error::from_raw_os_error(errno)) } Err(ref e) if e.kind() == ErrorKind::Interrupted => {} Err(e) => { assert!(p.wait().is_ok(), "wait() should either return Ok or panic"); panic!("the CLOEXEC pipe failed: {:?}", e) }, Ok(..) => { // pipe I/O up to PIPE_BUF bytes should be atomic assert!(p.wait().is_ok(), "wait() should either return Ok or panic"); panic!("short read on the CLOEXEC pipe") } } } fn combine(arr: &[u8]) -> i32 { let a = arr[0] as u32; let b = arr[1] as u32; let c = arr[2] as u32; let d = arr[3] as u32; ((a << 24) | (b << 16) | (c << 8) | (d << 0)) as i32 } } // And at this point we've reached a special time in the life of the // child. The child must now be considered hamstrung and unable to // do anything other than syscalls really. 
Consider the following // scenario: // // 1. Thread A of process 1 grabs the malloc() mutex // 2. Thread B of process 1 forks(), creating thread C // 3. Thread C of process 2 then attempts to malloc() // 4. The memory of process 2 is the same as the memory of // process 1, so the mutex is locked. // // This situation looks a lot like deadlock, right? It turns out // that this is what pthread_atfork() takes care of, which is // presumably implemented across platforms. The first thing that // threads to *before* forking is to do things like grab the malloc // mutex, and then after the fork they unlock it. // // Despite this information, libnative's spawn has been witnessed to // deadlock on both OSX and FreeBSD. I'm not entirely sure why, but // all collected backtraces point at malloc/free traffic in the // child spawned process. // // For this reason, the block of code below should contain 0 // invocations of either malloc of free (or their related friends). // // As an example of not having malloc/free traffic, we don't close // this file descriptor by dropping the FileDesc (which contains an // allocation). Instead we just close it manually. This will never // have the drop glue anyway because this code never returns (the // child will either exec() or invoke libc::exit) unsafe fn child_after_fork(cfg: &Command, mut output: AnonPipe, argv: *const *const libc::c_char, envp: *const libc::c_void, dirp: *const libc::c_char, in_fd: Stdio, out_fd: Stdio, err_fd: Stdio) -> ! { fn fail(output: &mut AnonPipe) -> ! 
{ let errno = sys::os::errno() as u32; let bytes = [ (errno >> 24) as u8, (errno >> 16) as u8, (errno >> 8) as u8, (errno >> 0) as u8, CLOEXEC_MSG_FOOTER[0], CLOEXEC_MSG_FOOTER[1], CLOEXEC_MSG_FOOTER[2], CLOEXEC_MSG_FOOTER[3] ]; // pipe I/O up to PIPE_BUF bytes should be atomic, and then we want // to be sure we *don't* run at_exit destructors as we're being torn // down regardless assert!(output.write(&bytes).is_ok()); unsafe { libc::_exit(1) } } let setup = |src: Stdio, dst: c_int| { let fd = match src { Stdio::Inherit => return true, Stdio::Piped(pipe) => pipe.into_fd(), // If a stdio file descriptor is set to be ignored, we open up // /dev/null into that file descriptor. Otherwise, the first // file descriptor opened up in the child would be numbered as // one of the stdio file descriptors, which is likely to wreak // havoc. Stdio::None => { let mut opts = OpenOptions::new(); opts.read(dst == libc::STDIN_FILENO); opts.write(dst != libc::STDIN_FILENO); let devnull = CStr::from_ptr(b"/dev/null\0".as_ptr() as *const _); if let Ok(f) = File::open_c(devnull, &opts) { f.into_fd() } else { return false } } }; cvt_r(|| libc::dup2(fd.raw(), dst)).is_ok() }; if !setup(in_fd, libc::STDIN_FILENO) { fail(&mut output) } if !setup(out_fd, libc::STDOUT_FILENO) { fail(&mut output) } if !setup(err_fd, libc::STDERR_FILENO) { fail(&mut output) } if let Some(u) = cfg.gid { if libc::setgid(u as libc::gid_t) != 0 { fail(&mut output); } } if let Some(u) = cfg.uid { // When dropping privileges from root, the `setgroups` call // will remove any extraneous groups. If we don't call this, // then even though our uid has dropped, we may still have // groups that enable us to do super-user things. This will // fail if we aren't root, so don't bother checking the // return value, this is just done as an optimistic // privilege dropping function. 
let _ = c::setgroups(0, ptr::null()); if libc::setuid(u as libc::uid_t) != 0 { fail(&mut output); } } if cfg.detach { // Don't check the error of setsid because it fails if we're the // process leader already. We just forked so it shouldn't return // error, but ignore it anyway. let _ = libc::setsid(); } if !dirp.is_null() && libc::chdir(dirp) == -1 { fail(&mut output); } if !envp.is_null() { *sys::os::environ() = envp as *const _; } let _ = libc::execvp(*argv, argv as *mut _); fail(&mut output) } pub fn id(&self) -> u32 { self.pid as u32 } pub fn wait(&self) -> io::Result<ExitStatus> { let mut status = 0 as c_int; try!(cvt_r(|| unsafe { c::waitpid(self.pid, &mut status, 0) })); Ok(translate_status(status)) } pub fn try_wait(&self) -> Option<ExitStatus> { let mut status = 0 as c_int; match cvt_r(|| unsafe { c::waitpid(self.pid, &mut status, c::WNOHANG) }) { Ok(0) => None, Ok(n) if n == self.pid => Some(translate_status(status)), Ok(n) => panic!("unknown pid: {}", n), Err(e) => panic!("unknown waitpid error: {}", e), } } } fn make_argv(prog: &CString, args: &[CString]) -> (*const *const libc::c_char, Vec<*const libc::c_char>) { let mut ptrs: Vec<*const libc::c_char> = Vec::with_capacity(args.len()+1); // Convert the CStrings into an array of pointers. Note: the // lifetime of the various CStrings involved is guaranteed to be // larger than the lifetime of our invocation of cb, but this is // technically unsafe as the callback could leak these pointers // out of our scope. ptrs.push(prog.as_ptr()); ptrs.extend(args.iter().map(|tmp| tmp.as_ptr())); // Add a terminating null pointer (required by libc). ptrs.push(ptr::null()); (ptrs.as_ptr(), ptrs) } fn make_envp(env: Option<&HashMap<OsString, OsString>>) -> (*const c_void, Vec<Vec<u8>>, Vec<*const libc::c_char>) { // On posixy systems we can pass a char** for envp, which is a // null-terminated array of "k=v\0" strings. 
Since we must create // these strings locally, yet expose a raw pointer to them, we // create a temporary vector to own the CStrings that outlives the // call to cb. if let Some(env) = env { let mut tmps = Vec::with_capacity(env.len()); for pair in env { let mut kv = Vec::new(); kv.push_all(pair.0.as_bytes()); kv.push('=' as u8); kv.push_all(pair.1.as_bytes()); kv.push(0); // terminating null tmps.push(kv); } let mut ptrs: Vec<*const libc::c_char> = tmps.iter() .map(|tmp| tmp.as_ptr() as *const libc::c_char) .collect(); ptrs.push(ptr::null()); (ptrs.as_ptr() as *const _, tmps, ptrs) } else { (0 as *const _, Vec::new(), Vec::new()) } } fn translate_status(status: c_int) -> ExitStatus { #![allow(non_snake_case)] #[cfg(any(target_os = "linux", target_os = "android"))] mod imp { pub fn WIFEXITED(status: i32) -> bool { (status & 0xff) == 0 } pub fn WEXITSTATUS(status: i32) -> i32 { (status >> 8) & 0xff } pub fn WTERMSIG(status: i32) -> i32 { status & 0x7f } } #[cfg(any(target_os = "macos", target_os = "ios", target_os = "freebsd", target_os = "dragonfly", target_os = "bitrig", target_os = "openbsd"))] mod imp { pub fn WIFEXITED(status: i32) -> bool { (status & 0x7f) == 0 } pub fn WEXITSTATUS(status: i32) -> i32 { status >> 8 } pub fn WTERMSIG(status: i32) -> i32 { status & 0o177 } } if imp::WIFEXITED(status) { ExitStatus::Code(imp::WEXITSTATUS(status)) } else { ExitStatus::Signal(imp::WTERMSIG(status)) } }
35.854415
82
0.514411
eda7e783011fd71a3ac05ae36cd74ec936397c52
2,970
use { super::BlendContext, crate::data::{Row, Value}, std::{fmt::Debug, rc::Rc}, }; #[derive(Debug)] enum Content<'a> { Some { table_alias: &'a str, columns: Rc<[String]>, row: Option<&'a Row>, }, None, } #[derive(Debug)] pub struct FilterContext<'a> { content: Content<'a>, next: Option<Rc<FilterContext<'a>>>, next2: Option<Rc<BlendContext<'a>>>, } impl<'a> FilterContext<'a> { pub fn new( table_alias: &'a str, columns: Rc<[String]>, row: Option<&'a Row>, next: Option<Rc<FilterContext<'a>>>, ) -> Self { Self { content: Content::Some { table_alias, columns, row, }, next, next2: None, } } pub fn concat( filter_context: Option<Rc<FilterContext<'a>>>, blend_context: Option<Rc<BlendContext<'a>>>, ) -> Self { Self { content: Content::None, next: filter_context, next2: blend_context, } } pub fn get_value(&'a self, target: &str) -> Option<&'a Value> { if let Content::Some { columns, row, .. } = &self.content { let value = columns .iter() .position(|column| column == target) .map(|index| row.and_then(|row| row.get_value(index))); if let Some(value) = value { return value; } } match (&self.next, &self.next2) { (None, None) => None, (Some(fc), None) => fc.get_value(target), (None, Some(bc)) => bc.get_value(target), (Some(fc), Some(bc)) => match bc.get_value(target) { v @ Some(_) => v, None => fc.get_value(target), }, } } pub fn get_alias_value(&'a self, target_alias: &str, target: &str) -> Option<&'a Value> { if let Content::Some { table_alias, columns, row, } = &self.content { let get_value = || { if table_alias != &target_alias { return None; } columns .iter() .position(|column| column == target) .map(|index| row.and_then(|row| row.get_value(index))) }; if let Some(value) = get_value() { return value; } } match (&self.next, &self.next2) { (None, None) => None, (Some(fc), None) => fc.get_alias_value(target_alias, target), (None, Some(bc)) => bc.get_alias_value(target_alias, target), (Some(fc), Some(bc)) => match bc.get_alias_value(target_alias, target) { v @ Some(_) => v, 
None => fc.get_alias_value(target_alias, target), }, } } }
27
93
0.456229
e6a2f70bcefa41a403e2e5f2704b3f8ca8632403
4,879
pub trait Log { fn log(&self, window_title: String, command_line: String); fn get_last_entry(&self) -> (String, String); } pub fn log(window_title: String, command_line: String) { let logger = unsafe { &*LOGGER }; logger.log(window_title, command_line); } pub fn get_last_entry() -> (String, String) { let logger = unsafe { &*LOGGER }; logger.get_last_entry() } pub fn set_logger<M>(make_logger: M) where M: FnOnce() -> Box<Log>, { unsafe { LOGGER = mem::transmute(make_logger()); } } static mut LOGGER: *const Log = &NopLogger; struct NopLogger; impl Log for NopLogger { fn log(&self, _: String, _: String) {} fn get_last_entry(&self) -> (String, String) { ("".to_string(), "".to_string()) } } //////////////////////////////////////////////////////////////////////////////////////// extern crate winapi; use self::winapi::{um::minwinbase, um::winnt}; use super::win32helper; use std::fs::File; use std::mem; struct Entry { timestamp: minwinbase::SYSTEMTIME, duration_in_seconds: u32, window_title: String, command_line: String, } pub struct Logger { file: File, interval_in_seconds: u32, max_entries_before_flush: u32, count: u32, last_entry: Entry, entries: Vec<Entry>, } impl Log for Logger { fn log(&self, window_title: String, command_line: String) { unsafe { let logger: &mut Logger = mem::transmute(self as *const Logger); logger.add_entry(window_title, command_line); } } fn get_last_entry(&self) -> (String, String) { unsafe { let logger: &mut Logger = mem::transmute(self as *const Logger); logger.get_last_entry() } } } impl Logger { pub fn new(interval_in_seconds: u32, flush_interval_in_minutes: u32) -> Logger { use std::env; use std::fs::OpenOptions; use std::os::windows::fs::OpenOptionsExt; let file_name = env::var("LOCALAPPDATA").unwrap() + "\\record-usage.csv"; let last_entry = Entry { timestamp: win32helper::get_local_time(), duration_in_seconds: 0, window_title: String::new(), command_line: String::new(), }; let max_entries_before_flush = flush_interval_in_minutes * 60 / 
interval_in_seconds; Logger { file: OpenOptions::new() .append(true) .create(true) .share_mode(winnt::FILE_SHARE_READ) .open(file_name) .unwrap(), interval_in_seconds: interval_in_seconds, max_entries_before_flush: max_entries_before_flush, count: 0, last_entry: last_entry, entries: Vec::<Entry>::with_capacity(max_entries_before_flush as usize), } } pub fn add_entry(&mut self, window_title: String, command_line: String) { self.count += 1; if self.count >= self.max_entries_before_flush { self.flush(); } let entry = Entry { timestamp: win32helper::get_local_time(), duration_in_seconds: self.interval_in_seconds, window_title: window_title.to_owned(), command_line: command_line.to_owned(), }; if self.last_entry.duration_in_seconds == 0 { self.last_entry = entry; return; } if self.last_entry.window_title == window_title && self.last_entry.command_line == command_line { self.last_entry.duration_in_seconds += self.interval_in_seconds; return; } self.entries.push(mem::replace(&mut self.last_entry, entry)); } pub fn get_last_entry(&self) -> (String, String) { ( self.last_entry.window_title.clone(), self.last_entry.command_line.clone(), ) } fn flush(&mut self) { use std::io::Write; let entry = Entry { timestamp: win32helper::get_local_time(), duration_in_seconds: 0, window_title: String::new(), command_line: String::new(), }; self.entries.push(mem::replace(&mut self.last_entry, entry)); for entry in &self.entries { let now = entry.timestamp; writeln!( self.file, "{}-{}-{} {}:{}:{}, {}, {}, {}", now.wYear, now.wMonth, now.wDay, now.wHour, now.wMinute, now.wSecond, entry.duration_in_seconds, entry.command_line, entry.window_title ).unwrap(); } self.entries.clear(); self.count = 0; } } impl Drop for Logger { fn drop(&mut self) { self.flush(); } }
27.410112
92
0.553392
239161e3dc811dc0d57047c62f95a194047c4a52
765
use bitcoin_hashes::hex::ToHex; use bitcoin_hashes::{sha256, sha256d, Hash}; use learn_blockchain::conv; // NOTE: bitcoin_hashes's fmt, to_hex are all little-endian #[test] fn test_sha256() { assert_eq!( sha256::Hash::hash(b"hello world").to_hex(), "b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9" ); assert_eq!( sha256::Hash::hash(sha256::Hash::hash(b"hello world").as_inner()).to_hex(), "bc62d4b80d9e36da29c16c5d4d9f11731f36052c72401a76c23c0fb5a9b74423" ); // twice hash, made the endian different? assert_eq!( sha256d::Hash::hash(b"hello world").to_hex(), "2344b7a9b50f3cc2761a40722c05361f73119f4d5d6cc129da369e0db8d462bc" ); } // hash160(s) === ripemd160(sha256(s))
29.423077
83
0.699346
ac0c14b7319d614f5b370d59425b6aa518fe7b70
1,308
/* * ORY Oathkeeper * * ORY Oathkeeper is a reverse proxy that checks the HTTP Authorization for validity against a set of rules. This service uses Hydra to validate access tokens and policies. * * The version of the OpenAPI document: v0.38.14-beta.1 * Contact: [email protected] * Generated by: https://openapi-generator.tech */ #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Upstream { /// PreserveHost, if false (the default), tells ORY Oathkeeper to set the upstream request's Host header to the hostname of the API's upstream's URL. Setting this flag to true instructs ORY Oathkeeper not to do so. #[serde(rename = "preserve_host", skip_serializing_if = "Option::is_none")] pub preserve_host: Option<bool>, /// StripPath if set, replaces the provided path prefix when forwarding the requested URL to the upstream URL. #[serde(rename = "strip_path", skip_serializing_if = "Option::is_none")] pub strip_path: Option<String>, /// URL is the URL the request will be proxied to. #[serde(rename = "url", skip_serializing_if = "Option::is_none")] pub url: Option<String>, } impl Upstream { pub fn new() -> Upstream { Upstream { preserve_host: None, strip_path: None, url: None, } } }
34.421053
218
0.684251
677c4845cbe41cbdb63f24ee717d1fe441da0d50
4,966
use super::obstacle_template::ObstacleTemplate; use crate::common::position::generic::Position; use crate::h3m::result::*; use crate::h3m::terrain_map::{MapCell, TerrainMap}; pub use obstacle_map_area::*; use obstacle_map_cell::ObstacleMapCell; use rand::{rngs::ThreadRng, Rng}; use sparsity_validator::SparsityValidator; mod areas_layout; mod obstacle_map_area; mod obstacle_map_cell; mod sparsity_validator; impl ObstacleMapCell { fn from_map_cell( index: usize, size: usize, map_cell: &Option<MapCell>, ) -> H3mResult<ObstacleMapCell> { let row = index / size; let column = index % size; Ok(ObstacleMapCell::new( row.try_into()?, column.try_into()?, *map_cell, )) } } pub struct ObstacleMap { size: usize, cells: Vec<ObstacleMapCell>, sparsity_penalty: usize, sparsity_validator: SparsityValidator, } impl ObstacleMap { pub fn new(terrain_map: &TerrainMap) -> H3mResult<ObstacleMap> { let size = terrain_map.size(); let map_len = size * size; let cells_len = terrain_map.cells().len(); if cells_len != map_len { return Err(H3mError::Internal(InternalError::new(format!( "terrain map cells length ({}) not equal squared map size ({}).", cells_len, map_len )))); } let cells = { let mut cells = Vec::new(); for (index, map_cell) in terrain_map.cells().iter().enumerate() { cells.push(ObstacleMapCell::from_map_cell(index, size, map_cell)?); } cells }; Ok(ObstacleMap { size, cells, sparsity_penalty: 0, sparsity_validator: SparsityValidator::new(size), }) } pub fn set_sparsity_penalty(&mut self, sparsity_penalty: usize) { self.sparsity_penalty = sparsity_penalty; } pub fn try_position_obstacle( &self, area: &ObstacleMapArea, template_index: usize, obstacle: &ObstacleTemplate, rng: &mut ThreadRng, ) -> Option<usize> { let apply_sparsity_penalty = |sparsity| { if sparsity >= self.sparsity_penalty { sparsity - self.sparsity_penalty } else { 0 } }; let sparsity = rng.gen_range( apply_sparsity_penalty(obstacle.sparsity().min()) ..=apply_sparsity_penalty(obstacle.sparsity().max()), ); let 
is_valid_delta = |delta_position: Option<Position<usize>>| { if let Some(delta_position) = delta_position { let delta_position_index = delta_position.index(self.size); let delta_cell = &self.cells[delta_position_index]; obstacle.is_valid_terrain(delta_cell.terrain_group()) && obstacle.is_valid_tile(delta_cell.map_cell().unwrap().tile()) && self.sparsity_validator.verify_position( template_index, sparsity, delta_position, ) } else { false } }; let is_valid_index = |index| { let position = Position::from_index(self.size, index); for delta in obstacle.shape() { if !is_valid_delta(position.checked_sub(delta)) { return false; } } true }; for &index in area.indexes().iter().rev() { if is_valid_index(index) { return Some(index); } } None } pub fn add_obstacle( &mut self, position_index: usize, template_index: usize, obstacle: &ObstacleTemplate, ) { let position = Position::from_index(self.size, position_index); for delta in obstacle.shape() { let delta_position = position.sub(delta); let delta_position_index = delta_position.index(self.size); self.cells[delta_position_index].set_template(template_index); self.sparsity_validator.add_position( template_index, obstacle.sparsity().max(), delta_position, ); } } pub fn position(&self, index: usize) -> Position<u8> { self.cells[index].position() } pub fn generalized_terrain_group(&self) -> u16 { self.cells .iter() .fold(0, |result, cell| result | cell.terrain_group()) } pub fn first_position_to_place_obstacle(&self) -> Option<Position<usize>> { for (index, cell) in self.cells.iter().enumerate() { if cell.terrain_group() != 0 { return Some(Position::from_index(self.size, index)); } } None } }
30.09697
84
0.554571
629a87e7fe9e61288ffd6cf34aba046ab25edcc0
22,615
#![cfg_attr(not(feature = "std"), no_std)] // `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256. #![recursion_limit="256"] // Make the WASM binary available. #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); use sp_std::prelude::*; use sp_core::{crypto::KeyTypeId, OpaqueMetadata, U256, H160, H256}; use sp_runtime::{ ApplyExtrinsicResult, generic, create_runtime_str, impl_opaque_keys, MultiSignature, transaction_validity::{TransactionValidity, TransactionSource}, }; use sp_runtime::traits::{ BlakeTwo256, Block as BlockT, IdentityLookup, Verify, IdentifyAccount, NumberFor, Saturating, }; use sp_api::impl_runtime_apis; use sp_consensus_aura::sr25519::AuthorityId as AuraId; use pallet_grandpa::{AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList}; use pallet_grandpa::fg_primitives; use sp_version::RuntimeVersion; #[cfg(feature = "std")] use sp_version::NativeVersion; use pallet_evm::{ EnsureAddressTruncated, HashedAddressMapping,Account as EVMAccount, FeeCalculator, }; use frontier_rpc_primitives::TransactionStatus; use codec::{Encode, Decode}; use pallet_contracts_rpc_runtime_api::ContractExecResult; // A few exports that help ease life for downstream crates. #[cfg(any(feature = "std", test))] pub use sp_runtime::BuildStorage; pub use pallet_timestamp::Call as TimestampCall; pub use pallet_balances::Call as BalancesCall; pub use sp_runtime::{Permill, Perbill}; pub use frame_support::{ construct_runtime, parameter_types, StorageValue, traits::{KeyOwnerProofSystem, Randomness}, weights::{ Weight, IdentityFee, constants::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_PER_SECOND}, }, }; /// Import the template pallet. pub use pallet_bussines; /// An index to a block. pub type BlockNumber = u32; /// Alias to 512-bit hash when used in the context of a transaction signature on the chain. pub type Signature = MultiSignature; /// Some way of identifying an account on the chain. 
We intentionally make it equivalent /// to the public key of our transaction signing scheme. pub type AccountId = <<Signature as Verify>::Signer as IdentifyAccount>::AccountId; /// The type for looking up accounts. We don't expect more than 4 billion of them, but you /// never know... pub type AccountIndex = u32; /// Balance of an account. pub type Balance = u128; /// Index of a transaction in the chain. pub type Index = u32; /// A hash of some data used by the chain. pub type Hash = sp_core::H256; /// Digest item type. pub type DigestItem = generic::DigestItem<Hash>; /// Opaque types. These are used by the CLI to instantiate machinery that don't need to know /// the specifics of the runtime. They can then be made to be agnostic over specific formats /// of data like extrinsics, allowing for them to continue syncing the network through upgrades /// to even the core data structures. pub mod opaque { use super::*; pub use sp_runtime::OpaqueExtrinsic as UncheckedExtrinsic; /// Opaque block header type. pub type Header = generic::Header<BlockNumber, BlakeTwo256>; /// Opaque block type. pub type Block = generic::Block<Header, UncheckedExtrinsic>; /// Opaque block identifier type. pub type BlockId = generic::BlockId<Block>; impl_opaque_keys! { pub struct SessionKeys { pub aura: Aura, pub grandpa: Grandpa, } } } pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("node-template"), impl_name: create_runtime_str!("node-template"), authoring_version: 1, spec_version: 1, impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, }; pub const MILLISECS_PER_BLOCK: u64 = 6000; pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK; // Time is measured by number of blocks. 
pub const MINUTES: BlockNumber = 60_000 / (MILLISECS_PER_BLOCK as BlockNumber); pub const HOURS: BlockNumber = MINUTES * 60; pub const DAYS: BlockNumber = HOURS * 24; pub const MILLICENTS: Balance = 1_000_000_000; pub const CENTS: Balance = 1_000 * MILLICENTS; pub const DOLLARS: Balance = 100 * CENTS; /*** Add This Block ***/ parameter_types! { pub const TombstoneDeposit: Balance = 16 * MILLICENTS; pub const RentByteFee: Balance = 4 * MILLICENTS; pub const RentDepositOffset: Balance = 1000 * MILLICENTS; pub const SurchargeReward: Balance = 150 * MILLICENTS; } impl pallet_contracts::Trait for Runtime { type Time = Timestamp; type Randomness = RandomnessCollectiveFlip; type Currency = Balances; type Event = Event; type DetermineContractAddress = pallet_contracts::SimpleAddressDeterminer<Runtime>; type TrieIdGenerator = pallet_contracts::TrieIdFromParentCounter<Runtime>; type RentPayment = (); type SignedClaimHandicap = pallet_contracts::DefaultSignedClaimHandicap; type TombstoneDeposit = TombstoneDeposit; type StorageSizeOffset = pallet_contracts::DefaultStorageSizeOffset; type RentByteFee = RentByteFee; type RentDepositOffset = RentDepositOffset; type SurchargeReward = SurchargeReward; type MaxDepth = pallet_contracts::DefaultMaxDepth; type MaxValueSize = pallet_contracts::DefaultMaxValueSize; type WeightPrice = pallet_transaction_payment::Module<Self>; } /// The version information used to identify this runtime when compiled natively. #[cfg(feature = "std")] pub fn native_version() -> NativeVersion { NativeVersion { runtime_version: VERSION, can_author_with: Default::default(), } } parameter_types! { pub const BlockHashCount: BlockNumber = 2400; /// We allow for 2 seconds of compute with a 6 second average block time. pub const MaximumBlockWeight: Weight = 2 * WEIGHT_PER_SECOND; pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); /// Assume 10% of weight for average on_initialize calls. 
pub MaximumExtrinsicWeight: Weight = AvailableBlockRatio::get() .saturating_sub(Perbill::from_percent(10)) * MaximumBlockWeight::get(); pub const MaximumBlockLength: u32 = 5 * 1024 * 1024; pub const Version: RuntimeVersion = VERSION; } // Configure FRAME pallets to include in runtime. impl frame_system::Trait for Runtime { /// The basic call filter to use in dispatchable. type BaseCallFilter = (); /// The identifier used to distinguish between accounts. type AccountId = AccountId; /// The aggregated dispatch type that is available for extrinsics. type Call = Call; /// The lookup mechanism to get account ID from whatever is passed in dispatchers. type Lookup = IdentityLookup<AccountId>; /// The index type for storing how many extrinsics an account has signed. type Index = Index; /// The index type for blocks. type BlockNumber = BlockNumber; /// The type for hashing blocks and tries. type Hash = Hash; /// The hashing algorithm used. type Hashing = BlakeTwo256; /// The header type. type Header = generic::Header<BlockNumber, BlakeTwo256>; /// The ubiquitous event type. type Event = Event; /// The ubiquitous origin type. type Origin = Origin; /// Maximum number of block number to block hash mappings to keep (oldest pruned first). type BlockHashCount = BlockHashCount; /// Maximum weight of each block. type MaximumBlockWeight = MaximumBlockWeight; /// The weight of database operations that the runtime can invoke. type DbWeight = RocksDbWeight; /// The weight of the overhead invoked on the block import process, independent of the /// extrinsics included in that block. type BlockExecutionWeight = BlockExecutionWeight; /// The base weight of any extrinsic processed by the runtime, independent of the /// logic of that extrinsic. (Signature verification, nonce increment, fee, etc...) type ExtrinsicBaseWeight = ExtrinsicBaseWeight; /// The maximum weight that a single extrinsic of `Normal` dispatch class can have, /// idependent of the logic of that extrinsics. 
(Roughly max block weight - average on /// initialize cost). type MaximumExtrinsicWeight = MaximumExtrinsicWeight; /// Maximum size of all encoded transactions (in bytes) that are allowed in one block. type MaximumBlockLength = MaximumBlockLength; /// Portion of the block weight that is available to all normal transactions. type AvailableBlockRatio = AvailableBlockRatio; /// Version of the runtime. type Version = Version; /// Converts a module to the index of the module in `construct_runtime!`. /// /// This type is being generated by `construct_runtime!`. type PalletInfo = PalletInfo; /// What to do if a new account is created. type OnNewAccount = (); /// What to do if an account is fully reaped from the system. type OnKilledAccount = (); /// The data to be stored in an account. type AccountData = pallet_balances::AccountData<Balance>; /// Weight information for the extrinsics of this pallet. type SystemWeightInfo = (); } impl pallet_aura::Trait for Runtime { type AuthorityId = AuraId; } impl pallet_grandpa::Trait for Runtime { type Event = Event; type Call = Call; type KeyOwnerProofSystem = (); type KeyOwnerProof = <Self::KeyOwnerProofSystem as KeyOwnerProofSystem<(KeyTypeId, GrandpaId)>>::Proof; type KeyOwnerIdentification = <Self::KeyOwnerProofSystem as KeyOwnerProofSystem<( KeyTypeId, GrandpaId, )>>::IdentificationTuple; type HandleEquivocation = (); type WeightInfo = (); } //EVM parameter_types! { pub const LeetChainId: u64 = 1337; } impl pallet_evm::Trait for Runtime { type FeeCalculator = (); type CallOrigin = EnsureAddressTruncated; type WithdrawOrigin = EnsureAddressTruncated; type AddressMapping = HashedAddressMapping<BlakeTwo256>; type Currency = Balances; type Event = Event; type Precompiles = (); type ChainId = LeetChainId; } parameter_types! { pub const MinimumPeriod: u64 = SLOT_DURATION / 2; } impl pallet_timestamp::Trait for Runtime { /// A timestamp: milliseconds since the unix epoch. 
type Moment = u64; type OnTimestampSet = Aura; type MinimumPeriod = MinimumPeriod; type WeightInfo = (); } parameter_types! { pub const ExistentialDeposit: u128 = 500; pub const MaxLocks: u32 = 50; } impl pallet_balances::Trait for Runtime { type MaxLocks = MaxLocks; /// The type for recording an account's balance. type Balance = Balance; /// The ubiquitous event type. type Event = Event; type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; type WeightInfo = (); } parameter_types! { pub const TransactionByteFee: Balance = 1; } impl pallet_transaction_payment::Trait for Runtime { type Currency = Balances; type OnTransactionPayment = (); type TransactionByteFee = TransactionByteFee; type WeightToFee = IdentityFee<Balance>; type FeeMultiplierUpdate = (); } impl pallet_sudo::Trait for Runtime { type Event = Event; type Call = Call; } impl pallet_ethereum::Trait for Runtime { type Event = Event; // This means we will never record a block author in the Ethereum-formatted blocks type FindAuthor = (); } /// Configure the template pallet in pallets/template. impl pallet_bussines::Trait for Runtime { type Event = Event; } // Create the runtime by composing the FRAME pallets that were previously configured. construct_runtime!( pub enum Runtime where Block = Block, NodeBlock = opaque::Block, UncheckedExtrinsic = UncheckedExtrinsic { System: frame_system::{Module, Call, Config, Storage, Event<T>}, RandomnessCollectiveFlip: pallet_randomness_collective_flip::{Module, Call, Storage}, Timestamp: pallet_timestamp::{Module, Call, Storage, Inherent}, Aura: pallet_aura::{Module, Config<T>, Inherent}, Grandpa: pallet_grandpa::{Module, Call, Storage, Config, Event}, Balances: pallet_balances::{Module, Call, Storage, Config<T>, Event<T>}, TransactionPayment: pallet_transaction_payment::{Module, Storage}, Sudo: pallet_sudo::{Module, Call, Config<T>, Storage, Event<T>}, // Include the custom logic from the template pallet in the runtime. 
AegisBussines: pallet_bussines::{Module, Call, Storage, Event<T>}, Contracts: pallet_contracts::{Module, Call, Config, Storage, Event<T>}, EVM: pallet_evm::{Module, Call, Storage, Config, Event<T>}, Ethereum: pallet_ethereum::{Module, Call, Storage, Event, Config, ValidateUnsigned}, } ); pub struct TransactionConverter; impl frontier_rpc_primitives::ConvertTransaction<UncheckedExtrinsic> for TransactionConverter { fn convert_transaction(&self, transaction: pallet_ethereum::Transaction) -> UncheckedExtrinsic { UncheckedExtrinsic::new_unsigned(pallet_ethereum::Call::<Runtime>::transact(transaction).into()) } } impl frontier_rpc_primitives::ConvertTransaction<opaque::UncheckedExtrinsic> for TransactionConverter { fn convert_transaction(&self, transaction: pallet_ethereum::Transaction) -> opaque::UncheckedExtrinsic { let extrinsic = UncheckedExtrinsic::new_unsigned(pallet_ethereum::Call::<Runtime>::transact(transaction).into()); let encoded = extrinsic.encode(); opaque::UncheckedExtrinsic::decode(&mut &encoded[..]).expect("Encoded extrinsic is always valid") } } /// The address format for describing accounts. pub type Address = AccountId; /// Block header type as expected by this runtime. pub type Header = generic::Header<BlockNumber, BlakeTwo256>; /// Block type as expected by this runtime. pub type Block = generic::Block<Header, UncheckedExtrinsic>; /// A Block signed with a Justification pub type SignedBlock = generic::SignedBlock<Block>; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId<Block>; /// The SignedExtension to the basic transaction logic. pub type SignedExtra = ( frame_system::CheckSpecVersion<Runtime>, frame_system::CheckTxVersion<Runtime>, frame_system::CheckGenesis<Runtime>, frame_system::CheckEra<Runtime>, frame_system::CheckNonce<Runtime>, frame_system::CheckWeight<Runtime>, pallet_transaction_payment::ChargeTransactionPayment<Runtime> ); /// Unchecked extrinsic type as expected by this runtime. 
pub type UncheckedExtrinsic = generic::UncheckedExtrinsic<Address, Call, Signature, SignedExtra>; /// Extrinsic type that has already been checked. pub type CheckedExtrinsic = generic::CheckedExtrinsic<AccountId, Call, SignedExtra>; /// Executive: handles dispatch to the various modules. pub type Executive = frame_executive::Executive< Runtime, Block, frame_system::ChainContext<Runtime>, Runtime, AllModules, >; impl_runtime_apis! { impl frontier_rpc_primitives::EthereumRuntimeRPCApi<Block> for Runtime { fn chain_id() -> u64 { <Runtime as pallet_evm::Trait>::ChainId::get() } fn account_basic(address: H160) -> EVMAccount { EVM::account_basic(&address) } fn gas_price() -> U256 { <Runtime as pallet_evm::Trait>::FeeCalculator::min_gas_price() } fn account_code_at(address: H160) -> Vec<u8> { EVM::account_codes(address) } fn author() -> H160 { Ethereum::find_author() } fn storage_at(address: H160, index: U256) -> H256 { let mut tmp = [0u8; 32]; index.to_big_endian(&mut tmp); EVM::account_storages(address, H256::from_slice(&tmp[..])) } fn call( from: H160, data: Vec<u8>, value: U256, gas_limit: U256, gas_price: Option<U256>, nonce: Option<U256>, action: pallet_ethereum::TransactionAction, ) -> Result<(Vec<u8>, U256), sp_runtime::DispatchError> { match action { pallet_ethereum::TransactionAction::Call(to) => EVM::execute_call( from, to, data, value, gas_limit.low_u32(), gas_price.unwrap_or_default(), nonce, false, ) .map(|(_, ret, gas, _)| (ret, gas)) .map_err(|err| err.into()), pallet_ethereum::TransactionAction::Create => EVM::execute_create( from, data, value, gas_limit.low_u32(), gas_price.unwrap_or_default(), nonce, false, ) .map(|(_, _, gas, _)| (vec![], gas)) .map_err(|err| err.into()), } } fn current_transaction_statuses() -> Option<Vec<TransactionStatus>> { Ethereum::current_transaction_statuses() } fn current_block() -> Option<pallet_ethereum::Block> { Ethereum::current_block() } fn current_receipts() -> Option<Vec<pallet_ethereum::Receipt>> { 
Ethereum::current_receipts() } fn current_all() -> ( Option<pallet_ethereum::Block>, Option<Vec<pallet_ethereum::Receipt>>, Option<Vec<TransactionStatus>> ) { ( Ethereum::current_block(), Ethereum::current_receipts(), Ethereum::current_transaction_statuses() ) } } impl sp_api::Core<Block> for Runtime { fn version() -> RuntimeVersion { VERSION } fn execute_block(block: Block) { Executive::execute_block(block) } fn initialize_block(header: &<Block as BlockT>::Header) { Executive::initialize_block(header) } } impl pallet_contracts_rpc_runtime_api::ContractsApi<Block, AccountId, Balance, BlockNumber> for Runtime { fn call( origin: AccountId, dest: AccountId, value: Balance, gas_limit: u64, input_data: Vec<u8>, ) -> ContractExecResult { let (exec_result, gas_consumed) = Contracts::bare_call(origin, dest.into(), value, gas_limit, input_data); match exec_result { Ok(v) => ContractExecResult::Success { flags: v.flags.bits(), data: v.data, gas_consumed: gas_consumed, }, Err(_) => ContractExecResult::Error, } } fn get_storage( address: AccountId, key: [u8; 32], ) -> pallet_contracts_primitives::GetStorageResult { Contracts::get_storage(address, key) } fn rent_projection( address: AccountId, ) -> pallet_contracts_primitives::RentProjectionResult<BlockNumber> { Contracts::rent_projection(address) } } impl sp_api::Metadata<Block> for Runtime { fn metadata() -> OpaqueMetadata { Runtime::metadata().into() } } impl sp_block_builder::BlockBuilder<Block> for Runtime { fn apply_extrinsic(extrinsic: <Block as BlockT>::Extrinsic) -> ApplyExtrinsicResult { Executive::apply_extrinsic(extrinsic) } fn finalize_block() -> <Block as BlockT>::Header { Executive::finalize_block() } fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<<Block as BlockT>::Extrinsic> { data.create_extrinsics() } fn check_inherents( block: Block, data: sp_inherents::InherentData, ) -> sp_inherents::CheckInherentsResult { data.check_extrinsics(&block) } fn random_seed() -> <Block as BlockT>::Hash { 
RandomnessCollectiveFlip::random_seed() } } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue<Block> for Runtime { fn validate_transaction( source: TransactionSource, tx: <Block as BlockT>::Extrinsic, ) -> TransactionValidity { Executive::validate_transaction(source, tx) } } impl sp_offchain::OffchainWorkerApi<Block> for Runtime { fn offchain_worker(header: &<Block as BlockT>::Header) { Executive::offchain_worker(header) } } impl sp_consensus_aura::AuraApi<Block, AuraId> for Runtime { fn slot_duration() -> u64 { Aura::slot_duration() } fn authorities() -> Vec<AuraId> { Aura::authorities() } } impl sp_session::SessionKeys<Block> for Runtime { fn generate_session_keys(seed: Option<Vec<u8>>) -> Vec<u8> { opaque::SessionKeys::generate(seed) } fn decode_session_keys( encoded: Vec<u8>, ) -> Option<Vec<(Vec<u8>, KeyTypeId)>> { opaque::SessionKeys::decode_into_raw_public_keys(&encoded) } } impl fg_primitives::GrandpaApi<Block> for Runtime { fn grandpa_authorities() -> GrandpaAuthorityList { Grandpa::grandpa_authorities() } fn submit_report_equivocation_unsigned_extrinsic( _equivocation_proof: fg_primitives::EquivocationProof< <Block as BlockT>::Hash, NumberFor<Block>, >, _key_owner_proof: fg_primitives::OpaqueKeyOwnershipProof, ) -> Option<()> { None } fn generate_key_ownership_proof( _set_id: fg_primitives::SetId, _authority_id: GrandpaId, ) -> Option<fg_primitives::OpaqueKeyOwnershipProof> { // NOTE: this is the only implementation possible since we've // defined our key owner proof type as a bottom type (i.e. a type // with no values). 
None } } impl frame_system_rpc_runtime_api::AccountNonceApi<Block, AccountId, Index> for Runtime { fn account_nonce(account: AccountId) -> Index { System::account_nonce(account) } } impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi<Block, Balance> for Runtime { fn query_info( uxt: <Block as BlockT>::Extrinsic, len: u32, ) -> pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo<Balance> { TransactionPayment::query_info(uxt, len) } } #[cfg(feature = "runtime-benchmarks")] impl frame_benchmarking::Benchmark<Block> for Runtime { fn dispatch_benchmark( config: frame_benchmarking::BenchmarkConfig ) -> Result<Vec<frame_benchmarking::BenchmarkBatch>, sp_runtime::RuntimeString> { use frame_benchmarking::{Benchmarking, BenchmarkBatch, add_benchmark, TrackedStorageKey}; use frame_system_benchmarking::Module as SystemBench; impl frame_system_benchmarking::Trait for Runtime {} let whitelist: Vec<TrackedStorageKey> = vec![ // Block Number hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), // Total Issuance hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(), // Execution Phase hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), // Event Count hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), // System Events hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(), ]; let mut batches = Vec::<BenchmarkBatch>::new(); let params = (&config, &whitelist); add_benchmark!(params, batches, frame_system, SystemBench::<Runtime>); add_benchmark!(params, batches, pallet_balances, Balances); add_benchmark!(params, batches, pallet_timestamp, Timestamp); if batches.is_empty() { return Err("Benchmark not found for this pallet.".into()) } Ok(batches) } } }
32.87064
121
0.719699
75337ddbba4a671a22e98c877ce685fba120a0ab
916
use std::{ env, fs::File, io::{ErrorKind, Read}, }; use polyglot_tokenizer::Tokenizer; fn main() { if let Some(file_name) = env::args().skip(1).next() { match File::open(&file_name) { Ok(mut file) => { let mut content = String::new(); match file.read_to_string(&mut content) { Ok(_) => Tokenizer::new(&content[..]).tokens().for_each(|token| { println!("{:?}", token); }), Err(e) => println!("Error reading file: {}", e), } } Err(e) => match e.kind() { ErrorKind::NotFound => { println!("File {} not found", file_name); } _ => println!("Error opening file: {}", e), }, } } else { println!("Filename not provided"); } }
28.625
85
0.415939
1abc3e78ca2d56d9837f801366b8d8a58b14da9b
1,045
/// Named constant for `purse`. pub const ARG_PURSE: &str = "purse"; /// Named constant for `amount`. pub const ARG_AMOUNT: &str = "amount"; /// Named constant for `source`. pub const ARG_ACCOUNT: &str = "account"; /// Named constant for method `get_payment_purse`. pub const METHOD_GET_PAYMENT_PURSE: &str = "get_payment_purse"; /// Named constant for method `set_refund_purse`. pub const METHOD_SET_REFUND_PURSE: &str = "set_refund_purse"; /// Named constant for method `get_refund_purse`. pub const METHOD_GET_REFUND_PURSE: &str = "get_refund_purse"; /// Named constant for method `finalize_payment`. pub const METHOD_FINALIZE_PAYMENT: &str = "finalize_payment"; /// Storage for proof of stake payment purse. pub const POS_PAYMENT_PURSE: &str = "pos_payment_purse"; /// Storage for proof of stake rewards purse. pub const POS_REWARDS_PURSE: &str = "pos_rewards_purse"; /// Storage for proof of stake contract hash. pub const HASH_KEY: &str = "pos_hash"; /// Storage for proof of stake access key. pub const ACCESS_KEY: &str = "pos_access";
41.8
63
0.748325
909102c08e5f80270633464e2938a3de4c9b26aa
2,399
use std::error::Error; use std::process; use std::sync::Arc; use env_logger::Builder; use log::{debug, error, info, trace, LevelFilter}; use signal_hook::{iterator::Signals, SIGHUP}; use structopt::StructOpt; use tokio::net::TcpListener; use tokio::sync::watch; use bgpd_rs::cli; use bgpd_rs::config; use bgpd_rs::handler::Server; #[tokio::main] async fn main() -> Result<(), Box<dyn Error>> { let args = cli::Args::from_args(); let (bgpd_level, other_level) = match args.verbose { 0 => (LevelFilter::Info, LevelFilter::Warn), 1 => (LevelFilter::Debug, LevelFilter::Warn), 2 => (LevelFilter::Trace, LevelFilter::Warn), _ => (LevelFilter::Trace, LevelFilter::Trace), }; Builder::new() .filter(Some("bgpd"), bgpd_level) .filter(None, other_level) .init(); info!("Logging at levels {}/{}", bgpd_level, other_level); match args.cmd { cli::Command::Run(opts) => { let config = Arc::new(config::from_file(&opts.config_path)?); debug!("Found {} peers in {}", config.peers.len(), opts.config_path); trace!("Using config: {:#?}", &config); let (config_tx, config_rx) = watch::channel(config.clone()); config_tx.send(config.clone())?; let bgp_listener = TcpListener::bind(&config.bgp_socket).await?; let mut bgp_server = Server::new(config.clone(), bgp_listener, config_rx)?; // Setup JSON RPC Server let _api_handle = bgp_server .serve_rpc_api(args.api.unwrap_or(config.api_socket)) .await?; let signals = Signals::new(&[SIGHUP])?; std::thread::spawn(move || { for sig in signals.forever() { info!("Received {}, reloading config", sig); config::from_file(&opts.config_path) .map(|new_config| config_tx.send(Arc::new(new_config))) .map_err(|err| error!("Error reloading config: {}", err)) .ok(); } }); // Start BGP Daemon info!( "Starting BGPd [pid {}] on {}...", process::id(), config.bgp_socket ); bgp_server.run().await?; } _ => cli::query_bgpd(&args).await, } Ok(()) }
34.271429
87
0.538975
feb6122d8ffda81cde098e713bb4a1a13ecc24f2
3,785
use std::convert::TryFrom; use casper_types::{AccessRights, URef}; use crate::engine_server::{ mappings::{self, ParsingError}, state::{Key_URef, Key_URef_AccessRights}, }; impl From<AccessRights> for Key_URef_AccessRights { fn from(access_rights: AccessRights) -> Self { match access_rights { AccessRights::NONE => Key_URef_AccessRights::NONE, AccessRights::READ => Key_URef_AccessRights::READ, AccessRights::WRITE => Key_URef_AccessRights::WRITE, AccessRights::ADD => Key_URef_AccessRights::ADD, AccessRights::READ_ADD => Key_URef_AccessRights::READ_ADD, AccessRights::READ_WRITE => Key_URef_AccessRights::READ_WRITE, AccessRights::ADD_WRITE => Key_URef_AccessRights::ADD_WRITE, AccessRights::READ_ADD_WRITE => Key_URef_AccessRights::READ_ADD_WRITE, _ => Key_URef_AccessRights::NONE, } } } impl From<URef> for Key_URef { fn from(uref: URef) -> Self { let mut pb_uref = Key_URef::new(); pb_uref.set_uref(uref.addr().to_vec()); let access_rights = uref.access_rights(); pb_uref.set_access_rights(access_rights.into()); pb_uref } } impl TryFrom<Key_URef> for URef { type Error = ParsingError; fn try_from(pb_uref: Key_URef) -> Result<Self, Self::Error> { let addr = mappings::vec_to_array(pb_uref.uref, "Protobuf URef addr")?; let access_rights = match pb_uref.access_rights { Key_URef_AccessRights::NONE => AccessRights::NONE, Key_URef_AccessRights::READ => AccessRights::READ, Key_URef_AccessRights::WRITE => AccessRights::WRITE, Key_URef_AccessRights::ADD => AccessRights::ADD, Key_URef_AccessRights::READ_ADD => AccessRights::READ_ADD, Key_URef_AccessRights::READ_WRITE => AccessRights::READ_WRITE, Key_URef_AccessRights::ADD_WRITE => AccessRights::ADD_WRITE, Key_URef_AccessRights::READ_ADD_WRITE => AccessRights::READ_ADD_WRITE, }; let uref = URef::new(addr, access_rights); Ok(uref) } } #[cfg(test)] mod tests { use casper_types::UREF_ADDR_LENGTH; use super::*; use crate::engine_server::mappings::test_utils; #[test] fn round_trip() { for access_rights in &[ AccessRights::READ, 
AccessRights::WRITE, AccessRights::ADD, AccessRights::READ_ADD, AccessRights::READ_WRITE, AccessRights::ADD_WRITE, AccessRights::READ_ADD_WRITE, ] { let uref = URef::new(rand::random(), *access_rights); test_utils::protobuf_round_trip::<URef, Key_URef>(uref); } let uref = URef::new(rand::random(), AccessRights::READ).remove_access_rights(); test_utils::protobuf_round_trip::<URef, Key_URef>(uref); } #[test] fn should_fail_to_parse() { // Check we handle invalid Protobuf URefs correctly. let empty_pb_uref = Key_URef::new(); assert!(URef::try_from(empty_pb_uref).is_err()); let mut pb_uref_invalid_addr = Key_URef::new(); pb_uref_invalid_addr.set_uref(vec![1; UREF_ADDR_LENGTH - 1]); assert!(URef::try_from(pb_uref_invalid_addr).is_err()); // Check Protobuf URef with `AccessRights::UNKNOWN` parses to a URef with no access rights. let addr: [u8; UREF_ADDR_LENGTH] = rand::random(); let mut pb_uref = Key_URef::new(); pb_uref.set_uref(addr.to_vec()); pb_uref.set_access_rights(Key_URef_AccessRights::NONE); let parsed_uref = URef::try_from(pb_uref).unwrap(); assert_eq!(addr, parsed_uref.addr()); assert!(parsed_uref.access_rights().is_none()); } }
36.047619
99
0.64148
33027c3e6f3d1aa88811d6da498c93032f97ee73
6,667
mod reader; use std::path::PathBuf; #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] #[repr(u8)] pub enum Encoding { Utf16 = 0, Utf8 = 1, } #[derive(Clone, Debug)] pub struct Header { pub version: f32, pub encoding: Encoding, pub extended_uv: u8, pub vertex_index_size: u8, pub texture_index_size: u8, pub material_index_size: u8, pub bone_index_size: u8, pub morph_index_size: u8, pub rigid_index_size: u8, } #[derive(Clone, Debug)] pub struct ModelInfo { pub name: String, pub name_en: String, pub comment: String, pub comment_en: String, } #[derive(Clone, Debug)] pub struct Bdef1 { pub bone: Option<usize>, } #[derive(Clone, Debug)] pub struct Bdef2 { pub bones: [Option<usize>; 2], pub weight: f32, } #[derive(Clone, Debug)] pub struct Bdef4 { pub bones: [Option<usize>; 4], pub weights: [f32; 4], } #[derive(Clone, Debug)] pub struct Sdef { pub bones: [Option<usize>; 2], pub weight: f32, pub c: [f32; 3], pub r0: [f32; 3], pub r1: [f32; 3], } #[derive(Clone, Debug)] pub enum Weight { Bdef1(Bdef1), Bdef2(Bdef2), Bdef4(Bdef4), Sdef(Sdef), } #[derive(Clone, Debug)] pub struct Vertex { pub position: [f32; 3], pub normal: [f32; 3], pub uv: [f32; 2], pub extended_uv: Vec<[f32; 4]>, pub weight: Weight, pub edge_ratio: f32, } #[derive(Clone, Debug)] pub enum SphereMode { None, Mul, Add, SubTexture, } #[derive(Clone, Debug)] pub enum Toon { Texture(Option<usize>), Shared(u32), } #[derive(Clone, Debug)] pub struct Material { pub name: String, pub name_en: String, pub diffuse: [f32; 4], pub specular: [f32; 3], pub specular_power: f32, pub ambient: [f32; 3], pub both: bool, pub ground_shadow: bool, pub self_shadow_map: bool, pub self_shadow: bool, pub edge: bool, pub edge_color: [f32; 4], pub edge_size: f32, pub texture: Option<usize>, pub sphere: Option<usize>, pub sphere_mode: SphereMode, pub toon: Toon, pub memo: String, pub index_count: u32, } #[derive(Clone, Debug)] pub enum ConnectedTo { Offset([f32; 3]), Bone(Option<usize>), } #[derive(Clone, Debug)] pub struct AngleLimit { 
pub lower: [f32; 3], pub upper: [f32; 3], } #[derive(Clone, Debug)] pub struct IkLink { pub bone: Option<usize>, pub limits: Option<AngleLimit>, } #[derive(Clone, Debug)] pub struct Ik { pub bone: Option<usize>, pub loop_count: u32, pub angle: f32, pub links: Vec<IkLink>, } #[derive(Clone, Debug)] pub struct Addition { pub rotation: bool, pub translation: bool, pub local: bool, pub bone: Option<usize>, pub ratio: f32, } #[derive(Clone, Debug)] pub struct LocalPole { pub x: [f32; 3], pub z: [f32; 3], } #[derive(Clone, Debug)] pub struct Bone { pub name: String, pub name_en: String, pub position: [f32; 3], pub parent: Option<usize>, pub deform_hierarchy: i32, pub connected_to: ConnectedTo, pub rotatable: bool, pub translatable: bool, pub visibility: bool, pub operable: bool, pub ik: Option<Ik>, pub addition: Option<Addition>, pub after_physics: bool, pub fixed_pole: Option<[f32; 3]>, pub local_pole: Option<LocalPole>, pub external_parent: Option<usize>, } #[derive(Clone, Debug)] pub enum Panel { Reserved, Eyebrow, Eye, Mouth, Other, } pub mod morph { #[derive(Clone, Debug)] pub struct Vertex { pub vertex: Option<usize>, pub offset: [f32; 3], } #[derive(Clone, Debug)] pub struct Uv { pub vertex: Option<usize>, pub offset: [f32; 4], } #[derive(Clone, Debug)] pub struct Bone { pub bone: Option<usize>, pub offset: [f32; 3], pub rotation: [f32; 4], } #[derive(Clone, PartialEq, Eq, Debug)] pub enum MaterialOp { Mul, Add, } #[derive(Clone, Debug)] pub struct Material { pub material: Option<usize>, pub op: MaterialOp, pub diffuse: [f32; 4], pub specular: [f32; 3], pub specular_power: f32, pub ambient: [f32; 3], pub edge_color: [f32; 4], pub edge_size: f32, pub texture: [f32; 4], pub sphere: [f32; 4], pub toon: [f32; 4], } #[derive(Clone, Debug)] pub struct Group { pub morph: Option<usize>, pub ratio: f32, } #[derive(Clone, Debug)] pub enum Kind { Vertex(Vec<Vertex>), Uv(Vec<Uv>), Bone(Vec<Bone>), Maerial(Vec<Material>), Group(Vec<Group>), ExtendedUv(usize, Vec<Uv>), } } 
#[derive(Clone, Debug)] pub struct Morph { pub name: String, pub name_en: String, pub panel: Panel, pub kind: morph::Kind, } #[derive(Clone, Debug)] pub enum DisplayElement { Bone(Option<usize>), Morph(Option<usize>), } #[derive(Clone, Debug)] pub struct DisplayGroup { pub name: String, pub name_en: String, pub special: bool, pub elements: Vec<DisplayElement>, } pub mod rigid { #[derive(Clone, Debug)] pub enum Shape { Sphere, Box, Capsule, } #[derive(Clone, Debug)] pub enum Method { Static, Dynamic, DynamicWithBone, } } #[derive(Clone, Debug)] pub struct Rigid { pub name: String, pub name_en: String, pub bone: Option<usize>, pub group: u8, pub non_collision_groups: u16, pub shape: rigid::Shape, pub size: [f32; 3], pub position: [f32; 3], pub rotation: [f32; 3], pub mass: f32, pub dump_translation: f32, pub dump_rotation: f32, pub repulsive: f32, pub friction: f32, pub method: rigid::Method, } #[derive(Clone, Debug)] pub struct Joint { pub name: String, pub name_en: String, pub rigids: [Option<usize>; 2], pub position: [f32; 3], pub rotation: [f32; 3], pub limit_translation: AngleLimit, pub limit_rotation: AngleLimit, pub spring_translation: [f32; 3], pub spring_rotation: [f32; 3], } #[derive(Clone, Debug)] pub struct Pmx { pub header: Header, pub model_info: ModelInfo, pub vertices: Vec<Vertex>, pub faces: Vec<u32>, pub textures: Vec<PathBuf>, pub materials: Vec<Material>, pub bones: Vec<Bone>, pub morphs: Vec<Morph>, pub display_groups: Vec<DisplayGroup>, pub rigids: Vec<Rigid>, pub joints: Vec<Joint>, } #[inline] pub fn read<T: std::io::Read>(reader: T) -> Result<Pmx, reader::Error> { let mut reader = reader::Reader::new(reader); reader.read() }
20.081325
72
0.59592
48477ea25e0141282eb19fc395be3c54b2e156b5
32,654
use std::collections::{BTreeSet, HashMap}; use std::fs::{self, File}; use std::io::prelude::*; use std::io::SeekFrom; use std::path::{Path, PathBuf}; use std::rc::Rc; use std::sync::Arc; use crate::core::compiler::{BuildConfig, CompileMode, DefaultExecutor, Executor}; use crate::core::resolver::CliFeatures; use crate::core::{Feature, Shell, Verbosity, Workspace}; use crate::core::{Package, PackageId, PackageSet, Resolve, Source, SourceId}; use crate::sources::PathSource; use crate::util::errors::CargoResult; use crate::util::toml::TomlManifest; use crate::util::{self, restricted_names, Config, FileLock}; use crate::{drop_println, ops}; use anyhow::Context as _; use cargo_util::paths; use flate2::read::GzDecoder; use flate2::{Compression, GzBuilder}; use log::debug; use serde::Serialize; use tar::{Archive, Builder, EntryType, Header, HeaderMode}; pub struct PackageOpts<'cfg> { pub config: &'cfg Config, pub list: bool, pub check_metadata: bool, pub allow_dirty: bool, pub verify: bool, pub jobs: Option<u32>, pub to_package: ops::Packages, pub targets: Vec<String>, pub cli_features: CliFeatures, } const VCS_INFO_FILE: &str = ".cargo_vcs_info.json"; struct ArchiveFile { /// The relative path in the archive (not including the top-level package /// name directory). rel_path: PathBuf, /// String variant of `rel_path`, for convenience. rel_str: String, /// The contents to add to the archive. contents: FileContents, } enum FileContents { /// Absolute path to the file on disk to add to the archive. OnDisk(PathBuf), /// Generates a file. Generated(GeneratedFile), } enum GeneratedFile { /// Generates `Cargo.toml` by rewriting the original. Manifest, /// Generates `Cargo.lock` in some cases (like if there is a binary). Lockfile, /// Adds a `.cargo_vcs_info.json` file if in a (clean) git repo. VcsInfo(VcsInfo), } #[derive(Serialize)] struct VcsInfo { git: GitVcsInfo, /// Path to the package within repo (empty string if root). 
/ not \ path_in_vcs: String, } #[derive(Serialize)] struct GitVcsInfo { sha1: String, } pub fn package_one( ws: &Workspace<'_>, pkg: &Package, opts: &PackageOpts<'_>, ) -> CargoResult<Option<FileLock>> { let config = ws.config(); let mut src = PathSource::new(pkg.root(), pkg.package_id().source_id(), config); src.update()?; if opts.check_metadata { check_metadata(pkg, config)?; } if !pkg.manifest().exclude().is_empty() && !pkg.manifest().include().is_empty() { config.shell().warn( "both package.include and package.exclude are specified; \ the exclude list will be ignored", )?; } let src_files = src.list_files(pkg)?; // Check (git) repository state, getting the current commit hash if not // dirty. let vcs_info = if !opts.allow_dirty { // This will error if a dirty repo is found. check_repo_state(pkg, &src_files, config)? } else { None }; let ar_files = build_ar_list(ws, pkg, src_files, vcs_info)?; if opts.list { for ar_file in ar_files { drop_println!(config, "{}", ar_file.rel_str); } return Ok(None); } // Check that the package dependencies are safe to deploy. for dep in pkg.dependencies() { super::check_dep_has_version(dep, false)?; } let filename = format!("{}-{}.crate", pkg.name(), pkg.version()); let dir = ws.target_dir().join("package"); let mut dst = { let tmp = format!(".{}", filename); dir.open_rw(&tmp, config, "package scratch space")? }; // Package up and test a temporary tarball and only move it to the final // location if it actually passes all our tests. Any previously existing // tarball can be assumed as corrupt or invalid, so we just blow it away if // it exists. config .shell() .status("Packaging", pkg.package_id().to_string())?; dst.file().set_len(0)?; tar(ws, pkg, ar_files, dst.file(), &filename) .with_context(|| "failed to prepare local package for uploading")?; if opts.verify { dst.seek(SeekFrom::Start(0))?; run_verify(ws, pkg, &dst, opts).with_context(|| "failed to verify package tarball")? 
} dst.seek(SeekFrom::Start(0))?; let src_path = dst.path(); let dst_path = dst.parent().join(&filename); fs::rename(&src_path, &dst_path) .with_context(|| "failed to move temporary tarball into final location")?; return Ok(Some(dst)); } pub fn package(ws: &Workspace<'_>, opts: &PackageOpts<'_>) -> CargoResult<Option<Vec<FileLock>>> { let pkgs = ws.members_with_features( &opts.to_package.to_package_id_specs(ws)?, &opts.cli_features, )?; let mut dsts = Vec::with_capacity(pkgs.len()); if ws.root().join("Cargo.lock").exists() { // Make sure the Cargo.lock is up-to-date and valid. let _ = ops::resolve_ws(ws)?; // If Cargo.lock does not exist, it will be generated by `build_lock` // below, and will be validated during the verification step. } for (pkg, cli_features) in pkgs { let result = package_one( ws, pkg, &PackageOpts { config: opts.config, list: opts.list, check_metadata: opts.check_metadata, allow_dirty: opts.allow_dirty, verify: opts.verify, jobs: opts.jobs, to_package: ops::Packages::Default, targets: opts.targets.clone(), cli_features: cli_features, }, )?; if !opts.list { dsts.push(result.unwrap()); } } if opts.list { // We're just listing, so there's no file output Ok(None) } else { Ok(Some(dsts)) } } /// Builds list of files to archive. fn build_ar_list( ws: &Workspace<'_>, pkg: &Package, src_files: Vec<PathBuf>, vcs_info: Option<VcsInfo>, ) -> CargoResult<Vec<ArchiveFile>> { let mut result = Vec::new(); let root = pkg.root(); for src_file in src_files { let rel_path = src_file.strip_prefix(&root)?.to_path_buf(); check_filename(&rel_path, &mut ws.config().shell())?; let rel_str = rel_path .to_str() .ok_or_else(|| { anyhow::format_err!("non-utf8 path in source directory: {}", rel_path.display()) })? 
.to_string(); match rel_str.as_ref() { "Cargo.toml" => { result.push(ArchiveFile { rel_path: PathBuf::from("Cargo.toml.orig"), rel_str: "Cargo.toml.orig".to_string(), contents: FileContents::OnDisk(src_file), }); result.push(ArchiveFile { rel_path, rel_str, contents: FileContents::Generated(GeneratedFile::Manifest), }); } "Cargo.lock" => continue, VCS_INFO_FILE => anyhow::bail!( "invalid inclusion of reserved file name \ {} in package source", VCS_INFO_FILE ), _ => { result.push(ArchiveFile { rel_path, rel_str, contents: FileContents::OnDisk(src_file), }); } } } if pkg.include_lockfile() { result.push(ArchiveFile { rel_path: PathBuf::from("Cargo.lock"), rel_str: "Cargo.lock".to_string(), contents: FileContents::Generated(GeneratedFile::Lockfile), }); } if let Some(vcs_info) = vcs_info { result.push(ArchiveFile { rel_path: PathBuf::from(VCS_INFO_FILE), rel_str: VCS_INFO_FILE.to_string(), contents: FileContents::Generated(GeneratedFile::VcsInfo(vcs_info)), }); } if let Some(license_file) = &pkg.manifest().metadata().license_file { let license_path = Path::new(license_file); let abs_license_path = paths::normalize_path(&pkg.root().join(license_path)); if abs_license_path.exists() { match abs_license_path.strip_prefix(&pkg.root()) { Ok(rel_license_path) => { if !result.iter().any(|ar| ar.rel_path == rel_license_path) { result.push(ArchiveFile { rel_path: rel_license_path.to_path_buf(), rel_str: rel_license_path .to_str() .expect("everything was utf8") .to_string(), contents: FileContents::OnDisk(abs_license_path), }); } } Err(_) => { // The license exists somewhere outside of the package. let license_name = license_path.file_name().unwrap(); if result .iter() .any(|ar| ar.rel_path.file_name().unwrap() == license_name) { ws.config().shell().warn(&format!( "license-file `{}` appears to be a path outside of the package, \ but there is already a file named `{}` in the root of the package. \ The archived crate will contain the copy in the root of the package. 
\ Update the license-file to point to the path relative \ to the root of the package to remove this warning.", license_file, license_name.to_str().unwrap() ))?; } else { result.push(ArchiveFile { rel_path: PathBuf::from(license_name), rel_str: license_name.to_str().unwrap().to_string(), contents: FileContents::OnDisk(abs_license_path), }); } } } } else { let rel_msg = if license_path.is_absolute() { "".to_string() } else { format!(" (relative to `{}`)", pkg.root().display()) }; ws.config().shell().warn(&format!( "license-file `{}` does not appear to exist{}.\n\ Please update the license-file setting in the manifest at `{}`\n\ This may become a hard error in the future.", license_path.display(), rel_msg, pkg.manifest_path().display() ))?; } } result.sort_unstable_by(|a, b| a.rel_path.cmp(&b.rel_path)); Ok(result) } /// Construct `Cargo.lock` for the package to be published. fn build_lock(ws: &Workspace<'_>, orig_pkg: &Package) -> CargoResult<String> { let config = ws.config(); let orig_resolve = ops::load_pkg_lockfile(ws)?; // Convert Package -> TomlManifest -> Manifest -> Package let toml_manifest = Rc::new( orig_pkg .manifest() .original() .prepare_for_publish(ws, orig_pkg.root())?, ); let package_root = orig_pkg.root(); let source_id = orig_pkg.package_id().source_id(); let (manifest, _nested_paths) = TomlManifest::to_real_manifest(&toml_manifest, source_id, package_root, config)?; let new_pkg = Package::new(manifest, orig_pkg.manifest_path()); // Regenerate Cargo.lock using the old one as a guide. let tmp_ws = Workspace::ephemeral(new_pkg, ws.config(), None, true)?; let (pkg_set, mut new_resolve) = ops::resolve_ws(&tmp_ws)?; if let Some(orig_resolve) = orig_resolve { compare_resolve(config, tmp_ws.current()?, &orig_resolve, &new_resolve)?; } check_yanked(config, &pkg_set, &new_resolve)?; ops::resolve_to_string(&tmp_ws, &mut new_resolve) } // Checks that the package has some piece of metadata that a human can // use to tell what the package is about. 
fn check_metadata(pkg: &Package, config: &Config) -> CargoResult<()> { let md = pkg.manifest().metadata(); let mut missing = vec![]; macro_rules! lacking { ($( $($field: ident)||* ),*) => {{ $( if $(md.$field.as_ref().map_or(true, |s| s.is_empty()))&&* { $(missing.push(stringify!($field).replace("_", "-"));)* } )* }} } lacking!( description, license || license_file, documentation || homepage || repository ); if !missing.is_empty() { let mut things = missing[..missing.len() - 1].join(", "); // `things` will be empty if and only if its length is 1 (i.e., the only case // to have no `or`). if !things.is_empty() { things.push_str(" or "); } things.push_str(missing.last().unwrap()); config.shell().warn(&format!( "manifest has no {things}.\n\ See https://doc.rust-lang.org/cargo/reference/manifest.html#package-metadata for more info.", things = things ))? } Ok(()) } /// Checks if the package source is in a *git* DVCS repository. If *git*, and /// the source is *dirty* (e.g., has uncommitted changes) then `bail!` with an /// informative message. Otherwise return the sha1 hash of the current *HEAD* /// commit, or `None` if no repo is found. 
fn check_repo_state( p: &Package, src_files: &[PathBuf], config: &Config, ) -> CargoResult<Option<VcsInfo>> { if let Ok(repo) = git2::Repository::discover(p.root()) { if let Some(workdir) = repo.workdir() { debug!("found a git repo at {:?}", workdir); let path = p.manifest_path(); let path = path.strip_prefix(workdir).unwrap_or(path); if let Ok(status) = repo.status_file(path) { if (status & git2::Status::IGNORED).is_empty() { debug!( "found (git) Cargo.toml at {:?} in workdir {:?}", path, workdir ); let path_in_vcs = path .parent() .and_then(|p| p.to_str()) .unwrap_or("") .replace("\\", "/"); return Ok(Some(VcsInfo { git: git(p, src_files, &repo)?, path_in_vcs, })); } } config.shell().verbose(|shell| { shell.warn(format!( "No (git) Cargo.toml found at `{}` in workdir `{}`", path.display(), workdir.display() )) })?; } } else { config.shell().verbose(|shell| { shell.warn(format!("No (git) VCS found for `{}`", p.root().display())) })?; } // No VCS with a checked in `Cargo.toml` found, so we don't know if the // directory is dirty or not, thus we have to assume that it's clean. return Ok(None); fn git(p: &Package, src_files: &[PathBuf], repo: &git2::Repository) -> CargoResult<GitVcsInfo> { // This is a collection of any dirty or untracked files. This covers: // - new/modified/deleted/renamed/type change (index or worktree) // - untracked files (which are "new" worktree files) // - ignored (in case the user has an `include` directive that // conflicts with .gitignore). let mut dirty_files = Vec::new(); collect_statuses(repo, &mut dirty_files)?; // Include each submodule so that the error message can provide // specifically *which* files in a submodule are modified. status_submodules(repo, &mut dirty_files)?; // Find the intersection of dirty in git, and the src_files that would // be packaged. This is a lazy n^2 check, but seems fine with // thousands of files. 
let dirty_src_files: Vec<String> = src_files .iter() .filter(|src_file| dirty_files.iter().any(|path| src_file.starts_with(path))) .map(|path| { path.strip_prefix(p.root()) .unwrap_or(path) .display() .to_string() }) .collect(); if dirty_src_files.is_empty() { let rev_obj = repo.revparse_single("HEAD")?; Ok(GitVcsInfo { sha1: rev_obj.id().to_string(), }) } else { anyhow::bail!( "{} files in the working directory contain changes that were \ not yet committed into git:\n\n{}\n\n\ to proceed despite this and include the uncommitted changes, pass the `--allow-dirty` flag", dirty_src_files.len(), dirty_src_files.join("\n") ) } } // Helper to collect dirty statuses for a single repo. fn collect_statuses( repo: &git2::Repository, dirty_files: &mut Vec<PathBuf>, ) -> CargoResult<()> { let mut status_opts = git2::StatusOptions::new(); // Exclude submodules, as they are being handled manually by recursing // into each one so that details about specific files can be // retrieved. status_opts .exclude_submodules(true) .include_ignored(true) .include_untracked(true); let repo_statuses = repo.statuses(Some(&mut status_opts)).with_context(|| { format!( "failed to retrieve git status from repo {}", repo.path().display() ) })?; let workdir = repo.workdir().unwrap(); let this_dirty = repo_statuses.iter().filter_map(|entry| { let path = entry.path().expect("valid utf-8 path"); if path.ends_with("Cargo.lock") && entry.status() == git2::Status::IGNORED { // It is OK to include Cargo.lock even if it is ignored. return None; } // Use an absolute path, so that comparing paths is easier // (particularly with submodules). Some(workdir.join(path)) }); dirty_files.extend(this_dirty); Ok(()) } // Helper to collect dirty statuses while recursing into submodules. fn status_submodules( repo: &git2::Repository, dirty_files: &mut Vec<PathBuf>, ) -> CargoResult<()> { for submodule in repo.submodules()? { // Ignore submodules that don't open, they are probably not initialized. 
// If its files are required, then the verification step should fail. if let Ok(sub_repo) = submodule.open() { status_submodules(&sub_repo, dirty_files)?; collect_statuses(&sub_repo, dirty_files)?; } } Ok(()) } } fn tar( ws: &Workspace<'_>, pkg: &Package, ar_files: Vec<ArchiveFile>, dst: &File, filename: &str, ) -> CargoResult<()> { // Prepare the encoder and its header. let filename = Path::new(filename); let encoder = GzBuilder::new() .filename(paths::path2bytes(filename)?) .write(dst, Compression::best()); // Put all package files into a compressed archive. let mut ar = Builder::new(encoder); let config = ws.config(); let base_name = format!("{}-{}", pkg.name(), pkg.version()); let base_path = Path::new(&base_name); for ar_file in ar_files { let ArchiveFile { rel_path, rel_str, contents, } = ar_file; let ar_path = base_path.join(&rel_path); config .shell() .verbose(|shell| shell.status("Archiving", &rel_str))?; let mut header = Header::new_gnu(); match contents { FileContents::OnDisk(disk_path) => { let mut file = File::open(&disk_path).with_context(|| { format!("failed to open for archiving: `{}`", disk_path.display()) })?; let metadata = file.metadata().with_context(|| { format!("could not learn metadata for: `{}`", disk_path.display()) })?; header.set_metadata_in_mode(&metadata, HeaderMode::Deterministic); header.set_cksum(); ar.append_data(&mut header, &ar_path, &mut file) .with_context(|| { format!("could not archive source file `{}`", disk_path.display()) })?; } FileContents::Generated(generated_kind) => { let contents = match generated_kind { GeneratedFile::Manifest => pkg.to_registry_toml(ws)?, GeneratedFile::Lockfile => build_lock(ws, pkg)?, GeneratedFile::VcsInfo(ref s) => serde_json::to_string_pretty(s)?, }; header.set_entry_type(EntryType::file()); header.set_mode(0o644); header.set_size(contents.len() as u64); // use something nonzero to avoid rust-lang/cargo#9512 header.set_mtime(1); header.set_cksum(); ar.append_data(&mut header, &ar_path, 
contents.as_bytes()) .with_context(|| format!("could not archive source file `{}`", rel_str))?; } } } let encoder = ar.into_inner()?; encoder.finish()?; Ok(()) } /// Generate warnings when packaging Cargo.lock, and the resolve have changed. fn compare_resolve( config: &Config, current_pkg: &Package, orig_resolve: &Resolve, new_resolve: &Resolve, ) -> CargoResult<()> { if config.shell().verbosity() != Verbosity::Verbose { return Ok(()); } let new_set: BTreeSet<PackageId> = new_resolve.iter().collect(); let orig_set: BTreeSet<PackageId> = orig_resolve.iter().collect(); let added = new_set.difference(&orig_set); // Removed entries are ignored, this is used to quickly find hints for why // an entry changed. let removed: Vec<&PackageId> = orig_set.difference(&new_set).collect(); for pkg_id in added { if pkg_id.name() == current_pkg.name() && pkg_id.version() == current_pkg.version() { // Skip the package that is being created, since its SourceId // (directory) changes. continue; } // Check for candidates where the source has changed (such as [patch] // or a dependency with multiple sources like path/version). let removed_candidates: Vec<&PackageId> = removed .iter() .filter(|orig_pkg_id| { orig_pkg_id.name() == pkg_id.name() && orig_pkg_id.version() == pkg_id.version() }) .cloned() .collect(); let extra = match removed_candidates.len() { 0 => { // This can happen if the original was out of date. let previous_versions: Vec<&PackageId> = removed .iter() .filter(|orig_pkg_id| orig_pkg_id.name() == pkg_id.name()) .cloned() .collect(); match previous_versions.len() { 0 => String::new(), 1 => format!( ", previous version was `{}`", previous_versions[0].version() ), _ => format!( ", previous versions were: {}", previous_versions .iter() .map(|pkg_id| format!("`{}`", pkg_id.version())) .collect::<Vec<_>>() .join(", ") ), } } 1 => { // This can happen for multi-sourced dependencies like // `{path="...", version="..."}` or `[patch]` replacement. 
// `[replace]` is not captured in Cargo.lock. format!( ", was originally sourced from `{}`", removed_candidates[0].source_id() ) } _ => { // I don't know if there is a way to actually trigger this, // but handle it just in case. let comma_list = removed_candidates .iter() .map(|pkg_id| format!("`{}`", pkg_id.source_id())) .collect::<Vec<_>>() .join(", "); format!( ", was originally sourced from one of these sources: {}", comma_list ) } }; let msg = format!( "package `{}` added to the packaged Cargo.lock file{}", pkg_id, extra ); config.shell().note(msg)?; } Ok(()) } fn check_yanked(config: &Config, pkg_set: &PackageSet<'_>, resolve: &Resolve) -> CargoResult<()> { // Checking the yanked status involves taking a look at the registry and // maybe updating files, so be sure to lock it here. let _lock = config.acquire_package_cache_lock()?; let mut sources = pkg_set.sources_mut(); for pkg_id in resolve.iter() { if let Some(source) = sources.get_mut(pkg_id.source_id()) { if source.is_yanked(pkg_id)? { config.shell().warn(format!( "package `{}` in Cargo.lock is yanked in registry `{}`, \ consider updating to a version that is not yanked", pkg_id, pkg_id.source_id().display_registry_name() ))?; } } } Ok(()) } fn run_verify( ws: &Workspace<'_>, pkg: &Package, tar: &FileLock, opts: &PackageOpts<'_>, ) -> CargoResult<()> { let config = ws.config(); config.shell().status("Verifying", pkg)?; let f = GzDecoder::new(tar.file()); let dst = tar .parent() .join(&format!("{}-{}", pkg.name(), pkg.version())); if dst.exists() { paths::remove_dir_all(&dst)?; } let mut archive = Archive::new(f); // We don't need to set the Modified Time, as it's not relevant to verification // and it errors on filesystems that don't support setting a modified timestamp archive.set_preserve_mtime(false); archive.unpack(dst.parent().unwrap())?; // Manufacture an ephemeral workspace to ensure that even if the top-level // package has a workspace we can still build our new crate. 
let id = SourceId::for_path(&dst)?; let mut src = PathSource::new(&dst, id, ws.config()); let new_pkg = src.root_package()?; let pkg_fingerprint = hash_all(&dst)?; let ws = Workspace::ephemeral(new_pkg, config, None, true)?; let rustc_args = if pkg .manifest() .unstable_features() .require(Feature::public_dependency()) .is_ok() { // FIXME: Turn this on at some point in the future //Some(vec!["-D exported_private_dependencies".to_string()]) Some(vec![]) } else { None }; let exec: Arc<dyn Executor> = Arc::new(DefaultExecutor); ops::compile_with_exec( &ws, &ops::CompileOptions { build_config: BuildConfig::new(config, opts.jobs, &opts.targets, CompileMode::Build)?, cli_features: opts.cli_features.clone(), spec: ops::Packages::Packages(Vec::new()), filter: ops::CompileFilter::Default { required_features_filterable: true, }, target_rustdoc_args: None, target_rustc_args: rustc_args, local_rustdoc_args: None, rustdoc_document_private_items: false, honor_rust_version: true, }, &exec, )?; // Check that `build.rs` didn't modify any files in the `src` directory. let ws_fingerprint = hash_all(&dst)?; if pkg_fingerprint != ws_fingerprint { let changes = report_hash_difference(&pkg_fingerprint, &ws_fingerprint); anyhow::bail!( "Source directory was modified by build.rs during cargo publish. 
\ Build scripts should not modify anything outside of OUT_DIR.\n\ {}\n\n\ To proceed despite this, pass the `--no-verify` flag.", changes ) } Ok(()) } fn hash_all(path: &Path) -> CargoResult<HashMap<PathBuf, u64>> { fn wrap(path: &Path) -> CargoResult<HashMap<PathBuf, u64>> { let mut result = HashMap::new(); let walker = walkdir::WalkDir::new(path).into_iter(); for entry in walker.filter_entry(|e| !(e.depth() == 1 && e.file_name() == "target")) { let entry = entry?; let file_type = entry.file_type(); if file_type.is_file() { let file = File::open(entry.path())?; let hash = util::hex::hash_u64_file(&file)?; result.insert(entry.path().to_path_buf(), hash); } else if file_type.is_symlink() { let hash = util::hex::hash_u64(&fs::read_link(entry.path())?); result.insert(entry.path().to_path_buf(), hash); } else if file_type.is_dir() { let hash = util::hex::hash_u64(&()); result.insert(entry.path().to_path_buf(), hash); } } Ok(result) } let result = wrap(path).with_context(|| format!("failed to verify output at {:?}", path))?; Ok(result) } fn report_hash_difference(orig: &HashMap<PathBuf, u64>, after: &HashMap<PathBuf, u64>) -> String { let mut changed = Vec::new(); let mut removed = Vec::new(); for (key, value) in orig { match after.get(key) { Some(after_value) => { if value != after_value { changed.push(key.to_string_lossy()); } } None => removed.push(key.to_string_lossy()), } } let mut added: Vec<_> = after .keys() .filter(|key| !orig.contains_key(*key)) .map(|key| key.to_string_lossy()) .collect(); let mut result = Vec::new(); if !changed.is_empty() { changed.sort_unstable(); result.push(format!("Changed: {}", changed.join("\n\t"))); } if !added.is_empty() { added.sort_unstable(); result.push(format!("Added: {}", added.join("\n\t"))); } if !removed.is_empty() { removed.sort_unstable(); result.push(format!("Removed: {}", removed.join("\n\t"))); } assert!(!result.is_empty(), "unexpected empty change detection"); result.join("\n") } // It can often be the case that files 
of a particular name on one platform // can't actually be created on another platform. For example files with colons // in the name are allowed on Unix but not on Windows. // // To help out in situations like this, issue about weird filenames when // packaging as a "heads up" that something may not work on other platforms. fn check_filename(file: &Path, shell: &mut Shell) -> CargoResult<()> { let name = match file.file_name() { Some(name) => name, None => return Ok(()), }; let name = match name.to_str() { Some(name) => name, None => anyhow::bail!( "path does not have a unicode filename which may not unpack \ on all platforms: {}", file.display() ), }; let bad_chars = ['/', '\\', '<', '>', ':', '"', '|', '?', '*']; if let Some(c) = bad_chars.iter().find(|c| name.contains(**c)) { anyhow::bail!( "cannot package a filename with a special character `{}`: {}", c, file.display() ) } if restricted_names::is_windows_reserved_path(file) { shell.warn(format!( "file {} is a reserved Windows filename, \ it will not work on Windows platforms", file.display() ))?; } Ok(()) }
36.897175
109
0.540791
9151071a8cf62f4815191ee4ff31fdfaa08907cd
7,933
// Copyright 2020 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use { crate::model::walk_state::WalkStateUnit, ::routing::error::EventsRoutingError, cm_rust::DictionaryValue, maplit::hashmap, std::collections::HashMap, }; type OptionFilterMap = Option<HashMap<String, DictionaryValue>>; #[derive(Debug, Clone, Eq, PartialEq)] pub struct EventFilter { filter: Option<HashMap<String, DictionaryValue>>, is_debug: bool, } impl EventFilter { pub fn new(filter: Option<HashMap<String, DictionaryValue>>) -> Self { Self { filter, is_debug: false } } pub fn debug() -> Self { Self { filter: None, is_debug: true } } /// Verifies that for all fields given, they are present in the current filter. If no fields /// are given, returns true. pub fn has_fields(&self, fields: &OptionFilterMap) -> bool { if self.is_debug { return true; } Self::validate_subset(&fields, &self.filter).is_ok() } pub fn contains(&self, key: impl Into<String>, values: Vec<String>) -> bool { self.has_fields(&Some(hashmap! {key.into() => DictionaryValue::StrVec(values)})) } fn validate_subset( self_filter: &OptionFilterMap, next_filter: &OptionFilterMap, ) -> Result<(), EventsRoutingError> { match (self_filter, next_filter) { (None, None) => {} (None, Some(_)) => {} (Some(filter), Some(next_filter)) => { for (key, value) in filter { if !(next_filter.contains_key(key) && is_subset(value, next_filter.get(key).as_ref().unwrap())) { return Err(EventsRoutingError::InvalidFilter); } } } (Some(_), None) => { return Err(EventsRoutingError::InvalidFilter); } } Ok(()) } } impl WalkStateUnit for EventFilter { type Error = EventsRoutingError; /// Ensures the next walk state of filters is a superset of the current state. /// /// Consider A->B where A (next_state) is offering an event to B (self) and B is using it itself /// or offering it again. 
/// /// For all properties of B, those properties are in A and they are subsets of the property in /// B. fn validate_next(&self, next_state: &EventFilter) -> Result<(), Self::Error> { Self::validate_subset(&self.filter, &next_state.filter) } fn finalize_error() -> Self::Error { EventsRoutingError::MissingFilter } } fn is_subset(prev_value: &DictionaryValue, next_value: &DictionaryValue) -> bool { match (prev_value, next_value) { (DictionaryValue::Str(field), DictionaryValue::Str(next_field)) => field == next_field, (DictionaryValue::StrVec(fields), DictionaryValue::StrVec(next_fields)) => { fields.iter().all(|field| next_fields.contains(field)) } (DictionaryValue::Str(field), DictionaryValue::StrVec(next_fields)) => { next_fields.contains(field) } (DictionaryValue::StrVec(fields), DictionaryValue::Str(next_field)) => { if fields.is_empty() { return true; } if fields.len() > 1 { return false; } fields.contains(next_field) } // Self is a vector, next is a unit. Not subset. _ => false, } } #[cfg(test)] mod tests { use {super::*, maplit::hashmap, matches::assert_matches}; #[test] fn test_walk_state() { let none_filter = EventFilter::new(None); let empty_filter = EventFilter::new(Some(hashmap! {})); let single_field_filter = EventFilter::new(Some(hashmap! { "field".to_string() => DictionaryValue::Str("/foo".to_string()), })); let single_field_filter_2 = EventFilter::new(Some(hashmap! { "field".to_string() => DictionaryValue::Str("/bar".to_string()), })); let multi_field_filter = EventFilter::new(Some(hashmap! { "field".to_string() => DictionaryValue::StrVec(vec![ "/bar".to_string(), "/baz".to_string()]) })); let multi_field_filter_2 = EventFilter::new(Some(hashmap! { "field".to_string() => DictionaryValue::StrVec(vec![ "/bar".to_string(), "/baz".to_string(), "/foo".to_string()]) })); let multi_field_single = EventFilter::new(Some(hashmap! 
{ "field".to_string() => DictionaryValue::StrVec(vec!["/foo".to_string()]) })); let multi_field_empty = EventFilter::new(Some(hashmap! { "field".to_string() => DictionaryValue::StrVec(vec![]) })); assert_matches!(none_filter.validate_next(&none_filter), Ok(())); assert_matches!( single_field_filter.validate_next(&none_filter), Err(EventsRoutingError::InvalidFilter) ); assert_matches!( single_field_filter.validate_next(&empty_filter), Err(EventsRoutingError::InvalidFilter) ); assert_matches!(single_field_filter.validate_next(&single_field_filter), Ok(())); assert_matches!( single_field_filter.validate_next(&single_field_filter_2), Err(EventsRoutingError::InvalidFilter) ); assert_matches!( single_field_filter.validate_next(&multi_field_filter), Err(EventsRoutingError::InvalidFilter) ); assert_matches!(single_field_filter.validate_next(&multi_field_filter_2), Ok(())); assert_matches!( multi_field_filter.validate_next(&none_filter), Err(EventsRoutingError::InvalidFilter) ); assert_matches!( multi_field_filter_2.validate_next(&multi_field_filter), Err(EventsRoutingError::InvalidFilter) ); assert_matches!( multi_field_filter.validate_next(&single_field_filter), Err(EventsRoutingError::InvalidFilter) ); assert_matches!( multi_field_filter.validate_next(&single_field_filter_2), Err(EventsRoutingError::InvalidFilter) ); assert_matches!(multi_field_filter.validate_next(&multi_field_filter), Ok(())); assert_matches!(multi_field_filter.validate_next(&multi_field_filter_2), Ok(())); assert_matches!( multi_field_filter.validate_next(&empty_filter), Err(EventsRoutingError::InvalidFilter) ); assert_matches!( empty_filter.validate_next(&none_filter), Err(EventsRoutingError::InvalidFilter) ); assert_matches!(empty_filter.validate_next(&empty_filter), Ok(())); assert_matches!(empty_filter.validate_next(&single_field_filter), Ok(())); assert_matches!(empty_filter.validate_next(&multi_field_filter), Ok(())); assert_matches!(multi_field_single.validate_next(&single_field_filter), 
Ok(())); assert_matches!(multi_field_empty.validate_next(&single_field_filter), Ok(())); } #[test] fn contains() { let filter = EventFilter::new(Some(hashmap! { "field".to_string() => DictionaryValue::StrVec(vec!["/foo".to_string(), "/bar".to_string()]), })); assert!(filter.contains("field", vec!["/foo".to_string()])); assert!(filter.contains("field", vec!["/foo".to_string(), "/bar".to_string()])); assert!(!filter.contains("field2", vec!["/foo".to_string()])); assert!(!filter .contains("field2", vec!["/foo".to_string(), "/bar".to_string(), "/baz".to_string()])); assert!(!filter.contains("field2", vec!["/baz".to_string()])); } }
38.697561
105
0.60179
11e064e0aa2975a0f7c2b40177364290369f0c58
295
fn seven(mut n: i64) -> (i64, i32) { let mut step = 0; let mut s: String; while n >= 100 { s = n.to_string(); n = &s[0..s.len() - 1].parse().unwrap() - (2 * &s[s.len() - 1..s.len()].parse().unwrap()) as i64; step = step + 1; } (n, step) }
24.583333
69
0.420339
c14869b6c502091418d608d6dcd3d8454b5650c1
704
// Copyright (c) The Diem Core Contributors // SPDX-License-Identifier: Apache-2.0 // Defines Forge Tests pub mod event_fetcher; pub mod fullnode; pub mod replay_tooling; pub mod scripts_and_modules; pub mod transaction; pub mod verifying_client; // Converted to local Forge backend #[cfg(test)] mod client; #[cfg(test)] mod consensus; #[cfg(test)] mod genesis; #[cfg(test)] mod key_manager; #[cfg(test)] mod operational_tooling; #[cfg(test)] mod release_flow; #[cfg(test)] mod state_sync; #[cfg(test)] mod storage; // Left to convert #[cfg(test)] mod full_nodes; #[cfg(test)] mod network; #[cfg(test)] mod smoke_test_environment; #[cfg(test)] mod test_utils; #[cfg(test)] mod workspace_builder;
15.304348
43
0.730114
1a16fabf0d579a21d3de715f228a01f87ee64b4c
7,330
use std::{ fmt::Display, path::{Path, PathBuf}, }; use thiserror::Error; use crate::{ source_target_configuration::SourceTargetConfiguration, target_tv_show::{SeasonEntry, TargetTVShow}, }; #[derive(Error, Debug)] pub enum TaskError { #[error("Source path `{0}` is not a directory or can't be found")] SourcePath(String), #[error("Error reading source path `{0}` : `{1}`")] SourcePathReading(String, String), #[error("Target path `{0}` is not a directory or can't be found")] TargetPath(String), #[error("Error reading target path `{0}` : `{1}`")] TargetPathReading(String, String), #[error("Error in IO")] IoError(#[from] std::io::Error), #[error("Something else happened!")] UnknownError, } pub const VALID_EXTENSIONS: [&str; 3] = ["mp4", "webm", "m4v"]; #[derive(Debug)] pub enum TaskAction { Copy(PathBuf, SeasonEntry), } impl Display for TaskAction { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { TaskAction::Copy(source, target) => { write!( f, "src: {}\ntrg: {}", source.to_str().unwrap_or("<wrong source?>"), target.full_path.to_str().unwrap_or("<wrong target?>") ) } } } } impl TaskAction { pub fn simplified_display(&self) -> String { match self { TaskAction::Copy(source, target) => { let source_file = source .as_path() .file_name() .map_or("<wrong source?>", |s| s.to_str().unwrap()); let target_file = target .full_path .as_path() .file_name() .map_or("<wrong target?>", |t| t.to_str().unwrap()); format!("{}\n{}", source_file, target_file) } } } } pub struct TaskTvShow { pub configuration: SourceTargetConfiguration, pub target_tv_show: TargetTVShow, pub source_files: Vec<String>, } impl TaskTvShow { pub fn new(configuration: SourceTargetConfiguration) -> Result<Self, TaskError> { let source_files = super::source_files::gather_source_files(configuration.source.clone())?; let target_tv_show = super::target_tv_show::construct_tv_show(configuration.target.clone())?; Ok(Self { configuration, target_tv_show, source_files, }) } } impl TaskTvShow 
{ pub fn count_target_files(&self) -> usize { self.target_tv_show.total_len() } pub fn count_source_files(&self) -> usize { self.source_files.len() } pub fn gather_actions_to_run(&self) -> Result<Vec<TaskAction>, TaskError> { if self.source_files.is_empty() { return Ok(vec![]); } let mut output = vec![]; let (mut season, mut episode) = self.target_tv_show.first_available_entry( self.configuration.default_season, self.configuration.default_episode, ); for source_file in self .source_files .iter() .filter(|source_file| !self.target_tv_show.contains(source_file)) { let se = self .target_tv_show .construct_season_entry(source_file, season, episode); season = se.season_number; episode = se.episode_number + 1; output.push(TaskAction::Copy( Path::new(&self.configuration.source).join(source_file), se, )); } Ok(output) } /// Gathers actions and logs some reports mainly pub fn dry_run(&self) -> Result<(), TaskError> { log::info!( "Dry run: for: {} - {}", self.configuration.source, self.configuration.target ); let tasks = self.gather_actions_to_run()?; if tasks.is_empty() { log::info!("Dry run: No new actions needed",); return Ok(()); } log::info!( "Dry run: tasks found for source: {}", self.configuration.source ); for task in tasks { log::info!("Dry run:\n{}", task.simplified_display()); } Ok(()) } pub fn run(&self) -> Result<(), TaskError> { log::info!( "Executing: {} - {}", self.configuration.source, self.configuration.target ); let tasks = self.gather_actions_to_run()?; if tasks.is_empty() { log::info!("No new actions needed"); return Ok(()); } for task in tasks { log::info!("Executing {task}"); #[allow(irrefutable_let_patterns)] if let TaskAction::Copy(source, target) = task { if !target.target_dir.exists() { std::fs::create_dir(target.target_dir.clone())?; } let output = std::process::Command::new("/bin/cp") .arg(source.to_str().unwrap()) .arg(target.full_path.to_str().unwrap()) .output()?; log::info!("{:#?}", output); } else { // This is essentially scaffolding as of 
now log::warn!("Action {:#?} not supported", task); } } Ok(()) } } #[cfg(test)] mod tests { use super::TaskTvShow; use crate::source_target_configuration::SourceTargetConfiguration; #[test] fn dry_run() { let source = r#"[ { "source" : "./test_data/source_a", "target" : "./test_data/target_a", "default_season" : 30, "default_episode": 1 } ]"#; let mut all_configurations = serde_json::from_str::<Vec<SourceTargetConfiguration>>(source).unwrap(); let task = TaskTvShow::new(all_configurations.pop().unwrap()).unwrap(); let dry_actions = task.gather_actions_to_run(); assert!(dry_actions.is_ok()); assert_eq!(dry_actions.unwrap().len(), 2); } #[test] fn count_source_files() { let source = r#"[ { "source" : "./test_data/source_a", "target" : "./test_data/target_a", "default_season" : 30, "default_episode": 1 } ]"#; let mut all_configurations = serde_json::from_str::<Vec<SourceTargetConfiguration>>(source).unwrap(); let task = TaskTvShow::new(all_configurations.pop().unwrap()).unwrap(); assert_eq!(task.count_source_files(), 4); } #[test] fn count_target_files() { let source = r#"[ { "source" : "./test_data/source_a", "target" : "./test_data/target_a", "default_season" : 30, "default_episode": 1 } ]"#; let mut all_configurations = serde_json::from_str::<Vec<SourceTargetConfiguration>>(source).unwrap(); let task = TaskTvShow::new(all_configurations.pop().unwrap()).unwrap(); assert_eq!(task.count_target_files(), 2); } }
30.92827
99
0.525102
75079123b08e767893361381b7ca1169620c011a
2,100
// 2d matrix math taken from glmatrix: http://glmatrix.net/docs/mat2d.js.html //Broken... could theoretically work but different targets //expect a different order sometimes (e.g. 6 elem isn't as standard) // pub fn from_mat4(mat:&[f64;16]) -> [f64;6] { // let translate_x = mat[12]; // let translate_y = mat[13]; // let scale_x = mat[0]; // let scale_y = mat[5]; // let skew_x = mat[4].atan(); // let skew_y = mat[1].atan(); // } pub fn translate_mut(transform: &mut [f64; 6], x: f64, y: f64) { let a = transform; let a0 = a[0]; let a1 = a[1]; let a2 = a[2]; let a3 = a[3]; let a4 = a[4]; let a5 = a[5]; let v0 = x; let v1 = y; a[0] = a0; a[1] = a1; a[2] = a2; a[3] = a3; a[4] = a0 * v0 + a2 * v1 + a4; a[5] = a1 * v0 + a3 * v1 + a5; } pub fn scale_mut(transform: &mut [f64; 6], x: f64, y: f64) { let a = transform; let a0 = a[0]; let a1 = a[1]; let a2 = a[2]; let a3 = a[3]; let a4 = a[4]; let a5 = a[5]; let v0 = x; let v1 = y; a[0] = a0 * v0; a[1] = a1 * v0; a[2] = a2 * v1; a[3] = a3 * v1; a[4] = a4; a[5] = a5; } pub fn mul_mut(transform: &mut [f64; 6], other: &[f64; 6]) { let a = transform; let a0 = a[0]; let a1 = a[1]; let a2 = a[2]; let a3 = a[3]; let a4 = a[4]; let a5 = a[5]; let b = other; let b0 = b[0]; let b1 = b[1]; let b2 = b[2]; let b3 = b[3]; let b4 = b[4]; let b5 = b[5]; a[0] = a0 * b0 + a2 * b1; a[1] = a1 * b0 + a3 * b1; a[2] = a0 * b2 + a2 * b3; a[3] = a1 * b2 + a3 * b3; a[4] = a0 * b4 + a2 * b5 + a4; a[5] = a1 * b4 + a3 * b5 + a5; } /* export function multiply(out, a, b) { let a0 = a[0], a1 = a[1], a2 = a[2], a3 = a[3], a4 = a[4], a5 = a[5]; let b0 = b[0], b1 = b[1], b2 = b[2], b3 = b[3], b4 = b[4], b5 = b[5]; out[0] = a0 * b0 + a2 * b1; out[1] = a1 * b0 + a3 * b1; out[2] = a0 * b2 + a2 * b3; out[3] = a1 * b2 + a3 * b3; out[4] = a0 * b4 + a2 * b5 + a4; out[5] = a1 * b4 + a3 * b5 + a5; return out; } */
22.105263
77
0.448571
9bc933c218e6f11f746042b32997392a3eb3e674
25,271
use super::*; #[test] fn select_1() { assert_eq!( Query::select() .columns(vec![Char::Character, Char::SizeW, Char::SizeH]) .from(Char::Table) .limit(10) .offset(100) .to_string(MysqlQueryBuilder), "SELECT `character`, `size_w`, `size_h` FROM `character` LIMIT 10 OFFSET 100" ); } #[test] fn select_2() { assert_eq!( Query::select() .columns(vec![Char::Character, Char::SizeW, Char::SizeH]) .from(Char::Table) .and_where(Expr::col(Char::SizeW).eq(3)) .to_string(MysqlQueryBuilder), "SELECT `character`, `size_w`, `size_h` FROM `character` WHERE `size_w` = 3" ); } #[test] fn select_3() { assert_eq!( Query::select() .columns(vec![ Char::Character, Char::SizeW, Char::SizeH ]) .from(Char::Table) .and_where(Expr::col(Char::SizeW).eq(3)) .and_where(Expr::col(Char::SizeH).eq(4)) .to_string(MysqlQueryBuilder), "SELECT `character`, `size_w`, `size_h` FROM `character` WHERE `size_w` = 3 AND `size_h` = 4" ); } #[test] fn select_4() { assert_eq!( Query::select() .columns(vec![Glyph::Image]) .from_subquery( Query::select() .columns(vec![Glyph::Image, Glyph::Aspect]) .from(Glyph::Table) .take(), Alias::new("subglyph") ) .to_string(MysqlQueryBuilder), "SELECT `image` FROM (SELECT `image`, `aspect` FROM `glyph`) AS `subglyph`" ); } #[test] fn select_5() { assert_eq!( Query::select() .column((Glyph::Table, Glyph::Image)) .from(Glyph::Table) .and_where(Expr::tbl(Glyph::Table, Glyph::Aspect).is_in(vec![3, 4])) .to_string(MysqlQueryBuilder), "SELECT `glyph`.`image` FROM `glyph` WHERE `glyph`.`aspect` IN (3, 4)" ); } #[test] fn select_6() { assert_eq!( Query::select() .columns(vec![Glyph::Aspect,]) .exprs(vec![Expr::col(Glyph::Image).max(),]) .from(Glyph::Table) .group_by_columns(vec![Glyph::Aspect,]) .and_having(Expr::col(Glyph::Aspect).gt(2)) .to_string(MysqlQueryBuilder), "SELECT `aspect`, MAX(`image`) FROM `glyph` GROUP BY `aspect` HAVING `aspect` > 2" ); } #[test] fn select_7() { assert_eq!( Query::select() .columns(vec![Glyph::Aspect,]) .from(Glyph::Table) 
.and_where(Expr::expr(Expr::col(Glyph::Aspect).if_null(0)).gt(2)) .to_string(MysqlQueryBuilder), "SELECT `aspect` FROM `glyph` WHERE IFNULL(`aspect`, 0) > 2" ); } #[test] fn select_8() { assert_eq!( Query::select() .columns(vec![ Char::Character, ]) .from(Char::Table) .left_join(Font::Table, Expr::tbl(Char::Table, Char::FontId).equals(Font::Table, Font::Id)) .to_string(MysqlQueryBuilder), "SELECT `character` FROM `character` LEFT JOIN `font` ON `character`.`font_id` = `font`.`id`" ); } #[test] fn select_9() { assert_eq!( Query::select() .columns(vec![ Char::Character, ]) .from(Char::Table) .left_join(Font::Table, Expr::tbl(Char::Table, Char::FontId).equals(Font::Table, Font::Id)) .inner_join(Glyph::Table, Expr::tbl(Char::Table, Char::Character).equals(Glyph::Table, Glyph::Image)) .to_string(MysqlQueryBuilder), "SELECT `character` FROM `character` LEFT JOIN `font` ON `character`.`font_id` = `font`.`id` INNER JOIN `glyph` ON `character`.`character` = `glyph`.`image`" ); } #[test] fn select_10() { assert_eq!( Query::select() .columns(vec![ Char::Character, ]) .from(Char::Table) .left_join(Font::Table, Expr::tbl(Char::Table, Char::FontId).equals(Font::Table, Font::Id) .and(Expr::tbl(Char::Table, Char::FontId).equals(Font::Table, Font::Id)) ) .to_string(MysqlQueryBuilder), "SELECT `character` FROM `character` LEFT JOIN `font` ON (`character`.`font_id` = `font`.`id`) AND (`character`.`font_id` = `font`.`id`)" ); } #[test] fn select_11() { assert_eq!( Query::select() .columns(vec![ Glyph::Aspect, ]) .from(Glyph::Table) .and_where(Expr::expr(Expr::col(Glyph::Aspect).if_null(0)).gt(2)) .order_by(Glyph::Image, Order::Desc) .order_by((Glyph::Table, Glyph::Aspect), Order::Asc) .to_string(MysqlQueryBuilder), "SELECT `aspect` FROM `glyph` WHERE IFNULL(`aspect`, 0) > 2 ORDER BY `image` DESC, `glyph`.`aspect` ASC" ); } #[test] fn select_12() { assert_eq!( Query::select() .columns(vec![ Glyph::Aspect, ]) .from(Glyph::Table) 
.and_where(Expr::expr(Expr::col(Glyph::Aspect).if_null(0)).gt(2)) .order_by_columns(vec![ (Glyph::Id, Order::Asc), (Glyph::Aspect, Order::Desc), ]) .to_string(MysqlQueryBuilder), "SELECT `aspect` FROM `glyph` WHERE IFNULL(`aspect`, 0) > 2 ORDER BY `id` ASC, `aspect` DESC" ); } #[test] fn select_13() { assert_eq!( Query::select() .columns(vec![ Glyph::Aspect, ]) .from(Glyph::Table) .and_where(Expr::expr(Expr::col(Glyph::Aspect).if_null(0)).gt(2)) .order_by_columns(vec![ ((Glyph::Table, Glyph::Id), Order::Asc), ((Glyph::Table, Glyph::Aspect), Order::Desc), ]) .to_string(MysqlQueryBuilder), "SELECT `aspect` FROM `glyph` WHERE IFNULL(`aspect`, 0) > 2 ORDER BY `glyph`.`id` ASC, `glyph`.`aspect` DESC" ); } #[test] fn select_14() { assert_eq!( Query::select() .columns(vec![ Glyph::Id, Glyph::Aspect, ]) .expr(Expr::col(Glyph::Image).max()) .from(Glyph::Table) .group_by_columns(vec![ (Glyph::Table, Glyph::Id), (Glyph::Table, Glyph::Aspect), ]) .and_having(Expr::col(Glyph::Aspect).gt(2)) .to_string(MysqlQueryBuilder), "SELECT `id`, `aspect`, MAX(`image`) FROM `glyph` GROUP BY `glyph`.`id`, `glyph`.`aspect` HAVING `aspect` > 2" ); } #[test] fn select_15() { assert_eq!( Query::select() .columns(vec![Char::Character]) .from(Char::Table) .and_where(Expr::col(Char::FontId).is_null()) .to_string(MysqlQueryBuilder), "SELECT `character` FROM `character` WHERE `font_id` IS NULL" ); } #[test] fn select_16() { assert_eq!( Query::select() .columns(vec![Char::Character]) .from(Char::Table) .and_where(Expr::col(Char::FontId).is_null()) .and_where(Expr::col(Char::Character).is_not_null()) .to_string(MysqlQueryBuilder), "SELECT `character` FROM `character` WHERE `font_id` IS NULL AND `character` IS NOT NULL" ); } #[test] fn select_17() { assert_eq!( Query::select() .columns(vec![(Glyph::Table, Glyph::Image),]) .from(Glyph::Table) .and_where(Expr::tbl(Glyph::Table, Glyph::Aspect).between(3, 5)) .to_string(MysqlQueryBuilder), "SELECT `glyph`.`image` FROM `glyph` WHERE `glyph`.`aspect` BETWEEN 
3 AND 5" ); } #[test] fn select_18() { assert_eq!( Query::select() .columns(vec![ Glyph::Aspect, ]) .from(Glyph::Table) .and_where(Expr::col(Glyph::Aspect).between(3, 5)) .and_where(Expr::col(Glyph::Aspect).not_between(8, 10)) .to_string(MysqlQueryBuilder), "SELECT `aspect` FROM `glyph` WHERE (`aspect` BETWEEN 3 AND 5) AND (`aspect` NOT BETWEEN 8 AND 10)" ); } #[test] fn select_19() { assert_eq!( Query::select() .columns(vec![Char::Character]) .from(Char::Table) .and_where(Expr::col(Char::Character).eq("A")) .to_string(MysqlQueryBuilder), "SELECT `character` FROM `character` WHERE `character` = 'A'" ); } #[test] fn select_20() { assert_eq!( Query::select() .column(Char::Character) .from(Char::Table) .and_where(Expr::col(Char::Character).like("A")) .to_string(MysqlQueryBuilder), "SELECT `character` FROM `character` WHERE `character` LIKE 'A'" ); } #[test] fn select_21() { assert_eq!( Query::select() .columns(vec![ Char::Character ]) .from(Char::Table) .or_where(Expr::col(Char::Character).like("A%")) .or_where(Expr::col(Char::Character).like("%B")) .or_where(Expr::col(Char::Character).like("%C%")) .to_string(MysqlQueryBuilder), "SELECT `character` FROM `character` WHERE `character` LIKE 'A%' OR `character` LIKE '%B' OR `character` LIKE '%C%'" ); } #[test] fn select_22() { assert_eq!( Query::select() .column(Char::Character) .from(Char::Table) .cond_where( Cond::all() .add( Cond::any() .add(Expr::col(Char::Character).like("C")) .add(Expr::col(Char::Character).like("D").and(Expr::col(Char::Character).like("E"))) ) .add( Expr::col(Char::Character).like("F").or(Expr::col(Char::Character).like("G")) ) ) .to_string(MysqlQueryBuilder), "SELECT `character` FROM `character` WHERE (`character` LIKE 'C' OR ((`character` LIKE 'D') AND (`character` LIKE 'E'))) AND ((`character` LIKE 'F') OR (`character` LIKE 'G'))" ); } #[test] fn select_23() { assert_eq!( Query::select() .column(Char::Character) .from(Char::Table) .and_where_option(None) .to_string(MysqlQueryBuilder), "SELECT 
`character` FROM `character`" ); } #[test] fn select_24() { assert_eq!( Query::select() .column(Char::Character) .from(Char::Table) .conditions( true, |x| { x.and_where(Expr::col(Char::FontId).eq(5)); }, |_| () ) .to_string(MysqlQueryBuilder), "SELECT `character` FROM `character` WHERE `font_id` = 5" ); } #[test] fn select_25() { assert_eq!( Query::select() .column(Char::Character) .from(Char::Table) .and_where( Expr::col(Char::SizeW) .mul(2) .equals(Expr::col(Char::SizeH).div(2)) ) .to_string(MysqlQueryBuilder), "SELECT `character` FROM `character` WHERE `size_w` * 2 = `size_h` / 2" ); } #[test] fn select_26() { assert_eq!( Query::select() .column(Char::Character) .from(Char::Table) .and_where( Expr::expr(Expr::col(Char::SizeW).add(1)) .mul(2) .equals(Expr::expr(Expr::col(Char::SizeH).div(2)).sub(1)) ) .to_string(MysqlQueryBuilder), "SELECT `character` FROM `character` WHERE (`size_w` + 1) * 2 = (`size_h` / 2) - 1" ); } #[test] fn select_27() { assert_eq!( Query::select() .columns(vec![ Char::Character, Char::SizeW, Char::SizeH ]) .from(Char::Table) .and_where(Expr::col(Char::SizeW).eq(3)) .and_where(Expr::col(Char::SizeH).eq(4)) .and_where(Expr::col(Char::SizeH).eq(5)) .to_string(MysqlQueryBuilder), "SELECT `character`, `size_w`, `size_h` FROM `character` WHERE `size_w` = 3 AND `size_h` = 4 AND `size_h` = 5" ); } #[test] fn select_28() { assert_eq!( Query::select() .columns(vec![ Char::Character, Char::SizeW, Char::SizeH ]) .from(Char::Table) .or_where(Expr::col(Char::SizeW).eq(3)) .or_where(Expr::col(Char::SizeH).eq(4)) .or_where(Expr::col(Char::SizeH).eq(5)) .to_string(MysqlQueryBuilder), "SELECT `character`, `size_w`, `size_h` FROM `character` WHERE `size_w` = 3 OR `size_h` = 4 OR `size_h` = 5" ); } #[test] #[should_panic] fn select_29() { assert_eq!( Query::select() .columns(vec![ Char::Character, Char::SizeW, Char::SizeH ]) .from(Char::Table) .and_where(Expr::col(Char::SizeW).eq(3)) .or_where(Expr::col(Char::SizeH).eq(4)) 
.and_where(Expr::col(Char::SizeH).eq(5)) .to_string(MysqlQueryBuilder), "SELECT `character`, `size_w`, `size_h` FROM `character` WHERE `size_w` = 3 OR `size_h` = 4 AND `size_h` = 5" ); } #[test] fn select_30() { assert_eq!( Query::select() .columns(vec![ Char::Character, Char::SizeW, Char::SizeH ]) .from(Char::Table) .and_where( Expr::col(Char::SizeW).mul(2) .add(Expr::col(Char::SizeH).div(3)) .equals(Expr::value(4)) ) .to_string(MysqlQueryBuilder), "SELECT `character`, `size_w`, `size_h` FROM `character` WHERE (`size_w` * 2) + (`size_h` / 3) = 4" ); } #[test] fn select_31() { assert_eq!( Query::select() .expr((1..10_i32).fold(Expr::value(0), |expr, i| { expr.add(Expr::value(i)) })) .to_string(MysqlQueryBuilder), "SELECT 0 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9" ); } #[test] fn select_32() { assert_eq!( Query::select() .expr_as(Expr::col(Char::Character), Alias::new("C")) .from(Char::Table) .to_string(MysqlQueryBuilder), "SELECT `character` AS `C` FROM `character`" ); } #[test] fn select_33() { assert_eq!( Query::select() .column(Glyph::Image) .from(Glyph::Table) .and_where( Expr::col(Glyph::Aspect) .in_subquery(Query::select().expr(Expr::cust("3 + 2 * 2")).take()) ) .to_string(MysqlQueryBuilder), "SELECT `image` FROM `glyph` WHERE `aspect` IN (SELECT 3 + 2 * 2)" ); } #[test] fn select_34a() { assert_eq!( Query::select() .column(Glyph::Aspect) .expr(Expr::col(Glyph::Image).max()) .from(Glyph::Table) .group_by_columns(vec![Glyph::Aspect,]) .or_having( Expr::col(Glyph::Aspect) .gt(2) .or(Expr::col(Glyph::Aspect).lt(8)) ) .or_having( Expr::col(Glyph::Aspect) .gt(12) .and(Expr::col(Glyph::Aspect).lt(18)) ) .or_having(Expr::col(Glyph::Aspect).gt(32)) .to_string(MysqlQueryBuilder), vec![ "SELECT `aspect`, MAX(`image`) FROM `glyph` GROUP BY `aspect`", "HAVING ((`aspect` > 2) OR (`aspect` < 8))", "OR ((`aspect` > 12) AND (`aspect` < 18))", "OR `aspect` > 32", ] .join(" ") ); } #[test] #[should_panic] fn select_34b() { assert_eq!( Query::select() .column(Glyph::Aspect) 
.expr(Expr::col(Glyph::Image).max()) .from(Glyph::Table) .group_by_columns(vec![Glyph::Aspect,]) .or_having( Expr::col(Glyph::Aspect) .gt(2) .or(Expr::col(Glyph::Aspect).lt(8)) ) .and_having( Expr::col(Glyph::Aspect) .gt(22) .or(Expr::col(Glyph::Aspect).lt(28)) ) .to_string(MysqlQueryBuilder), vec![ "SELECT `aspect`, MAX(`image`) FROM `glyph` GROUP BY `aspect`", "HAVING ((`aspect` > 2) OR (`aspect` < 8))", "AND ((`aspect` > 22) OR (`aspect` < 28))", ] .join(" ") ); } #[test] fn select_35() { let (statement, values) = sea_query::Query::select() .column(Glyph::Id) .from(Glyph::Table) .and_where(Expr::col(Glyph::Aspect).is_null()) .build(sea_query::MysqlQueryBuilder); assert_eq!( statement, r#"SELECT `id` FROM `glyph` WHERE `aspect` IS NULL"# ); assert_eq!(values.0, vec![]); } #[test] fn select_36() { let (statement, values) = sea_query::Query::select() .column(Glyph::Id) .from(Glyph::Table) .cond_where(Cond::any().add(Expr::col(Glyph::Aspect).is_null())) .build(sea_query::MysqlQueryBuilder); assert_eq!( statement, r#"SELECT `id` FROM `glyph` WHERE `aspect` IS NULL"# ); assert_eq!(values.0, vec![]); } #[test] fn select_37() { let (statement, values) = sea_query::Query::select() .column(Glyph::Id) .from(Glyph::Table) .cond_where(Cond::any().add(Cond::all()).add(Cond::any())) .build(sea_query::MysqlQueryBuilder); assert_eq!(statement, r#"SELECT `id` FROM `glyph`"#); assert_eq!(values.0, vec![]); } #[test] fn select_38() { let (statement, values) = sea_query::Query::select() .column(Glyph::Id) .from(Glyph::Table) .cond_where( Cond::any() .add(Expr::col(Glyph::Aspect).is_null()) .add(Expr::col(Glyph::Aspect).is_not_null()), ) .build(sea_query::MysqlQueryBuilder); assert_eq!( statement, r#"SELECT `id` FROM `glyph` WHERE `aspect` IS NULL OR `aspect` IS NOT NULL"# ); assert_eq!(values.0, vec![]); } #[test] fn select_39() { let (statement, values) = sea_query::Query::select() .column(Glyph::Id) .from(Glyph::Table) .cond_where( Cond::all() 
.add(Expr::col(Glyph::Aspect).is_null()) .add(Expr::col(Glyph::Aspect).is_not_null()), ) .build(sea_query::MysqlQueryBuilder); assert_eq!( statement, r#"SELECT `id` FROM `glyph` WHERE `aspect` IS NULL AND `aspect` IS NOT NULL"# ); assert_eq!(values.0, vec![]); } #[test] fn select_40() { let statement = sea_query::Query::select() .column(Glyph::Id) .from(Glyph::Table) .cond_where(any![ Expr::col(Glyph::Aspect).is_null(), all![ Expr::col(Glyph::Aspect).is_not_null(), Expr::col(Glyph::Aspect).lt(8) ] ]) .to_string(sea_query::MysqlQueryBuilder); assert_eq!( statement, r#"SELECT `id` FROM `glyph` WHERE `aspect` IS NULL OR (`aspect` IS NOT NULL AND `aspect` < 8)"# ); } #[test] fn select_41() { assert_eq!( Query::select() .columns(vec![Glyph::Aspect]) .exprs(vec![Expr::col(Glyph::Image).max()]) .from(Glyph::Table) .group_by_columns(vec![Glyph::Aspect]) .cond_having(any![Expr::col(Glyph::Aspect).gt(2)]) .to_string(MysqlQueryBuilder), "SELECT `aspect`, MAX(`image`) FROM `glyph` GROUP BY `aspect` HAVING `aspect` > 2" ); } #[test] fn select_42() { let statement = sea_query::Query::select() .column(Glyph::Id) .from(Glyph::Table) .cond_where( Cond::all() .add_option(Some(Expr::col(Glyph::Aspect).lt(8))) .add(Expr::col(Glyph::Aspect).is_not_null()), ) .to_string(MysqlQueryBuilder); assert_eq!( statement, r#"SELECT `id` FROM `glyph` WHERE `aspect` < 8 AND `aspect` IS NOT NULL"# ); } #[test] fn select_43() { let statement = sea_query::Query::select() .column(Glyph::Id) .from(Glyph::Table) .cond_where(Cond::all().add_option::<SimpleExpr>(None)) .to_string(MysqlQueryBuilder); assert_eq!(statement, "SELECT `id` FROM `glyph`"); } #[test] #[allow(clippy::approx_constant)] #[cfg(feature = "with-json")] fn insert_1() { assert_eq!( Query::insert() .into_table(Glyph::Table) .json(json!({ "image": "24B0E11951B03B07F8300FD003983F03F0780060", "aspect": 2.1345, })) .to_string(MysqlQueryBuilder), "INSERT INTO `glyph` (`aspect`, `image`) VALUES (2.1345, 
'24B0E11951B03B07F8300FD003983F03F0780060')" ); } #[test] #[allow(clippy::approx_constant)] fn insert_2() { assert_eq!( Query::insert() .into_table(Glyph::Table) .columns(vec![ Glyph::Image, Glyph::Aspect, ]) .values_panic(vec![ "04108048005887010020060000204E0180400400".into(), 3.1415.into(), ]) .to_string(MysqlQueryBuilder), "INSERT INTO `glyph` (`image`, `aspect`) VALUES ('04108048005887010020060000204E0180400400', 3.1415)" ); } #[test] #[allow(clippy::approx_constant)] fn insert_3() { assert_eq!( Query::insert() .into_table(Glyph::Table) .columns(vec![ Glyph::Image, Glyph::Aspect, ]) .values_panic(vec![ "04108048005887010020060000204E0180400400".into(), 3.1415.into(), ]) .values_panic(vec![ Value::Null, 2.1345.into(), ]) .to_string(MysqlQueryBuilder), "INSERT INTO `glyph` (`image`, `aspect`) VALUES ('04108048005887010020060000204E0180400400', 3.1415), (NULL, 2.1345)" ); } #[test] #[cfg(feature = "with-chrono")] fn insert_4() { assert_eq!( Query::insert() .into_table(Glyph::Table) .columns(vec![Glyph::Image]) .values_panic(vec![chrono::NaiveDateTime::from_timestamp(0, 0).into()]) .to_string(MysqlQueryBuilder), "INSERT INTO `glyph` (`image`) VALUES ('1970-01-01 00:00:00')" ); } #[test] #[cfg(feature = "with-uuid")] fn insert_5() { assert_eq!( Query::insert() .into_table(Glyph::Table) .columns(vec![Glyph::Image]) .values_panic(vec![uuid::Uuid::nil().into()]) .to_string(MysqlQueryBuilder), "INSERT INTO `glyph` (`image`) VALUES ('00000000-0000-0000-0000-000000000000')" ); } #[test] fn update_1() { assert_eq!( Query::update() .table(Glyph::Table) .values(vec![ (Glyph::Aspect, 2.1345.into()), (Glyph::Image, "24B0E11951B03B07F8300FD003983F03F0780060".into()), ]) .and_where(Expr::col(Glyph::Id).eq(1)) .order_by(Glyph::Id, Order::Asc) .limit(1) .to_string(MysqlQueryBuilder), "UPDATE `glyph` SET `aspect` = 2.1345, `image` = '24B0E11951B03B07F8300FD003983F03F0780060' WHERE `id` = 1 ORDER BY `id` ASC LIMIT 1" ); } #[test] #[cfg(feature = "with-json")] fn update_2() { 
assert_eq!( Query::update() .table(Glyph::Table) .json(json!({ "aspect": 2.1345, "image": "24B0E11951B03B07F8300FD003983F03F0780060", })) .and_where(Expr::col(Glyph::Id).eq(1)) .order_by(Glyph::Id, Order::Asc) .limit(1) .to_string(MysqlQueryBuilder), "UPDATE `glyph` SET `aspect` = 2.1345, `image` = '24B0E11951B03B07F8300FD003983F03F0780060' WHERE `id` = 1 ORDER BY `id` ASC LIMIT 1" ); } #[test] fn update_3() { assert_eq!( Query::update() .table(Glyph::Table) .value_expr(Glyph::Aspect, Expr::cust("60 * 24 * 24")) .values(vec![ (Glyph::Image, "24B0E11951B03B07F8300FD003983F03F0780060".into()), ]) .and_where(Expr::col(Glyph::Id).eq(1)) .order_by(Glyph::Id, Order::Asc) .limit(1) .to_string(MysqlQueryBuilder), "UPDATE `glyph` SET `aspect` = 60 * 24 * 24, `image` = '24B0E11951B03B07F8300FD003983F03F0780060' WHERE `id` = 1 ORDER BY `id` ASC LIMIT 1" ); } #[test] fn delete_1() { assert_eq!( Query::delete() .from_table(Glyph::Table) .and_where(Expr::col(Glyph::Id).eq(1)) .order_by(Glyph::Id, Order::Asc) .limit(1) .to_string(MysqlQueryBuilder), "DELETE FROM `glyph` WHERE `id` = 1 ORDER BY `id` ASC LIMIT 1" ); }
29.906509
184
0.508686
f7e489a36a634eaecc7e76e3eea89e8ec3431637
3,067
use crate::error::invalid_data_error; use crate::sparql::ParseError; use crate::store::numeric_encoder::DecoderError; use std::convert::Infallible; use std::error; use std::fmt; use std::io; /// A SPARQL evaluation error. #[derive(Debug)] #[non_exhaustive] pub enum EvaluationError { /// An error in SPARQL query parsing Parsing(ParseError), /// An error returned during store IOs or during results write Io(io::Error), /// An error returned during the query evaluation itself Query(QueryError), /// A conflict during a transaction #[doc(hidden)] Conflict, } #[derive(Debug)] pub struct QueryError { inner: QueryErrorKind, } #[derive(Debug)] enum QueryErrorKind { Msg { msg: String }, Other(Box<dyn error::Error + Send + Sync + 'static>), } impl fmt::Display for EvaluationError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { Self::Parsing(error) => error.fmt(f), Self::Io(error) => error.fmt(f), Self::Query(error) => error.fmt(f), Self::Conflict => write!(f, "Transaction conflict"), } } } impl fmt::Display for QueryError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match &self.inner { QueryErrorKind::Msg { msg } => write!(f, "{}", msg), QueryErrorKind::Other(error) => error.fmt(f), } } } impl error::Error for EvaluationError { fn source(&self) -> Option<&(dyn error::Error + 'static)> { match self { Self::Parsing(e) => Some(e), Self::Io(e) => Some(e), Self::Query(e) => Some(e), _ => None, } } } impl error::Error for QueryError { fn source(&self) -> Option<&(dyn error::Error + 'static)> { match &self.inner { QueryErrorKind::Msg { .. } => None, QueryErrorKind::Other(e) => Some(e.as_ref()), } } } impl EvaluationError { /// Wraps another error. pub(crate) fn wrap(error: impl error::Error + Send + Sync + 'static) -> Self { Self::Query(QueryError { inner: QueryErrorKind::Other(Box::new(error)), }) } /// Builds an error from a printable error message. 
pub(crate) fn msg(msg: impl Into<String>) -> Self { Self::Query(QueryError { inner: QueryErrorKind::Msg { msg: msg.into() }, }) } } impl From<Infallible> for EvaluationError { fn from(error: Infallible) -> Self { match error {} } } impl From<ParseError> for EvaluationError { fn from(error: ParseError) -> Self { Self::Parsing(error) } } impl From<io::Error> for EvaluationError { fn from(error: io::Error) -> Self { Self::Io(error) } } impl<E: Into<EvaluationError>> From<DecoderError<E>> for EvaluationError { fn from(error: DecoderError<E>) -> Self { match error { DecoderError::Store(error) => error.into(), DecoderError::Decoder { msg } => invalid_data_error(msg).into(), } } }
26.213675
82
0.579067
e521b6c408b7210c376a18e61186a1f071d14e9c
73,378
use crate::email; use crate::models; use crate::sanitizers; use crate::schema; use crate::sql_types::*; use diesel::prelude::*; use diesel::result::Error; use email::Email; use futures::future; use instrumented::{instrument, prometheus, register}; use rolodex_grpc::proto; use rolodex_grpc::tower_grpc::{Request, Response}; pub fn make_intcounter(name: &str, description: &str) -> prometheus::IntCounter { let counter = prometheus::IntCounter::new(name, description).unwrap(); register(Box::new(counter.clone())).unwrap(); counter } lazy_static! { static ref CLIENT_ADDED: prometheus::IntCounter = make_intcounter("client_added_total", "New client added"); static ref CLIENT_PHONE_VERIFIED: prometheus::IntCounter = make_intcounter( "client_phone_verified_total", "Client phone verified via SMS" ); static ref CLIENT_PHONE_VERIFY_BAD_CODE: prometheus::IntCounter = make_intcounter( "client_phone_verify_bad_code_total", "Client phone verification failed due to bad code" ); static ref CLIENT_UPDATE_FAILED: prometheus::IntCounterVec = { let counter_opts = prometheus::Opts::new("client_update_failed_total", "Failed to update a client"); let counter = prometheus::IntCounterVec::new(counter_opts, &["reason"]).unwrap(); register(Box::new(counter.clone())).unwrap(); counter }; static ref CLIENT_ADD_FAILED: prometheus::IntCounterVec = { let counter_opts = prometheus::Opts::new("client_add_failed_total", "Failed to add a client"); let counter = prometheus::IntCounterVec::new(counter_opts, &["reason"]).unwrap(); register(Box::new(counter.clone())).unwrap(); counter }; static ref CLIENT_AUTHED: prometheus::IntCounter = make_intcounter("client_authed_total", "Client authenticated successfully"); static ref CLIENT_UPDATED: prometheus::IntCounter = make_intcounter("client_updated_total", "Client account data updated"); static ref CLIENT_UPDATED_PASSWORD: prometheus::IntCounter = make_intcounter("client_updated_password_total", "Client password updated"); static ref CLIENT_UPDATED_EMAIL: 
prometheus::IntCounter = make_intcounter("client_updated_email_total", "Client email address updated"); static ref CLIENT_UPDATED_PHONE_NUMBER: prometheus::IntCounter = make_intcounter( "client_updated_phone_number_total", "Client phone number updated" ); } #[derive(Clone)] pub struct Rolodex { db_reader: diesel::r2d2::Pool<diesel::r2d2::ConnectionManager<diesel::pg::PgConnection>>, db_writer: diesel::r2d2::Pool<diesel::r2d2::ConnectionManager<diesel::pg::PgConnection>>, redis_reader: r2d2_redis_cluster::r2d2::Pool<r2d2_redis_cluster::RedisClusterConnectionManager>, redis_writer: r2d2_redis_cluster::r2d2::Pool<r2d2_redis_cluster::RedisClusterConnectionManager>, } #[derive(Debug, Fail)] enum RequestError { #[fail(display = "invalid phone number: {}", err)] InvalidPhoneNumber { err: String }, #[fail(display = "invalid email: {}", email)] InvalidEmail { email: String }, #[fail(display = "invalid password: {}", err)] InvalidPassword { err: String }, #[fail(display = "unique violation")] UniqueViolation, #[fail(display = "database error: {}", err)] DatabaseError { err: String }, #[fail(display = "email domain DNS failure: {}", err)] EmailDNSFailure { err: String }, #[fail(display = "invalid client_id: {}", err)] InvalidClientId { err: String }, #[fail(display = "resource could not be found")] NotFound, #[fail(display = "Bad arguments specified for request")] BadArguments, } #[derive(Debug, QueryableByName)] pub struct AmountByDateQueryResult { #[sql_type = "diesel::sql_types::BigInt"] pub count: i64, #[sql_type = "diesel::sql_types::Date"] pub ds: chrono::NaiveDate, } #[derive(Debug, QueryableByName)] pub struct AmountByClientQueryResult { #[sql_type = "diesel::sql_types::BigInt"] pub amount_cents: i64, #[sql_type = "diesel::sql_types::Uuid"] pub client_id: uuid::Uuid, } impl From<diesel::result::Error> for RequestError { fn from(err: diesel::result::Error) -> RequestError { match err { diesel::result::Error::NotFound => RequestError::NotFound, 
diesel::result::Error::DatabaseError( diesel::result::DatabaseErrorKind::UniqueViolation, _, ) => RequestError::UniqueViolation, _ => RequestError::DatabaseError { err: format!("{}", err), }, } } } impl From<r2d2_redis_cluster::r2d2::Error> for RequestError { fn from(err: r2d2_redis_cluster::r2d2::Error) -> RequestError { RequestError::DatabaseError { err: format!("{}", err), } } } impl From<failure::Error> for RequestError { fn from(err: failure::Error) -> RequestError { RequestError::InvalidPhoneNumber { err: format!("{}", err), } } } impl From<r2d2_redis_cluster::redis_cluster_rs::redis::RedisError> for RequestError { fn from(err: r2d2_redis_cluster::redis_cluster_rs::redis::RedisError) -> RequestError { RequestError::DatabaseError { err: format!("{}", err), } } } impl From<srp::types::SrpAuthError> for RequestError { fn from(err: srp::types::SrpAuthError) -> RequestError { RequestError::InvalidPassword { err: format!("{}", err), } } } impl From<email::EmailError> for RequestError { fn from(err: email::EmailError) -> RequestError { CLIENT_ADD_FAILED .with_label_values(&["invalid email"]) .inc(); match err { email::EmailError::BadFormat { email } => { CLIENT_ADD_FAILED .with_label_values(&["invalid email"]) .inc(); RequestError::InvalidEmail { email } } email::EmailError::BannedDomain { email } => { CLIENT_ADD_FAILED .with_label_values(&["banned email domain"]) .inc(); RequestError::InvalidEmail { email } } email::EmailError::InvalidSuffix { email } => { CLIENT_ADD_FAILED .with_label_values(&["invalid suffix"]) .inc(); RequestError::InvalidEmail { email } } email::EmailError::DatabaseError { err } => RequestError::DatabaseError { err }, email::EmailError::InvalidDomain { email } => RequestError::InvalidEmail { email }, email::EmailError::DNSFailure { err } => RequestError::EmailDNSFailure { err }, } } } impl From<uuid::parser::ParseError> for RequestError { fn from(err: uuid::parser::ParseError) -> RequestError { RequestError::InvalidClientId { err: format!("{}", 
err), } } } impl From<models::Client> for rolodex_grpc::proto::GetClientResponse { fn from(client: models::Client) -> rolodex_grpc::proto::GetClientResponse { rolodex_grpc::proto::GetClientResponse { client: Some(client.into()), } } } impl From<models::Client> for proto::NewClientResponse { fn from(client: models::Client) -> proto::NewClientResponse { proto::NewClientResponse { client_id: client.uuid.to_simple().to_string(), referred_by: match client.referred_by { Some(uuid) => uuid.to_simple().to_string(), None => "".into(), }, } } } impl From<models::Client> for proto::Client { fn from(client: models::Client) -> proto::Client { (&client).into() } } impl From<&models::Client> for proto::Client { fn from(client: &models::Client) -> proto::Client { proto::Client { box_public_key: client.box_public_key.clone(), client_id: client.uuid.to_simple().to_string(), full_name: client.full_name.clone(), handle: client .handle .as_ref() .cloned() .unwrap_or_else(|| String::from("")), profile: client .profile .as_ref() .cloned() .unwrap_or_else(|| String::from("")), signing_public_key: client.signing_public_key.clone(), joined: client.created_at.timestamp(), phone_sms_verified: client.phone_sms_verified, ral: client.ral, avatar_version: client.avatar_version, referred_by: match client.referred_by { Some(uuid) => uuid.to_simple().to_string(), None => "".into(), }, } } } impl From<models::ClientPrefs> for proto::Prefs { fn from(prefs: models::ClientPrefs) -> Self { Self { email_notifications: match prefs.email_notifications { EmailNotificationsPref::Never => "never".into(), EmailNotificationsPref::Ral => "ral".into(), EmailNotificationsPref::Always => "always".into(), }, include_in_leaderboard: prefs.include_in_leaderboard, } } } impl From<&proto::Prefs> for models::UpdateClientPrefs { fn from(prefs: &proto::Prefs) -> Self { Self { email_notifications: match prefs.email_notifications.as_ref() { "never" => EmailNotificationsPref::Never, "ral" => EmailNotificationsPref::Ral, 
"always" => EmailNotificationsPref::Always, _ => EmailNotificationsPref::Ral, }, include_in_leaderboard: prefs.include_in_leaderboard, } } } fn insert_client_action( client_id: i64, action: ClientAccountAction, location: &Option<proto::Location>, conn: &diesel::pg::PgConnection, ) -> Result<(), Error> { let (ip_address, region, region_subdivision, city) = if let Some(location) = location { ( Some(location.ip_address.clone()), Some(location.region.clone()), Some(location.region_subdivision.clone()), Some(location.city.clone()), ) } else { (None, None, None, None) }; let client_action = models::NewClientAccountAction { client_id, action, ip_address, region, region_subdivision, city, }; diesel::insert_into(schema::client_account_actions::table) .values(&client_action) .execute(conn)?; Ok(()) } fn validate_phone_number( phone_number: &Option<proto::PhoneNumber>, ) -> Result<String, RequestError> { if let Some(phone_number) = phone_number { let country = phone_number.country_code.parse().unwrap(); let number = phonenumber::parse(Some(country), &phone_number.national_number)?; let phonenumber_valid = number.is_valid(); if !phonenumber_valid { CLIENT_UPDATE_FAILED .with_label_values(&["invalid phone number"]) .inc(); return Err(RequestError::InvalidPhoneNumber { err: number.to_string(), }); } Ok(number .format() .mode(phonenumber::Mode::International) .to_string()) } else { CLIENT_UPDATE_FAILED .with_label_values(&["phone number omitted"]) .inc(); Err(RequestError::InvalidPhoneNumber { err: "no phone number specified".to_string(), }) } } fn generate_and_send_verification_code(client: &models::Client) -> i32 { use crate::messagebird::Client; use rand::Rng; let mut rng = rand::thread_rng(); let code = rng.gen_range(100_000, 1_000_000); let sms_client = Client::new(); let body = format!( "Umpyre verification code: {:03}-{:03}", code / 1000, code % 1000 ); let result = sms_client.send_sms(&client.phone_country_code, &client.phone_number, &body); if result.is_err() { 
error!("sms send error: {:?}", result.unwrap_err()); } code } impl Rolodex { pub fn new( db_reader: diesel::r2d2::Pool<diesel::r2d2::ConnectionManager<diesel::pg::PgConnection>>, db_writer: diesel::r2d2::Pool<diesel::r2d2::ConnectionManager<diesel::pg::PgConnection>>, redis_reader: r2d2_redis_cluster::r2d2::Pool< r2d2_redis_cluster::RedisClusterConnectionManager, >, redis_writer: r2d2_redis_cluster::r2d2::Pool< r2d2_redis_cluster::RedisClusterConnectionManager, >, ) -> Self { Rolodex { db_reader, db_writer, redis_reader, redis_writer, } } /// Returns the client_id for this client if auth succeeds #[instrument(INFO)] fn handle_auth_handshake( &self, request: &proto::AuthHandshakeRequest, ) -> Result<proto::AuthHandshakeResponse, RequestError> { use crate::config; use data_encoding::BASE64URL_NOPAD; use r2d2_redis_cluster::redis_cluster_rs::redis; use r2d2_redis_cluster::redis_cluster_rs::redis::RedisResult; use r2d2_redis_cluster::Commands; use rand::rngs::OsRng; use rand::RngCore; use sha2::Sha256; use srp::groups::G_2048; use srp::server::{SrpServer, UserRecord}; let email = request.email.clone(); // Retrieve client auth info let conn = self.db_reader.get().unwrap(); // Find client_id, if it exists match schema::unique_email_addresses::table .filter(schema::unique_email_addresses::dsl::email_as_entered.eq(&email)) .first::<models::UniqueEmailAddress>(&conn) { Ok(unique_email_address) => { let client_pk_id = unique_email_address.client_id; let client: models::ClientAuth = schema::clients::table .select(( schema::clients::dsl::id, schema::clients::dsl::uuid, schema::clients::dsl::password_verifier, schema::clients::dsl::password_salt, )) .filter(schema::clients::dsl::id.eq(client_pk_id)) .first(&conn)?; let mut b = vec![0u8; 64]; OsRng.fill_bytes(&mut b); let mut redis_conn = self.redis_writer.get()?; let auth_key = format!("auth:{}:{}", email, BASE64URL_NOPAD.encode(&request.a_pub)); redis_conn.set_ex(auth_key.clone(), BASE64URL_NOPAD.encode(&b), 300)?; let 
_result: (i32) = redis::cmd("WAIT") .arg(config::CONFIG.redis.replicas_per_master) .arg(0) .query(&mut (*redis_conn))?; // Verify the key was _actually_ written to n + 1 replicas before proceeding. let mut checks_passed = 0; let checks_required = config::CONFIG.redis.replicas_per_master + 1; while checks_passed < checks_required { let mut redis_conn = self.redis_reader.get()?; let response: RedisResult<String> = redis_conn.get(auth_key.clone()); match response { Ok(_) => checks_passed += 1, _ => checks_passed -= 1, }; checks_passed = std::cmp::max(0, checks_passed); } let user = UserRecord { username: email.as_bytes(), salt: &client.password_salt, verifier: &client.password_verifier, }; let server = SrpServer::<Sha256>::new(&user, &request.a_pub, &b, &G_2048)?; let b_pub = server.get_b_pub(); Ok(proto::AuthHandshakeResponse { email, salt: client.password_salt, b_pub, }) } Err(_err) => { let mut b_pub = vec![0u8; 256]; OsRng.fill_bytes(&mut b_pub); let mut salt = vec![0u8; 16]; OsRng.fill_bytes(&mut salt); Ok(proto::AuthHandshakeResponse { email, salt, b_pub }) } } } /// Returns the client_id for this client if auth succeeds #[instrument(INFO)] fn handle_auth_verify( &self, request: &proto::AuthVerifyRequest, ) -> Result<proto::AuthVerifyResponse, RequestError> { use data_encoding::BASE64URL_NOPAD; use r2d2_redis_cluster::redis_cluster_rs::redis::RedisResult; use r2d2_redis_cluster::Commands; use rolodex_grpc::proto::AuthVerifyResponse; use sha2::Sha256; use srp::groups::G_2048; use srp::server::{SrpServer, UserRecord}; let email = request.email.clone(); let mut redis_conn = self.redis_reader.get()?; let key = format!("auth:{}:{}", email, BASE64URL_NOPAD.encode(&request.a_pub)); let response: RedisResult<String> = redis_conn.get(key.clone()); let b = match response { Ok(response) => BASE64URL_NOPAD.decode(response.as_bytes()).unwrap(), _ => { return Err(RequestError::InvalidPassword { err: "could not retrieve key".into(), }) } }; // Retrieve client auth info let 
conn = self.db_reader.get().unwrap();

        // Find client_id, if it exists
        let unique_email_address: models::UniqueEmailAddress =
            match schema::unique_email_addresses::table
                .filter(schema::unique_email_addresses::dsl::email_as_entered.eq(&email))
                .first(&conn)
            {
                Ok(result) => result,
                _ => return Err(RequestError::BadArguments),
            };

        let client_pk_id = unique_email_address.client_id;

        let client: models::ClientAuth = schema::clients::table
            .select((
                schema::clients::dsl::id,
                schema::clients::dsl::uuid,
                schema::clients::dsl::password_verifier,
                schema::clients::dsl::password_salt,
            ))
            .filter(schema::clients::dsl::id.eq(client_pk_id))
            .first(&conn)?;

        let user = UserRecord {
            username: email.as_bytes(),
            salt: &client.password_salt,
            verifier: &client.password_verifier,
        };

        let server = SrpServer::<Sha256>::new(&user, &request.a_pub, &b, &G_2048)?;

        // Record a successful authentication in the audit log.
        let conn = self.db_writer.get().unwrap();
        insert_client_action(
            client.id,
            ClientAccountAction::Authenticated,
            &request.location,
            &conn,
        )?;

        CLIENT_AUTHED.inc();

        // `verify` fails with `?` if the client proof is wrong, so a
        // response is only produced for a completed, valid exchange.
        Ok(AuthVerifyResponse {
            client_id: client.uuid.to_simple().to_string(),
            server_proof: server.verify(&request.client_proof)?.to_vec(),
            session_key: server.get_key().to_vec(),
        })
    }

    /// Create a new client account: validates phone/email, stores the SRP
    /// verifier, issues an SMS verification code, and audits the creation —
    /// all in one transaction. Returns the new client on success.
    #[instrument(INFO)]
    fn handle_add_client(
        &self,
        request: &proto::NewClientRequest,
    ) -> Result<proto::NewClientResponse, RequestError> {
        use std::{thread, time};

        let phone_number = validate_phone_number(&request.phone_number)?;
        let email: Email = request.email.to_lowercase().parse()?;
        let mut redis_conn = self.redis_reader.get()?;
        email.check_validity(&mut *redis_conn)?;

        let email_as_entered = email.email_as_entered.clone();
        let email_without_labels = email.email_without_labels.clone();

        // A referral is only recorded when it parses as a UUID AND that
        // client actually exists; otherwise it is silently dropped.
        let referred_by = if !request.referred_by.is_empty() {
            let conn = self.db_reader.get().unwrap();
            // check account exists
            match uuid::Uuid::parse_str(&request.referred_by) {
                Ok(ref_uuid) => {
                    let ref_client: Result<models::Client,
diesel::result::Error> =
                        schema::clients::table
                            .filter(schema::clients::dsl::uuid.eq(&ref_uuid))
                            .first(&conn);
                    match ref_client {
                        Ok(_) => Some(ref_uuid),
                        Err(_) => None,
                    }
                }
                Err(_) => None,
            }
        } else {
            None
        };

        let new_client = models::NewClient {
            full_name: sanitizers::full_name(&request.full_name),
            password_verifier: request.password_verifier.clone(),
            password_salt: request.password_salt.clone(),
            phone_number,
            box_public_key: sanitizers::public_key(&request.box_public_key),
            signing_public_key: sanitizers::public_key(&request.signing_public_key),
            // Safe: validate_phone_number above errored out if phone_number
            // was None, so `unwrap` cannot panic here.
            phone_country_code: request.phone_number.as_ref().unwrap().country_code.clone(),
            referred_by,
        };

        // Client row, unique email, verification code, and audit record are
        // created atomically — any failure rolls the whole account back.
        let conn = self.db_writer.get().unwrap();
        let client = conn.transaction::<_, Error, _>(|| {
            let client: models::Client = diesel::insert_into(schema::clients::table)
                .values(&new_client)
                .get_result(&conn)?;

            let new_unique_email_address = models::NewUniqueEmailAddress {
                client_id: client.id,
                email_as_entered,
                email_without_labels,
            };

            diesel::insert_into(schema::unique_email_addresses::table)
                .values(&new_unique_email_address)
                .execute(&conn)?;

            // NOTE: the SMS itself is sent inside the transaction; a later
            // rollback cannot recall an already-sent message.
            let code = generate_and_send_verification_code(&client);
            let new_phone_verification_code = models::NewPhoneVerificationCode {
                client_id: client.id,
                code,
            };

            diesel::insert_into(schema::phone_verification_codes::table)
                .values(&new_phone_verification_code)
                .execute(&conn)?;

            insert_client_action(
                client.id,
                ClientAccountAction::Created,
                &request.location,
                &conn,
            )?;

            Ok(client)
        })?;

        // lastly, wait until new client is visible on read replica(s) before returning.
        // NOTE(review): this poll has no upper bound — if replication never
        // catches up the request hangs; consider a retry limit/deadline.
        let mut client_replicated = false;
        while !client_replicated {
            let conn = self.db_reader.get().unwrap();
            let new_client: Result<models::Client, Error> = schema::clients::table
                .filter(schema::clients::uuid.eq(client.uuid))
                .first(&conn);
            client_replicated = match new_client {
                Ok(_) => true,
                Err(_) => {
                    // Back off briefly between polls of the read replica.
                    let fifty_millis = time::Duration::from_millis(50);
                    thread::sleep(fifty_millis);
                    false
                }
            };
        }

        CLIENT_ADDED.inc();

        Ok(client.into())
    }

    /// Look up a single client, either by uuid or by (case-insensitive)
    /// handle.
    #[instrument(INFO)]
    fn handle_get_client(
        &self,
        request: &proto::GetClientRequest,
    ) -> Result<proto::GetClientResponse, RequestError> {
        match &request.id {
            Some(proto::get_client_request::Id::ClientId(client_id)) => {
                let request_uuid = uuid::Uuid::parse_str(&client_id)?;
                let conn = self.db_reader.get().unwrap();
                let client: models::Client = schema::clients::table
                    .filter(schema::clients::dsl::uuid.eq(&request_uuid))
                    .first(&conn)?;
                Ok(client.into())
            }
            Some(proto::get_client_request::Id::Handle(handle)) => {
                let conn = self.db_reader.get().unwrap();
                // Handles are matched against the stored lowercase column.
                let handle_lowercase = sanitizers::handle(&handle).to_lowercase();
                let client: models::Client = schema::clients::table
                    .filter(schema::clients::dsl::handle_lowercase.eq(&handle_lowercase))
                    .first(&conn)?;
                Ok(client.into())
            }
            _ => Err(RequestError::NotFound),
        }
    }

    /// Fetch the unique email address on record for a client uuid.
    #[instrument(INFO)]
    fn handle_get_client_email(
        &self,
        request: &proto::GetClientEmailRequest,
    ) -> Result<proto::GetClientEmailResponse, RequestError> {
        use models::UniqueEmailAddress;
        let request_uuid = uuid::Uuid::parse_str(&request.client_id)?;
        let conn = self.db_reader.get().unwrap();
        let client: models::Client = schema::clients::table
            .filter(schema::clients::dsl::uuid.eq(&request_uuid))
            .first(&conn)?;

        let email: UniqueEmailAddress = UniqueEmailAddress::belonging_to(&client).first(&conn)?;

        Ok(proto::GetClientEmailResponse {
            client_id: request.client_id.clone(),
            email_as_entered:
email.email_as_entered,
            email_without_labels: email.email_without_labels,
        })
    }

    /// Update the client's profile fields (name, keys, profile, handle).
    /// The change and an audit record are committed in one transaction.
    #[instrument(INFO)]
    fn handle_update_client(
        &self,
        request: &proto::UpdateClientRequest,
    ) -> Result<proto::UpdateClientResponse, RequestError> {
        use crate::optional::Optional;

        let client = if let Some(client) = &request.client {
            client.clone()
        } else {
            return Err(RequestError::BadArguments);
        };

        let request_uuid = uuid::Uuid::parse_str(&client.client_id)?;

        let updated_client = models::UpdateClient {
            full_name: sanitizers::full_name(&client.full_name),
            box_public_key: sanitizers::public_key(&client.box_public_key),
            signing_public_key: sanitizers::public_key(&client.signing_public_key),
            // `into_option()` maps empty strings to None so blank fields
            // are not written over existing values.
            profile: sanitizers::profile(&client.profile).into_option(),
            handle: sanitizers::handle(&client.handle).into_option(),
            handle_lowercase: sanitizers::handle(&client.handle)
                .to_lowercase()
                .into_option(),
        };

        let conn = self.db_writer.get().unwrap();
        let updated_row = conn.transaction::<_, Error, _>(|| {
            let updated_row: models::Client = diesel::update(
                schema::clients::table.filter(schema::clients::uuid.eq(request_uuid)),
            )
            .set(&updated_client)
            .get_result(&conn)?;

            insert_client_action(
                updated_row.id,
                ClientAccountAction::Updated,
                &request.location,
                &conn,
            )?;

            Ok(updated_row)
        })?;

        CLIENT_UPDATED.inc();

        Ok(proto::UpdateClientResponse {
            result: proto::Result::Success as i32,
            client: Some(updated_row.into()),
        })
    }

    /// Set the client's `ral` column to the requested value.
    #[instrument(INFO)]
    fn handle_update_client_ral(
        &self,
        request: &proto::UpdateClientRalRequest,
    ) -> Result<proto::UpdateClientResponse, RequestError> {
        let request_uuid = uuid::Uuid::parse_str(&request.client_id)?;

        let conn = self.db_writer.get().unwrap();
        let client = conn.transaction::<models::Client, Error, _>(|| {
            let updated_row: models::Client = diesel::update(
                schema::clients::table.filter(schema::clients::uuid.eq(request_uuid)),
            )
            .set((schema::clients::ral.eq(request.ral),))
            .get_result(&conn)?;

            Ok(updated_row)
})?;

        // FIX: this previously bumped CLIENT_UPDATED_PASSWORD — a copy/paste
        // from the password handler; a RAL change is a generic client update.
        CLIENT_UPDATED.inc();

        Ok(proto::UpdateClientResponse {
            result: proto::Result::Success as i32,
            client: Some(client.into()),
        })
    }

    /// Atomically increment a client's avatar version counter, forcing
    /// clients/CDNs to refetch the avatar.
    #[instrument(INFO)]
    fn handle_increment_client_avatar(
        &self,
        request: &proto::IncrementClientAvatarRequest,
    ) -> Result<proto::UpdateClientResponse, RequestError> {
        let request_uuid = uuid::Uuid::parse_str(&request.client_id)?;

        let conn = self.db_writer.get().unwrap();
        let client = conn.transaction::<models::Client, Error, _>(|| {
            // The increment happens in SQL, so concurrent bumps compose.
            let updated_row: models::Client = diesel::update(
                schema::clients::table.filter(schema::clients::uuid.eq(request_uuid)),
            )
            .set((schema::clients::avatar_version
                .eq(schema::clients::avatar_version + request.increment_by),))
            .get_result(&conn)?;

            Ok(updated_row)
        })?;

        // FIX: was CLIENT_UPDATED_PASSWORD (copy/paste); an avatar bump is a
        // generic client update, not a password change.
        CLIENT_UPDATED.inc();

        Ok(proto::UpdateClientResponse {
            result: proto::Result::Success as i32,
            client: Some(client.into()),
        })
    }

    /// Replace the client's SRP password verifier and salt, auditing the
    /// change in the same transaction.
    #[instrument(INFO)]
    fn handle_update_client_password(
        &self,
        request: &proto::UpdateClientPasswordRequest,
    ) -> Result<proto::UpdateClientResponse, RequestError> {
        let request_uuid = uuid::Uuid::parse_str(&request.client_id)?;

        let conn = self.db_writer.get().unwrap();
        let client = conn.transaction::<models::Client, Error, _>(|| {
            let updated_row: models::Client = diesel::update(
                schema::clients::table.filter(schema::clients::uuid.eq(request_uuid)),
            )
            .set((
                schema::clients::password_verifier.eq(request.password_verifier.clone()),
                schema::clients::password_salt.eq(request.password_salt.clone()),
            ))
            .get_result(&conn)?;

            insert_client_action(
                updated_row.id,
                ClientAccountAction::PasswordUpdated,
                &request.location,
                &conn,
            )?;

            Ok(updated_row)
        })?;

        CLIENT_UPDATED_PASSWORD.inc();

        Ok(proto::UpdateClientResponse {
            result: proto::Result::Success as i32,
            client: Some(client.into()),
        })
    }

    /// Replace the client's unique email address, auditing the change.
    #[instrument(INFO)]
    fn handle_update_client_email(
        &self,
        request: &proto::UpdateClientEmailRequest,
    ) ->
Result<proto::UpdateClientResponse, RequestError> {
        let request_uuid = uuid::Uuid::parse_str(&request.client_id)?;

        let email: Email = request.email.to_lowercase().parse()?;
        let mut redis_conn = self.redis_reader.get()?;
        email.check_validity(&mut *redis_conn)?;

        let email_as_entered = email.email_as_entered.clone();
        let email_without_labels = email.email_without_labels.clone();

        // Delete + insert of the unique email row and the audit record all
        // commit (or roll back) together.
        let conn = self.db_writer.get().unwrap();
        let client = conn.transaction::<models::Client, Error, _>(|| {
            let client: models::Client = schema::clients::table
                .filter(schema::clients::dsl::uuid.eq(&request_uuid))
                .first(&conn)?;

            // Delete the old email address first
            diesel::delete(
                schema::unique_email_addresses::table
                    .filter(schema::unique_email_addresses::client_id.eq(client.id)),
            )
            .execute(&conn)?;

            let new_unique_email_address = models::NewUniqueEmailAddress {
                client_id: client.id,
                email_as_entered,
                email_without_labels,
            };

            // Insert new email address
            diesel::insert_into(schema::unique_email_addresses::table)
                .values(&new_unique_email_address)
                .execute(&conn)?;

            insert_client_action(
                client.id,
                ClientAccountAction::EmailUpdated,
                &request.location,
                &conn,
            )?;

            // Re-read so the response reflects the committed row.
            let client: models::Client = schema::clients::table
                .filter(schema::clients::dsl::uuid.eq(&request_uuid))
                .first(&conn)?;

            Ok(client)
        })?;

        CLIENT_UPDATED_EMAIL.inc();

        Ok(proto::UpdateClientResponse {
            result: proto::Result::Success as i32,
            client: Some(client.into()),
        })
    }

    /// Mark the client's phone number as SMS-verified.
    /// NOTE(review): despite the name, this only sets `phone_sms_verified`;
    /// the phone number columns themselves are not modified here — confirm
    /// that is intended.
    #[instrument(INFO)]
    fn handle_update_client_phone_number(
        &self,
        request: &proto::UpdateClientPhoneNumberRequest,
    ) -> Result<proto::UpdateClientResponse, RequestError> {
        let request_uuid = uuid::Uuid::parse_str(&request.client_id)?;

        let conn = self.db_writer.get().unwrap();
        let client = conn.transaction::<models::Client, Error, _>(|| {
            let updated_row: models::Client = diesel::update(
                schema::clients::table.filter(schema::clients::uuid.eq(request_uuid)),
            )
            .set(schema::clients::phone_sms_verified.eq(true))
            .get_result(&conn)?;
            insert_client_action(
                updated_row.id,
                ClientAccountAction::PhoneNumberUpdated,
                &request.location,
                &conn,
            )?;

            Ok(updated_row)
        })?;

        CLIENT_UPDATED_PHONE_NUMBER.inc();

        Ok(proto::UpdateClientResponse {
            result: proto::Result::Success as i32,
            client: Some(client.into()),
        })
    }

    /// Check a submitted SMS verification code against the stored one and,
    /// on a match, flag the phone as verified. When
    /// `messagebird.verification_enforced` is off, any code is accepted.
    #[instrument(INFO)]
    fn handle_verify_phone(
        &self,
        request: &proto::VerifyPhoneRequest,
    ) -> Result<proto::VerifyPhoneResponse, RequestError> {
        use crate::config;
        use crate::models::PhoneVerificationCode;
        use crate::schema::clients::columns::{phone_sms_verified, uuid as client_uuid};
        use crate::schema::clients::table as clients;

        let request_uuid = uuid::Uuid::parse_str(&request.client_id)?;

        let conn = self.db_reader.get().unwrap();
        let client: models::Client = clients.filter(client_uuid.eq(request_uuid)).first(&conn)?;

        let db_code: PhoneVerificationCode =
            PhoneVerificationCode::belonging_to(&client).first(&conn)?;

        if config::CONFIG.messagebird.verification_enforced && db_code.code != request.code {
            // Wrong code: report failure without mutating anything.
            CLIENT_PHONE_VERIFY_BAD_CODE.inc();
            Ok(proto::VerifyPhoneResponse {
                result: proto::Result::Failure as i32,
                client: Some(client.into()),
            })
        } else {
            let conn = self.db_writer.get().unwrap();
            let client = conn.transaction::<models::Client, Error, _>(|| {
                let updated_row: models::Client =
                    diesel::update(clients.filter(client_uuid.eq(request_uuid)))
                        .set(phone_sms_verified.eq(true))
                        .get_result(&conn)?;

                insert_client_action(
                    updated_row.id,
                    ClientAccountAction::PhoneVerified,
                    &request.location,
                    &conn,
                )?;

                Ok(updated_row)
            })?;

            CLIENT_PHONE_VERIFIED.inc();

            Ok(proto::VerifyPhoneResponse {
                result: proto::Result::Success as i32,
                client: Some(client.into()),
            })
        }
    }

    /// Regenerate and re-send the client's SMS verification code, subject
    /// to a 30-second rate limit.
    #[instrument(INFO)]
    fn handle_send_verification_code(
        &self,
        request: &proto::SendVerificationCodeRequest,
    ) -> Result<proto::SendVerificationCodeResponse, RequestError> {
        use crate::models::PhoneVerificationCode;
        use crate::schema::clients::columns::uuid as client_uuid;
        use
crate::schema::clients::table as clients;
        use crate::schema::phone_verification_codes::table as phone_verification_codes;
        use chrono::prelude::*;
        use diesel::update;

        let request_uuid = uuid::Uuid::parse_str(&request.client_id)?;

        let conn = self.db_reader.get().unwrap();
        let client: models::Client = clients.filter(client_uuid.eq(request_uuid)).first(&conn)?;

        let db_code: PhoneVerificationCode =
            PhoneVerificationCode::belonging_to(&client).first(&conn)?;

        // Age of the most recent code, based on the row's updated_at.
        let duration = Utc::now().naive_utc() - db_code.updated_at;

        // Don't send more than 1 code every 30 seconds; outside the window
        // the request silently does nothing (still returns success).
        if duration.num_seconds() > 30 {
            let code = generate_and_send_verification_code(&client);
            let conn = self.db_writer.get().unwrap();
            update(
                phone_verification_codes
                    .filter(crate::schema::phone_verification_codes::columns::id.eq(db_code.id)),
            )
            .set(crate::schema::phone_verification_codes::columns::code.eq(code))
            .execute(&conn)?;
        }

        Ok(proto::SendVerificationCodeResponse {})
    }

    /// Fetch the client's preferences, lazily creating a default prefs row
    /// the first time one is requested.
    #[instrument(INFO)]
    fn handle_get_prefs(
        &self,
        request: &proto::GetPrefsRequest,
    ) -> Result<proto::GetPrefsResponse, RequestError> {
        use crate::models::{ClientPrefs, NewClientPrefs};
        use crate::schema::clients::columns::uuid as client_uuid;
        use crate::schema::clients::table as clients;
        use crate::schema::prefs::table as prefs;

        let request_uuid = uuid::Uuid::parse_str(&request.client_id)?;

        let conn = self.db_reader.get().unwrap();
        let client: models::Client = clients.filter(client_uuid.eq(request_uuid)).first(&conn)?;

        let client_prefs: ClientPrefs = match ClientPrefs::belonging_to(&client).first(&conn) {
            Ok(client_prefs) => client_prefs,
            // No row yet: create one with defaults via the write pool.
            Err(_) => {
                let conn = self.db_writer.get().unwrap();
                diesel::insert_into(prefs)
                    .values(&NewClientPrefs {
                        client_id: client.id,
                    })
                    .get_result(&conn)?
}
        };

        Ok(proto::GetPrefsResponse {
            client_id: request.client_id.clone(),
            prefs: Some(client_prefs.into()),
        })
    }

    // Get all the clients referred by a client
    #[instrument(INFO)]
    fn handle_get_referrals(
        &self,
        request: &proto::GetReferralsRequest,
    ) -> Result<proto::GetReferralsResponse, RequestError> {
        use crate::schema::clients::columns::referred_by;
        use crate::schema::clients::table as clients;

        let request_uuid = uuid::Uuid::parse_str(&request.referred_by_client_id)?;

        let conn = self.db_reader.get().unwrap();
        let referred_clients = clients
            .filter(referred_by.eq(request_uuid))
            .load::<models::Client>(&conn)?;

        Ok(proto::GetReferralsResponse {
            referred_by_client_id: request.referred_by_client_id.clone(),
            referrals: referred_clients.iter().map(proto::Client::from).collect(),
        })
    }

    /// Overwrite the client's preferences. A request without a prefs
    /// payload resets them to the defaults below.
    #[instrument(INFO)]
    fn handle_update_prefs(
        &self,
        request: &proto::UpdatePrefsRequest,
    ) -> Result<proto::UpdatePrefsResponse, RequestError> {
        use crate::models::{ClientPrefs, UpdateClientPrefs};
        use crate::schema::clients::columns::uuid as client_uuid;
        use crate::schema::clients::table as clients;

        let request_uuid = uuid::Uuid::parse_str(&request.client_id)?;

        let conn = self.db_reader.get().unwrap();
        let client: models::Client = clients.filter(client_uuid.eq(request_uuid)).first(&conn)?;

        let conn = self.db_writer.get().unwrap();
        let updated_prefs: UpdateClientPrefs = match request.prefs.as_ref() {
            Some(prefs) => prefs.into(),
            // Missing payload: fall back to the default preference set.
            _ => UpdateClientPrefs {
                email_notifications: EmailNotificationsPref::Ral,
                include_in_leaderboard: true,
            },
        };
        let client_prefs: ClientPrefs = diesel::update(ClientPrefs::belonging_to(&client))
            .set(&updated_prefs)
            .get_result(&conn)?;

        Ok(proto::UpdatePrefsResponse {
            client_id: request.client_id.clone(),
            prefs: Some(client_prefs.into()),
        })
    }

    // Get client stats
    #[instrument(INFO)]
    fn handle_get_stats(
        &self,
        _request: &proto::GetStatsRequest,
    ) -> Result<proto::GetStatsResponse, RequestError> {
        use chrono::Datelike;
        use
diesel::prelude::*;
        use diesel::result::Error;
        use diesel::sql_query;

        let conn = self.db_reader.get().unwrap();

        // Cumulative client count for each of the last 31 days (the join is
        // on created_at <= day, so each day's count includes all prior days).
        let result: Result<Vec<AmountByDateQueryResult>, Error> = sql_query(
            r#"
SELECT Count(1) AS count,
       dq.date AS ds
FROM   (SELECT ( CURRENT_DATE - offs ) AS date
        FROM   Generate_series(1, 31, 1) AS offs) AS dq
       LEFT OUTER JOIN clients c
                    ON Date(c.created_at) <= dq.date
GROUP  BY dq.date
ORDER  BY dq.date
"#,
        )
        .get_results(&conn);

        // Stats are best-effort: a query failure is logged and yields an
        // empty series rather than failing the whole RPC.
        let clients_by_date = match result {
            Ok(result) => result
                .iter()
                .map(|result| proto::CountByDate {
                    count: result.count,
                    year: result.ds.year(),
                    month: result.ds.month() as i32,
                    day: result.ds.day() as i32,
                })
                .collect(),
            Err(err) => {
                error!("Error reading stats: {:?}", err);
                vec![]
            }
        };

        // Top-10 leaderboard by `ral` (in cents), restricted to clients who
        // opted in via prefs.include_in_leaderboard.
        let result: Result<Vec<AmountByClientQueryResult>, Error> = sql_query(
            r#"
SELECT CAST(c.ral * 100 AS BIGINT) AS amount_cents,
       c.uuid                      AS client_id
FROM   clients AS c
       LEFT OUTER JOIN prefs p
                    ON p.client_id = c.id
WHERE  p.include_in_leaderboard = true
ORDER  BY c.ral DESC
limit  10
"#,
        )
        .get_results(&conn);

        let clients_by_ral = match result {
            Ok(result) => result
                .iter()
                .map(|result| proto::AmountByClient {
                    amount_cents: result.amount_cents,
                    client_id: result.client_id.to_simple().to_string(),
                })
                .collect(),
            Err(err) => {
                error!("Error reading stats: {:?}", err);
                vec![]
            }
        };

        Ok(proto::GetStatsResponse {
            clients_by_date,
            clients_by_ral,
        })
    }
}

// gRPC service glue: each method delegates to the matching handle_* method
// and adapts its Result into an already-resolved FutureResult.
impl proto::server::Rolodex for Rolodex {
    type AuthHandshakeFuture = future::FutureResult<
        Response<proto::AuthHandshakeResponse>,
        rolodex_grpc::tower_grpc::Status,
    >;
    fn auth_handshake(
        &mut self,
        request: Request<proto::AuthHandshakeRequest>,
    ) -> Self::AuthHandshakeFuture {
        use futures::future::IntoFuture;
        use rolodex_grpc::tower_grpc::{Code, Status};
        self.handle_auth_handshake(request.get_ref())
            .map(Response::new)
            .map_err(|err| Status::new(Code::InvalidArgument, err.to_string()))
            .into_future()
    }

    type AuthVerifyFuture =
        future::FutureResult<Response<proto::AuthVerifyResponse>, rolodex_grpc::tower_grpc::Status>;
fn auth_verify( &mut self, request: Request<proto::AuthVerifyRequest>, ) -> Self::AuthVerifyFuture { use futures::future::IntoFuture; use rolodex_grpc::tower_grpc::{Code, Status}; self.handle_auth_verify(request.get_ref()) .map(Response::new) .map_err(|err| Status::new(Code::InvalidArgument, err.to_string())) .into_future() } type AddClientFuture = future::FutureResult<Response<proto::NewClientResponse>, rolodex_grpc::tower_grpc::Status>; fn add_client(&mut self, request: Request<proto::NewClientRequest>) -> Self::AddClientFuture { use futures::future::IntoFuture; use rolodex_grpc::tower_grpc::{Code, Status}; self.handle_add_client(request.get_ref()) .map(Response::new) .map_err(|err| Status::new(Code::InvalidArgument, err.to_string())) .into_future() } type GetClientFuture = future::FutureResult<Response<proto::GetClientResponse>, rolodex_grpc::tower_grpc::Status>; fn get_client(&mut self, request: Request<proto::GetClientRequest>) -> Self::GetClientFuture { use futures::future::IntoFuture; use rolodex_grpc::tower_grpc::{Code, Status}; self.handle_get_client(request.get_ref()) .map(Response::new) .map_err(|err| Status::new(Code::InvalidArgument, err.to_string())) .into_future() } type GetClientEmailFuture = future::FutureResult< Response<proto::GetClientEmailResponse>, rolodex_grpc::tower_grpc::Status, >; fn get_client_email( &mut self, request: Request<proto::GetClientEmailRequest>, ) -> Self::GetClientEmailFuture { use futures::future::IntoFuture; use rolodex_grpc::tower_grpc::{Code, Status}; self.handle_get_client_email(request.get_ref()) .map(Response::new) .map_err(|err| Status::new(Code::InvalidArgument, err.to_string())) .into_future() } type UpdateClientFuture = future::FutureResult< Response<proto::UpdateClientResponse>, rolodex_grpc::tower_grpc::Status, >; fn update_client( &mut self, request: Request<proto::UpdateClientRequest>, ) -> Self::UpdateClientFuture { use futures::future::IntoFuture; use rolodex_grpc::tower_grpc::{Code, Status}; 
        // gRPC shim over `handle_update_client`.
        self.handle_update_client(request.get_ref())
            .map(Response::new)
            .map_err(|err| Status::new(Code::InvalidArgument, err.to_string()))
            .into_future()
    }

    type UpdateClientRalFuture = future::FutureResult<
        Response<proto::UpdateClientResponse>,
        rolodex_grpc::tower_grpc::Status,
    >;
    // gRPC shim over `handle_update_client_ral`.
    fn update_client_ral(
        &mut self,
        request: Request<proto::UpdateClientRalRequest>,
    ) -> Self::UpdateClientRalFuture {
        use futures::future::IntoFuture;
        use rolodex_grpc::tower_grpc::{Code, Status};
        self.handle_update_client_ral(request.get_ref())
            .map(Response::new)
            .map_err(|err| Status::new(Code::InvalidArgument, err.to_string()))
            .into_future()
    }

    type IncrementClientAvatarFuture = future::FutureResult<
        Response<proto::UpdateClientResponse>,
        rolodex_grpc::tower_grpc::Status,
    >;
    // gRPC shim over `handle_increment_client_avatar`.
    fn increment_client_avatar(
        &mut self,
        request: Request<proto::IncrementClientAvatarRequest>,
    ) -> Self::IncrementClientAvatarFuture {
        use futures::future::IntoFuture;
        use rolodex_grpc::tower_grpc::{Code, Status};
        self.handle_increment_client_avatar(request.get_ref())
            .map(Response::new)
            .map_err(|err| Status::new(Code::InvalidArgument, err.to_string()))
            .into_future()
    }

    type UpdateClientPasswordFuture = future::FutureResult<
        Response<proto::UpdateClientResponse>,
        rolodex_grpc::tower_grpc::Status,
    >;
    // gRPC shim over `handle_update_client_password`.
    fn update_client_password(
        &mut self,
        request: Request<proto::UpdateClientPasswordRequest>,
    ) -> Self::UpdateClientPasswordFuture {
        use futures::future::IntoFuture;
        use rolodex_grpc::tower_grpc::{Code, Status};
        self.handle_update_client_password(request.get_ref())
            .map(Response::new)
            .map_err(|err| Status::new(Code::InvalidArgument, err.to_string()))
            .into_future()
    }

    type UpdateClientEmailFuture = future::FutureResult<
        Response<proto::UpdateClientResponse>,
        rolodex_grpc::tower_grpc::Status,
    >;
    // gRPC shim over `handle_update_client_email`.
    fn update_client_email(
        &mut self,
        request: Request<proto::UpdateClientEmailRequest>,
    ) -> Self::UpdateClientEmailFuture {
        use futures::future::IntoFuture;
        use rolodex_grpc::tower_grpc::{Code, Status};
        self.handle_update_client_email(request.get_ref())
            .map(Response::new)
            .map_err(|err| Status::new(Code::InvalidArgument, err.to_string()))
            .into_future()
    }

    type UpdateClientPhoneNumberFuture = future::FutureResult<
        Response<proto::UpdateClientResponse>,
        rolodex_grpc::tower_grpc::Status,
    >;
    // gRPC shim over `handle_update_client_phone_number`.
    fn update_client_phone_number(
        &mut self,
        request: Request<proto::UpdateClientPhoneNumberRequest>,
    ) -> Self::UpdateClientPhoneNumberFuture {
        use futures::future::IntoFuture;
        use rolodex_grpc::tower_grpc::{Code, Status};
        self.handle_update_client_phone_number(request.get_ref())
            .map(Response::new)
            .map_err(|err| Status::new(Code::InvalidArgument, err.to_string()))
            .into_future()
    }

    type VerifyPhoneFuture = future::FutureResult<
        Response<proto::VerifyPhoneResponse>,
        rolodex_grpc::tower_grpc::Status,
    >;
    // gRPC shim over `handle_verify_phone`.
    fn verify_phone(
        &mut self,
        request: Request<proto::VerifyPhoneRequest>,
    ) -> Self::VerifyPhoneFuture {
        use futures::future::IntoFuture;
        use rolodex_grpc::tower_grpc::{Code, Status};
        self.handle_verify_phone(request.get_ref())
            .map(Response::new)
            .map_err(|err| Status::new(Code::InvalidArgument, err.to_string()))
            .into_future()
    }

    type SendVerificationCodeFuture = future::FutureResult<
        Response<proto::SendVerificationCodeResponse>,
        rolodex_grpc::tower_grpc::Status,
    >;
    // gRPC shim over `handle_send_verification_code`.
    fn send_verification_code(
        &mut self,
        request: Request<proto::SendVerificationCodeRequest>,
    ) -> Self::SendVerificationCodeFuture {
        use futures::future::IntoFuture;
        use rolodex_grpc::tower_grpc::{Code, Status};
        self.handle_send_verification_code(request.get_ref())
            .map(Response::new)
            .map_err(|err| Status::new(Code::InvalidArgument, err.to_string()))
            .into_future()
    }

    type GetPrefsFuture =
        future::FutureResult<Response<proto::GetPrefsResponse>, rolodex_grpc::tower_grpc::Status>;
    // gRPC shim over `handle_get_prefs`.
    fn get_prefs(&mut self, request: Request<proto::GetPrefsRequest>) -> Self::GetPrefsFuture {
        use futures::future::IntoFuture;
        use rolodex_grpc::tower_grpc::{Code, Status};
        self.handle_get_prefs(request.get_ref())
            .map(Response::new)
            .map_err(|err|
Status::new(Code::InvalidArgument, err.to_string()))
            .into_future()
    }

    type UpdatePrefsFuture = future::FutureResult<
        Response<proto::UpdatePrefsResponse>,
        rolodex_grpc::tower_grpc::Status,
    >;
    // gRPC shim over `handle_update_prefs`.
    fn update_prefs(
        &mut self,
        request: Request<proto::UpdatePrefsRequest>,
    ) -> Self::UpdatePrefsFuture {
        use futures::future::IntoFuture;
        use rolodex_grpc::tower_grpc::{Code, Status};
        self.handle_update_prefs(request.get_ref())
            .map(Response::new)
            .map_err(|err| Status::new(Code::InvalidArgument, err.to_string()))
            .into_future()
    }

    type GetReferralsFuture = future::FutureResult<
        Response<proto::GetReferralsResponse>,
        rolodex_grpc::tower_grpc::Status,
    >;
    // gRPC shim over `handle_get_referrals`.
    fn get_referrals(
        &mut self,
        request: Request<proto::GetReferralsRequest>,
    ) -> Self::GetReferralsFuture {
        use futures::future::IntoFuture;
        use rolodex_grpc::tower_grpc::{Code, Status};
        self.handle_get_referrals(request.get_ref())
            .map(Response::new)
            .map_err(|err| Status::new(Code::InvalidArgument, err.to_string()))
            .into_future()
    }

    type GetStatsFuture =
        future::FutureResult<Response<proto::GetStatsResponse>, rolodex_grpc::tower_grpc::Status>;
    // gRPC shim over `handle_get_stats`.
    fn get_stats(&mut self, request: Request<proto::GetStatsRequest>) -> Self::GetStatsFuture {
        use futures::future::IntoFuture;
        use rolodex_grpc::tower_grpc::{Code, Status};
        self.handle_get_stats(request.get_ref())
            .map(Response::new)
            .map_err(|err| Status::new(Code::InvalidArgument, err.to_string()))
            .into_future()
    }

    type CheckFuture = future::FutureResult<
        Response<proto::HealthCheckResponse>,
        rolodex_grpc::tower_grpc::Status,
    >;
    // Health check: always reports Serving.
    fn check(&mut self, _request: Request<proto::HealthCheckRequest>) -> Self::CheckFuture {
        future::ok(Response::new(proto::HealthCheckResponse {
            status: proto::health_check_response::ServingStatus::Serving as i32,
        }))
    }
}

#[cfg(test)]
mod tests {
    // Note this useful idiom: importing names from outer (for mod tests) scope.
use super::*; use diesel::dsl::*; use diesel::r2d2::{ConnectionManager, Pool}; use sha2::Sha256; use srp::client::SrpClient; use srp::groups::G_2048; use std::sync::Mutex; lazy_static! { static ref LOCK: Mutex<i32> = Mutex::new(0); } fn get_pools() -> ( diesel::r2d2::Pool<diesel::r2d2::ConnectionManager<diesel::pg::PgConnection>>, r2d2_redis_cluster::r2d2::Pool<r2d2_redis_cluster::RedisClusterConnectionManager>, ) { let pg_manager = ConnectionManager::<PgConnection>::new( "postgres://postgres:[email protected]:5432/rolodex", ); let db_pool = Pool::builder().build(pg_manager).unwrap(); let redis_manager = r2d2_redis_cluster::RedisClusterConnectionManager::new("redis://127.0.0.1/").unwrap(); let redis_pool = r2d2_redis_cluster::r2d2::Pool::builder() .build(redis_manager) .unwrap(); (db_pool, redis_pool) } fn empty_tables( db_pool: &diesel::r2d2::Pool<diesel::r2d2::ConnectionManager<diesel::pg::PgConnection>>, ) { let conn = db_pool.get().unwrap(); macro_rules! empty_tables { ( $( $x:ident ),* ) => { $( diesel::delete(schema::$x::table).execute(&conn).unwrap(); assert_eq!(Ok(0), schema::$x::table.select(count(schema::$x::id)).first(&conn)); )* }; } empty_tables![client_account_actions, unique_email_addresses, clients]; } fn email_in_table( db_pool: &diesel::r2d2::Pool<diesel::r2d2::ConnectionManager<diesel::pg::PgConnection>>, email: &str, ) -> bool { let conn = db_pool.get().unwrap(); let count: i64 = schema::unique_email_addresses::table .select(count(schema::unique_email_addresses::id)) .filter(schema::unique_email_addresses::email_as_entered.eq(email)) .first(&conn) .unwrap(); count > 0 } fn phone_number_in_table( db_pool: &diesel::r2d2::Pool<diesel::r2d2::ConnectionManager<diesel::pg::PgConnection>>, phone_number: &str, ) -> bool { let conn = db_pool.get().unwrap(); let count: i64 = schema::clients::table .select(count(schema::clients::id)) .filter(schema::clients::phone_number.eq(phone_number)) .first(&conn) .unwrap(); count > 0 } fn gen_salt() -> Vec<u8> { use 
rand::rngs::OsRng; use rand::RngCore; let mut salt = [0u8; 32]; OsRng.fill_bytes(&mut salt); salt.to_vec() } fn gen_a() -> Vec<u8> { use rand::rngs::OsRng; use rand::RngCore; let mut a = [0u8; 64]; OsRng.fill_bytes(&mut a); a.to_vec() } fn make_srp_client<'a>( email: &str, password: &str, salt: &[u8], a: &[u8], ) -> (SrpClient<'a, Sha256>, Vec<u8>) { use srp::client::srp_private_key; let private_key = srp_private_key::<Sha256>(email.as_bytes(), password.as_bytes(), salt); (SrpClient::<Sha256>::new(&a, &G_2048), private_key.to_vec()) } fn make_client( rolodex: &Rolodex, db_pool: &diesel::r2d2::Pool<diesel::r2d2::ConnectionManager<diesel::pg::PgConnection>>, email: &str, password: &str, ) -> proto::NewClientResponse { let password_salt = gen_salt(); let a = gen_a(); let (srp_client, srp_private_key) = make_srp_client(email, password, &password_salt, &a); let password_verifier = srp_client.get_password_verifier(&srp_private_key); let result = rolodex.handle_add_client(&proto::NewClientRequest { full_name: "Bob Marley".into(), email: email.into(), phone_number: Some(proto::PhoneNumber { country_code: "US".into(), national_number: "4013213952".into(), }), password_verifier: password_verifier.clone(), password_salt: password_salt.clone(), box_public_key: "herp derp".into(), signing_public_key: "herp derp".into(), location: Some(proto::Location { ip_address: "127.0.0.1".into(), region: "United States".into(), region_subdivision: "New York".into(), city: "New York".into(), }), }); assert_eq!(result.is_ok(), true); assert_eq!(email_in_table(&db_pool, email), true); let client = result.unwrap(); assert_eq!(auth_client(rolodex, email, password, &client), true); client } fn auth_client( rolodex: &Rolodex, email: &str, password: &str, client: &proto::NewClientResponse, ) -> bool { let email = email.to_string(); let a = gen_a(); let srp_client = SrpClient::<Sha256>::new(&a, &G_2048); let auth_result = rolodex.handle_auth_handshake(&proto::AuthHandshakeRequest { email: 
email.clone(), a_pub: srp_client.get_a_pub().clone(), location: Some(proto::Location { ip_address: "127.0.0.1".into(), region: "United States".into(), region_subdivision: "New York".into(), city: "New York".into(), }), }); assert_eq!(auth_result.is_ok(), true); let auth_result = auth_result.unwrap(); assert_eq!(auth_result.email, email); let (srp_client, srp_private_key) = make_srp_client(&email, password, &auth_result.salt, &a); let a_pub = srp_client.get_a_pub().clone(); let srp_client2 = srp_client .process_reply(&srp_private_key, &auth_result.b_pub) .unwrap(); let auth_result = rolodex.handle_auth_verify(&proto::AuthVerifyRequest { email: email.clone(), a_pub, client_proof: srp_client2.get_proof().to_vec(), location: Some(proto::Location { ip_address: "127.0.0.1".into(), region: "United States".into(), region_subdivision: "New York".into(), city: "New York".into(), }), }); assert_eq!(auth_result.is_ok(), true); let auth_result = auth_result.unwrap(); assert_eq!(auth_result.client_id, client.client_id); let verify_result = srp_client2.verify_server(&auth_result.server_proof); assert_eq!(verify_result.is_ok(), true); verify_result.is_ok() } #[test] fn test_add_client_valid() { let _lock = LOCK.lock().unwrap(); tokio::run(future::lazy(|| { let (db_pool, redis_pool) = get_pools(); empty_tables(&db_pool); let rolodex = Rolodex::new( db_pool.clone(), db_pool.clone(), redis_pool.clone(), redis_pool.clone(), ); let _client = make_client(&rolodex, &db_pool, "[email protected]", "secrit"); future::ok(()) })); } #[test] fn test_client_invalid_auth() { let _lock = LOCK.lock().unwrap(); tokio::run(future::lazy(|| { let (db_pool, redis_pool) = get_pools(); empty_tables(&db_pool); let rolodex = Rolodex::new( db_pool.clone(), db_pool.clone(), redis_pool.clone(), redis_pool.clone(), ); let auth_result = rolodex.handle_auth_handshake(&proto::AuthHandshakeRequest { email: "[email protected]".to_string(), a_pub: b"blah".to_vec(), location: Some(proto::Location { ip_address: 
"127.0.0.1".into(), region: "United States".into(), region_subdivision: "New York".into(), city: "New York".into(), }), }); assert_eq!(auth_result.is_err(), true); future::ok(()) })); } #[test] fn test_add_client_duplicate_email() { let _lock = LOCK.lock().unwrap(); tokio::run(future::lazy(|| { let (db_pool, redis_pool) = get_pools(); empty_tables(&db_pool); let rolodex = Rolodex::new( db_pool.clone(), db_pool.clone(), redis_pool.clone(), redis_pool.clone(), ); let _client = make_client(&rolodex, &db_pool, "[email protected]", "secrit"); let email = "[email protected]".to_string(); let password_salt = gen_salt(); let a = gen_a(); let (srp_client, srp_private_key) = make_srp_client(&email, "secrit", &password_salt, &a); let password_verifier = srp_client.get_password_verifier(&srp_private_key); let result = rolodex.handle_add_client(&proto::NewClientRequest { full_name: "Bob Marley".into(), email: email.clone(), phone_number: Some(proto::PhoneNumber { country_code: "US".into(), national_number: "4013213953".into(), }), password_verifier: password_verifier.clone(), password_salt: password_salt.clone(), box_public_key: "herp derp".into(), signing_public_key: "herp derp".into(), location: Some(proto::Location { ip_address: "127.0.0.1".into(), region: "United States".into(), region_subdivision: "New York".into(), city: "New York".into(), }), }); assert_eq!(result.is_ok(), false); future::ok(()) })); } #[test] fn test_add_client_duplicate_phone() { let _lock = LOCK.lock().unwrap(); tokio::run(future::lazy(|| { let (db_pool, redis_pool) = get_pools(); empty_tables(&db_pool); let rolodex = Rolodex::new( db_pool.clone(), db_pool.clone(), redis_pool.clone(), redis_pool.clone(), ); let _client = make_client(&rolodex, &db_pool, "[email protected]", "secrit"); let email = "[email protected]".to_string(); let password_salt = gen_salt(); let a = gen_a(); let (srp_client, srp_private_key) = make_srp_client(&email, "secrit", &password_salt, &a); let password_verifier = 
srp_client.get_password_verifier(&srp_private_key); let result = rolodex.handle_add_client(&proto::NewClientRequest { full_name: "Bob Marley".into(), email: email.clone(), phone_number: Some(proto::PhoneNumber { country_code: "US".into(), national_number: "4013213952".into(), }), password_verifier: password_verifier.clone(), password_salt: password_salt.clone(), box_public_key: "herp derp".into(), signing_public_key: "herp derp".into(), location: Some(proto::Location { ip_address: "127.0.0.1".into(), region: "United States".into(), region_subdivision: "New York".into(), city: "New York".into(), }), }); assert_eq!(result.is_ok(), false); assert_eq!(email_in_table(&db_pool, "[email protected]"), false); future::ok(()) })); } #[test] fn test_get_client() { let _lock = LOCK.lock().unwrap(); tokio::run(future::lazy(|| { let (db_pool, redis_pool) = get_pools(); empty_tables(&db_pool); let rolodex = Rolodex::new( db_pool.clone(), db_pool.clone(), redis_pool.clone(), redis_pool.clone(), ); let client = make_client(&rolodex, &db_pool, "[email protected]", "secrit"); let get_client = rolodex.handle_get_client(&proto::GetClientRequest { id: Some(rolodex_grpc::proto::get_client_request::Id::ClientId( client.client_id.clone(), )), calling_client_id: client.client_id.to_string(), }); assert_eq!(get_client.is_ok(), true); assert_eq!( get_client.unwrap().client.unwrap().client_id, client.client_id ); future::ok(()) })); } #[test] fn test_add_client_update_client() { let _lock = LOCK.lock().unwrap(); tokio::run(future::lazy(|| { let (db_pool, redis_pool) = get_pools(); empty_tables(&db_pool); let rolodex = Rolodex::new( db_pool.clone(), db_pool.clone(), redis_pool.clone(), redis_pool.clone(), ); let client = make_client(&rolodex, &db_pool, "[email protected]", "secrit"); // Update client model let update_result = rolodex.handle_update_client(&proto::UpdateClientRequest { client: Some(proto::Client { client_id: client.client_id.to_string(), full_name: "bob nob".into(), 
box_public_key: "herp derp".into(), signing_public_key: "herp derp".into(), handle: "handle".into(), profile: "profile".into(), }), location: Some(proto::Location { ip_address: "127.0.0.1".into(), region: "United States".into(), region_subdivision: "New York".into(), city: "New York".into(), }), }); if update_result.is_err() { panic!("err: {:?}", update_result.err()); } assert_eq!(update_result.is_ok(), true); assert_eq!(update_result.unwrap().result, proto::Result::Success as i32); let updated_client = rolodex .handle_get_client(&proto::GetClientRequest { calling_client_id: client.client_id.clone(), id: Some(rolodex_grpc::proto::get_client_request::Id::ClientId( client.client_id.clone(), )), }) .unwrap(); let updated_client = updated_client.client.unwrap().clone(); assert_eq!(updated_client.full_name, "bob nob"); assert_eq!(updated_client.box_public_key, "herp derp"); assert_eq!(updated_client.signing_public_key, "herp derp"); assert_eq!(updated_client.handle, "handle"); assert_eq!(updated_client.profile, "profile"); future::ok(()) })); } #[test] fn test_add_client_update_password() { let _lock = LOCK.lock().unwrap(); tokio::run(future::lazy(|| { let (db_pool, redis_pool) = get_pools(); empty_tables(&db_pool); let rolodex = Rolodex::new( db_pool.clone(), db_pool.clone(), redis_pool.clone(), redis_pool.clone(), ); let email = "[email protected]"; let client = make_client(&rolodex, &db_pool, email, "secrit"); // Update password let new_password = "new_password"; let password_salt = gen_salt(); let a = gen_a(); let (srp_client, srp_private_key) = make_srp_client(email, new_password, &password_salt, &a); let password_verifier = srp_client.get_password_verifier(&srp_private_key); let update_result = rolodex.handle_update_client_password(&proto::UpdateClientPasswordRequest { client_id: client.client_id.to_string(), password_verifier, password_salt, location: None, }); if update_result.is_err() { panic!("err: {:?}", update_result.err()); } 
assert_eq!(update_result.is_ok(), true); assert_eq!(update_result.unwrap().result, proto::Result::Success as i32); // Try auth with new password assert_eq!(auth_client(&rolodex, email, new_password, &client), true); future::ok(()) })); } #[test] fn test_add_client_update_email() { let _lock = LOCK.lock().unwrap(); tokio::run(future::lazy(|| { let (db_pool, redis_pool) = get_pools(); empty_tables(&db_pool); let rolodex = Rolodex::new( db_pool.clone(), db_pool.clone(), redis_pool.clone(), redis_pool.clone(), ); let client = make_client(&rolodex, &db_pool, "[email protected]", "secrit"); // Update Email let update_result = rolodex.handle_update_client_email(&proto::UpdateClientEmailRequest { client_id: client.client_id.to_string(), email: "[email protected]".into(), location: Some(proto::Location { ip_address: "127.0.0.1".into(), region: "United States".into(), region_subdivision: "New York".into(), city: "New York".into(), }), }); if update_result.is_err() { panic!("err: {:?}", update_result.err()); } assert_eq!(update_result.is_ok(), true); assert_eq!(update_result.unwrap().result, proto::Result::Success as i32); assert_eq!(email_in_table(&db_pool, "[email protected]"), true); future::ok(()) })); } #[test] fn test_add_client_update_phone_number() { let _lock = LOCK.lock().unwrap(); tokio::run(future::lazy(|| { let (db_pool, redis_pool) = get_pools(); empty_tables(&db_pool); let rolodex = Rolodex::new( db_pool.clone(), db_pool.clone(), redis_pool.clone(), redis_pool.clone(), ); let client = make_client(&rolodex, &db_pool, "[email protected]", "secrit"); // Update phone number let update_result = rolodex.handle_update_client_phone_number(&proto::UpdateClientPhoneNumberRequest { client_id: client.client_id.to_string(), phone_number: Some(proto::PhoneNumber { country_code: "US".into(), national_number: "5105825858".into(), }), location: Some(proto::Location { ip_address: "127.0.0.1".into(), region: "United States".into(), region_subdivision: "New York".into(), city: 
"New York".into(), }), }); if update_result.is_err() { panic!("err: {:?}", update_result.err()); } assert_eq!(update_result.is_ok(), true); assert_eq!(update_result.unwrap().result, proto::Result::Success as i32); assert_eq!(phone_number_in_table(&db_pool, "+1 510-582-5858"), true); future::ok(()) })); } }
35.934378
100
0.563956
0983ad0470d2d0255b31e7b0e03cda360016aadd
7,097
use serde_json::Value; use sgx_tstd::{ptr, string::String, time::SystemTime, vec::Vec}; use sgx_types::*; type SignatureAlgorithms = &'static [&'static webpki::SignatureAlgorithm]; static SUPPORTED_SIG_ALGS: SignatureAlgorithms = &[ &webpki::ECDSA_P256_SHA256, &webpki::ECDSA_P256_SHA384, &webpki::ECDSA_P384_SHA256, &webpki::ECDSA_P384_SHA384, &webpki::RSA_PSS_2048_8192_SHA256_LEGACY_KEY, &webpki::RSA_PSS_2048_8192_SHA384_LEGACY_KEY, &webpki::RSA_PSS_2048_8192_SHA512_LEGACY_KEY, &webpki::RSA_PKCS1_2048_8192_SHA256, &webpki::RSA_PKCS1_2048_8192_SHA384, &webpki::RSA_PKCS1_2048_8192_SHA512, &webpki::RSA_PKCS1_3072_8192_SHA384, ]; fn verify_intel_sign( attn_report: Vec<u8>, sig: Vec<u8>, cert: Vec<u8>, ) -> Result<(), sgx_status_t> { let now = match webpki::Time::try_from(SystemTime::now()) { Ok(r) => r, Err(e) => { println!("webpki::Time::try_from failed with {:?}", e); return Err(sgx_status_t::SGX_ERROR_UNEXPECTED); } }; let root_ca_raw = include_bytes!("../../../cert/AttestationReportSigningCACert.pem"); let root_ca_pem = pem::parse(root_ca_raw).expect("failed to parse pem file."); let root_ca = root_ca_pem.contents; let mut root_store = rustls::RootCertStore::empty(); root_store .add(&rustls::Certificate(root_ca.clone())) .unwrap(); let trust_anchors: Vec<webpki::TrustAnchor> = root_store .roots .iter() .map(|cert| cert.to_trust_anchor()) .collect(); let mut chain: Vec<&[u8]> = Vec::new(); chain.push(&root_ca); let report_cert = webpki::EndEntityCert::from(&cert).unwrap(); match report_cert.verify_is_valid_tls_server_cert( SUPPORTED_SIG_ALGS, &webpki::TLSServerTrustAnchors(&trust_anchors), &chain, now, ) { Ok(r) => r, Err(e) => { println!("verify_is_valid_tls_server_cert failed with {:?}", e); return Err(sgx_status_t::SGX_ERROR_UNEXPECTED); } }; match report_cert.verify_signature(&webpki::RSA_PKCS1_2048_8192_SHA256, &attn_report, &sig) { Ok(_) => Ok(()), Err(e) => { println!("verify_signature failed with {:?}", e); Err(sgx_status_t::SGX_ERROR_UNEXPECTED) } } } fn 
get_quote_from_attn_report(attn_report: Vec<u8>) -> Result<sgx_quote_t, sgx_status_t> { let attn_report: Value = serde_json::from_slice(&attn_report).unwrap(); // Check timestamp is within 24H if let Value::String(time) = &attn_report["timestamp"] { let time_fixed = time.clone() + "+0000"; println!("Time = {}", time_fixed); } else { println!("Failed to fetch timestamp from attestation report"); return Err(sgx_status_t::SGX_ERROR_UNEXPECTED); } if let Value::String(version) = &attn_report["version"] { if version != "4" { return Err(sgx_status_t::SGX_ERROR_UNEXPECTED); } } // Verify quote status (mandatory field) if let Value::String(quote_status) = &attn_report["isvEnclaveQuoteStatus"] { match quote_status.as_ref() { "OK" => (), "GROUP_OUT_OF_DATE" | "GROUP_REVOKED" | "CONFIGURATION_NEEDED" => {} _ => return Err(sgx_status_t::SGX_ERROR_UNEXPECTED), } } else { println!("Failed to fetch isvEnclaveQuoteStatus from attestation report"); return Err(sgx_status_t::SGX_ERROR_UNEXPECTED); } match &attn_report["isvEnclaveQuoteBody"] { Value::String(quote_raw) => { let quote = base64::decode(&quote_raw).unwrap(); let sgx_quote: sgx_quote_t = unsafe { ptr::read(quote.as_ptr() as *const _) }; Ok(sgx_quote) } _ => { println!("Failed to fetch isvEnclaveQuoteBody from attestation report"); Err(sgx_status_t::SGX_ERROR_UNEXPECTED) } } } pub fn verify_ra_cert(cert_der: &[u8]) -> Result<(), sgx_status_t> { // Search for Public Key prime256v1 OID let prime256v1_oid = &[0x06, 0x08, 0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x03, 0x01, 0x07]; let mut offset = cert_der .windows(prime256v1_oid.len()) .position(|window| window == prime256v1_oid) .unwrap(); offset += 11; // 10 + TAG (0x03) // Obtain Public Key length let mut len = cert_der[offset] as usize; if len > 0x80 { len = (cert_der[offset + 1] as usize) * 0x100 + (cert_der[offset + 2] as usize); offset += 2; } // Obtain Public Key offset += 1; let pub_k = cert_der[offset + 2..offset + len].to_vec(); // skip "00 04" // Search for Netscape Comment 
OID let ns_cmt_oid = &[ 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x86, 0xF8, 0x42, 0x01, 0x0D, ]; let mut offset = cert_der .windows(ns_cmt_oid.len()) .position(|window| window == ns_cmt_oid) .unwrap(); offset += 12; // 11 + TAG (0x04) // Obtain Netscape Comment length let mut len = cert_der[offset] as usize; if len > 0x80 { len = (cert_der[offset + 1] as usize) * 0x100 + (cert_der[offset + 2] as usize); offset += 2; } // Obtain Netscape Comment offset += 1; let payload = cert_der[offset..offset + len].to_vec(); // Extract each field let mut iter = payload.split(|x| *x == 0x7C); let attn_report_raw = iter.next().unwrap(); let attn_report = attn_report_raw.to_vec(); let sig_raw = iter.next().unwrap(); let sig = base64::decode(&sig_raw).unwrap(); let cert_raw = iter.next().unwrap(); let cert = base64::decode(&cert_raw).unwrap(); verify_intel_sign(attn_report.clone(), sig, cert)?; let sgx_quote = get_quote_from_attn_report(attn_report)?; // Borrow of packed field is unsafe in future Rust releases // ATTENTION // DO SECURITY CHECK ON DEMAND // DO SECURITY CHECK ON DEMAND // DO SECURITY CHECK ON DEMAND unsafe { println!("sgx quote version = {}", sgx_quote.version); println!("sgx quote signature type = {}", sgx_quote.sign_type); println!( "sgx quote report_data = {}", sgx_quote .report_body .report_data .d .iter() .map(|c| format!("{:02x}", c)) .collect::<String>() ); println!( "sgx quote mr_enclave = {}", sgx_quote .report_body .mr_enclave .m .iter() .map(|c| format!("{:02x}", c)) .collect::<String>() ); println!( "sgx quote mr_signer = {}", sgx_quote .report_body .mr_signer .m .iter() .map(|c| format!("{:02x}", c)) .collect::<String>() ); }; if sgx_quote.report_body.report_data.d.to_vec() == pub_k.to_vec() { println!("ue RA done!"); } Ok(()) }
32.113122
97
0.583627
3a2ca19d8e0c3ebc6ab59dce81891525dbe5f11b
397
// compile-flags: -Z print-type-sizes // build-pass (FIXME(62277): could be check-pass?) // ignore-pass // ^-- needed because `--pass check` does not emit the output needed. // FIXME: consider using an attribute instead of side-effects. #![feature(start)] #[start] fn start(_: isize, _: *const *const u8) -> isize { let _x: Option<!> = None; let _y: Result<u32, !> = Ok(42); 0 }
26.466667
69
0.632242
4be62051827a24fe71fce8631a7e5b15165899b7
27,182
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 // // Portions Copyright 2017 The Chromium OS Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the THIRD-PARTY file. use super::{create_sockaddr, create_socket, vnet_hdr_len, Error as NetUtilError, MacAddr}; use mac::MAC_ADDR_LEN; use net_gen; use std::fs::File; use std::io::{Error as IoError, Read, Result as IoResult, Write}; use std::net; use std::os::raw::*; use std::os::unix::io::{AsRawFd, FromRawFd, RawFd}; use vmm_sys_util::ioctl::{ioctl_with_mut_ref, ioctl_with_ref, ioctl_with_val}; #[derive(Debug)] pub enum Error { /// Couldn't open /dev/net/tun. OpenTun(IoError), /// Unable to configure tap interface. ConfigureTap(IoError), /// Unable to retrieve features. GetFeatures(IoError), /// Missing multiqueue support in the kernel. MultiQueueKernelSupport, /// ioctl failed. IoctlError(IoError), /// Failed to create a socket. NetUtil(NetUtilError), InvalidIfname, /// Error parsing MAC data MacParsing(IoError), } pub type Result<T> = ::std::result::Result<T, Error>; /// Handle for a network tap interface. /// /// For now, this simply wraps the file descriptor for the tap device so methods /// can run ioctls on the interface. The tap interface fd will be closed when /// Tap goes out of scope, and the kernel will clean up the interface /// automatically. #[derive(Debug)] pub struct Tap { tap_file: File, if_name: Vec<u8>, } impl PartialEq for Tap { fn eq(&self, other: &Tap) -> bool { self.if_name == other.if_name } } impl std::clone::Clone for Tap { fn clone(&self) -> Self { Tap { tap_file: self.tap_file.try_clone().unwrap(), if_name: self.if_name.clone(), } } } // Returns a byte vector representing the contents of a null terminated C string which // contains if_name. 
fn build_terminated_if_name(if_name: &str) -> Result<Vec<u8>> { // Convert the string slice to bytes, and shadow the variable, // since we no longer need the &str version. let if_name = if_name.as_bytes(); // TODO: the 16usize limit of the if_name member from struct Tap is pretty arbitrary. // We leave it as is for now, but this should be refactored at some point. if if_name.len() > 15 { return Err(Error::InvalidIfname); } let mut terminated_if_name = vec![b'\0'; if_name.len() + 1]; terminated_if_name[..if_name.len()].copy_from_slice(if_name); Ok(terminated_if_name) } impl Tap { pub fn open_named(if_name: &str, num_queue_pairs: usize, flags: Option<i32>) -> Result<Tap> { let terminated_if_name = build_terminated_if_name(if_name)?; let fd = unsafe { // Open calls are safe because we give a constant null-terminated // string and verify the result. libc::open( b"/dev/net/tun\0".as_ptr() as *const c_char, flags.unwrap_or(libc::O_RDWR | libc::O_NONBLOCK | libc::O_CLOEXEC), ) }; if fd < 0 { return Err(Error::OpenTun(IoError::last_os_error())); } // We just checked that the fd is valid. let tuntap = unsafe { File::from_raw_fd(fd) }; // Let's validate some features before going any further. // ioctl is safe since we call it with a valid tap fd and check the return // value. let mut features = 0; let ret = unsafe { ioctl_with_mut_ref(&tuntap, net_gen::TUNGETFEATURES(), &mut features) }; if ret < 0 { return Err(Error::GetFeatures(IoError::last_os_error())); } // Check if the user parameters match the kernel support for MQ if (features & net_gen::IFF_MULTI_QUEUE == 0) && num_queue_pairs > 1 { return Err(Error::MultiQueueKernelSupport); } // This is pretty messy because of the unions used by ifreq. Since we // don't call as_mut on the same union field more than once, this block // is safe. 
let mut ifreq: net_gen::ifreq = Default::default(); unsafe { let ifrn_name = ifreq.ifr_ifrn.ifrn_name.as_mut(); let ifru_flags = ifreq.ifr_ifru.ifru_flags.as_mut(); let name_slice = &mut ifrn_name[..terminated_if_name.len()]; name_slice.copy_from_slice(terminated_if_name.as_slice()); *ifru_flags = (net_gen::IFF_TAP | net_gen::IFF_NO_PI | net_gen::IFF_VNET_HDR) as c_short; if num_queue_pairs > 1 { *ifru_flags |= net_gen::IFF_MULTI_QUEUE as c_short; } } // ioctl is safe since we call it with a valid tap fd and check the return // value. let ret = unsafe { ioctl_with_mut_ref(&tuntap, net_gen::TUNSETIFF(), &mut ifreq) }; if ret < 0 { return Err(Error::ConfigureTap(IoError::last_os_error())); } let if_name_temp = unsafe { *ifreq.ifr_ifrn.ifrn_name.as_ref() }; let mut if_name = if_name_temp.to_vec(); if_name.truncate(terminated_if_name.len() - 1); // Safe since only the name is accessed, and it's cloned out. Ok(Tap { tap_file: tuntap, if_name, }) } /// Create a new tap interface. pub fn new(num_queue_pairs: usize) -> Result<Tap> { Self::open_named("vmtap%d", num_queue_pairs, None) } pub fn from_tap_fd(fd: RawFd) -> Result<Tap> { // Ensure that the file is opened non-blocking, this is particularly // needed when opened via the shell for macvtap. let ret = unsafe { let mut flags = libc::fcntl(fd, libc::F_GETFL); flags |= libc::O_NONBLOCK; libc::fcntl(fd, libc::F_SETFL, flags) }; if ret < 0 { return Err(Error::ConfigureTap(IoError::last_os_error())); } let tap_file = unsafe { File::from_raw_fd(fd) }; let mut ifreq: net_gen::ifreq = Default::default(); // Get current config including name let ret = unsafe { ioctl_with_mut_ref(&tap_file, net_gen::TUNGETIFF(), &mut ifreq) }; if ret < 0 { return Err(Error::IoctlError(IoError::last_os_error())); } let if_name = unsafe { *ifreq.ifr_ifrn.ifrn_name.as_ref() }.to_vec(); // Try and update flags. Depending on how the tap was created (macvtap // or via open_named()) this might return -EEXIST so we just ignore that. 
unsafe { let ifru_flags = ifreq.ifr_ifru.ifru_flags.as_mut(); *ifru_flags = (net_gen::IFF_TAP | net_gen::IFF_NO_PI | net_gen::IFF_VNET_HDR) as c_short; } let ret = unsafe { ioctl_with_mut_ref(&tap_file, net_gen::TUNSETIFF(), &mut ifreq) }; if ret < 0 && IoError::last_os_error().raw_os_error().unwrap() != libc::EEXIST { return Err(Error::ConfigureTap(IoError::last_os_error())); } let tap = Tap { if_name, tap_file }; let offload_flags = net_gen::TUN_F_CSUM | net_gen::TUN_F_UFO | net_gen::TUN_F_TSO4 | net_gen::TUN_F_TSO6; let vnet_hdr_size = vnet_hdr_len() as i32; tap.set_offload(offload_flags)?; tap.set_vnet_hdr_size(vnet_hdr_size)?; Ok(tap) } /// Set the host-side IP address for the tap interface. pub fn set_ip_addr(&self, ip_addr: net::Ipv4Addr) -> Result<()> { let sock = create_socket().map_err(Error::NetUtil)?; let addr = create_sockaddr(ip_addr); let mut ifreq = self.get_ifreq(); // We only access one field of the ifru union, hence this is safe. unsafe { let ifru_addr = ifreq.ifr_ifru.ifru_addr.as_mut(); *ifru_addr = addr; } // ioctl is safe. Called with a valid sock fd, and we check the return. #[allow(clippy::cast_lossless)] let ret = unsafe { ioctl_with_ref(&sock, net_gen::sockios::SIOCSIFADDR as c_ulong, &ifreq) }; if ret < 0 { return Err(Error::IoctlError(IoError::last_os_error())); } Ok(()) } /// Set mac addr for tap interface. pub fn set_mac_addr(&self, addr: MacAddr) -> Result<()> { // Checking if the mac address already matches the desired one // is useful to avoid making the "set ioctl" in the case where // the VMM is running without the privilege to do that. // In practice this comes from a reboot after the configuration // has been update with the kernel generated address. if self.get_mac_addr()? == addr { return Ok(()); } let sock = create_socket().map_err(Error::NetUtil)?; let mut ifreq = self.get_ifreq(); // ioctl is safe. Called with a valid sock fd, and we check the return. 
#[allow(clippy::cast_lossless)] let ret = unsafe { ioctl_with_ref(&sock, net_gen::sockios::SIOCGIFHWADDR as c_ulong, &ifreq) }; if ret < 0 { return Err(Error::IoctlError(IoError::last_os_error())); } // We only access one field of the ifru union, hence this is safe. unsafe { let ifru_hwaddr = ifreq.ifr_ifru.ifru_hwaddr.as_mut(); for (i, v) in addr.get_bytes().iter().enumerate() { ifru_hwaddr.sa_data[i] = *v as c_char; } } // ioctl is safe. Called with a valid sock fd, and we check the return. #[allow(clippy::cast_lossless)] let ret = unsafe { ioctl_with_ref(&sock, net_gen::sockios::SIOCSIFHWADDR as c_ulong, &ifreq) }; if ret < 0 { return Err(Error::IoctlError(IoError::last_os_error())); } Ok(()) } /// Get mac addr for tap interface. pub fn get_mac_addr(&self) -> Result<MacAddr> { let sock = create_socket().map_err(Error::NetUtil)?; let ifreq = self.get_ifreq(); // ioctl is safe. Called with a valid sock fd, and we check the return. #[allow(clippy::cast_lossless)] let ret = unsafe { ioctl_with_ref(&sock, net_gen::sockios::SIOCGIFHWADDR as c_ulong, &ifreq) }; if ret < 0 { return Err(Error::IoctlError(IoError::last_os_error())); } // We only access one field of the ifru union, hence this is safe. let addr = unsafe { let ifru_hwaddr = ifreq.ifr_ifru.ifru_hwaddr.as_ref(); MacAddr::from_bytes( &*(&ifru_hwaddr.sa_data[0..MAC_ADDR_LEN] as *const _ as *const [u8]), ) .map_err(Error::MacParsing)? }; Ok(addr) } /// Set the netmask for the subnet that the tap interface will exist on. pub fn set_netmask(&self, netmask: net::Ipv4Addr) -> Result<()> { let sock = create_socket().map_err(Error::NetUtil)?; let addr = create_sockaddr(netmask); let mut ifreq = self.get_ifreq(); // We only access one field of the ifru union, hence this is safe. unsafe { let ifru_addr = ifreq.ifr_ifru.ifru_addr.as_mut(); *ifru_addr = addr; } // ioctl is safe. Called with a valid sock fd, and we check the return. 
#[allow(clippy::cast_lossless)] let ret = unsafe { ioctl_with_ref(&sock, net_gen::sockios::SIOCSIFNETMASK as c_ulong, &ifreq) }; if ret < 0 { return Err(Error::IoctlError(IoError::last_os_error())); } Ok(()) } /// Set the offload flags for the tap interface. pub fn set_offload(&self, flags: c_uint) -> Result<()> { // ioctl is safe. Called with a valid tap fd, and we check the return. #[allow(clippy::cast_lossless)] let ret = unsafe { ioctl_with_val(&self.tap_file, net_gen::TUNSETOFFLOAD(), flags as c_ulong) }; if ret < 0 { return Err(Error::IoctlError(IoError::last_os_error())); } Ok(()) } /// Enable the tap interface. pub fn enable(&self) -> Result<()> { let sock = create_socket().map_err(Error::NetUtil)?; let mut ifreq = self.get_ifreq(); #[allow(clippy::cast_lossless)] let ret = unsafe { ioctl_with_ref(&sock, net_gen::sockios::SIOCGIFFLAGS as c_ulong, &ifreq) }; if ret < 0 { return Err(Error::IoctlError(IoError::last_os_error())); } // If TAP device is already up don't try and enable it let ifru_flags = unsafe { ifreq.ifr_ifru.ifru_flags.as_ref() }; if ifru_flags & (net_gen::net_device_flags_IFF_UP | net_gen::net_device_flags_IFF_RUNNING) as i16 == (net_gen::net_device_flags_IFF_UP | net_gen::net_device_flags_IFF_RUNNING) as i16 { return Ok(()); } // We only access one field of the ifru union, hence this is safe. unsafe { let ifru_flags = ifreq.ifr_ifru.ifru_flags.as_mut(); *ifru_flags = (net_gen::net_device_flags_IFF_UP | net_gen::net_device_flags_IFF_RUNNING) as i16; } // ioctl is safe. Called with a valid sock fd, and we check the return. #[allow(clippy::cast_lossless)] let ret = unsafe { ioctl_with_ref(&sock, net_gen::sockios::SIOCSIFFLAGS as c_ulong, &ifreq) }; if ret < 0 { return Err(Error::IoctlError(IoError::last_os_error())); } Ok(()) } /// Set the size of the vnet hdr. pub fn set_vnet_hdr_size(&self, size: c_int) -> Result<()> { // ioctl is safe. Called with a valid tap fd, and we check the return. 
let ret = unsafe { ioctl_with_ref(&self.tap_file, net_gen::TUNSETVNETHDRSZ(), &size) }; if ret < 0 { return Err(Error::IoctlError(IoError::last_os_error())); } Ok(()) } fn get_ifreq(&self) -> net_gen::ifreq { let mut ifreq: net_gen::ifreq = Default::default(); // This sets the name of the interface, which is the only entry // in a single-field union. unsafe { let ifrn_name = ifreq.ifr_ifrn.ifrn_name.as_mut(); let name_slice = &mut ifrn_name[..self.if_name.len()]; name_slice.copy_from_slice(&self.if_name); } ifreq } pub fn get_if_name(&self) -> Vec<u8> { self.if_name.clone() } } impl Read for Tap { fn read(&mut self, buf: &mut [u8]) -> IoResult<usize> { self.tap_file.read(buf) } } impl Write for Tap { fn write(&mut self, buf: &[u8]) -> IoResult<usize> { self.tap_file.write(&buf) } fn flush(&mut self) -> IoResult<()> { Ok(()) } } impl AsRawFd for Tap { fn as_raw_fd(&self) -> RawFd { self.tap_file.as_raw_fd() } } #[cfg(test)] mod tests { extern crate pnet; use std::net::Ipv4Addr; use std::str; use std::sync::{mpsc, Mutex}; use std::thread; use std::time::Duration; use self::pnet::datalink::Channel::Ethernet; use self::pnet::datalink::{self, DataLinkReceiver, DataLinkSender, NetworkInterface}; use self::pnet::packet::ethernet::{EtherTypes, EthernetPacket, MutableEthernetPacket}; use self::pnet::packet::ip::IpNextHeaderProtocols; use self::pnet::packet::ipv4::{Ipv4Packet, MutableIpv4Packet}; use self::pnet::packet::udp::{MutableUdpPacket, UdpPacket}; use self::pnet::packet::{MutablePacket, Packet}; use self::pnet::util::MacAddr; use super::*; static DATA_STRING: &str = "test for tap"; static SUBNET_MASK: &str = "255.255.255.0"; // We needed to have a mutex as a global variable, so we used the crate that provides the // lazy_static! macro for testing. 
The main potential problem, caused by tests being run in // parallel by cargo, is creating different TAPs and trying to associate the same address, // so we hide the IP address &str behind this mutex, more as a convention to remember to lock // it at the very beginning of each function susceptible to this issue. Another variant is // to use a different IP address per function, but we must remember to pick an unique one // each time. lazy_static! { static ref TAP_IP_LOCK: Mutex<&'static str> = Mutex::new("192.168.241.1"); } // Describes the outcomes we are currently interested in when parsing a packet (we use // an UDP packet for testing). struct ParsedPkt<'a> { eth: EthernetPacket<'a>, ipv4: Option<Ipv4Packet<'a>>, udp: Option<UdpPacket<'a>>, } impl<'a> ParsedPkt<'a> { fn new(buf: &'a [u8]) -> Self { let eth = EthernetPacket::new(buf).unwrap(); let mut ipv4 = None; let mut udp = None; if eth.get_ethertype() == EtherTypes::Ipv4 { let ipv4_start = 14; ipv4 = Some(Ipv4Packet::new(&buf[ipv4_start..]).unwrap()); // Hiding the old ipv4 variable for the rest of this block. let ipv4 = Ipv4Packet::new(eth.payload()).unwrap(); if ipv4.get_next_level_protocol() == IpNextHeaderProtocols::Udp { // The value in header_length indicates the number of 32 bit words // that make up the header, not the actual length in bytes. 
let udp_start = ipv4_start + ipv4.get_header_length() as usize * 4; udp = Some(UdpPacket::new(&buf[udp_start..]).unwrap()); } } ParsedPkt { eth, ipv4, udp } } fn print(&self) { print!( "{} {} {} ", self.eth.get_source(), self.eth.get_destination(), self.eth.get_ethertype() ); if let Some(ref ipv4) = self.ipv4 { print!( "{} {} {} ", ipv4.get_source(), ipv4.get_destination(), ipv4.get_next_level_protocol() ); } if let Some(ref udp) = self.udp { print!( "{} {} {}", udp.get_source(), udp.get_destination(), str::from_utf8(udp.payload()).unwrap() ); } println!(); } } fn tap_name_to_string(tap: &Tap) -> String { let null_pos = tap.if_name.iter().position(|x| *x == 0).unwrap(); str::from_utf8(&tap.if_name[..null_pos]) .unwrap() .to_string() } // Given a buffer of appropriate size, this fills in the relevant fields based on the // provided information. Payload refers to the UDP payload. fn pnet_build_packet(buf: &mut [u8], dst_mac: MacAddr, payload: &[u8]) { let mut eth = MutableEthernetPacket::new(buf).unwrap(); eth.set_source(MacAddr::new(0x06, 0, 0, 0, 0, 0)); eth.set_destination(dst_mac); eth.set_ethertype(EtherTypes::Ipv4); let mut ipv4 = MutableIpv4Packet::new(eth.payload_mut()).unwrap(); ipv4.set_version(4); ipv4.set_header_length(5); ipv4.set_total_length(20 + 8 + payload.len() as u16); ipv4.set_ttl(200); ipv4.set_next_level_protocol(IpNextHeaderProtocols::Udp); ipv4.set_source(Ipv4Addr::new(192, 168, 241, 1)); ipv4.set_destination(Ipv4Addr::new(192, 168, 241, 2)); let mut udp = MutableUdpPacket::new(ipv4.payload_mut()).unwrap(); udp.set_source(1000); udp.set_destination(1001); udp.set_length(8 + payload.len() as u16); udp.set_payload(payload); } // Sends a test packet on the interface named "ifname". 
fn pnet_send_packet(ifname: String) { let payload = DATA_STRING.as_bytes(); // eth hdr + ip hdr + udp hdr + payload len let buf_size = 14 + 20 + 8 + payload.len(); let (mac, mut tx, _) = pnet_get_mac_tx_rx(ifname); let res = tx.build_and_send(1, buf_size, &mut |buf| { pnet_build_packet(buf, mac, payload); }); // Make sure build_and_send() -> Option<io::Result<()>> succeeds. res.unwrap().unwrap(); } // For a given interface name, this returns a tuple that contains the MAC address of the // interface, an object that can be used to send Ethernet frames, and a receiver of // Ethernet frames arriving at the specified interface. fn pnet_get_mac_tx_rx( ifname: String, ) -> (MacAddr, Box<dyn DataLinkSender>, Box<dyn DataLinkReceiver>) { let interface_name_matches = |iface: &NetworkInterface| iface.name == ifname; // Find the network interface with the provided name. let interfaces = datalink::interfaces(); let interface = interfaces.into_iter().find(interface_name_matches).unwrap(); if let Ok(Ethernet(tx, rx)) = datalink::channel(&interface, Default::default()) { (interface.mac.unwrap(), tx, rx) } else { panic!("datalink channel error or unhandled channel type"); } } #[test] fn test_tap_create() { let t = Tap::new(1).unwrap(); println!("created tap: {:?}", t); } #[test] fn test_tap_from_fd() { let orig_tap = Tap::new(1).unwrap(); let fd = orig_tap.as_raw_fd(); let _new_tap = Tap::from_tap_fd(fd).unwrap(); } #[test] fn test_tap_configure() { // This should be the first thing to be called inside the function, so everything else // is torn down by the time the mutex is automatically released. Also, we should // explicitly bind the MutexGuard to a variable via let, the make sure it lives until // the end of the function. 
let tap_ip_guard = TAP_IP_LOCK.lock().unwrap(); let tap = Tap::new(1).unwrap(); let ip_addr: net::Ipv4Addr = (*tap_ip_guard).parse().unwrap(); let netmask: net::Ipv4Addr = SUBNET_MASK.parse().unwrap(); let ret = tap.set_ip_addr(ip_addr); assert!(ret.is_ok()); let ret = tap.set_netmask(netmask); assert!(ret.is_ok()); } #[test] fn test_set_options() { // This line will fail to provide an initialized FD if the test is not run as root. let tap = Tap::new(1).unwrap(); tap.set_vnet_hdr_size(16).unwrap(); tap.set_offload(0).unwrap(); } #[test] fn test_tap_enable() { let tap = Tap::new(1).unwrap(); let ret = tap.enable(); assert!(ret.is_ok()); } #[test] fn test_tap_get_ifreq() { let tap = Tap::new(1).unwrap(); let ret = tap.get_ifreq(); assert_eq!( "__BindgenUnionField", format!("{:?}", ret.ifr_ifrn.ifrn_name) ); } #[test] fn test_raw_fd() { let tap = Tap::new(1).unwrap(); assert_eq!(tap.as_raw_fd(), tap.tap_file.as_raw_fd()); } #[test] fn test_read() { let tap_ip_guard = TAP_IP_LOCK.lock().unwrap(); let mut tap = Tap::new(1).unwrap(); tap.set_ip_addr((*tap_ip_guard).parse().unwrap()).unwrap(); tap.set_netmask(SUBNET_MASK.parse().unwrap()).unwrap(); tap.enable().unwrap(); // Send a packet to the interface. We expect to be able to receive it on the associated fd. pnet_send_packet(tap_name_to_string(&tap)); let mut buf = [0u8; 4096]; let mut found_packet_sz = None; // In theory, this could actually loop forever if something keeps sending data through the // tap interface, but it's highly unlikely. while found_packet_sz.is_none() { let result = tap.read(&mut buf); assert!(result.is_ok()); let size = result.unwrap(); // We skip the first 10 bytes because the IFF_VNET_HDR flag is set when the interface // is created, and the legacy header is 10 bytes long without a certain flag which // is not set in Tap::new(). 
let eth_bytes = &buf[10..size]; let packet = EthernetPacket::new(eth_bytes).unwrap(); if packet.get_ethertype() != EtherTypes::Ipv4 { // not an IPv4 packet continue; } let ipv4_bytes = &eth_bytes[14..]; let packet = Ipv4Packet::new(ipv4_bytes).unwrap(); // Our packet should carry an UDP payload, and not contain IP options. if packet.get_next_level_protocol() != IpNextHeaderProtocols::Udp && packet.get_header_length() != 5 { continue; } let udp_bytes = &ipv4_bytes[20..]; let udp_len = UdpPacket::new(udp_bytes).unwrap().get_length() as usize; // Skip the header bytes. let inner_string = str::from_utf8(&udp_bytes[8..udp_len]).unwrap(); if inner_string.eq(DATA_STRING) { found_packet_sz = Some(size); break; } } assert!(found_packet_sz.is_some()); } #[test] fn test_write() { let tap_ip_guard = TAP_IP_LOCK.lock().unwrap(); let mut tap = Tap::new(1).unwrap(); tap.set_ip_addr((*tap_ip_guard).parse().unwrap()).unwrap(); tap.set_netmask(SUBNET_MASK.parse().unwrap()).unwrap(); tap.enable().unwrap(); let (mac, _, mut rx) = pnet_get_mac_tx_rx(tap_name_to_string(&tap)); let payload = DATA_STRING.as_bytes(); // vnet hdr + eth hdr + ip hdr + udp hdr + payload len let buf_size = 10 + 14 + 20 + 8 + payload.len(); let mut buf = vec![0u8; buf_size]; // leave the vnet hdr as is pnet_build_packet(&mut buf[10..], mac, payload); assert!(tap.write(&buf[..]).is_ok()); assert!(tap.flush().is_ok()); let (channel_tx, channel_rx) = mpsc::channel(); // We use a separate thread to wait for the test packet because the API exposed by pnet is // blocking. This thread will be killed when the main thread exits. let _handle = thread::spawn(move || loop { let buf = rx.next().unwrap(); let p = ParsedPkt::new(buf); p.print(); if let Some(ref udp) = p.udp { if payload == udp.payload() { channel_tx.send(true).unwrap(); break; } } }); // We wait for at most SLEEP_MILLIS * SLEEP_ITERS milliseconds for the reception of the // test packet to be detected. 
static SLEEP_MILLIS: u64 = 500; static SLEEP_ITERS: u32 = 6; let mut found_test_packet = false; for _ in 0..SLEEP_ITERS { thread::sleep(Duration::from_millis(SLEEP_MILLIS)); if let Ok(true) = channel_rx.try_recv() { found_test_packet = true; break; } } assert!(found_test_packet); } }
35.718791
99
0.585314
1cd55d1f3fec13a566ab6e65e32e3906d8f83f71
1,213
// Copyright 2018 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Library module for the key holder functionalities provided by Grin. extern crate blake2_rfc as blake2; extern crate byteorder; #[macro_use] extern crate grin_util as util; extern crate rand; extern crate serde; #[macro_use] extern crate serde_derive; extern crate serde_json; #[macro_use] extern crate slog; extern crate digest; extern crate hmac; extern crate ripemd160; extern crate sha2; extern crate uuid; mod base58; pub mod extkey; pub mod extkey_bip32; mod types; pub mod keychain; pub use keychain::ExtKeychain; pub use types::{BlindSum, BlindingFactor, Error, Identifier, Keychain, IDENTIFIER_SIZE};
28.880952
88
0.769992
61ffa9dbb3e7d435553b4b4b9591e35bc24705d7
338
// primitive_types4.rs // Get a slice out of Array a where the ??? is so that the test passes. // Execute `rustlings hint primitive_types4` for hints!! // run using ``rustlings run primitive_types4`` #[test] fn slice_out_of_array() { let a = [1, 2, 3, 4, 5]; let nice_slice = &a[1..4]; assert_eq!([2, 3, 4], nice_slice) }
22.533333
71
0.64497
e8e0add61cc5f89077fc3ccc0522dccab9713378
2,792
//! p03_join_a_group_mod use crate::*; use crate::root_rendering_component_mod::RootRenderingComponent; use crate::storage_mod; use crate::call_on_next_tick_mod::*; pub fn on_hash_change(vdom: dodrio::VdomWeak, location_hash: String) -> String { ///internal function fn start_websocket_on_p03( rrc: &mut RootRenderingComponent, vdom: dodrio::VdomWeak, location_hash: String, ) { rrc.start_websocket(vdom.clone()); rrc.game_data.my_player_number = 2; if location_hash.contains('.') { let gr = rust_wasm_dodrio_router::router_mod::get_url_param_in_hash_after_dot( &location_hash, ); storage_mod::save_group_id_string_to_local_storage(rrc, gr); } else { storage_mod::load_group_id_string(rrc); } } // entry point for join game call_on_next_tick_3(vdom.clone(), &start_websocket_on_p03, location_hash); //return "p03_join_a_group.html".to_owned() } /// html_templating functions that return a String pub fn replace_with_string(rrc: &RootRenderingComponent, fn_name: &str) -> Option<String> { /// if there is already a group_id don't blink /// internal function fn blink_or_not_group_id(rrc: &RootRenderingComponent) -> String { if rrc.game_data.group_id == 0 { "blink".to_owned() } else { "".to_owned() } } // websysmod::debug_write(&format!("replace_with_string: {}", &fn_name)); match fn_name { "wt_my_ws_uid" => Some(format!("{}", rrc.web_data.my_ws_uid)), "wt_blink_or_not_group_id" => Some(blink_or_not_group_id(rrc)), "wt_blink_or_not_nickname" => Some(storage_mod::blink_or_not_nickname(rrc)), _ => None, } } /// returns false if the fn_name is not found pub fn set_event_listener( fn_name: &str, rrc: &mut RootRenderingComponent, _vdom: dodrio::VdomWeak, event: web_sys::Event, ) -> bool { let mut is_matched_fn_name = true; match fn_name { "wl_group_id_onkeyup" => { storage_mod::group_id_onkeyup(rrc, event); } "wl_open_youtube" => { // randomly choose a link from rrc.videos let num = websysmod::get_random(0, rrc.game_data.videos.len()); #[allow(clippy::indexing_slicing)] // cannot 
panic:the num is 0..video.len websysmod::open_new_tab(&format!( "https://www.youtube.com/watch?v={}", rrc.game_data.videos[num] )); } "wl_join_group_on_click" => { html_template_impl_mod::open_new_local_page("#p04"); } _ => { is_matched_fn_name = false; } } //return is_matched_fn_name }
32.847059
91
0.617837
1e3d4cf890c8558c578f8a1fc4b6ec881c9ddf7b
1,154
use crate::codegen::sanitization::OPT; use proc_macro2::TokenStream; use quote::quote; use syn::Ident; ident_str! { WRITE_COMMENT = from_crate!(binary_template::write_comment); WRITE_START_STRUCT = from_crate!(binary_template::write_start_struct); WRITE_END_STRUCT = from_crate!(binary_template::write_end_struct); } pub(super) fn end() -> TokenStream { if cfg!(feature = "debug_template") { quote! { #WRITE_END_STRUCT(#OPT.variable_name); } } else { <_>::default() } } pub(super) fn handle_error() -> TokenStream { if cfg!(feature = "debug_template") { let write_end_struct = end(); quote! { .map_err(|e| { #WRITE_COMMENT(&format!("Error: {:?}", e)); #write_end_struct e }) } } else { <_>::default() } } pub(super) fn start(struct_name: &Ident) -> TokenStream { if cfg!(feature = "debug_template") { let struct_name = struct_name.to_string(); quote! { #WRITE_START_STRUCT(#struct_name); } } else { <_>::default() } }
24.553191
74
0.564991
e406f6c336bc71802fbc6880c20cde2b8a61455c
9,862
#![cfg_attr(not(feature = "std"), no_std)] extern crate alloc; use ink_lang as ink; pub use self::erc20::Erc20; #[ink::contract] mod erc20 { use alloc::string::String; use ink_storage::{ collections::HashMap as StorageHashMap, traits::{PackedLayout, SpreadLayout}, }; /// Indicates whether a transaction is already confirmed or needs further confirmations. #[ink(storage)] pub struct Erc20 { name: String, symbol: String, total_supply: u64, decimals: u8, owner: AccountId, balances: StorageHashMap<AccountId, u64>, allowances: StorageHashMap<(AccountId, AccountId), u64>, } #[ink(event)] pub struct Transfer { #[ink(topic)] from: Option<AccountId>, #[ink(topic)] to: Option<AccountId>, #[ink(topic)] value: u64, } #[ink(event)] pub struct Approval { #[ink(topic)] owner: AccountId, #[ink(topic)] spender: AccountId, #[ink(topic)] value: u64, } #[derive(scale::Encode, scale::Decode, Clone, SpreadLayout, PackedLayout)] #[cfg_attr( feature = "std", derive(scale_info::TypeInfo, ink_storage::traits::StorageLayout) )] pub struct DisplayInfo { name: String, symbol: String, total_supply: u64, decimals: u8, owner: AccountId, } impl Erc20 { #[ink(constructor)] pub fn new(name: String, symbol: String, initial_supply: u64, decimals: u8, controller: AccountId) -> Self { let mut balances = StorageHashMap::new(); let mut instance = Self { name: name, symbol: symbol, // total_supply: initial_supply, total_supply: 0, decimals, balances, allowances: StorageHashMap::new(), owner: controller, }; instance._mint_token(controller, initial_supply); instance } #[ink(message)] pub fn name(&self) -> String { self.name.clone() } #[ink(message)] pub fn symbol(&self) -> String { self.symbol.clone() } #[ink(message)] pub fn total_supply(&self) -> u64 { self.total_supply } #[ink(message)] pub fn decimals(&self) -> u8 { self.decimals } #[ink(message)] pub fn owner(&self) -> AccountId { self.owner } #[ink(message)] pub fn query_info(&self) -> DisplayInfo { DisplayInfo { name: self.name.clone(), symbol: 
self.symbol.clone(), total_supply: self.total_supply, decimals: self.decimals, owner: self.owner } } #[ink(message)] pub fn balance_of(&self, owner: AccountId) -> u64 { self.balance_of_or_zero(&owner) } #[ink(message)] pub fn allowance(&self, owner: AccountId, spender: AccountId) -> u64 { self.allowance_of_or_zero(&owner, &spender) } #[ink(message)] pub fn transfer(&mut self, to: AccountId, value: u64) -> bool { let from = self.env().caller(); self.transfer_from_to(from, to, value) } #[ink(message)] pub fn approve(&mut self, spender: AccountId, value: u64) -> bool { let owner = self.env().caller(); self.allowances.insert((owner, spender), value); self.env().emit_event(Approval { owner, spender, value, }); true } #[ink(message)] pub fn transfer_from( &mut self, from: AccountId, to: AccountId, value: u64, ) -> bool { let caller = self.env().caller(); let allowance = self.allowance_of_or_zero(&from, &caller); if allowance < value { return false } self.allowances.insert((from, caller), allowance - value); self.transfer_from_to(from, to, value) } #[ink(message)] pub fn transfer_owner( &mut self, to: AccountId, ) -> bool { let caller = self.env().caller(); assert_eq!(caller == self.owner, true); self.owner = to; true } #[ink(message)] pub fn mint_token_by_owner( &mut self, to: AccountId, value: u64, ) -> bool { let caller = self.env().caller(); assert_eq!(caller == self.owner, true); self._mint_token(to, value) } #[ink(message)] pub fn destroy_token_by_owner( &mut self, from: AccountId, value: u64, ) -> bool { assert_eq!(value > 0, true); self._destroy_token(from, value) } fn transfer_from_to( &mut self, from: AccountId, to: AccountId, value: u64, ) -> bool { let from_balance = self.balance_of_or_zero(&from); if from_balance < value { return false } self.balances.insert(from, from_balance - value); let to_balance = self.balance_of_or_zero(&to); self.balances.insert(to, to_balance + value); self.env().emit_event(Transfer { from: Some(from), to: Some(to), value, }); true } 
fn balance_of_or_zero(&self, owner: &AccountId) -> u64 { *self.balances.get(owner).unwrap_or(&0) } fn allowance_of_or_zero( &self, owner: &AccountId, spender: &AccountId, ) -> u64 { *self.allowances.get(&(*owner, *spender)).unwrap_or(&0) } fn _mint_token( &mut self, to: AccountId, amount: u64, ) -> bool { let total_supply = self.total_supply(); assert_eq!(total_supply + amount >= total_supply, true); let to_balance = self.balance_of_or_zero(&to); assert_eq!(to_balance + amount >= to_balance, true); self.total_supply += amount; self.balances.insert(to, to_balance + amount); self.env().emit_event(Transfer { from: None, to: Some(to), value: amount, }); true } fn _destroy_token( &mut self, from: AccountId, amount: u64, ) -> bool { let total_supply = self.total_supply(); assert_eq!(total_supply - amount <= total_supply, true); let from_balance = self.balance_of_or_zero(&from); assert_eq!(from_balance - amount <= from_balance, true); self.total_supply -= amount; self.balances.insert(from, from_balance - amount); self.env().emit_event(Transfer { from: Some(from), to: None, value: amount, }); true } } #[cfg(test)] mod tests { use super::*; use ink_lang as ink; #[ink::test] fn default_works() { let accounts =ink_env::test::default_accounts::<ink_env::DefaultEnvironment>().expect("Cannot get accounts"); let token = Erc20::new( "SubDAO Token".to_string() , "SDT".to_string() , 100000000 , 4 , accounts.alice ); ink_env::debug_println!("{}", &token.name()); ink_env::debug_println!("{}",&token.symbol()); ink_env::debug_println!("{}",&format!("total supply is {}", token.total_supply())); ink_env::debug_println!("{}",&format!("decimals is {}", token.decimals())); assert_eq!(token.name(), "SubDAO Token"); assert_eq!(token.symbol(), "SDT"); assert_eq!(token.total_supply(), 100000000); assert_eq!(token.decimals(), 4); } #[ink::test] fn transfer_works() { let accounts =ink_env::test::default_accounts::<ink_env::DefaultEnvironment>().expect("Cannot get accounts"); let mut token = 
Erc20::new( "SubDAO Token".to_string() , "SDT".to_string() , 100000000 , 4 , accounts.alice ); ink_env::debug_println!("{}",&token.name()); ink_env::debug_println!("{}",&token.symbol()); ink_env::debug_println!("{}",&format!("total supply is {}", token.total_supply())); ink_env::debug_println!("{}",&format!("decimals is {}", token.decimals())); assert_eq!(token.name(), "SubDAO Token"); assert_eq!(token.symbol(), "SDT"); assert_eq!(token.total_supply(), 100000000); assert_eq!(token.decimals(), 4); let amount: u64 = 99_u64 * 10_u64.pow(token.decimals() as u32); token.transfer(accounts.bob, amount); ink_env::debug_println!("{}",&format!("bob balance is {}", token.balance_of(accounts.bob))); assert_eq!(token.balance_of(accounts.bob), amount); ink_env::debug_println!("{}",&format!("alice balance is {}", token.balance_of(accounts.alice))); assert_eq!(token.balance_of(accounts.alice), token.total_supply() - amount); } } }
30.532508
121
0.498682
e50cb73e4dd58a2ea5ca2e47ad51a75733473139
33,629
#![doc = "generated by AutoRust 0.1.0"] #![allow(unused_mut)] #![allow(unused_variables)] #![allow(unused_imports)] use super::{models, API_VERSION}; #[derive(Clone)] pub struct Client { endpoint: String, credential: std::sync::Arc<dyn azure_core::TokenCredential>, scopes: Vec<String>, pipeline: azure_core::pipeline::Pipeline, } #[derive(Clone)] pub struct ClientBuilder { credential: std::sync::Arc<dyn azure_core::TokenCredential>, endpoint: Option<String>, scopes: Option<Vec<String>>, } pub const DEFAULT_ENDPOINT: &str = azure_core::resource_manager_endpoint::AZURE_PUBLIC_CLOUD; impl ClientBuilder { pub fn new(credential: std::sync::Arc<dyn azure_core::TokenCredential>) -> Self { Self { credential, endpoint: None, scopes: None, } } pub fn endpoint(mut self, endpoint: impl Into<String>) -> Self { self.endpoint = Some(endpoint.into()); self } pub fn scopes(mut self, scopes: &[&str]) -> Self { self.scopes = Some(scopes.iter().map(|scope| (*scope).to_owned()).collect()); self } pub fn build(self) -> Client { let endpoint = self.endpoint.unwrap_or_else(|| DEFAULT_ENDPOINT.to_owned()); let scopes = self.scopes.unwrap_or_else(|| vec![format!("{}/", endpoint)]); Client::new(endpoint, self.credential, scopes) } } impl Client { pub(crate) fn endpoint(&self) -> &str { self.endpoint.as_str() } pub(crate) fn token_credential(&self) -> &dyn azure_core::TokenCredential { self.credential.as_ref() } pub(crate) fn scopes(&self) -> Vec<&str> { self.scopes.iter().map(String::as_str).collect() } pub(crate) async fn send(&self, request: impl Into<azure_core::Request>) -> Result<azure_core::Response, azure_core::Error> { let mut context = azure_core::Context::default(); let mut request = request.into(); self.pipeline.send(&mut context, &mut request).await } pub fn new(endpoint: impl Into<String>, credential: std::sync::Arc<dyn azure_core::TokenCredential>, scopes: Vec<String>) -> Self { let endpoint = endpoint.into(); let pipeline = azure_core::pipeline::Pipeline::new( 
option_env!("CARGO_PKG_NAME"), option_env!("CARGO_PKG_VERSION"), azure_core::ClientOptions::default(), Vec::new(), Vec::new(), ); Self { endpoint, credential, scopes, pipeline, } } pub fn subscription_factory(&self) -> subscription_factory::Client { subscription_factory::Client(self.clone()) } pub fn subscription_operation(&self) -> subscription_operation::Client { subscription_operation::Client(self.clone()) } pub fn subscriptions(&self) -> subscriptions::Client { subscriptions::Client(self.clone()) } pub fn tenants(&self) -> tenants::Client { tenants::Client(self.clone()) } } #[non_exhaustive] #[derive(Debug, thiserror :: Error)] #[allow(non_camel_case_types)] pub enum Error { #[error(transparent)] SubscriptionOperation_Get(#[from] subscription_operation::get::Error), #[error(transparent)] SubscriptionFactory_CreateSubscription(#[from] subscription_factory::create_subscription::Error), #[error(transparent)] SubscriptionFactory_CreateCspSubscription(#[from] subscription_factory::create_csp_subscription::Error), #[error(transparent)] Subscriptions_ListLocations(#[from] subscriptions::list_locations::Error), #[error(transparent)] Subscriptions_Get(#[from] subscriptions::get::Error), #[error(transparent)] Subscriptions_List(#[from] subscriptions::list::Error), #[error(transparent)] Tenants_List(#[from] tenants::list::Error), } pub mod subscription_operation { use super::{models, API_VERSION}; pub struct Client(pub(crate) super::Client); impl Client { pub fn get(&self, operation_id: impl Into<String>) -> get::Builder { get::Builder { client: self.0.clone(), operation_id: operation_id.into(), } } } pub mod get { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200(models::SubscriptionCreationResult), Accepted202, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] 
ParseUrl(url::ParseError), #[error("Failed to build request: {0}")] BuildRequest(http::Error), #[error("Failed to serialize request body: {0}")] Serialize(serde_json::Error), #[error("Failed to get access token: {0}")] GetToken(azure_core::Error), #[error("Failed to execute request: {0}")] SendRequest(azure_core::Error), #[error("Failed to get response bytes: {0}")] ResponseBytes(azure_core::StreamError), #[error("Failed to deserialize response: {0}, body: {1:?}")] Deserialize(serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) operation_id: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> { Box::pin(async move { let url_str = &format!( "{}/providers/Microsoft.Subscription/subscriptionOperations/{}", self.client.endpoint(), &self.operation_id ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::SubscriptionCreationResult = serde_json::from_slice(&rsp_body).map_err(|source| 
Error::Deserialize(source, rsp_body.clone()))?; Ok(Response::Ok200(rsp_value)) } http::StatusCode::ACCEPTED => Ok(Response::Accepted202), status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; Err(Error::UnexpectedResponse { status_code, body: rsp_body, }) } } }) } } } } pub mod subscription_factory { use super::{models, API_VERSION}; pub struct Client(pub(crate) super::Client); impl Client { pub fn create_subscription( &self, billing_account_name: impl Into<String>, billing_profile_name: impl Into<String>, invoice_section_name: impl Into<String>, body: impl Into<models::ModernSubscriptionCreationParameters>, ) -> create_subscription::Builder { create_subscription::Builder { client: self.0.clone(), billing_account_name: billing_account_name.into(), billing_profile_name: billing_profile_name.into(), invoice_section_name: invoice_section_name.into(), body: body.into(), } } pub fn create_csp_subscription( &self, billing_account_name: impl Into<String>, customer_name: impl Into<String>, body: impl Into<models::ModernCspSubscriptionCreationParameters>, ) -> create_csp_subscription::Builder { create_csp_subscription::Builder { client: self.0.clone(), billing_account_name: billing_account_name.into(), customer_name: customer_name.into(), body: body.into(), } } } pub mod create_subscription { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200(models::SubscriptionCreationResult), Accepted202, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrl(url::ParseError), #[error("Failed to build request: {0}")] BuildRequest(http::Error), #[error("Failed to serialize request body: {0}")] Serialize(serde_json::Error), #[error("Failed to get access token: {0}")] GetToken(azure_core::Error), #[error("Failed to execute 
request: {0}")] SendRequest(azure_core::Error), #[error("Failed to get response bytes: {0}")] ResponseBytes(azure_core::StreamError), #[error("Failed to deserialize response: {0}, body: {1:?}")] Deserialize(serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) billing_account_name: String, pub(crate) billing_profile_name: String, pub(crate) invoice_section_name: String, pub(crate) body: models::ModernSubscriptionCreationParameters, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> { Box::pin(async move { let url_str = & format ! ("{}/providers/Microsoft.Billing/billingAccounts/{}/billingProfiles/{}/invoiceSections/{}/providers/Microsoft.Subscription/createSubscription" , self . client . endpoint () , & self . billing_account_name , & self . billing_profile_name , & self . invoice_section_name) ; let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(&self.body).map_err(Error::Serialize)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; 
let rsp_value: models::SubscriptionCreationResult = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(Response::Ok200(rsp_value)) } http::StatusCode::ACCEPTED => Ok(Response::Accepted202), status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::ErrorResponse = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod create_csp_subscription { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200(models::SubscriptionCreationResult), Accepted202, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrl(url::ParseError), #[error("Failed to build request: {0}")] BuildRequest(http::Error), #[error("Failed to serialize request body: {0}")] Serialize(serde_json::Error), #[error("Failed to get access token: {0}")] GetToken(azure_core::Error), #[error("Failed to execute request: {0}")] SendRequest(azure_core::Error), #[error("Failed to get response bytes: {0}")] ResponseBytes(azure_core::StreamError), #[error("Failed to deserialize response: {0}, body: {1:?}")] Deserialize(serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) billing_account_name: String, pub(crate) customer_name: String, pub(crate) body: models::ModernCspSubscriptionCreationParameters, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> { Box::pin(async move { let url_str = & format ! ("{}/providers/Microsoft.Billing/billingAccounts/{}/customers/{}/providers/Microsoft.Subscription/createSubscription" , self . 
client . endpoint () , & self . billing_account_name , & self . customer_name) ; let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(&self.body).map_err(Error::Serialize)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::SubscriptionCreationResult = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(Response::Ok200(rsp_value)) } http::StatusCode::ACCEPTED => Ok(Response::Accepted202), status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::ErrorResponse = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } } pub mod subscriptions { use super::{models, API_VERSION}; pub struct Client(pub(crate) super::Client); impl Client { pub fn list_locations(&self, subscription_id: impl Into<String>) -> list_locations::Builder { list_locations::Builder { client: self.0.clone(), subscription_id: 
subscription_id.into(), } } pub fn get(&self, subscription_id: impl Into<String>) -> get::Builder { get::Builder { client: self.0.clone(), subscription_id: subscription_id.into(), } } pub fn list(&self) -> list::Builder { list::Builder { client: self.0.clone() } } } pub mod list_locations { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrl(url::ParseError), #[error("Failed to build request: {0}")] BuildRequest(http::Error), #[error("Failed to serialize request body: {0}")] Serialize(serde_json::Error), #[error("Failed to get access token: {0}")] GetToken(azure_core::Error), #[error("Failed to execute request: {0}")] SendRequest(azure_core::Error), #[error("Failed to get response bytes: {0}")] ResponseBytes(azure_core::StreamError), #[error("Failed to deserialize response: {0}, body: {1:?}")] Deserialize(serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) subscription_id: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::LocationListResult, Error>> { Box::pin(async move { let url_str = &format!("{}/subscriptions/{}/locations", self.client.endpoint(), &self.subscription_id); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = 
bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::LocationListResult = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; Err(Error::UnexpectedResponse { status_code, body: rsp_body, }) } } }) } } } pub mod get { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrl(url::ParseError), #[error("Failed to build request: {0}")] BuildRequest(http::Error), #[error("Failed to serialize request body: {0}")] Serialize(serde_json::Error), #[error("Failed to get access token: {0}")] GetToken(azure_core::Error), #[error("Failed to execute request: {0}")] SendRequest(azure_core::Error), #[error("Failed to get response bytes: {0}")] ResponseBytes(azure_core::StreamError), #[error("Failed to deserialize response: {0}, body: {1:?}")] Deserialize(serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) subscription_id: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::Subscription, Error>> { Box::pin(async move { let url_str = &format!("{}/subscriptions/{}", self.client.endpoint(), &self.subscription_id); let mut url = 
url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::Subscription = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; Err(Error::UnexpectedResponse { status_code, body: rsp_body, }) } } }) } } } pub mod list { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrl(url::ParseError), #[error("Failed to build request: {0}")] BuildRequest(http::Error), #[error("Failed to serialize request body: {0}")] Serialize(serde_json::Error), #[error("Failed to get access token: {0}")] GetToken(azure_core::Error), #[error("Failed to execute request: {0}")] SendRequest(azure_core::Error), #[error("Failed to get response bytes: {0}")] ResponseBytes(azure_core::StreamError), 
#[error("Failed to deserialize response: {0}, body: {1:?}")] Deserialize(serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::SubscriptionListResult, Error>> { Box::pin(async move { let url_str = &format!("{}/subscriptions", self.client.endpoint(),); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::SubscriptionListResult = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; Err(Error::UnexpectedResponse { status_code, body: rsp_body, }) } } }) } } } } pub mod tenants { use super::{models, API_VERSION}; pub struct Client(pub(crate) super::Client); impl Client { pub fn list(&self) -> list::Builder { list::Builder { client: self.0.clone() } } } pub mod list { use super::{models, API_VERSION}; 
#[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrl(url::ParseError), #[error("Failed to build request: {0}")] BuildRequest(http::Error), #[error("Failed to serialize request body: {0}")] Serialize(serde_json::Error), #[error("Failed to get access token: {0}")] GetToken(azure_core::Error), #[error("Failed to execute request: {0}")] SendRequest(azure_core::Error), #[error("Failed to get response bytes: {0}")] ResponseBytes(azure_core::StreamError), #[error("Failed to deserialize response: {0}, body: {1:?}")] Deserialize(serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::TenantListResult, Error>> { Box::pin(async move { let url_str = &format!("{}/tenants", self.client.endpoint(),); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = 
azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::TenantListResult = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; Err(Error::UnexpectedResponse { status_code, body: rsp_body, }) } } }) } } } }
50.493994
316
0.536739
09e8d233d234b2dd2ff4b8a36dd98096cef6d27a
4,806
// Copyright (c) The Libra Core Contributors // SPDX-License-Identifier: Apache-2.0 use crate::{ block::Block, common::{Author, Payload, Round}, sync_info::SyncInfo, }; use anyhow::{ensure, format_err, Context, Result}; use libra_types::crypto_proxies::ValidatorVerifier; use serde::{Deserialize, Serialize}; use std::fmt; /// ProposalMsg contains the required information for the proposer election protocol to make its /// choice (typically depends on round and proposer info). #[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] pub struct ProposalMsg<T> { #[serde(bound(deserialize = "Block<T>: Deserialize<'de>"))] proposal: Block<T>, sync_info: SyncInfo, } /// A ProposalMsg is only accessible after verifying the signatures of a ProposalUncheckedSignatures /// via the `validate_signatures` function. pub struct ProposalUncheckedSignatures<T>(ProposalMsg<T>); #[cfg(any(test, feature = "fuzzing"))] impl<T: Payload> From<ProposalUncheckedSignatures<T>> for ProposalMsg<T> { fn from(proposal: ProposalUncheckedSignatures<T>) -> Self { proposal.0 } } impl<T: Payload> ProposalUncheckedSignatures<T> { pub fn new(proposal: ProposalMsg<T>) -> Self { Self(proposal) } /// Validates the signatures of the proposal. This includes the leader's signature over the /// block and the QC, the timeout certificate signatures. pub fn validate_signatures(self, validator: &ValidatorVerifier) -> Result<ProposalMsg<T>> { // verify block leader's signature and QC self.0 .proposal .validate_signatures(validator) .map_err(|e| format_err!("{:?}", e))?; // if there is a timeout certificate, verify its signatures if let Some(tc) = self.0.sync_info.highest_timeout_certificate() { tc.verify(validator).map_err(|e| format_err!("{:?}", e))?; } // Note that we postpone the verification of SyncInfo until it's being used. // return proposal Ok(self.0) } pub fn epoch(&self) -> u64 { self.0.proposal.epoch() } } impl<T: Payload> ProposalMsg<T> { /// Creates a new proposal. 
pub fn new(proposal: Block<T>, sync_info: SyncInfo) -> Self { Self { proposal, sync_info, } } /// Verifies that the ProposalMsg is well-formed. pub fn verify_well_formed(self) -> Result<Self> { ensure!( !self.proposal.is_nil_block(), "Proposal {} for a NIL block", self.proposal ); self.proposal .verify_well_formed() .context("Fail to verify ProposalMsg's block")?; ensure!( self.proposal.round() > 0, "Proposal for {} has an incorrect round of 0", self.proposal, ); ensure!( self.proposal.epoch() == self.sync_info.epoch(), "ProposalMsg has different epoch number from SyncInfo" ); ensure!( self.proposal.parent_id() == self.sync_info.highest_quorum_cert().certified_block().id(), "Proposal HQC in SyncInfo certifies {}, but block parent id is {}", self.sync_info.highest_quorum_cert().certified_block().id(), self.proposal.parent_id(), ); let previous_round = self.proposal.round() - 1; let highest_certified_round = std::cmp::max( self.proposal.quorum_cert().certified_block().round(), self.sync_info .highest_timeout_certificate() .map_or(0, |tc| tc.round()), ); ensure!( previous_round == highest_certified_round, "Proposal {} does not have a certified round {}", self.proposal, previous_round ); ensure!( self.proposal.author().is_some(), "Proposal {} does not define an author", self.proposal ); Ok(self) } pub fn proposal(&self) -> &Block<T> { &self.proposal } pub fn take_proposal(self) -> Block<T> { self.proposal } pub fn sync_info(&self) -> &SyncInfo { &self.sync_info } pub fn round(&self) -> Round { self.proposal.round() } pub fn proposer(&self) -> Author { self.proposal .author() .expect("Proposal should be verified having an author") } } impl<T: Payload> fmt::Display for ProposalMsg<T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let author = match self.proposal.author() { Some(author) => author.short_str(), None => String::from("NIL"), }; write!(f, "[proposal {} from {}]", self.proposal, author,) } }
32.255034
100
0.593425
720d576b2cfcf84213fa160099371298d2c527c3
2,206
use super::{BuilderMap, MetaMapBuilder, Rect, TileType}; pub struct RoomDrawer {} impl MetaMapBuilder for RoomDrawer { fn build_map(&mut self, build_data: &mut BuilderMap) { self.build(build_data); } } impl RoomDrawer { #[allow(dead_code)] pub fn new() -> Box<RoomDrawer> { Box::new(RoomDrawer {}) } #[allow(dead_code)] fn rectangle(&mut self, build_data: &mut BuilderMap, room: &Rect) { for y in room.y1 + 1..=room.y2 { for x in room.x1 + 1..=room.x2 { let idx = build_data.map.xy_idx(x, y); if idx > 0 && idx < ((build_data.map.width * build_data.map.height) - 1) as usize { build_data.map.tiles[idx] = TileType::Floor; } } } } #[allow(dead_code)] fn circle(&mut self, build_data: &mut BuilderMap, room: &Rect) { let radius = i32::min(room.x2 - room.x1, room.y2 - room.y1) as f32 / 2.0; let center = room.center(); let center_pt = rltk::Point::new(center.0, center.1); for y in room.y1..=room.y2 { for x in room.x1..=room.x2 { let idx = build_data.map.xy_idx(x, y); let distance = rltk::DistanceAlg::Pythagoras.distance2d(center_pt, rltk::Point::new(x, y)); if idx > 0 && idx < ((build_data.map.width * build_data.map.height) - 1) as usize && distance <= radius { build_data.map.tiles[idx] = TileType::Floor; } } } } fn build(&mut self, build_data: &mut BuilderMap) { let rooms: Vec<Rect>; if let Some(rooms_builder) = &build_data.rooms { rooms = rooms_builder.clone(); } else { panic!("Room Drawing require a builder with room structures"); } for room in rooms.iter() { let room_type = crate::rng::roll_dice(1, 4); match room_type { 1 => self.circle(build_data, room), _ => self.rectangle(build_data, room), } build_data.take_snapshot(); } } }
32.925373
99
0.514959
1e506754173023ff20157c171f2af13bfaaf6ab8
10,107
// Copyright Materialize, Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License in the LICENSE file at the // root of this repository, or online at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use std::collections::HashSet; use std::error::Error; use std::fs::File; use std::io::{BufRead, BufReader}; use std::path::Path; use std::str::FromStr; use crate::ast; use crate::lex::LexBuf; struct Context<'a> { path: &'a Path, } pub fn parse_file(path: &Path) -> Result<ast::File, Box<dyn Error>> { let cx = &Context { path }; let f = BufReader::new(File::open(path)?); let mut lines = vec![]; for (i, line) in f.lines().enumerate() { let line = line?; let mut buf = LexBuf::new(&line); match parse_line(cx, &mut buf) { Ok(None) => (), Ok(Some(line)) => lines.push(line), Err(e) => return Err(format!("parsing line {}: {}", i + 1, e).into()), } } Ok(ast::File { path: path.to_path_buf(), lines, }) } fn parse_line(cx: &Context, buf: &mut LexBuf) -> Result<Option<ast::Line>, Box<dyn Error>> { let token = match parse_token(buf) { None => return Ok(None), Some(token) => token, }; if let Some(keyword) = token.strip_suffix(":") { let value = require_token(buf, "value")?; Ok(Some(ast::Line::Directive(parse_directive( cx, keyword, value, )?))) } else { let id = token; let operation = parse_operation(buf)?; if require_token(buf, "->")? 
!= "->" { return Err(format!("missing \"->\" token").into()); } let result = require_token(buf, "result")?; let mut conditions = HashSet::new(); while let Some(condition) = parse_token(buf) { conditions.insert(condition.parse()?); } Ok(Some(ast::Line::Test(ast::Test { id, operation, result, conditions, }))) } } fn require_token(buf: &mut LexBuf, name: &str) -> Result<String, Box<dyn Error>> { parse_token(buf).ok_or_else(|| format!("missing \"{}\" token", name).into()) } fn parse_token(buf: &mut LexBuf) -> Option<String> { while buf.consume(" ") {} if buf.consume("--") { while let Some(_) = buf.next() {} return None; } match buf.peek() { Some(quote @ '\'') | Some(quote @ '"') => { buf.next(); parse_quoted_token(buf, quote) } _ => parse_unquoted_token(buf), } } fn parse_unquoted_token(buf: &mut LexBuf) -> Option<String> { let mut token = String::new(); while let Some(ch) = buf.peek() { match ch { ' ' => break, ch => { token.push(ch); buf.next(); } } } if token.is_empty() { None } else { Some(token) } } fn parse_quoted_token(buf: &mut LexBuf, quote: char) -> Option<String> { let mut token = String::new(); while let Some(ch) = buf.next() { if ch == quote { if buf.peek() == Some(quote) { token.push(quote); buf.next(); } else { break; } } else { token.push(ch); } } Some(token) } fn parse_directive( cx: &Context, keyword: &str, value: String, ) -> Result<ast::Directive, Box<dyn Error>> { match keyword.to_lowercase().as_str() { "clamp" => Ok(ast::Directive::Clamp(parse_bool(&value)?)), "dectest" => { let path = cx.path.with_file_name(value).with_extension("decTest"); let file = parse_file(&path).map_err(|e| format!("opening {}: {}", path.display(), e))?; Ok(ast::Directive::DecTest(file)) } "extended" => Ok(ast::Directive::Extended(parse_bool(&value)?)), "maxexponent" => Ok(ast::Directive::MaxExponent(value.parse()?)), "minexponent" => Ok(ast::Directive::MinExponent(value.parse()?)), "rounding" => Ok(ast::Directive::Rounding(parse_rounding(&value)?)), "precision" => 
Ok(ast::Directive::Precision(value.parse()?)), "version" => Ok(ast::Directive::Version(value)), _ => Err(format!("unknown directive \"{}\"", keyword).into()), } } fn parse_rounding(s: &str) -> Result<dec::Rounding, Box<dyn Error>> { match s { "ceiling" => Ok(dec::Rounding::Ceiling), "down" => Ok(dec::Rounding::Down), "floor" => Ok(dec::Rounding::Floor), "half_down" => Ok(dec::Rounding::HalfDown), "half_even" => Ok(dec::Rounding::HalfEven), "half_up" => Ok(dec::Rounding::HalfUp), "up" => Ok(dec::Rounding::Up), "05up" => Ok(dec::Rounding::ZeroFiveUp), _ => Err(format!("unknown rounding mode \"{}\"", s).into()), } } fn parse_bool(s: &str) -> Result<bool, Box<dyn Error>> { match s { "0" => Ok(false), "1" => Ok(true), _ => Err(format!("invalid boolean \"{}\"", s).into()), } } fn parse_operation(buf: &mut LexBuf) -> Result<ast::Operation, Box<dyn Error>> { let operation = require_token(buf, "operation")?; let mut op = || require_token(buf, "operand"); match operation.to_lowercase().as_str() { "abs" => Ok(ast::Operation::Abs(op()?)), "add" => Ok(ast::Operation::Add(op()?, op()?)), "and" => Ok(ast::Operation::And(op()?, op()?)), "apply" => Ok(ast::Operation::Apply(op()?)), "canonical" => Ok(ast::Operation::Canonical(op()?)), "class" => Ok(ast::Operation::Class(op()?)), "compare" => Ok(ast::Operation::Compare(op()?, op()?)), "comparesig" => Ok(ast::Operation::CompareSig(op()?, op()?)), "comparetotal" => Ok(ast::Operation::CompareTotal(op()?, op()?)), "comparetotmag" => Ok(ast::Operation::CompareTotalMag(op()?, op()?)), "copy" => Ok(ast::Operation::Copy(op()?)), "copyabs" => Ok(ast::Operation::CopyAbs(op()?)), "copynegate" => Ok(ast::Operation::CopyNegate(op()?)), "copysign" => Ok(ast::Operation::CopySign(op()?, op()?)), "divide" => Ok(ast::Operation::Divide(op()?, op()?)), "divideint" => Ok(ast::Operation::DivideInt(op()?, op()?)), "exp" => Ok(ast::Operation::Exp(op()?)), "fma" => Ok(ast::Operation::Fma(op()?, op()?, op()?)), "invert" => Ok(ast::Operation::Invert(op()?)), 
"ln" => Ok(ast::Operation::Ln(op()?)), "log10" => Ok(ast::Operation::Log10(op()?)), "logb" => Ok(ast::Operation::Logb(op()?)), "max" => Ok(ast::Operation::Max(op()?, op()?)), "min" => Ok(ast::Operation::Min(op()?, op()?)), "maxmag" => Ok(ast::Operation::MaxMag(op()?, op()?)), "minmag" => Ok(ast::Operation::MinMag(op()?, op()?)), "minus" => Ok(ast::Operation::Minus(op()?)), "multiply" => Ok(ast::Operation::Multiply(op()?, op()?)), "nextminus" => Ok(ast::Operation::NextMinus(op()?)), "nextplus" => Ok(ast::Operation::NextPlus(op()?)), "nexttoward" => Ok(ast::Operation::NextToward(op()?, op()?)), "or" => Ok(ast::Operation::Or(op()?, op()?)), "plus" => Ok(ast::Operation::Plus(op()?)), "power" => Ok(ast::Operation::Power(op()?, op()?)), "quantize" => Ok(ast::Operation::Quantize(op()?, op()?)), "reduce" => Ok(ast::Operation::Reduce(op()?)), "remainder" => Ok(ast::Operation::Remainder(op()?, op()?)), "remaindernear" => Ok(ast::Operation::RemainderNear(op()?, op()?)), "rescale" => Ok(ast::Operation::Rescale(op()?, op()?)), "rotate" => Ok(ast::Operation::Rotate(op()?, op()?)), "samequantum" => Ok(ast::Operation::SameQuantum(op()?, op()?)), "scaleb" => Ok(ast::Operation::Scaleb(op()?, op()?)), "shift" => Ok(ast::Operation::Shift(op()?, op()?)), "squareroot" => Ok(ast::Operation::SquareRoot(op()?)), "subtract" => Ok(ast::Operation::Subtract(op()?, op()?)), "toeng" => Ok(ast::Operation::ToEng(op()?)), "tointegral" => Ok(ast::Operation::ToIntegral(op()?)), "tointegralx" => Ok(ast::Operation::ToIntegralX(op()?)), "tosci" => Ok(ast::Operation::ToSci(op()?)), "trim" => Ok(ast::Operation::Trim(op()?)), "xor" => Ok(ast::Operation::Xor(op()?, op()?)), _ => Err(format!("unknown operation \"{}\"", operation).into()), } } impl FromStr for ast::Condition { type Err = Box<dyn Error>; fn from_str(s: &str) -> Result<ast::Condition, Box<dyn Error>> { match s.to_lowercase().as_str() { "clamped" => Ok(ast::Condition::Clamped), "conversion_syntax" => Ok(ast::Condition::ConversionSyntax), 
"division_by_zero" => Ok(ast::Condition::DivisionByZero), "division_impossible" => Ok(ast::Condition::DivisionImpossible), "division_undefined" => Ok(ast::Condition::DivisionUndefined), "inexact" => Ok(ast::Condition::Inexact), "insufficient_storage" => Ok(ast::Condition::InsufficientStorage), "invalid_context" => Ok(ast::Condition::InvalidContext), "invalid_operation" => Ok(ast::Condition::InvalidOperation), "lost_digits" => Ok(ast::Condition::LostDigits), "overflow" => Ok(ast::Condition::Overflow), "rounded" => Ok(ast::Condition::Rounded), "subnormal" => Ok(ast::Condition::Subnormal), "underflow" => Ok(ast::Condition::Underflow), _ => Err(format!("unknown condition \"{}\"", s).into()), } } }
38.576336
93
0.538834
71609af803e09664716fbef37d7c21b02c261ded
85,665
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! Feature gating //! //! This module implements the gating necessary for preventing certain compiler //! features from being used by default. This module will crawl a pre-expanded //! AST to ensure that there are no features which are used that are not //! enabled. //! //! Features are enabled in programs via the crate-level attributes of //! `#![feature(...)]` with a comma-separated list of features. //! //! For the purpose of future feature-tracking, once code for detection of feature //! gate usage is added, *do not remove it again* even once the feature //! becomes stable. use self::AttributeType::*; use self::AttributeGate::*; use abi::Abi; use ast::{self, NodeId, PatKind, RangeEnd}; use attr; use edition::Edition; use codemap::Spanned; use syntax_pos::{Span, DUMMY_SP}; use errors::{DiagnosticBuilder, Handler, FatalError}; use visit::{self, FnKind, Visitor}; use parse::ParseSess; use symbol::{keywords, Symbol}; use std::{env, path}; macro_rules! set { (proc_macro) => {{ fn f(features: &mut Features, span: Span) { features.declared_lib_features.push((Symbol::intern("proc_macro"), span)); features.proc_macro = true; } f as fn(&mut Features, Span) }}; ($field: ident) => {{ fn f(features: &mut Features, _: Span) { features.$field = true; } f as fn(&mut Features, Span) }} } macro_rules! declare_features { ($((active, $feature: ident, $ver: expr, $issue: expr, $edition: expr),)+) => { /// Represents active features that are currently being implemented or /// currently being considered for addition/removal. 
const ACTIVE_FEATURES: &'static [(&'static str, &'static str, Option<u32>, Option<Edition>, fn(&mut Features, Span))] = &[$((stringify!($feature), $ver, $issue, $edition, set!($feature))),+]; /// A set of features to be used by later passes. #[derive(Clone)] pub struct Features { /// `#![feature]` attrs for stable language features, for error reporting pub declared_stable_lang_features: Vec<(Symbol, Span)>, /// `#![feature]` attrs for non-language (library) features pub declared_lib_features: Vec<(Symbol, Span)>, $(pub $feature: bool),+ } impl Features { pub fn new() -> Features { Features { declared_stable_lang_features: Vec::new(), declared_lib_features: Vec::new(), $($feature: false),+ } } pub fn walk_feature_fields<F>(&self, mut f: F) where F: FnMut(&str, bool) { $(f(stringify!($feature), self.$feature);)+ } } }; ($((removed, $feature: ident, $ver: expr, $issue: expr, None),)+) => { /// Represents unstable features which have since been removed (it was once Active) const REMOVED_FEATURES: &'static [(&'static str, &'static str, Option<u32>)] = &[ $((stringify!($feature), $ver, $issue)),+ ]; }; ($((stable_removed, $feature: ident, $ver: expr, $issue: expr, None),)+) => { /// Represents stable features which have since been removed (it was once Accepted) const STABLE_REMOVED_FEATURES: &'static [(&'static str, &'static str, Option<u32>)] = &[ $((stringify!($feature), $ver, $issue)),+ ]; }; ($((accepted, $feature: ident, $ver: expr, $issue: expr, None),)+) => { /// Those language feature has since been Accepted (it was once Active) const ACCEPTED_FEATURES: &'static [(&'static str, &'static str, Option<u32>)] = &[ $((stringify!($feature), $ver, $issue)),+ ]; } } // If you change this, please modify src/doc/unstable-book as well. // // Don't ever remove anything from this list; set them to 'Removed'. // // The version numbers here correspond to the version in which the current status // was set. 
This is most important for knowing when a particular feature became // stable (active). // // NB: tools/tidy/src/features.rs parses this information directly out of the // source, so take care when modifying it. declare_features! ( (active, asm, "1.0.0", Some(29722), None), (active, concat_idents, "1.0.0", Some(29599), None), (active, link_args, "1.0.0", Some(29596), None), (active, log_syntax, "1.0.0", Some(29598), None), (active, non_ascii_idents, "1.0.0", Some(28979), None), (active, plugin_registrar, "1.0.0", Some(29597), None), (active, thread_local, "1.0.0", Some(29594), None), (active, trace_macros, "1.0.0", Some(29598), None), // rustc internal, for now: (active, intrinsics, "1.0.0", None, None), (active, lang_items, "1.0.0", None, None), (active, link_llvm_intrinsics, "1.0.0", Some(29602), None), (active, linkage, "1.0.0", Some(29603), None), (active, quote, "1.0.0", Some(29601), None), // rustc internal (active, rustc_diagnostic_macros, "1.0.0", None, None), (active, rustc_const_unstable, "1.0.0", None, None), (active, box_syntax, "1.0.0", Some(27779), None), (active, placement_in_syntax, "1.0.0", Some(27779), None), (active, unboxed_closures, "1.0.0", Some(29625), None), (active, fundamental, "1.0.0", Some(29635), None), (active, main, "1.0.0", Some(29634), None), (active, needs_allocator, "1.4.0", Some(27389), None), (active, on_unimplemented, "1.0.0", Some(29628), None), (active, plugin, "1.0.0", Some(29597), None), (active, simd_ffi, "1.0.0", Some(27731), None), (active, start, "1.0.0", Some(29633), None), (active, structural_match, "1.8.0", Some(31434), None), (active, panic_runtime, "1.10.0", Some(32837), None), (active, needs_panic_runtime, "1.10.0", Some(32837), None), // OIBIT specific features (active, optin_builtin_traits, "1.0.0", Some(13231), None), // macro re-export needs more discussion and stabilization (active, macro_reexport, "1.0.0", Some(29638), None), // Allows use of #[staged_api] // rustc internal (active, staged_api, "1.0.0", 
None, None), // Allows using #![no_core] (active, no_core, "1.3.0", Some(29639), None), // Allows using `box` in patterns; RFC 469 (active, box_patterns, "1.0.0", Some(29641), None), // Allows using the unsafe_destructor_blind_to_params attribute; // RFC 1238 (active, dropck_parametricity, "1.3.0", Some(28498), None), // Allows using the may_dangle attribute; RFC 1327 (active, dropck_eyepatch, "1.10.0", Some(34761), None), // Allows the use of custom attributes; RFC 572 (active, custom_attribute, "1.0.0", Some(29642), None), // Allows the use of #[derive(Anything)] as sugar for // #[derive_Anything]. (active, custom_derive, "1.0.0", Some(29644), None), // Allows the use of rustc_* attributes; RFC 572 (active, rustc_attrs, "1.0.0", Some(29642), None), // Allows the use of non lexical lifetimes; RFC 2094 (active, nll, "1.0.0", Some(43234), None), // Allows the use of #[allow_internal_unstable]. This is an // attribute on macro_rules! and can't use the attribute handling // below (it has to be checked before expansion possibly makes // macros disappear). // // rustc internal (active, allow_internal_unstable, "1.0.0", None, None), // Allows the use of #[allow_internal_unsafe]. This is an // attribute on macro_rules! and can't use the attribute handling // below (it has to be checked before expansion possibly makes // macros disappear). // // rustc internal (active, allow_internal_unsafe, "1.0.0", None, None), // #23121. Array patterns have some hazards yet. (active, slice_patterns, "1.0.0", Some(23121), None), // Allows the definition of `const fn` functions. (active, const_fn, "1.2.0", Some(24111), None), // Allows using #[prelude_import] on glob `use` items. // // rustc internal (active, prelude_import, "1.2.0", None, None), // Allows default type parameters to influence type inference. 
(active, default_type_parameter_fallback, "1.3.0", Some(27336), None), // Allows associated type defaults (active, associated_type_defaults, "1.2.0", Some(29661), None), // allow `repr(simd)`, and importing the various simd intrinsics (active, repr_simd, "1.4.0", Some(27731), None), // Allows cfg(target_feature = "..."). (active, cfg_target_feature, "1.4.0", Some(29717), None), // allow `extern "platform-intrinsic" { ... }` (active, platform_intrinsics, "1.4.0", Some(27731), None), // allow `#[unwind(..)]` // rust runtime internal (active, unwind_attributes, "1.4.0", None, None), // allow the use of `#[naked]` on functions. (active, naked_functions, "1.9.0", Some(32408), None), // allow `#[no_debug]` (active, no_debug, "1.5.0", Some(29721), None), // allow `#[omit_gdb_pretty_printer_section]` // rustc internal. (active, omit_gdb_pretty_printer_section, "1.5.0", None, None), // Allows cfg(target_vendor = "..."). (active, cfg_target_vendor, "1.5.0", Some(29718), None), // Allow attributes on expressions and non-item statements (active, stmt_expr_attributes, "1.6.0", Some(15701), None), // allow using type ascription in expressions (active, type_ascription, "1.6.0", Some(23416), None), // Allows cfg(target_thread_local) (active, cfg_target_thread_local, "1.7.0", Some(29594), None), // rustc internal (active, abi_vectorcall, "1.7.0", None, None), // X..Y patterns (active, exclusive_range_pattern, "1.11.0", Some(37854), None), // impl specialization (RFC 1210) (active, specialization, "1.7.0", Some(31844), None), // Allows cfg(target_has_atomic = "..."). (active, cfg_target_has_atomic, "1.9.0", Some(32976), None), // Allows exhaustive pattern matching on types that contain uninhabited types. (active, exhaustive_patterns, "1.13.0", None, None), // Allows all literals in attribute lists and values of key-value pairs. (active, attr_literals, "1.13.0", Some(34981), None), // Allows untagged unions `union U { ... 
}` (active, untagged_unions, "1.13.0", Some(32836), None), // Used to identify the `compiler_builtins` crate // rustc internal (active, compiler_builtins, "1.13.0", None, None), // Allows attributes on lifetime/type formal parameters in generics (RFC 1327) (active, generic_param_attrs, "1.11.0", Some(34761), None), // Allows #[link(..., cfg(..))] (active, link_cfg, "1.14.0", Some(37406), None), (active, use_extern_macros, "1.15.0", Some(35896), None), // Allows #[target_feature(...)] (active, target_feature, "1.15.0", None, None), // `extern "ptx-*" fn()` (active, abi_ptx, "1.15.0", None, None), // The `repr(i128)` annotation for enums (active, repr128, "1.16.0", Some(35118), None), // The `unadjusted` ABI. Perma unstable. (active, abi_unadjusted, "1.16.0", None, None), // Procedural macros 2.0. (active, proc_macro, "1.16.0", Some(38356), None), // Declarative macros 2.0 (`macro`). (active, decl_macro, "1.17.0", Some(39412), None), // Allows #[link(kind="static-nobundle"...] (active, static_nobundle, "1.16.0", Some(37403), None), // `extern "msp430-interrupt" fn()` (active, abi_msp430_interrupt, "1.16.0", Some(38487), None), // Used to identify crates that contain sanitizer runtimes // rustc internal (active, sanitizer_runtime, "1.17.0", None, None), // Used to identify crates that contain the profiler runtime // rustc internal (active, profiler_runtime, "1.18.0", None, None), // `extern "x86-interrupt" fn()` (active, abi_x86_interrupt, "1.17.0", Some(40180), None), // Allows the `catch {...}` expression (active, catch_expr, "1.17.0", Some(31436), None), // Used to preserve symbols (see llvm.used) (active, used, "1.18.0", Some(40289), None), // Allows module-level inline assembly by way of global_asm!() (active, global_asm, "1.18.0", Some(35119), None), // Allows overlapping impls of marker traits (active, overlapping_marker_traits, "1.18.0", Some(29864), None), // Allows use of the :vis macro fragment specifier (active, macro_vis_matcher, "1.18.0", Some(41022), 
None), // rustc internal (active, abi_thiscall, "1.19.0", None, None), // Allows a test to fail without failing the whole suite (active, allow_fail, "1.19.0", Some(42219), None), // Allows unsized tuple coercion. (active, unsized_tuple_coercion, "1.20.0", Some(42877), None), // Generators (active, generators, "1.21.0", None, None), // Trait aliases (active, trait_alias, "1.24.0", Some(41517), None), // global allocators and their internals (active, global_allocator, "1.20.0", None, None), (active, allocator_internals, "1.20.0", None, None), // #[doc(cfg(...))] (active, doc_cfg, "1.21.0", Some(43781), None), // #[doc(masked)] (active, doc_masked, "1.21.0", Some(44027), None), // #[doc(spotlight)] (active, doc_spotlight, "1.22.0", Some(45040), None), // #[doc(include="some-file")] (active, external_doc, "1.22.0", Some(44732), None), // allow `#[must_use]` on functions and comparison operators (RFC 1940) (active, fn_must_use, "1.21.0", Some(43302), None), // Future-proofing enums/structs with #[non_exhaustive] attribute (RFC 2008) (active, non_exhaustive, "1.22.0", Some(44109), None), // allow `'_` placeholder lifetimes (active, underscore_lifetimes, "1.22.0", Some(44524), None), // Default match binding modes (RFC 2005) (active, match_default_bindings, "1.22.0", Some(42640), None), // Trait object syntax with `dyn` prefix (active, dyn_trait, "1.22.0", Some(44662), Some(Edition::Edition2018)), // `crate` as visibility modifier, synonymous to `pub(crate)` (active, crate_visibility_modifier, "1.23.0", Some(45388), None), // extern types (active, extern_types, "1.23.0", Some(43467), None), // Allow trait methods with arbitrary self types (active, arbitrary_self_types, "1.23.0", Some(44874), None), // `crate` in paths (active, crate_in_paths, "1.23.0", Some(45477), None), // In-band lifetime bindings (e.g. 
`fn foo(x: &'a u8) -> &'a u8`) (active, in_band_lifetimes, "1.23.0", Some(44524), None), // generic associated types (RFC 1598) (active, generic_associated_types, "1.23.0", Some(44265), None), // Resolve absolute paths as paths from other crates (active, extern_absolute_paths, "1.24.0", Some(44660), None), // `foo.rs` as an alternative to `foo/mod.rs` (active, non_modrs_mods, "1.24.0", Some(44660), None), // Termination trait in tests (RFC 1937) (active, termination_trait_test, "1.24.0", Some(48854), None), // Allows use of the :lifetime macro fragment specifier (active, macro_lifetime_matcher, "1.24.0", Some(46895), None), // `extern` in paths (active, extern_in_paths, "1.23.0", Some(44660), None), // Allows `#[repr(transparent)]` attribute on newtype structs (active, repr_transparent, "1.25.0", Some(43036), None), // Use `?` as the Kleene "at most one" operator (active, macro_at_most_once_rep, "1.25.0", Some(48075), None), // Multiple patterns with `|` in `if let` and `while let` (active, if_while_or_patterns, "1.26.0", Some(48215), None), // Parentheses in patterns (active, pattern_parentheses, "1.26.0", None, None), // `use path as _;` and `extern crate c as _;` (active, underscore_imports, "1.26.0", Some(48216), None), // The #[wasm_custom_section] attribute (active, wasm_custom_section, "1.26.0", None, None), // The #![wasm_import_module] attribute (active, wasm_import_module, "1.26.0", None, None), // Allows keywords to be escaped for use as identifiers (active, raw_identifiers, "1.26.0", Some(48589), None), ); declare_features! ( (removed, import_shadowing, "1.0.0", None, None), (removed, managed_boxes, "1.0.0", None, None), // Allows use of unary negate on unsigned integers, e.g. -e for e: u8 (removed, negate_unsigned, "1.0.0", Some(29645), None), (removed, reflect, "1.0.0", Some(27749), None), // A way to temporarily opt out of opt in copy. This will *never* be accepted. 
(removed, opt_out_copy, "1.0.0", None, None), (removed, quad_precision_float, "1.0.0", None, None), (removed, struct_inherit, "1.0.0", None, None), (removed, test_removed_feature, "1.0.0", None, None), (removed, visible_private_types, "1.0.0", None, None), (removed, unsafe_no_drop_flag, "1.0.0", None, None), // Allows using items which are missing stability attributes // rustc internal (removed, unmarked_api, "1.0.0", None, None), (removed, pushpop_unsafe, "1.2.0", None, None), (removed, allocator, "1.0.0", None, None), // Allows the `#[simd]` attribute -- removed in favor of `#[repr(simd)]` (removed, simd, "1.0.0", Some(27731), None), // Merged into `slice_patterns` (removed, advanced_slice_patterns, "1.0.0", Some(23121), None), ); declare_features! ( (stable_removed, no_stack_check, "1.0.0", None, None), ); declare_features! ( (accepted, associated_types, "1.0.0", None, None), // allow overloading augmented assignment operations like `a += b` (accepted, augmented_assignments, "1.8.0", Some(28235), None), // allow empty structs and enum variants with braces (accepted, braced_empty_structs, "1.8.0", Some(29720), None), // Allows indexing into constant arrays. (accepted, const_indexing, "1.24.0", Some(29947), None), (accepted, default_type_params, "1.0.0", None, None), (accepted, globs, "1.0.0", None, None), (accepted, if_let, "1.0.0", None, None), // A temporary feature gate used to enable parser extensions needed // to bootstrap fix for #5723. (accepted, issue_5723_bootstrap, "1.0.0", None, None), (accepted, macro_rules, "1.0.0", None, None), // Allows using #![no_std] (accepted, no_std, "1.6.0", None, None), (accepted, slicing_syntax, "1.0.0", None, None), (accepted, struct_variant, "1.0.0", None, None), // These are used to test this portion of the compiler, they don't actually // mean anything (accepted, test_accepted_feature, "1.0.0", None, None), (accepted, tuple_indexing, "1.0.0", None, None), // Allows macros to appear in the type position. 
(accepted, type_macros, "1.13.0", Some(27245), None), (accepted, while_let, "1.0.0", None, None), // Allows `#[deprecated]` attribute (accepted, deprecated, "1.9.0", Some(29935), None), // `expr?` (accepted, question_mark, "1.13.0", Some(31436), None), // Allows `..` in tuple (struct) patterns (accepted, dotdot_in_tuple_patterns, "1.14.0", Some(33627), None), (accepted, item_like_imports, "1.15.0", Some(35120), None), // Allows using `Self` and associated types in struct expressions and patterns. (accepted, more_struct_aliases, "1.16.0", Some(37544), None), // elide `'static` lifetimes in `static`s and `const`s (accepted, static_in_const, "1.17.0", Some(35897), None), // Allows field shorthands (`x` meaning `x: x`) in struct literal expressions. (accepted, field_init_shorthand, "1.17.0", Some(37340), None), // Allows the definition recursive static items. (accepted, static_recursion, "1.17.0", Some(29719), None), // pub(restricted) visibilities (RFC 1422) (accepted, pub_restricted, "1.18.0", Some(32409), None), // The #![windows_subsystem] attribute (accepted, windows_subsystem, "1.18.0", Some(37499), None), // Allows `break {expr}` with a value inside `loop`s. (accepted, loop_break_value, "1.19.0", Some(37339), None), // Permits numeric fields in struct expressions and patterns. (accepted, relaxed_adts, "1.19.0", Some(35626), None), // Coerces non capturing closures to function pointers (accepted, closure_to_fn_coercion, "1.19.0", Some(39817), None), // Allows attributes on struct literal fields. (accepted, struct_field_attributes, "1.20.0", Some(38814), None), // Allows the definition of associated constants in `trait` or `impl` // blocks. (accepted, associated_consts, "1.20.0", Some(29646), None), // Usage of the `compile_error!` macro (accepted, compile_error, "1.20.0", Some(40872), None), // See rust-lang/rfcs#1414. Allows code like `let x: &'static u32 = &42` to work. 
(accepted, rvalue_static_promotion, "1.21.0", Some(38865), None), // Allow Drop types in constants (RFC 1440) (accepted, drop_types_in_const, "1.22.0", Some(33156), None), // Allows the sysV64 ABI to be specified on all platforms // instead of just the platforms on which it is the C ABI (accepted, abi_sysv64, "1.24.0", Some(36167), None), // Allows `repr(align(16))` struct attribute (RFC 1358) (accepted, repr_align, "1.25.0", Some(33626), None), // allow '|' at beginning of match arms (RFC 1925) (accepted, match_beginning_vert, "1.25.0", Some(44101), None), // Nested groups in `use` (RFC 2128) (accepted, use_nested_groups, "1.25.0", Some(44494), None), // a..=b and ..=b (accepted, inclusive_range_syntax, "1.26.0", Some(28237), None), // allow `..=` in patterns (RFC 1192) (accepted, dotdoteq_in_patterns, "1.26.0", Some(28237), None), // Termination trait in main (RFC 1937) (accepted, termination_trait, "1.26.0", Some(43301), None), // Copy/Clone closures (RFC 2132) (accepted, clone_closures, "1.26.0", Some(44490), None), (accepted, copy_closures, "1.26.0", Some(44490), None), // Allows `impl Trait` in function arguments. (accepted, universal_impl_trait, "1.26.0", Some(34511), None), // Allows `impl Trait` in function return types. (accepted, conservative_impl_trait, "1.26.0", Some(34511), None), // The `i128` type (accepted, i128_type, "1.26.0", Some(35118), None), // Default match binding modes (RFC 2005) (accepted, match_default_bindings, "1.26.0", Some(42640), None), // allow `'_` placeholder lifetimes (accepted, underscore_lifetimes, "1.26.0", Some(44524), None), ); // If you change this, please modify src/doc/unstable-book as well. You must // move that documentation into the relevant place in the other docs, and // remove the chapter on the flag. 
#[derive(PartialEq, Copy, Clone, Debug)] pub enum AttributeType { /// Normal, builtin attribute that is consumed /// by the compiler before the unused_attribute check Normal, /// Builtin attribute that may not be consumed by the compiler /// before the unused_attribute check. These attributes /// will be ignored by the unused_attribute lint Whitelisted, /// Builtin attribute that is only allowed at the crate level CrateLevel, } pub enum AttributeGate { /// Is gated by a given feature gate, reason /// and function to check if enabled Gated(Stability, &'static str, &'static str, fn(&Features) -> bool), /// Ungated attribute, can be used on all release channels Ungated, } impl AttributeGate { fn is_deprecated(&self) -> bool { match *self { Gated(Stability::Deprecated(_), ..) => true, _ => false, } } } #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub enum Stability { Unstable, // Argument is tracking issue link. Deprecated(&'static str), } // fn() is not Debug impl ::std::fmt::Debug for AttributeGate { fn fmt(&self, fmt: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { match *self { Gated(ref stab, name, expl, _) => write!(fmt, "Gated({:?}, {}, {})", stab, name, expl), Ungated => write!(fmt, "Ungated") } } } macro_rules! 
cfg_fn { ($field: ident) => {{ fn f(features: &Features) -> bool { features.$field } f as fn(&Features) -> bool }} } pub fn deprecated_attributes() -> Vec<&'static (&'static str, AttributeType, AttributeGate)> { BUILTIN_ATTRIBUTES.iter().filter(|a| a.2.is_deprecated()).collect() } pub fn is_builtin_attr(attr: &ast::Attribute) -> bool { BUILTIN_ATTRIBUTES.iter().any(|&(builtin_name, _, _)| attr.check_name(builtin_name)) } // Attributes that have a special meaning to rustc or rustdoc pub const BUILTIN_ATTRIBUTES: &'static [(&'static str, AttributeType, AttributeGate)] = &[ // Normal attributes ("warn", Normal, Ungated), ("allow", Normal, Ungated), ("forbid", Normal, Ungated), ("deny", Normal, Ungated), ("macro_reexport", Normal, Ungated), ("macro_use", Normal, Ungated), ("macro_export", Normal, Ungated), ("plugin_registrar", Normal, Ungated), ("cfg", Normal, Ungated), ("cfg_attr", Normal, Ungated), ("main", Normal, Ungated), ("start", Normal, Ungated), ("test", Normal, Ungated), ("bench", Normal, Ungated), ("repr", Normal, Ungated), ("path", Normal, Ungated), ("abi", Normal, Ungated), ("automatically_derived", Normal, Ungated), ("no_mangle", Normal, Ungated), ("no_link", Normal, Ungated), ("derive", Normal, Ungated), ("should_panic", Normal, Ungated), ("ignore", Normal, Ungated), ("no_implicit_prelude", Normal, Ungated), ("reexport_test_harness_main", Normal, Ungated), ("link_args", Normal, Gated(Stability::Unstable, "link_args", "the `link_args` attribute is experimental and not \ portable across platforms, it is recommended to \ use `#[link(name = \"foo\")] instead", cfg_fn!(link_args))), ("macro_escape", Normal, Ungated), // RFC #1445. 
("structural_match", Whitelisted, Gated(Stability::Unstable, "structural_match", "the semantics of constant patterns is \ not yet settled", cfg_fn!(structural_match))), // RFC #2008 ("non_exhaustive", Whitelisted, Gated(Stability::Unstable, "non_exhaustive", "non exhaustive is an experimental feature", cfg_fn!(non_exhaustive))), ("plugin", CrateLevel, Gated(Stability::Unstable, "plugin", "compiler plugins are experimental \ and possibly buggy", cfg_fn!(plugin))), ("no_std", CrateLevel, Ungated), ("no_core", CrateLevel, Gated(Stability::Unstable, "no_core", "no_core is experimental", cfg_fn!(no_core))), ("lang", Normal, Gated(Stability::Unstable, "lang_items", "language items are subject to change", cfg_fn!(lang_items))), ("linkage", Whitelisted, Gated(Stability::Unstable, "linkage", "the `linkage` attribute is experimental \ and not portable across platforms", cfg_fn!(linkage))), ("thread_local", Whitelisted, Gated(Stability::Unstable, "thread_local", "`#[thread_local]` is an experimental feature, and does \ not currently handle destructors.", cfg_fn!(thread_local))), ("rustc_on_unimplemented", Normal, Gated(Stability::Unstable, "on_unimplemented", "the `#[rustc_on_unimplemented]` attribute \ is an experimental feature", cfg_fn!(on_unimplemented))), ("rustc_const_unstable", Normal, Gated(Stability::Unstable, "rustc_const_unstable", "the `#[rustc_const_unstable]` attribute \ is an internal feature", cfg_fn!(rustc_const_unstable))), ("global_allocator", Normal, Gated(Stability::Unstable, "global_allocator", "the `#[global_allocator]` attribute is \ an experimental feature", cfg_fn!(global_allocator))), ("default_lib_allocator", Whitelisted, Gated(Stability::Unstable, "allocator_internals", "the `#[default_lib_allocator]` \ attribute is an experimental feature", cfg_fn!(allocator_internals))), ("needs_allocator", Normal, Gated(Stability::Unstable, "allocator_internals", "the `#[needs_allocator]` \ attribute is an experimental \ feature", 
cfg_fn!(allocator_internals))), ("panic_runtime", Whitelisted, Gated(Stability::Unstable, "panic_runtime", "the `#[panic_runtime]` attribute is \ an experimental feature", cfg_fn!(panic_runtime))), ("needs_panic_runtime", Whitelisted, Gated(Stability::Unstable, "needs_panic_runtime", "the `#[needs_panic_runtime]` \ attribute is an experimental \ feature", cfg_fn!(needs_panic_runtime))), ("rustc_variance", Normal, Gated(Stability::Unstable, "rustc_attrs", "the `#[rustc_variance]` attribute \ is just used for rustc unit tests \ and will never be stable", cfg_fn!(rustc_attrs))), ("rustc_regions", Normal, Gated(Stability::Unstable, "rustc_attrs", "the `#[rustc_regions]` attribute \ is just used for rustc unit tests \ and will never be stable", cfg_fn!(rustc_attrs))), ("rustc_error", Whitelisted, Gated(Stability::Unstable, "rustc_attrs", "the `#[rustc_error]` attribute \ is just used for rustc unit tests \ and will never be stable", cfg_fn!(rustc_attrs))), ("rustc_if_this_changed", Whitelisted, Gated(Stability::Unstable, "rustc_attrs", "the `#[rustc_if_this_changed]` attribute \ is just used for rustc unit tests \ and will never be stable", cfg_fn!(rustc_attrs))), ("rustc_then_this_would_need", Whitelisted, Gated(Stability::Unstable, "rustc_attrs", "the `#[rustc_if_this_changed]` attribute \ is just used for rustc unit tests \ and will never be stable", cfg_fn!(rustc_attrs))), ("rustc_dirty", Whitelisted, Gated(Stability::Unstable, "rustc_attrs", "the `#[rustc_dirty]` attribute \ is just used for rustc unit tests \ and will never be stable", cfg_fn!(rustc_attrs))), ("rustc_clean", Whitelisted, Gated(Stability::Unstable, "rustc_attrs", "the `#[rustc_clean]` attribute \ is just used for rustc unit tests \ and will never be stable", cfg_fn!(rustc_attrs))), ("rustc_partition_reused", Whitelisted, Gated(Stability::Unstable, "rustc_attrs", "this attribute \ is just used for rustc unit tests \ and will never be stable", cfg_fn!(rustc_attrs))), ("rustc_partition_translated", 
Whitelisted, Gated(Stability::Unstable, "rustc_attrs", "this attribute \ is just used for rustc unit tests \ and will never be stable", cfg_fn!(rustc_attrs))), ("rustc_serialize_exclude_null", Normal, Gated(Stability::Unstable, "rustc_attrs", "the `#[rustc_serialize_exclude_null]` attribute \ is an internal-only feature", cfg_fn!(rustc_attrs))), ("rustc_synthetic", Whitelisted, Gated(Stability::Unstable, "rustc_attrs", "this attribute \ is just used for rustc unit tests \ and will never be stable", cfg_fn!(rustc_attrs))), ("rustc_symbol_name", Whitelisted, Gated(Stability::Unstable, "rustc_attrs", "internal rustc attributes will never be stable", cfg_fn!(rustc_attrs))), ("rustc_item_path", Whitelisted, Gated(Stability::Unstable, "rustc_attrs", "internal rustc attributes will never be stable", cfg_fn!(rustc_attrs))), ("rustc_mir", Whitelisted, Gated(Stability::Unstable, "rustc_attrs", "the `#[rustc_mir]` attribute \ is just used for rustc unit tests \ and will never be stable", cfg_fn!(rustc_attrs))), ("rustc_inherit_overflow_checks", Whitelisted, Gated(Stability::Unstable, "rustc_attrs", "the `#[rustc_inherit_overflow_checks]` \ attribute is just used to control \ overflow checking behavior of several \ libcore functions that are inlined \ across crates and will never be stable", cfg_fn!(rustc_attrs))), ("rustc_dump_program_clauses", Whitelisted, Gated(Stability::Unstable, "rustc_attrs", "the `#[rustc_dump_program_clauses]` \ attribute is just used for rustc unit \ tests and will never be stable", cfg_fn!(rustc_attrs))), // RFC #2094 ("nll", Whitelisted, Gated(Stability::Unstable, "nll", "Non lexical lifetimes", cfg_fn!(nll))), ("compiler_builtins", Whitelisted, Gated(Stability::Unstable, "compiler_builtins", "the `#[compiler_builtins]` attribute is used to \ identify the `compiler_builtins` crate which \ contains compiler-rt intrinsics and will never be \ stable", cfg_fn!(compiler_builtins))), ("sanitizer_runtime", Whitelisted, Gated(Stability::Unstable, 
"sanitizer_runtime", "the `#[sanitizer_runtime]` attribute is used to \ identify crates that contain the runtime of a \ sanitizer and will never be stable", cfg_fn!(sanitizer_runtime))), ("profiler_runtime", Whitelisted, Gated(Stability::Unstable, "profiler_runtime", "the `#[profiler_runtime]` attribute is used to \ identify the `profiler_builtins` crate which \ contains the profiler runtime and will never be \ stable", cfg_fn!(profiler_runtime))), ("allow_internal_unstable", Normal, Gated(Stability::Unstable, "allow_internal_unstable", EXPLAIN_ALLOW_INTERNAL_UNSTABLE, cfg_fn!(allow_internal_unstable))), ("allow_internal_unsafe", Normal, Gated(Stability::Unstable, "allow_internal_unsafe", EXPLAIN_ALLOW_INTERNAL_UNSAFE, cfg_fn!(allow_internal_unsafe))), ("fundamental", Whitelisted, Gated(Stability::Unstable, "fundamental", "the `#[fundamental]` attribute \ is an experimental feature", cfg_fn!(fundamental))), ("proc_macro_derive", Normal, Ungated), ("rustc_copy_clone_marker", Whitelisted, Gated(Stability::Unstable, "rustc_attrs", "internal implementation detail", cfg_fn!(rustc_attrs))), // FIXME: #14408 whitelist docs since rustdoc looks at them ("doc", Whitelisted, Ungated), // FIXME: #14406 these are processed in trans, which happens after the // lint pass ("cold", Whitelisted, Ungated), ("naked", Whitelisted, Gated(Stability::Unstable, "naked_functions", "the `#[naked]` attribute \ is an experimental feature", cfg_fn!(naked_functions))), ("target_feature", Whitelisted, Gated( Stability::Unstable, "target_feature", "the `#[target_feature]` attribute is an experimental feature", cfg_fn!(target_feature))), ("export_name", Whitelisted, Ungated), ("inline", Whitelisted, Ungated), ("link", Whitelisted, Ungated), ("link_name", Whitelisted, Ungated), ("link_section", Whitelisted, Ungated), ("no_builtins", Whitelisted, Ungated), ("no_mangle", Whitelisted, Ungated), ("no_debug", Whitelisted, Gated( Stability::Deprecated("https://github.com/rust-lang/rust/issues/29721"), 
"no_debug", "the `#[no_debug]` attribute was an experimental feature that has been \ deprecated due to lack of demand", cfg_fn!(no_debug))), ("wasm_import_module", Normal, Gated(Stability::Unstable, "wasm_import_module", "experimental attribute", cfg_fn!(wasm_import_module))), ("omit_gdb_pretty_printer_section", Whitelisted, Gated(Stability::Unstable, "omit_gdb_pretty_printer_section", "the `#[omit_gdb_pretty_printer_section]` \ attribute is just used for the Rust test \ suite", cfg_fn!(omit_gdb_pretty_printer_section))), ("unsafe_destructor_blind_to_params", Normal, Gated(Stability::Deprecated("https://github.com/rust-lang/rust/issues/34761"), "dropck_parametricity", "unsafe_destructor_blind_to_params has been replaced by \ may_dangle and will be removed in the future", cfg_fn!(dropck_parametricity))), ("may_dangle", Normal, Gated(Stability::Unstable, "dropck_eyepatch", "may_dangle has unstable semantics and may be removed in the future", cfg_fn!(dropck_eyepatch))), ("unwind", Whitelisted, Gated(Stability::Unstable, "unwind_attributes", "#[unwind] is experimental", cfg_fn!(unwind_attributes))), ("used", Whitelisted, Gated( Stability::Unstable, "used", "the `#[used]` attribute is an experimental feature", cfg_fn!(used))), // used in resolve ("prelude_import", Whitelisted, Gated(Stability::Unstable, "prelude_import", "`#[prelude_import]` is for use by rustc only", cfg_fn!(prelude_import))), // FIXME: #14407 these are only looked at on-demand so we can't // guarantee they'll have already been checked ("rustc_deprecated", Whitelisted, Ungated), ("must_use", Whitelisted, Ungated), ("stable", Whitelisted, Ungated), ("unstable", Whitelisted, Ungated), ("deprecated", Normal, Ungated), ("rustc_paren_sugar", Normal, Gated(Stability::Unstable, "unboxed_closures", "unboxed_closures are still evolving", cfg_fn!(unboxed_closures))), ("windows_subsystem", Whitelisted, Ungated), ("proc_macro_attribute", Normal, Gated(Stability::Unstable, "proc_macro", "attribute proc macros are 
currently unstable", cfg_fn!(proc_macro))), ("proc_macro", Normal, Gated(Stability::Unstable, "proc_macro", "function-like proc macros are currently unstable", cfg_fn!(proc_macro))), ("rustc_derive_registrar", Normal, Gated(Stability::Unstable, "rustc_derive_registrar", "used internally by rustc", cfg_fn!(rustc_attrs))), ("allow_fail", Normal, Gated(Stability::Unstable, "allow_fail", "allow_fail attribute is currently unstable", cfg_fn!(allow_fail))), ("rustc_std_internal_symbol", Whitelisted, Gated(Stability::Unstable, "rustc_attrs", "this is an internal attribute that will \ never be stable", cfg_fn!(rustc_attrs))), // whitelists "identity-like" conversion methods to suggest on type mismatch ("rustc_conversion_suggestion", Whitelisted, Gated(Stability::Unstable, "rustc_attrs", "this is an internal attribute that will \ never be stable", cfg_fn!(rustc_attrs))), ("rustc_args_required_const", Whitelisted, Gated(Stability::Unstable, "rustc_attrs", "never will be stable", cfg_fn!(rustc_attrs))), ("wasm_custom_section", Whitelisted, Gated(Stability::Unstable, "wasm_custom_section", "attribute is currently unstable", cfg_fn!(wasm_custom_section))), // Crate level attributes ("crate_name", CrateLevel, Ungated), ("crate_type", CrateLevel, Ungated), ("crate_id", CrateLevel, Ungated), ("feature", CrateLevel, Ungated), ("no_start", CrateLevel, Ungated), ("no_main", CrateLevel, Ungated), ("no_builtins", CrateLevel, Ungated), ("recursion_limit", CrateLevel, Ungated), ("type_length_limit", CrateLevel, Ungated), ]; // cfg(...)'s that are feature gated const GATED_CFGS: &[(&str, &str, fn(&Features) -> bool)] = &[ // (name in cfg, feature, function to check if the feature is enabled) ("target_feature", "cfg_target_feature", cfg_fn!(cfg_target_feature)), ("target_vendor", "cfg_target_vendor", cfg_fn!(cfg_target_vendor)), ("target_thread_local", "cfg_target_thread_local", cfg_fn!(cfg_target_thread_local)), ("target_has_atomic", "cfg_target_has_atomic", 
cfg_fn!(cfg_target_has_atomic)), ]; #[derive(Debug, Eq, PartialEq)] pub struct GatedCfg { span: Span, index: usize, } impl GatedCfg { pub fn gate(cfg: &ast::MetaItem) -> Option<GatedCfg> { let name = cfg.name().as_str(); GATED_CFGS.iter() .position(|info| info.0 == name) .map(|idx| { GatedCfg { span: cfg.span, index: idx } }) } pub fn check_and_emit(&self, sess: &ParseSess, features: &Features) { let (cfg, feature, has_feature) = GATED_CFGS[self.index]; if !has_feature(features) && !self.span.allows_unstable() { let explain = format!("`cfg({})` is experimental and subject to change", cfg); emit_feature_err(sess, feature, self.span, GateIssue::Language, &explain); } } } struct Context<'a> { features: &'a Features, parse_sess: &'a ParseSess, plugin_attributes: &'a [(String, AttributeType)], } macro_rules! gate_feature_fn { ($cx: expr, $has_feature: expr, $span: expr, $name: expr, $explain: expr, $level: expr) => {{ let (cx, has_feature, span, name, explain, level) = ($cx, $has_feature, $span, $name, $explain, $level); let has_feature: bool = has_feature(&$cx.features); debug!("gate_feature(feature = {:?}, span = {:?}); has? {}", name, span, has_feature); if !has_feature && !span.allows_unstable() { leveled_feature_err(cx.parse_sess, name, span, GateIssue::Language, explain, level) .emit(); } }} } macro_rules! 
gate_feature { ($cx: expr, $feature: ident, $span: expr, $explain: expr) => { gate_feature_fn!($cx, |x:&Features| x.$feature, $span, stringify!($feature), $explain, GateStrength::Hard) }; ($cx: expr, $feature: ident, $span: expr, $explain: expr, $level: expr) => { gate_feature_fn!($cx, |x:&Features| x.$feature, $span, stringify!($feature), $explain, $level) }; } impl<'a> Context<'a> { fn check_attribute(&self, attr: &ast::Attribute, is_macro: bool) { debug!("check_attribute(attr = {:?})", attr); let name = unwrap_or!(attr.name(), return).as_str(); for &(n, ty, ref gateage) in BUILTIN_ATTRIBUTES { if name == n { if let Gated(_, name, desc, ref has_feature) = *gateage { gate_feature_fn!(self, has_feature, attr.span, name, desc, GateStrength::Hard); } else if name == "doc" { if let Some(content) = attr.meta_item_list() { if content.iter().any(|c| c.check_name("include")) { gate_feature!(self, external_doc, attr.span, "#[doc(include = \"...\")] is experimental" ); } } } debug!("check_attribute: {:?} is builtin, {:?}, {:?}", attr.path, ty, gateage); return; } } for &(ref n, ref ty) in self.plugin_attributes { if attr.path == &**n { // Plugins can't gate attributes, so we don't check for it // unlike the code above; we only use this loop to // short-circuit to avoid the checks below debug!("check_attribute: {:?} is registered by a plugin, {:?}", attr.path, ty); return; } } if name.starts_with("rustc_") { gate_feature!(self, rustc_attrs, attr.span, "unless otherwise specified, attributes \ with the prefix `rustc_` \ are reserved for internal compiler diagnostics"); } else if name.starts_with("derive_") { gate_feature!(self, custom_derive, attr.span, EXPLAIN_DERIVE_UNDERSCORE); } else if !attr::is_known(attr) { // Only run the custom attribute lint during regular // feature gate checking. 
Macro gating runs // before the plugin attributes are registered // so we skip this then if !is_macro { gate_feature!(self, custom_attribute, attr.span, &format!("The attribute `{}` is currently \ unknown to the compiler and \ may have meaning \ added to it in the future", attr.path)); } } } } pub fn check_attribute(attr: &ast::Attribute, parse_sess: &ParseSess, features: &Features) { let cx = Context { features: features, parse_sess: parse_sess, plugin_attributes: &[] }; cx.check_attribute(attr, true); } pub fn find_lang_feature_accepted_version(feature: &str) -> Option<&'static str> { ACCEPTED_FEATURES.iter().find(|t| t.0 == feature).map(|t| t.1) } fn find_lang_feature_issue(feature: &str) -> Option<u32> { if let Some(info) = ACTIVE_FEATURES.iter().find(|t| t.0 == feature) { let issue = info.2; // FIXME (#28244): enforce that active features have issue numbers // assert!(issue.is_some()) issue } else { // search in Accepted, Removed, or Stable Removed features let found = ACCEPTED_FEATURES.iter().chain(REMOVED_FEATURES).chain(STABLE_REMOVED_FEATURES) .find(|t| t.0 == feature); match found { Some(&(_, _, issue)) => issue, None => panic!("Feature `{}` is not declared anywhere", feature), } } } pub enum GateIssue { Language, Library(Option<u32>) } #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum GateStrength { /// A hard error. (Most feature gates should use this.) Hard, /// Only a warning. (Use this only as backwards-compatibility demands.) 
Soft, } pub fn emit_feature_err(sess: &ParseSess, feature: &str, span: Span, issue: GateIssue, explain: &str) { feature_err(sess, feature, span, issue, explain).emit(); } pub fn feature_err<'a>(sess: &'a ParseSess, feature: &str, span: Span, issue: GateIssue, explain: &str) -> DiagnosticBuilder<'a> { leveled_feature_err(sess, feature, span, issue, explain, GateStrength::Hard) } fn leveled_feature_err<'a>(sess: &'a ParseSess, feature: &str, span: Span, issue: GateIssue, explain: &str, level: GateStrength) -> DiagnosticBuilder<'a> { let diag = &sess.span_diagnostic; let issue = match issue { GateIssue::Language => find_lang_feature_issue(feature), GateIssue::Library(lib) => lib, }; let explanation = if let Some(n) = issue { format!("{} (see issue #{})", explain, n) } else { explain.to_owned() }; let mut err = match level { GateStrength::Hard => { diag.struct_span_err_with_code(span, &explanation, stringify_error_code!(E0658)) } GateStrength::Soft => diag.struct_span_warn(span, &explanation), }; // #23973: do not suggest `#![feature(...)]` if we are in beta/stable if sess.unstable_features.is_nightly_build() { err.help(&format!("add #![feature({})] to the \ crate attributes to enable", feature)); } // If we're on stable and only emitting a "soft" warning, add a note to // clarify that the feature isn't "on" (rather than being on but // warning-worthy). 
if !sess.unstable_features.is_nightly_build() && level == GateStrength::Soft { err.help("a nightly build of the compiler is required to enable this feature"); } err } const EXPLAIN_BOX_SYNTAX: &'static str = "box expression syntax is experimental; you can call `Box::new` instead."; pub const EXPLAIN_STMT_ATTR_SYNTAX: &'static str = "attributes on expressions are experimental."; pub const EXPLAIN_ASM: &'static str = "inline assembly is not stable enough for use and is subject to change"; pub const EXPLAIN_GLOBAL_ASM: &'static str = "`global_asm!` is not stable enough for use and is subject to change"; pub const EXPLAIN_LOG_SYNTAX: &'static str = "`log_syntax!` is not stable enough for use and is subject to change"; pub const EXPLAIN_CONCAT_IDENTS: &'static str = "`concat_idents` is not stable enough for use and is subject to change"; pub const EXPLAIN_TRACE_MACROS: &'static str = "`trace_macros` is not stable enough for use and is subject to change"; pub const EXPLAIN_ALLOW_INTERNAL_UNSTABLE: &'static str = "allow_internal_unstable side-steps feature gating and stability checks"; pub const EXPLAIN_ALLOW_INTERNAL_UNSAFE: &'static str = "allow_internal_unsafe side-steps the unsafe_code lint"; pub const EXPLAIN_CUSTOM_DERIVE: &'static str = "`#[derive]` for custom traits is deprecated and will be removed in the future."; pub const EXPLAIN_DEPR_CUSTOM_DERIVE: &'static str = "`#[derive]` for custom traits is deprecated and will be removed in the future. 
\ Prefer using procedural macro custom derive."; pub const EXPLAIN_DERIVE_UNDERSCORE: &'static str = "attributes of the form `#[derive_*]` are reserved for the compiler"; pub const EXPLAIN_VIS_MATCHER: &'static str = ":vis fragment specifier is experimental and subject to change"; pub const EXPLAIN_LIFETIME_MATCHER: &'static str = ":lifetime fragment specifier is experimental and subject to change"; pub const EXPLAIN_PLACEMENT_IN: &'static str = "placement-in expression syntax is experimental and subject to change."; pub const EXPLAIN_UNSIZED_TUPLE_COERCION: &'static str = "Unsized tuple coercion is not stable enough for use and is subject to change"; pub const EXPLAIN_MACRO_AT_MOST_ONCE_REP: &'static str = "Using the `?` macro Kleene operator for \"at most one\" repetition is unstable"; struct PostExpansionVisitor<'a> { context: &'a Context<'a>, } macro_rules! gate_feature_post { ($cx: expr, $feature: ident, $span: expr, $explain: expr) => {{ let (cx, span) = ($cx, $span); if !span.allows_unstable() { gate_feature!(cx.context, $feature, span, $explain) } }}; ($cx: expr, $feature: ident, $span: expr, $explain: expr, $level: expr) => {{ let (cx, span) = ($cx, $span); if !span.allows_unstable() { gate_feature!(cx.context, $feature, span, $explain, $level) } }} } impl<'a> PostExpansionVisitor<'a> { fn check_abi(&self, abi: Abi, span: Span) { match abi { Abi::RustIntrinsic => { gate_feature_post!(&self, intrinsics, span, "intrinsics are subject to change"); }, Abi::PlatformIntrinsic => { gate_feature_post!(&self, platform_intrinsics, span, "platform intrinsics are experimental and possibly buggy"); }, Abi::Vectorcall => { gate_feature_post!(&self, abi_vectorcall, span, "vectorcall is experimental and subject to change"); }, Abi::Thiscall => { gate_feature_post!(&self, abi_thiscall, span, "thiscall is experimental and subject to change"); }, Abi::RustCall => { gate_feature_post!(&self, unboxed_closures, span, "rust-call ABI is subject to change"); }, Abi::PtxKernel => { 
gate_feature_post!(&self, abi_ptx, span, "PTX ABIs are experimental and subject to change"); }, Abi::Unadjusted => { gate_feature_post!(&self, abi_unadjusted, span, "unadjusted ABI is an implementation detail and perma-unstable"); }, Abi::Msp430Interrupt => { gate_feature_post!(&self, abi_msp430_interrupt, span, "msp430-interrupt ABI is experimental and subject to change"); }, Abi::X86Interrupt => { gate_feature_post!(&self, abi_x86_interrupt, span, "x86-interrupt ABI is experimental and subject to change"); }, // Stable Abi::Cdecl | Abi::Stdcall | Abi::Fastcall | Abi::Aapcs | Abi::Win64 | Abi::SysV64 | Abi::Rust | Abi::C | Abi::System => {} } } } fn contains_novel_literal(item: &ast::MetaItem) -> bool { use ast::MetaItemKind::*; use ast::NestedMetaItemKind::*; match item.node { Word => false, NameValue(ref lit) => !lit.node.is_str(), List(ref list) => list.iter().any(|li| { match li.node { MetaItem(ref mi) => contains_novel_literal(mi), Literal(_) => true, } }), } } impl<'a> PostExpansionVisitor<'a> { fn whole_crate_feature_gates(&mut self, _krate: &ast::Crate) { for &(ident, span) in &*self.context.parse_sess.non_modrs_mods.borrow() { if !span.allows_unstable() { let cx = &self.context; let level = GateStrength::Hard; let has_feature = cx.features.non_modrs_mods; let name = "non_modrs_mods"; debug!("gate_feature(feature = {:?}, span = {:?}); has? 
{}", name, span, has_feature); if !has_feature && !span.allows_unstable() { leveled_feature_err( cx.parse_sess, name, span, GateIssue::Language, "mod statements in non-mod.rs files are unstable", level ) .help(&format!("on stable builds, rename this file to {}{}mod.rs", ident, path::MAIN_SEPARATOR)) .emit(); } } } } } impl<'a> Visitor<'a> for PostExpansionVisitor<'a> { fn visit_attribute(&mut self, attr: &ast::Attribute) { if !attr.span.allows_unstable() { // check for gated attributes self.context.check_attribute(attr, false); } if attr.check_name("doc") { if let Some(content) = attr.meta_item_list() { if content.len() == 1 && content[0].check_name("cfg") { gate_feature_post!(&self, doc_cfg, attr.span, "#[doc(cfg(...))] is experimental" ); } else if content.iter().any(|c| c.check_name("masked")) { gate_feature_post!(&self, doc_masked, attr.span, "#[doc(masked)] is experimental" ); } else if content.iter().any(|c| c.check_name("spotlight")) { gate_feature_post!(&self, doc_spotlight, attr.span, "#[doc(spotlight)] is experimental" ); } } } // allow attr_literals in #[repr(align(x))] let mut is_repr_align = false; if attr.path == "repr" { if let Some(content) = attr.meta_item_list() { is_repr_align = content.iter().any(|c| c.check_name("align")); } } if self.context.features.proc_macro && attr::is_known(attr) { return } if !is_repr_align { let meta = panictry!(attr.parse_meta(self.context.parse_sess)); if contains_novel_literal(&meta) { gate_feature_post!(&self, attr_literals, attr.span, "non-string literals in attributes, or string \ literals in top-level positions, are experimental"); } } } fn visit_name(&mut self, sp: Span, name: ast::Name) { if !name.as_str().is_ascii() { gate_feature_post!(&self, non_ascii_idents, self.context.parse_sess.codemap().def_span(sp), "non-ascii idents are not fully supported."); } } fn visit_use_tree(&mut self, use_tree: &'a ast::UseTree, id: NodeId, _nested: bool) { if let ast::UseTreeKind::Simple(Some(ident)) = use_tree.kind { if 
ident.name == "_" { gate_feature_post!(&self, underscore_imports, use_tree.span, "renaming imports with `_` is unstable"); } } visit::walk_use_tree(self, use_tree, id); } fn visit_item(&mut self, i: &'a ast::Item) { match i.node { ast::ItemKind::ExternCrate(_) => { if i.ident.name == "_" { gate_feature_post!(&self, underscore_imports, i.span, "renaming extern crates with `_` is unstable"); } if let Some(attr) = attr::find_by_name(&i.attrs[..], "macro_reexport") { gate_feature_post!(&self, macro_reexport, attr.span, "macros re-exports are experimental \ and possibly buggy"); } } ast::ItemKind::ForeignMod(ref foreign_module) => { self.check_abi(foreign_module.abi, i.span); } ast::ItemKind::Fn(..) => { if attr::contains_name(&i.attrs[..], "plugin_registrar") { gate_feature_post!(&self, plugin_registrar, i.span, "compiler plugins are experimental and possibly buggy"); } if attr::contains_name(&i.attrs[..], "start") { gate_feature_post!(&self, start, i.span, "a #[start] function is an experimental \ feature whose signature may change \ over time"); } if attr::contains_name(&i.attrs[..], "main") { gate_feature_post!(&self, main, i.span, "declaration of a nonstandard #[main] \ function may change over time, for now \ a top-level `fn main()` is required"); } if let Some(attr) = attr::find_by_name(&i.attrs[..], "must_use") { gate_feature_post!(&self, fn_must_use, attr.span, "`#[must_use]` on functions is experimental", GateStrength::Soft); } } ast::ItemKind::Struct(..) => { if let Some(attr) = attr::find_by_name(&i.attrs[..], "repr") { for item in attr.meta_item_list().unwrap_or_else(Vec::new) { if item.check_name("simd") { gate_feature_post!(&self, repr_simd, attr.span, "SIMD types are experimental and possibly buggy"); } if item.check_name("transparent") { gate_feature_post!(&self, repr_transparent, attr.span, "the `#[repr(transparent)]` attribute \ is experimental"); } } } } ast::ItemKind::TraitAlias(..) 
=> { gate_feature_post!(&self, trait_alias, i.span, "trait aliases are not yet fully implemented"); } ast::ItemKind::Impl(_, polarity, defaultness, _, _, _, ref impl_items) => { if polarity == ast::ImplPolarity::Negative { gate_feature_post!(&self, optin_builtin_traits, i.span, "negative trait bounds are not yet fully implemented; \ use marker types for now"); } if let ast::Defaultness::Default = defaultness { gate_feature_post!(&self, specialization, i.span, "specialization is unstable"); } for impl_item in impl_items { if let ast::ImplItemKind::Method(..) = impl_item.node { if let Some(attr) = attr::find_by_name(&impl_item.attrs[..], "must_use") { gate_feature_post!(&self, fn_must_use, attr.span, "`#[must_use]` on methods is experimental", GateStrength::Soft); } } } } ast::ItemKind::Trait(ast::IsAuto::Yes, ..) => { gate_feature_post!(&self, optin_builtin_traits, i.span, "auto traits are experimental and possibly buggy"); } ast::ItemKind::MacroDef(ast::MacroDef { legacy: false, .. }) => { let msg = "`macro` is experimental"; gate_feature_post!(&self, decl_macro, i.span, msg); } _ => {} } visit::walk_item(self, i); } fn visit_foreign_item(&mut self, i: &'a ast::ForeignItem) { match i.node { ast::ForeignItemKind::Fn(..) | ast::ForeignItemKind::Static(..) 
=> { let link_name = attr::first_attr_value_str_by_name(&i.attrs, "link_name"); let links_to_llvm = match link_name { Some(val) => val.as_str().starts_with("llvm."), _ => false }; if links_to_llvm { gate_feature_post!(&self, link_llvm_intrinsics, i.span, "linking to LLVM intrinsics is experimental"); } } ast::ForeignItemKind::Ty => { gate_feature_post!(&self, extern_types, i.span, "extern types are experimental"); } } visit::walk_foreign_item(self, i) } fn visit_ty(&mut self, ty: &'a ast::Ty) { match ty.node { ast::TyKind::BareFn(ref bare_fn_ty) => { self.check_abi(bare_fn_ty.abi, ty.span); } ast::TyKind::TraitObject(_, ast::TraitObjectSyntax::Dyn) => { gate_feature_post!(&self, dyn_trait, ty.span, "`dyn Trait` syntax is unstable"); } _ => {} } visit::walk_ty(self, ty) } fn visit_fn_ret_ty(&mut self, ret_ty: &'a ast::FunctionRetTy) { if let ast::FunctionRetTy::Ty(ref output_ty) = *ret_ty { if output_ty.node != ast::TyKind::Never { self.visit_ty(output_ty) } } } fn visit_expr(&mut self, e: &'a ast::Expr) { match e.node { ast::ExprKind::Box(_) => { gate_feature_post!(&self, box_syntax, e.span, EXPLAIN_BOX_SYNTAX); } ast::ExprKind::Type(..) => { gate_feature_post!(&self, type_ascription, e.span, "type ascription is experimental"); } ast::ExprKind::InPlace(..) => { gate_feature_post!(&self, placement_in_syntax, e.span, EXPLAIN_PLACEMENT_IN); } ast::ExprKind::Yield(..) => { gate_feature_post!(&self, generators, e.span, "yield syntax is experimental"); } ast::ExprKind::Catch(_) => { gate_feature_post!(&self, catch_expr, e.span, "`catch` expression is experimental"); } ast::ExprKind::IfLet(ref pats, ..) | ast::ExprKind::WhileLet(ref pats, ..) 
=> { if pats.len() > 1 { gate_feature_post!(&self, if_while_or_patterns, e.span, "multiple patterns in `if let` and `while let` are unstable"); } } _ => {} } visit::walk_expr(self, e); } fn visit_arm(&mut self, arm: &'a ast::Arm) { visit::walk_arm(self, arm) } fn visit_pat(&mut self, pattern: &'a ast::Pat) { match pattern.node { PatKind::Slice(_, Some(ref subslice), _) => { gate_feature_post!(&self, slice_patterns, subslice.span, "syntax for subslices in slice patterns is not yet stabilized"); } PatKind::Box(..) => { gate_feature_post!(&self, box_patterns, pattern.span, "box pattern syntax is experimental"); } PatKind::Range(_, _, RangeEnd::Excluded) => { gate_feature_post!(&self, exclusive_range_pattern, pattern.span, "exclusive range pattern syntax is experimental"); } PatKind::Paren(..) => { gate_feature_post!(&self, pattern_parentheses, pattern.span, "parentheses in patterns are unstable"); } _ => {} } visit::walk_pat(self, pattern) } fn visit_fn(&mut self, fn_kind: FnKind<'a>, fn_decl: &'a ast::FnDecl, span: Span, _node_id: NodeId) { // check for const fn declarations if let FnKind::ItemFn(_, _, Spanned { node: ast::Constness::Const, .. }, _, _, _) = fn_kind { gate_feature_post!(&self, const_fn, span, "const fn is unstable"); } // stability of const fn methods are covered in // visit_trait_item and visit_impl_item below; this is // because default methods don't pass through this // point. match fn_kind { FnKind::ItemFn(_, _, _, abi, _, _) | FnKind::Method(_, &ast::MethodSig { abi, .. 
}, _, _) => { self.check_abi(abi, span); } _ => {} } visit::walk_fn(self, fn_kind, fn_decl, span); } fn visit_trait_item(&mut self, ti: &'a ast::TraitItem) { match ti.node { ast::TraitItemKind::Method(ref sig, ref block) => { if block.is_none() { self.check_abi(sig.abi, ti.span); } if sig.constness.node == ast::Constness::Const { gate_feature_post!(&self, const_fn, ti.span, "const fn is unstable"); } } ast::TraitItemKind::Type(_, ref default) => { // We use two if statements instead of something like match guards so that both // of these errors can be emitted if both cases apply. if default.is_some() { gate_feature_post!(&self, associated_type_defaults, ti.span, "associated type defaults are unstable"); } if ti.generics.is_parameterized() { gate_feature_post!(&self, generic_associated_types, ti.span, "generic associated types are unstable"); } } _ => {} } visit::walk_trait_item(self, ti); } fn visit_impl_item(&mut self, ii: &'a ast::ImplItem) { if ii.defaultness == ast::Defaultness::Default { gate_feature_post!(&self, specialization, ii.span, "specialization is unstable"); } match ii.node { ast::ImplItemKind::Method(ref sig, _) => { if sig.constness.node == ast::Constness::Const { gate_feature_post!(&self, const_fn, ii.span, "const fn is unstable"); } } ast::ImplItemKind::Type(_) if ii.generics.is_parameterized() => { gate_feature_post!(&self, generic_associated_types, ii.span, "generic associated types are unstable"); } _ => {} } visit::walk_impl_item(self, ii); } fn visit_path(&mut self, path: &'a ast::Path, _id: NodeId) { for segment in &path.segments { if segment.identifier.name == keywords::Crate.name() { gate_feature_post!(&self, crate_in_paths, segment.span, "`crate` in paths is experimental"); } else if segment.identifier.name == keywords::Extern.name() { gate_feature_post!(&self, extern_in_paths, segment.span, "`extern` in paths is experimental"); } } visit::walk_path(self, path); } fn visit_vis(&mut self, vis: &'a ast::Visibility) { if let 
ast::VisibilityKind::Crate(ast::CrateSugar::JustCrate) = vis.node { gate_feature_post!(&self, crate_visibility_modifier, vis.span, "`crate` visibility modifier is experimental"); } visit::walk_vis(self, vis); } fn visit_generic_param(&mut self, param: &'a ast::GenericParam) { let (attrs, explain) = match *param { ast::GenericParam::Lifetime(ref ld) => (&ld.attrs, "attributes on lifetime bindings are experimental"), ast::GenericParam::Type(ref t) => (&t.attrs, "attributes on type parameter bindings are experimental"), }; if !attrs.is_empty() { gate_feature_post!(&self, generic_param_attrs, attrs[0].span, explain); } visit::walk_generic_param(self, param) } } pub fn get_features(span_handler: &Handler, krate_attrs: &[ast::Attribute], edition: Edition) -> Features { let mut features = Features::new(); let mut feature_checker = FeatureChecker::default(); for &(.., f_edition, set) in ACTIVE_FEATURES.iter() { if let Some(f_edition) = f_edition { if edition >= f_edition { // FIXME(Manishearth) there is currently no way to set // lang features by edition set(&mut features, DUMMY_SP); } } } for attr in krate_attrs { if !attr.check_name("feature") { continue } match attr.meta_item_list() { None => { span_err!(span_handler, attr.span, E0555, "malformed feature attribute, expected #![feature(...)]"); } Some(list) => { for mi in list { let name = if let Some(word) = mi.word() { word.name() } else { span_err!(span_handler, mi.span, E0556, "malformed feature, expected just one word"); continue }; if let Some(&(_, _, _, _, set)) = ACTIVE_FEATURES.iter() .find(|& &(n, ..)| name == n) { set(&mut features, mi.span); feature_checker.collect(&features, mi.span); } else if let Some(&(_, _, _)) = REMOVED_FEATURES.iter() .find(|& &(n, _, _)| name == n) .or_else(|| STABLE_REMOVED_FEATURES.iter() .find(|& &(n, _, _)| name == n)) { span_err!(span_handler, mi.span, E0557, "feature has been removed"); } else if let Some(&(_, _, _)) = ACCEPTED_FEATURES.iter() .find(|& &(n, _, _)| name == n) { 
features.declared_stable_lang_features.push((name, mi.span)); } else { features.declared_lib_features.push((name, mi.span)); } } } } } feature_checker.check(span_handler); features } /// A collector for mutually exclusive and interdependent features and their flag spans. #[derive(Default)] struct FeatureChecker { proc_macro: Option<Span>, custom_attribute: Option<Span>, } impl FeatureChecker { // If this method turns out to be a hotspot due to branching, // the branching can be eliminated by modifying `set!()` to set these spans // only for the features that need to be checked for mutual exclusion. fn collect(&mut self, features: &Features, span: Span) { if features.proc_macro { // If self.proc_macro is None, set to Some(span) self.proc_macro = self.proc_macro.or(Some(span)); } if features.custom_attribute { self.custom_attribute = self.custom_attribute.or(Some(span)); } } fn check(self, handler: &Handler) { if let (Some(pm_span), Some(ca_span)) = (self.proc_macro, self.custom_attribute) { handler.struct_span_err(pm_span, "Cannot use `#![feature(proc_macro)]` and \ `#![feature(custom_attribute)] at the same time") .span_note(ca_span, "`#![feature(custom_attribute)]` declared here") .emit(); FatalError.raise(); } } } pub fn check_crate(krate: &ast::Crate, sess: &ParseSess, features: &Features, plugin_attributes: &[(String, AttributeType)], unstable: UnstableFeatures) { maybe_stage_features(&sess.span_diagnostic, krate, unstable); let ctx = Context { features, parse_sess: sess, plugin_attributes, }; if !features.raw_identifiers { for &span in sess.raw_identifier_spans.borrow().iter() { if !span.allows_unstable() { gate_feature!(&ctx, raw_identifiers, span, "raw identifiers are experimental and subject to change" ); } } } let visitor = &mut PostExpansionVisitor { context: &ctx }; visitor.whole_crate_feature_gates(krate); visit::walk_crate(visitor, krate); } #[derive(Clone, Copy, PartialEq, Eq, Hash)] pub enum UnstableFeatures { /// Hard errors for unstable features 
are active, as on /// beta/stable channels. Disallow, /// Allow features to be activated, as on nightly. Allow, /// Errors are bypassed for bootstrapping. This is required any time /// during the build that feature-related lints are set to warn or above /// because the build turns on warnings-as-errors and uses lots of unstable /// features. As a result, this is always required for building Rust itself. Cheat } impl UnstableFeatures { pub fn from_environment() -> UnstableFeatures { // Whether this is a feature-staged build, i.e. on the beta or stable channel let disable_unstable_features = option_env!("CFG_DISABLE_UNSTABLE_FEATURES").is_some(); // Whether we should enable unstable features for bootstrapping let bootstrap = env::var("RUSTC_BOOTSTRAP").is_ok(); match (disable_unstable_features, bootstrap) { (_, true) => UnstableFeatures::Cheat, (true, _) => UnstableFeatures::Disallow, (false, _) => UnstableFeatures::Allow } } pub fn is_nightly_build(&self) -> bool { match *self { UnstableFeatures::Allow | UnstableFeatures::Cheat => true, _ => false, } } } fn maybe_stage_features(span_handler: &Handler, krate: &ast::Crate, unstable: UnstableFeatures) { let allow_features = match unstable { UnstableFeatures::Allow => true, UnstableFeatures::Disallow => false, UnstableFeatures::Cheat => true }; if !allow_features { for attr in &krate.attrs { if attr.check_name("feature") { let release_channel = option_env!("CFG_RELEASE_CHANNEL").unwrap_or("(unknown)"); span_err!(span_handler, attr.span, E0554, "#![feature] may not be used on the {} release channel", release_channel); } } } }
43.287014
100
0.526399
0a4409b810432ab3c2b82f704d22599d22adbf92
530
use serde::{Deserialize, Serialize};

/// Response envelope for the order-confirmation API call.
///
/// Field names serialize in camelCase via `rename_all`; see `Data` for the
/// fields that explicitly opt back out of that rule.
#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ConfirmOrderResp {
    // Numeric status code returned by the server.
    pub code: i64,
    // Status message accompanying `code`.
    pub message: String,
    // ttl value echoed by the server — presumably a time-to-live; TODO confirm
    // against the API docs.
    pub ttl: i64,
    // Payload; `None` when the response carries no data object.
    pub data: Option<Data>,
}

/// Payload of a successful order confirmation.
#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Data {
    // Order state string as reported by the server.
    pub state: String,
    // NOTE: the explicit `rename` keeps the wire name snake_case
    // ("order_id"), overriding the struct-level camelCase rule.
    #[serde(rename = "order_id")]
    pub order_id: String,
    // Same as above: stays "pay_data" on the wire despite `rename_all`.
    #[serde(rename = "pay_data")]
    pub pay_data: String,
}
25.238095
67
0.671698
08d80fc42aae26648b6c601816321a1ecb0c1a08
105
//! Module manifest for this crate: `little_helpers` is the only module
//! exposed publicly; the numbered modules (`one` … `eight`) are internal.

pub mod little_helpers;

mod one;
mod two;
mod three;
mod four;
mod five;
mod six;
mod seven;
mod eight;
9.545455
23
0.72381
3398854acf2283e75a617b4d267cc0072b6be436
1,421
struct Solution;

impl Solution {
    /// LeetCode 727, "Minimum Window Subsequence".
    ///
    /// Returns the shortest contiguous substring of `s` that contains `t` as
    /// a subsequence. On a length tie the leftmost window wins; if no window
    /// exists (or `s` is empty), the empty string is returned. An empty `t`
    /// is a subsequence of the empty window, so it also yields `""`.
    ///
    /// Runs in O(m * n) time and space, where m = t.len(), n = s.len().
    fn min_window(s: String, t: String) -> String {
        let s: Vec<char> = s.chars().collect();
        let t: Vec<char> = t.chars().collect();
        let (m, n) = (t.len(), s.len());

        // Early return for the degenerate cases. Besides being the correct
        // answer, this guards the window-length arithmetic below: with
        // m == 0 we would have dp[0][j] == j + 1 > j, and `j - dp[m][j] + 1`
        // would underflow a usize (panic in debug builds).
        if m == 0 || n == 0 {
            return String::new();
        }

        // dp[i][j] = 1 + start index of the latest window inside s[..j] that
        // contains t[..i] as a subsequence; 0 means "no such window". The +1
        // shift lets 0 double as the sentinel.
        let mut dp: Vec<Vec<usize>> = vec![vec![0; n + 1]; m + 1];
        for j in 0..=n {
            dp[0][j] = j + 1; // empty needle matches the empty window at j
        }
        for i in 1..=m {
            for j in 1..=n {
                dp[i][j] = if t[i - 1] == s[j - 1] {
                    // Consume s[j-1] to match t[i-1]; the window start is
                    // wherever the shorter prefixes started.
                    dp[i - 1][j - 1]
                } else {
                    // s[j-1] is skipped; the start carries over.
                    dp[i][j - 1]
                };
            }
        }

        // Scan every right edge and keep the shortest window; strict `<`
        // preserves the leftmost window on ties.
        let mut best: Option<(usize, usize)> = None; // (start, len)
        for j in 1..=n {
            if dp[m][j] != 0 {
                let start = dp[m][j] - 1;
                let len = j - start;
                if best.map_or(true, |(_, best_len)| len < best_len) {
                    best = Some((start, len));
                }
            }
        }

        match best {
            Some((start, len)) => s[start..start + len].iter().collect(),
            None => String::new(),
        }
    }
}

#[test]
fn test() {
    assert_eq!(
        Solution::min_window("abcdebdde".to_string(), "bde".to_string()),
        "bcde"
    );
    assert_eq!(
        Solution::min_window(
            "jmeqksfrsdcmsiwvaovztaqenprpvnbstl".to_string(),
            "l".to_string()
        ),
        "l"
    );
}
27.862745
66
0.388459
1ad8f5b5c8ca691713fe44561e38e7b00abb2885
17,144
use std::{ collections::BTreeMap, convert::{TryFrom, TryInto}, io, }; use crate::{cid::Cid, ipld::kind::Node, Error, Result}; // TODO: https://github.com/cbor/test-vectors /// TAG ID for IPLD Content identifier, registered with IANA. pub const TAG_IPLD_CID: u64 = 42; /// Recursion limit for nested Cbor objects. pub const RECURSION_LIMIT: u32 = 1000; /// Cbor type, sole purpose is to correspond with [Basic] data-model. #[derive(Clone)] pub enum Cbor { Major0(Info, u64), // uint 0-23,24,25,26,27 Major1(Info, u64), // nint 0-23,24,25,26,27 Major2(Info, Vec<u8>), // byts 0-23,24,25,26,27,31 Major3(Info, Vec<u8>), // text 0-23,24,25,26,27,31 Major4(Info, Vec<Cbor>), // list 0-23,24,25,26,27,31 Major5(Info, BTreeMap<String, Cbor>), // dict 0-23,24,25,26,27,31 Major6(Info, Tag), // tags similar to major0 Major7(Info, SimpleValue), // type refer SimpleValue } impl TryFrom<&dyn Node> for Cbor { type Error = Error; fn try_from(node: &dyn Node) -> Result<Cbor> { use crate::ipld::kind::{Key, Kind::*}; use Cbor::*; let val: Cbor = match node.to_kind() { Null => Cbor::try_from(SimpleValue::Null)?, Bool => match node.to_bool().unwrap() { true => Cbor::try_from(SimpleValue::True)?, false => Cbor::try_from(SimpleValue::False)?, }, Integer => match node.to_integer().unwrap() { num if num >= 0 => { let num: u64 = err_at!(FailConvert, num.try_into())?; Major0(num.into(), num) } num => { let num: u64 = err_at!(FailConvert, u64::try_from(i128::abs(num)))? 
- 1; Major1(num.into(), num) } }, Float => Cbor::try_from(SimpleValue::F64(node.to_float().unwrap()))?, Bytes => { let byts = node.as_bytes().unwrap().to_vec(); let n: u64 = err_at!(FailConvert, byts.len().try_into())?; Major2(n.into(), byts) } Text => { let text = node.as_ffi_string().unwrap().as_bytes().to_vec(); let n: u64 = err_at!(FailConvert, text.len().try_into())?; Major3(n.into(), text) } Link => { let tag = Tag::Link(node.as_link().unwrap().clone()); Major6(u64::from(tag.clone()).into(), tag) } List => { let mut items = vec![]; for x in node.iter() { items.push(Cbor::try_from(x)?) } let n: u64 = err_at!(FailConvert, items.len().try_into())?; Major4(n.into(), items) } Map => { let mut map: BTreeMap<String, Cbor> = BTreeMap::new(); for (key, value) in node.iter_entries() { let key = match key { Key::Text(key) => Ok(key), _ => err_at!(FailConvert, msg: "invalid key type"), }?; let value = Cbor::try_from(value)?; map.insert(key, value); } let n: u64 = err_at!(FailConvert, map.len().try_into())?; Major5(n.into(), map) } }; Ok(val) } } impl Cbor { /// Serialize this cbor value. pub fn encode(&self, buf: &mut Vec<u8>) -> Result<usize> { self.do_encode(buf, 1) } fn do_encode(&self, buf: &mut Vec<u8>, depth: u32) -> Result<usize> { if depth > RECURSION_LIMIT { return err_at!(FailCbor, msg: "encode recursion limit exceeded"); } match self { Cbor::Major0(info, num) => { let n = encode_hdr(Major::M0, *info, buf)?; Ok(n + encode_addnl(*num, buf)?) } Cbor::Major1(info, num) => { let n = encode_hdr(Major::M1, *info, buf)?; Ok(n + encode_addnl(*num - 1, buf)?) 
} Cbor::Major2(info, byts) => { let n = encode_hdr(Major::M2, *info, buf)?; let m = encode_addnl(byts.len().try_into().unwrap(), buf)?; buf.copy_from_slice(&byts); Ok(n + m + byts.len()) } Cbor::Major3(info, text) => { let n = encode_hdr(Major::M3, *info, buf)?; let m = encode_addnl(text.len().try_into().unwrap(), buf)?; buf.copy_from_slice(text); Ok(n + m + text.len()) } Cbor::Major4(info, list) => { let n = encode_hdr(Major::M4, *info, buf)?; let m = encode_addnl(list.len().try_into().unwrap(), buf)?; let mut acc = 0; for x in list { acc += x.do_encode(buf, depth + 1)?; } Ok(n + m + acc) } Cbor::Major5(info, dict) => { let n = encode_hdr(Major::M5, *info, buf)?; let m = encode_addnl(dict.len().try_into().unwrap(), buf)?; let mut acc = 0; for (key, val) in dict.iter() { let info: Info = { let num: u64 = key.len().try_into().unwrap(); num.into() }; acc += Cbor::Major3(info, key.as_bytes().to_vec()).encode(buf)?; acc += val.do_encode(buf, depth + 1)?; } Ok(n + m + acc) } Cbor::Major6(info, tagg) => { let n = encode_hdr(Major::M6, *info, buf)?; let m = tagg.encode(buf)?; Ok(n + m) } Cbor::Major7(info, sval) => { let n = encode_hdr(Major::M7, *info, buf)?; let m = sval.encode(buf)?; Ok(n + m) } } } /// Deserialize a bytes from reader `r` to Cbor value. 
pub fn decode<R: io::Read>(r: &mut R) -> Result<Cbor> { Self::do_decode(r, 1) } fn do_decode<R: io::Read>(r: &mut R, depth: u32) -> Result<Cbor> { if depth > RECURSION_LIMIT { return err_at!(FailCbor, msg: "decode recursion limt exceeded"); } let (major, info) = decode_hdr(r)?; let val = match major { Major::M0 => Cbor::Major0(info, decode_addnl(info, r)?), Major::M1 => Cbor::Major1(info, decode_addnl(info, r)?), Major::M2 => { let n: usize = decode_addnl(info, r)?.try_into().unwrap(); let mut data = vec![0; n]; err_at!(IOError, r.read(&mut data))?; Cbor::Major2(info, data) } Major::M3 => { let n: usize = decode_addnl(info, r)?.try_into().unwrap(); let mut data = vec![0; n]; err_at!(IOError, r.read(&mut data))?; Cbor::Major3(info, data) } Major::M4 => { let mut list: Vec<Cbor> = vec![]; let n = decode_addnl(info, r)?; for _ in 0..n { list.push(Self::do_decode(r, depth + 1)?); } Cbor::Major4(info, list) } Major::M5 => { let mut dict: BTreeMap<String, Cbor> = BTreeMap::new(); let n = decode_addnl(info, r)?; for _ in 0..n { let key = extract_key(Self::decode(r)?)?; let val = Self::do_decode(r, depth + 1)?; dict.insert(key, val); } Cbor::Major5(info, dict) } Major::M6 => Cbor::Major6(info, Tag::decode(info, r)?), Major::M7 => Cbor::Major7(info, SimpleValue::decode(info, r)?), }; Ok(val) } } /// 3-bit value for major-type. #[derive(Copy, Clone)] pub enum Major { M0 = 0, M1, M2, M3, M4, M5, M6, M7, } impl TryFrom<u8> for Major { type Error = Error; fn try_from(b: u8) -> Result<Major> { let val = match b { 0 => Major::M0, 1 => Major::M1, 2 => Major::M2, 3 => Major::M3, 4 => Major::M4, 5 => Major::M5, 6 => Major::M6, 7 => Major::M7, _ => err_at!(Fatal, msg: "unreachable")?, }; Ok(val) } } /// 5-bit value for additional info. 
#[derive(Copy, Clone)] pub enum Info { Tiny(u8), // 0..=23 U8, U16, U32, U64, Reserved28, Reserved29, Reserved30, Indefinite, } impl TryFrom<u8> for Info { type Error = Error; fn try_from(b: u8) -> Result<Info> { let val = match b { 0..=23 => Info::Tiny(b), 24 => Info::U8, 25 => Info::U16, 26 => Info::U32, 27 => Info::U64, 28 => Info::Reserved28, 29 => Info::Reserved29, 30 => Info::Reserved30, 31 => Info::Indefinite, _ => err_at!(Fatal, msg: "unreachable")?, }; Ok(val) } } impl From<u64> for Info { fn from(num: u64) -> Info { match num { 0..=23 => Info::Tiny(num as u8), n if n <= (u8::MAX as u64) => Info::U8, n if n <= (u16::MAX as u64) => Info::U16, n if n <= (u32::MAX as u64) => Info::U32, _ => Info::U64, } } } fn encode_hdr(major: Major, info: Info, buf: &mut Vec<u8>) -> Result<usize> { let info = match info { Info::Tiny(val) if val <= 23 => val, Info::Tiny(val) => err_at!(FailCbor, msg: "{} > 23", val)?, Info::U8 => 24, Info::U16 => 25, Info::U32 => 26, Info::U64 => 27, Info::Reserved28 => 28, Info::Reserved29 => 29, Info::Reserved30 => 30, Info::Indefinite => 31, }; buf.push((major as u8) << 5 | info); Ok(1) } fn decode_hdr<R: io::Read>(r: &mut R) -> Result<(Major, Info)> { let mut scratch = [0_u8; 8]; err_at!(IOError, r.read(&mut scratch[..1]))?; let b = scratch[0]; let major = (b & 0xe0) >> 5; let info = b & 0x1f; Ok((major.try_into()?, info.try_into()?)) } fn encode_addnl(num: u64, buf: &mut Vec<u8>) -> Result<usize> { let mut scratch = [0_u8; 8]; let n = match num { 0..=23 => 0, n if n <= (u8::MAX as u64) => { scratch.copy_from_slice(&(n as u8).to_be_bytes()); 1 } n if n <= (u16::MAX as u64) => { scratch.copy_from_slice(&(n as u16).to_be_bytes()); 2 } n if n <= (u32::MAX as u64) => { scratch.copy_from_slice(&(n as u32).to_be_bytes()); 4 } n => { scratch.copy_from_slice(&n.to_be_bytes()); 8 } }; buf.copy_from_slice(&scratch[..n]); Ok(n) } fn decode_addnl<R: io::Read>(info: Info, r: &mut R) -> Result<u64> { let mut scratch = [0_u8; 8]; let n = match info { 
Info::Tiny(num) => num as u64, Info::U8 => { err_at!(IOError, r.read(&mut scratch[..1]))?; u8::from_be_bytes(scratch[..1].try_into().unwrap()) as u64 } Info::U16 => { err_at!(IOError, r.read(&mut scratch[..2]))?; u16::from_be_bytes(scratch[..2].try_into().unwrap()) as u64 } Info::U32 => { err_at!(IOError, r.read(&mut scratch[..4]))?; u32::from_be_bytes(scratch[..4].try_into().unwrap()) as u64 } Info::U64 => { err_at!(IOError, r.read(&mut scratch[..8]))?; u64::from_be_bytes(scratch[..8].try_into().unwrap()) as u64 } _ => err_at!(FailCbor, msg: "no additional value")?, }; Ok(n) } #[derive(Clone)] pub enum Tag { Link(Cid), // TAG_IPLD_CID } impl From<Tag> for u64 { fn from(tag: Tag) -> u64 { match tag { Tag::Link(_) => TAG_IPLD_CID, } } } impl Tag { fn encode(&self, buf: &mut Vec<u8>) -> Result<usize> { match self { Tag::Link(cid) => { buf.copy_from_slice(&TAG_IPLD_CID.to_be_bytes()); let n = { let data = cid.encode()?; let m: u64 = err_at!(FailCbor, data.len().try_into())?; Cbor::Major2(m.into(), data).encode(buf)? }; Ok(1 + n) } } } fn decode<R: io::Read>(info: Info, r: &mut R) -> Result<Tag> { match decode_addnl(info, r)? { 42 => match Cbor::decode(r)? 
{ Cbor::Major2(_, bytes) => { let (cid, _) = Cid::decode(&bytes)?; Ok(Tag::Link(cid)) } _ => err_at!(FailCbor, msg: "invalid cid"), }, num => err_at!(FailCbor, msg: "invalid tag value {}", num), } } } #[derive(Copy, Clone)] pub enum SimpleValue { // 0..=19 unassigned Unassigned, True, // 20, tiny simple-value False, // 21, tiny simple-value Null, // 22, tiny simple-value Undefined, // 23, tiny simple-value Reserved24(u8), // 24, one-byte simple-value F16(u16), // 25, not-implemented F32(f32), // 26, single-precision float F64(f64), // 27, single-precision float // 28..=30 unassigned Break, // 31 // 32..=255 on-byte simple-value unassigned } impl TryFrom<SimpleValue> for Cbor { type Error = Error; fn try_from(sval: SimpleValue) -> Result<Cbor> { use SimpleValue::*; let val = match sval { Unassigned => err_at!(FailConvert, msg: "simple-value-unassigned")?, True => Cbor::Major7(Info::Tiny(20), sval), False => Cbor::Major7(Info::Tiny(21), sval), Null => Cbor::Major7(Info::Tiny(22), sval), Undefined => err_at!(FailConvert, msg: "simple-value-undefined")?, Reserved24(_) => err_at!(FailConvert, msg: "simple-value-unassigned1")?, F16(_) => err_at!(FailConvert, msg: "simple-value-f16")?, F32(_) => Cbor::Major7(Info::U32, sval), F64(_) => Cbor::Major7(Info::U64, sval), Break => err_at!(FailConvert, msg: "simple-value-break")?, }; Ok(val) } } impl SimpleValue { fn encode(&self, buf: &mut Vec<u8>) -> Result<usize> { use SimpleValue::*; let mut scratch = [0_u8; 8]; let n = match self { True | False | Null | Undefined | Break | Unassigned => 0, Reserved24(num) => { scratch[0] = *num; 1 } F16(f) => { scratch.copy_from_slice(&f.to_be_bytes()); 2 } F32(f) => { scratch.copy_from_slice(&f.to_be_bytes()); 4 } F64(f) => { scratch.copy_from_slice(&f.to_be_bytes()); 8 } }; buf.copy_from_slice(&scratch[..n]); Ok(n) } fn decode<R: io::Read>(info: Info, r: &mut R) -> Result<SimpleValue> { let mut scratch = [0_u8; 8]; let val = match info { Info::Tiny(20) => SimpleValue::True, Info::Tiny(21) 
=> SimpleValue::False, Info::Tiny(22) => SimpleValue::Null, Info::Tiny(23) => err_at!(FailCbor, msg: "simple-value-undefined")?, Info::Tiny(_) => err_at!(FailCbor, msg: "simple-value-unassigned")?, Info::U8 => err_at!(FailCbor, msg: "simple-value-unassigned1")?, Info::U16 => err_at!(FailCbor, msg: "simple-value-f16")?, Info::U32 => { err_at!(IOError, r.read(&mut scratch[..4]))?; let val = f32::from_be_bytes(scratch[..4].try_into().unwrap()); SimpleValue::F32(val) } Info::U64 => { err_at!(IOError, r.read(&mut scratch[..8]))?; let val = f64::from_be_bytes(scratch[..8].try_into().unwrap()); SimpleValue::F64(val) } Info::Reserved28 => err_at!(FailCbor, msg: "simple-value-reserved")?, Info::Reserved29 => err_at!(FailCbor, msg: "simple-value-reserved")?, Info::Reserved30 => err_at!(FailCbor, msg: "simple-value-reserved")?, Info::Indefinite => err_at!(FailCbor, msg: "simple-value-break")?, }; Ok(val) } } fn extract_key(val: Cbor) -> Result<String> { match val { Cbor::Major3(_, s) => { let key = err_at!(FailConvert, std::str::from_utf8(&s))?; Ok(key.to_string()) } _ => err_at!(FailCbor, msg: "invalid key"), } }
32.90595
92
0.459694
28b88e7b25b7580663e369c2bf478367ffd5816f
254
#[macro_use] extern crate nickel; use nickel::Nickel; fn main() { let mut server = Nickel::new(); server.utilize(router! { get "**" => |_req, _res| { "Hello world!" } }); server.listen("127.0.0.1:6767"); }
15.875
36
0.511811
c12d00ccb8775076f75977ba4b4b28b586fbedb2
1,091
use super::vec3::{Point3, Vec3}; #[derive(Default)] pub struct Ray { pub orig: Point3, pub dir: Vec3, } impl Ray { pub fn new(origin: Point3, direction: Vec3) -> Ray { Ray { orig: origin, dir: direction, } } pub fn origin(&self) -> Point3 { self.orig } pub fn direction(&self) -> Vec3 { self.dir } pub fn at(&self, t: f64) -> Point3 { self.orig + t * self.dir } } #[test] fn origin_test() { let orig = Point3::new(1.0, 2.0, 3.0); let dir = Vec3::new(4.0, 5.0, 6.0); let r = Ray::new(orig, dir); assert_eq!(r.origin(), orig); } #[test] fn dir_test() { let orig = Point3::new(1.0, 2.0, 3.0); let dir = Vec3::new(4.0, 5.0, 6.0); let r = Ray::new(orig, dir); assert_eq!(r.direction(), dir); } #[test] fn at_test() { let orig = Point3::new(1.0, 2.0, 3.0); let dir = Vec3::new(4.0, 5.0, 6.0); let t = 2.0; let r = Ray::new(orig, dir); assert_eq!( r.at(t), Point3 { e: [9.0, 12.0, 15.0] } ); }
18.491525
56
0.483043
71b6253f58a66a1931f942aacc421669180ebb78
81
/// Gets the current terminal size pub fn get() -> Option<super::Size> { None }
16.2
37
0.654321
5d68f34afb29c1cec1f0561b341fb6372d08c24d
1,319
use x86_64::structures::gdt::{Descriptor, GlobalDescriptorTable, SegmentSelector}; use x86_64::structures::tss::TaskStateSegment; use x86_64::VirtAddr; use lazy_static::lazy_static; pub fn init() { GDT.0.load(); use x86_64::instructions::segmentation::set_cs; use x86_64::instructions::tables::load_tss; unsafe { set_cs(GDT.1.code_selector); load_tss(GDT.1.tss_selector); } } lazy_static! { static ref GDT:(GlobalDescriptorTable,Selectors)={ let mut gdt=GlobalDescriptorTable::new(); let code_selector=gdt.add_entry(Descriptor::kernel_code_segment()); let tss_selector=gdt.add_entry(Descriptor::tss_segment(&TSS)); (gdt,Selectors{code_selector,tss_selector}) }; } struct Selectors { code_selector: SegmentSelector, tss_selector: SegmentSelector, } pub const DOUBLE_FAULT_IST_INDEX: u16 = 0; lazy_static! { static ref TSS:TaskStateSegment={ let mut tss=TaskStateSegment::new(); tss.interrupt_stack_table[DOUBLE_FAULT_IST_INDEX as usize]={ const STACK_SIZE:usize=4096; static mut STACK:[u8;STACK_SIZE]=[0;STACK_SIZE]; let stack_start=VirtAddr::from_ptr(unsafe{&STACK}); let stack_end=stack_start+STACK_SIZE; stack_end }; tss }; }
28.06383
82
0.677786
e957253687b5fdddf384fb9b66401b24a772122d
3,053
use crate::extn::core::kernel; use crate::extn::core::kernel::require::RelativePath; use crate::extn::prelude::*; pub fn integer(interp: &mut Artichoke, mut arg: Value, base: Option<Value>) -> Result<Value, Error> { let base = base.and_then(|base| interp.convert(base)); // Safety: // // Extract the `Copy` radix integer first since implicit conversions can // trigger garbage collections. let base = interp.try_convert_mut(base)?; let arg = interp.try_convert_mut(&mut arg)?; let integer = kernel::integer::method(arg, base)?; Ok(interp.convert(integer)) } pub fn load(interp: &mut Artichoke, path: Value) -> Result<Value, Error> { let success = kernel::require::load(interp, path)?; Ok(interp.convert(bool::from(success))) } pub fn print<T>(interp: &mut Artichoke, args: T) -> Result<Value, Error> where T: IntoIterator<Item = Value>, { for value in args { let display = value.to_s(interp); interp.print(&display)?; } Ok(Value::nil()) } pub fn puts<T>(interp: &mut Artichoke, args: T) -> Result<Value, Error> where T: IntoIterator<Item = Value>, { fn puts_foreach(interp: &mut Artichoke, value: &Value) -> Result<(), Error> { // TODO(GH-310): Use `Value::implicitly_convert_to_array` when // implemented so `Value`s that respond to `to_ary` are converted // and iterated over. 
if let Ok(array) = value.try_convert_into_mut::<Vec<_>>(interp) { for value in &array { puts_foreach(interp, value)?; } } else { let display = value.to_s(interp); interp.puts(&display)?; } Ok(()) } let mut args = args.into_iter(); if let Some(first) = args.next() { puts_foreach(interp, &first)?; for value in args { puts_foreach(interp, &value)?; } } else { interp.print(b"\n")?; } Ok(Value::nil()) } pub fn p<T>(interp: &mut Artichoke, args: T) -> Result<Value, Error> where T: IntoIterator<Item = Value>, { let mut args = args.into_iter().peekable(); if let Some(first) = args.next() { let display = first.inspect(interp); interp.puts(&display)?; if args.peek().is_none() { return Ok(first); } let mut result = vec![first]; for value in args { let display = value.inspect(interp); interp.puts(&display)?; result.push(value); } interp.try_convert_mut(result) } else { Ok(Value::nil()) } } pub fn require(interp: &mut Artichoke, path: Value) -> Result<Value, Error> { let success = kernel::require::require(interp, path)?; Ok(interp.convert(bool::from(success))) } pub fn require_relative(interp: &mut Artichoke, path: Value) -> Result<Value, Error> { let relative_base = RelativePath::try_from_interp(interp)?; let success = kernel::require::require_relative(interp, path, relative_base)?; Ok(interp.convert(bool::from(success))) }
31.474227
101
0.603341
898e1d5ac7519889f019be7ff099eec2bc6e3ea9
110
fn main() { let q = requestty::questions![Input { when: todo!(), when: todo!(), }]; }
15.714286
41
0.445455
01a9de20418a6c845973644ac7e96f5ea4ce1f54
754
/* * Copyright (C) 2014 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "TestNativeAcos.rs" #pragma rs_fp_relaxed // Don't edit this file! It is auto-generated by frameworks/rs/api/gen_runtime.
35.904762
80
0.742706
eb766aa26447d69a63ab4621e50b58f2c8d85ecd
787
//! JSON printer and parser which tries to follow //! [protobuf conventions](https://developers.google.com/protocol-buffers/docs/proto3#json) mod base64; mod float; mod json_name; mod parse; mod print; mod rfc_3339; mod well_known_wrapper; pub(crate) use self::json_name::json_name; pub use self::parse::merge_from_str; pub use self::parse::merge_from_str_with_options; pub use self::parse::parse_dynamic_from_str; pub use self::parse::parse_dynamic_from_str_with_options; pub use self::parse::parse_from_str; pub use self::parse::parse_from_str_with_options; pub use self::parse::ParseError; pub use self::parse::ParseOptions; pub use self::print::print_to_string; pub use self::print::print_to_string_with_options; pub use self::print::PrintError; pub use self::print::PrintOptions;
31.48
91
0.795426
f8da46943fd060afb28753e99bd9d9c43251f646
29,911
// Take a look at the license at the top of the repository in the LICENSE file. //! Module that contains the basic infrastructure for subclassing `GObject`. use crate::object::{Cast, ObjectSubclassIs, ObjectType}; use crate::translate::*; use crate::{Closure, Object, StaticType, Type, Value}; use std::marker; use std::mem; use std::ptr; use std::{any::Any, collections::HashMap}; use super::SignalId; /// A newly registered `glib::Type` that is currently still being initialized. /// /// This allows running additional type-setup functions. #[derive(Debug, PartialEq, Eq)] pub struct InitializingType<T>(pub(crate) Type, pub(crate) marker::PhantomData<*const T>); impl<T> ToGlib for InitializingType<T> { type GlibType = ffi::GType; fn to_glib(&self) -> ffi::GType { self.0.to_glib() } } /// Struct used for the instance private data of the GObject. struct PrivateStruct<T: ObjectSubclass> { imp: T, instance_data: Option<HashMap<Type, Box<dyn Any + Send + Sync>>>, } /// Trait implemented by structs that implement a `GObject` C instance struct. /// /// The struct must be `#[repr(C)]` and have the parent type's instance struct /// as the first field. /// /// See [`basic::InstanceStruct`] for a basic implementation of this that can /// be used most of the time and should only not be used if additional fields are /// required in the instance struct. /// /// [`basic::InstanceStruct`]: ../basic/struct.InstanceStruct.html pub unsafe trait InstanceStruct: Sized + 'static { /// Corresponding object subclass type for this instance struct. type Type: ObjectSubclass; /// Returns the implementation for from this instance struct, that /// is the implementor of [`ObjectImpl`] or subtraits. 
/// /// [`ObjectImpl`]: ../object/trait.ObjectImpl.html fn get_impl(&self) -> &Self::Type { unsafe { let data = Self::Type::type_data(); let private_offset = data.as_ref().get_impl_offset(); let ptr: *const u8 = self as *const _ as *const u8; let imp_ptr = ptr.offset(private_offset); let imp = imp_ptr as *const Self::Type; &*imp } } /// Returns the class struct for this specific instance. fn get_class(&self) -> &<Self::Type as ObjectSubclass>::Class { unsafe { &**(self as *const _ as *const *const <Self::Type as ObjectSubclass>::Class) } } /// Instance specific initialization. /// /// This is automatically called during instance initialization and must call `instance_init()` /// of the parent class. fn instance_init(&mut self) { unsafe { let obj = from_glib_borrow::<_, Object>(self as *mut _ as *mut gobject_ffi::GObject); let obj = Borrowed::new(obj.into_inner().unsafe_cast()); let mut obj = InitializingObject(obj); <<Self::Type as ObjectSubclass>::ParentType as IsSubclassable<Self::Type>>::instance_init( &mut obj, ); } } } /// Trait implemented by structs that implement a `GObject` C class struct. /// /// The struct must be `#[repr(C)]` and have the parent type's class struct /// as the first field. /// /// See [`basic::ClassStruct`] for a basic implementation of this that can /// be used most of the time and should only not be used if additional fields are /// required in the class struct, e.g. for declaring new virtual methods. /// /// [`basic::ClassStruct`]: ../basic/struct.ClassStruct.html pub unsafe trait ClassStruct: Sized + 'static { /// Corresponding object subclass type for this class struct. type Type: ObjectSubclass; /// Override the vfuncs of all parent types. /// /// This is automatically called during type initialization. 
fn class_init(&mut self) { unsafe { let base = &mut *(self as *mut _ as *mut crate::Class<<Self::Type as ObjectSubclass>::ParentType>); <<Self::Type as ObjectSubclass>::ParentType as IsSubclassable<Self::Type>>::class_init( base, ); } } } /// Trait for subclassable class structs. pub unsafe trait IsSubclassable<T: ObjectSubclass>: crate::object::IsClass { /// Override the virtual methods of this class for the given subclass and do other class /// initialization. /// /// This is automatically called during type initialization and must call `class_init()` of the /// parent class. fn class_init(class: &mut crate::Class<Self>); /// Instance specific initialization. /// /// This is automatically called during instance initialization and must call `instance_init()` /// of the parent class. fn instance_init(instance: &mut InitializingObject<T>); } /// Trait for implementable interfaces. pub unsafe trait IsImplementable<T: ObjectSubclass>: crate::object::IsInterface where <Self as ObjectType>::GlibClassType: Copy, { /// Override the virtual methods of this interface for the given subclass and do other /// interface initialization. /// /// This is automatically called during type initialization. fn interface_init(iface: &mut crate::Interface<Self>); /// Instance specific initialization. /// /// This is automatically called during instance initialization. 
fn instance_init(_instance: &mut InitializingObject<T>); } unsafe extern "C" fn interface_init<T: ObjectSubclass, A: IsImplementable<T>>( iface: ffi::gpointer, _iface_data: ffi::gpointer, ) where <A as ObjectType>::GlibClassType: Copy, { let iface = &mut *(iface as *mut crate::Interface<A>); let mut data = T::type_data(); if data.as_ref().parent_ifaces.is_none() { data.as_mut().parent_ifaces = Some(HashMap::new()); } { let copy = Box::new(*iface.as_ref()); data.as_mut() .parent_ifaces .as_mut() .unwrap() .insert(A::static_type(), Box::into_raw(copy) as ffi::gpointer); } A::interface_init(iface); } /// Trait for a type list of interfaces. pub trait InterfaceList<T: ObjectSubclass> { /// Returns the list of types and corresponding interface infos for this list. fn iface_infos() -> Vec<(ffi::GType, gobject_ffi::GInterfaceInfo)>; /// Runs `instance_init` on each of the `IsImplementable` items. fn instance_init(_instance: &mut InitializingObject<T>); } impl<T: ObjectSubclass> InterfaceList<T> for () { fn iface_infos() -> Vec<(ffi::GType, gobject_ffi::GInterfaceInfo)> { vec![] } fn instance_init(_instance: &mut InitializingObject<T>) {} } impl<T: ObjectSubclass, A: IsImplementable<T>> InterfaceList<T> for (A,) where <A as ObjectType>::GlibClassType: Copy, { fn iface_infos() -> Vec<(ffi::GType, gobject_ffi::GInterfaceInfo)> { vec![( A::static_type().to_glib(), gobject_ffi::GInterfaceInfo { interface_init: Some(interface_init::<T, A>), interface_finalize: None, interface_data: ptr::null_mut(), }, )] } fn instance_init(instance: &mut InitializingObject<T>) { A::instance_init(instance); } } // Generates all the InterfaceList impls for interface_lists of arbitrary sizes based on a list of type // parameters like A B C. It would generate the impl then for (A, B) and (A, B, C). macro_rules! 
interface_list_trait( ($name1:ident, $name2: ident, $($name:ident),*) => ( interface_list_trait!(__impl $name1, $name2; $($name),*); ); (__impl $($name:ident),+; $name1:ident, $($name2:ident),*) => ( interface_list_trait_impl!($($name),+); interface_list_trait!(__impl $($name),+ , $name1; $($name2),*); ); (__impl $($name:ident),+; $name1:ident) => ( interface_list_trait_impl!($($name),+); interface_list_trait_impl!($($name),+, $name1); ); ); // Generates the impl block for InterfaceList on interface_lists or arbitrary sizes based on its // arguments. Takes a list of type parameters as parameters, e.g. A B C // and then implements the trait on (A, B, C). macro_rules! interface_list_trait_impl( ($($name:ident),+) => ( impl<T: ObjectSubclass, $($name: IsImplementable<T>),+> InterfaceList<T> for ( $($name),+ ) where $(<$name as ObjectType>::GlibClassType: Copy),+ { fn iface_infos() -> Vec<(ffi::GType, gobject_ffi::GInterfaceInfo)> { vec![ $( ( $name::static_type().to_glib(), gobject_ffi::GInterfaceInfo { interface_init: Some(interface_init::<T, $name>), interface_finalize: None, interface_data: ptr::null_mut(), }, ) ),+ ] } fn instance_init(instance: &mut InitializingObject<T>) { $( $name::instance_init(instance); )+ } } ); ); interface_list_trait!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S); /// Type-specific data that is filled in during type creation. // FIXME: Once trait bounds other than `Sized` on const fn parameters are stable // the content of `TypeData` can be made private and we can add a `const fn new` // for initialization by the `object_subclass_internal!` macro. 
pub struct TypeData { #[doc(hidden)] pub type_: Type, #[doc(hidden)] pub parent_class: ffi::gpointer, #[doc(hidden)] pub parent_ifaces: Option<HashMap<Type, ffi::gpointer>>, #[doc(hidden)] pub class_data: Option<HashMap<Type, Box<dyn Any + Send + Sync>>>, #[doc(hidden)] pub private_offset: isize, #[doc(hidden)] pub private_imp_offset: isize, } unsafe impl Send for TypeData {} unsafe impl Sync for TypeData {} impl TypeData { /// Returns the type ID. pub fn get_type(&self) -> Type { self.type_ } /// Returns a pointer to the native parent class. /// /// This is used for chaining up to the parent class' implementation /// of virtual methods. pub fn get_parent_class(&self) -> ffi::gpointer { debug_assert!(!self.parent_class.is_null()); self.parent_class } /// Returns a pointer to the native parent interface struct for interface `type_`. /// /// This is used for chaining up to the parent interface's implementation /// of virtual methods. /// /// # Panics /// /// This function panics if the type to which the `TypeData` belongs does not implement the /// given interface or was not registered yet. pub fn get_parent_interface<I: crate::object::IsInterface>(&self) -> ffi::gpointer { match self.parent_ifaces { None => unreachable!("No parent interfaces"), Some(ref parent_ifaces) => *parent_ifaces .get(&I::static_type()) .expect("Parent interface not found"), } } /// Returns a pointer to the class implementation specific data. /// /// This is used for class implementations to store additional data. pub fn get_class_data<T: Any + Send + Sync + 'static>(&self, type_: Type) -> Option<&T> { match self.class_data { None => None, Some(ref data) => data.get(&type_).and_then(|ptr| ptr.downcast_ref()), } } /// Gets a mutable reference of the class implementation specific data. /// /// # Safety /// /// This can only be used while the type is being initialized. 
pub unsafe fn get_class_data_mut<T: Any + Send + Sync + 'static>( &mut self, type_: Type, ) -> Option<&mut T> { match self.class_data { None => None, Some(ref mut data) => data.get_mut(&type_).and_then(|v| v.downcast_mut()), } } /// Sets class specific implementation data. /// /// # Safety /// /// This can only be used while the type is being initialized. /// /// # Panics /// /// If the class_data already contains a data for the specified `type_`. pub unsafe fn set_class_data<T: Any + Send + Sync + 'static>(&mut self, type_: Type, data: T) { if self.class_data.is_none() { self.class_data = Some(HashMap::new()); } if let Some(ref mut class_data) = self.class_data { if class_data.get(&type_).is_some() { panic!("The class_data already contains a key for {}", type_); } class_data.insert(type_, Box::new(data)); } } /// Returns the offset of the private implementation struct in bytes relative to the beginning /// of the instance struct. pub fn get_impl_offset(&self) -> isize { self.private_offset + self.private_imp_offset } } /// Type methods required for an [`ObjectSubclass`] implementation. /// /// This is usually generated by the [`#[object_subclass]`](crate::object_subclass) attribute macro. pub unsafe trait ObjectSubclassType { /// Storage for the type-specific data used during registration. fn type_data() -> ptr::NonNull<TypeData>; /// Returns the `glib::Type` ID of the subclass. /// /// This will register the type with the type system on the first call. fn get_type() -> Type; } /// The central trait for subclassing a `GObject` type. /// /// Links together the type name, parent type and the instance and /// class structs for type registration and allows subclasses to /// hook into various steps of the type registration and initialization. /// /// See [`register_type`] for registering an implementation of this trait /// with the type system. 
/// /// [`register_type`]: fn.register_type.html pub trait ObjectSubclass: ObjectSubclassType + Sized + 'static { /// `GObject` type name. /// /// This must be unique in the whole process. const NAME: &'static str; /// If this subclass is an abstract class or not. /// /// By default all subclasses are non-abstract types but setting this to `true` will create an /// abstract class instead. /// /// Abstract classes can't be instantiated and require a non-abstract subclass. /// /// Optional. const ABSTRACT: bool = false; /// Wrapper around this subclass defined with `wrapper!` type Type: ObjectType + ObjectSubclassIs<Subclass = Self> + FromGlibPtrFull<*mut <Self::Type as ObjectType>::GlibType> + FromGlibPtrBorrow<*mut <Self::Type as ObjectType>::GlibType> + FromGlibPtrNone<*mut <Self::Type as ObjectType>::GlibType>; /// Parent Rust type to inherit from. type ParentType: IsSubclassable<Self> + FromGlibPtrFull<*mut <Self::ParentType as ObjectType>::GlibType> + FromGlibPtrBorrow<*mut <Self::ParentType as ObjectType>::GlibType> + FromGlibPtrNone<*mut <Self::ParentType as ObjectType>::GlibType>; /// List of interfaces implemented by this type. type Interfaces: InterfaceList<Self>; /// The C instance struct. /// /// See [`basic::InstanceStruct`] for an basic instance struct that should be /// used in most cases. /// /// [`basic::InstanceStruct`]: ../basic/struct.InstanceStruct.html // TODO: Should default to basic::InstanceStruct<Self> once associated // type defaults are stabilized https://github.com/rust-lang/rust/issues/29661 type Instance: InstanceStruct<Type = Self>; /// The C class struct. /// /// See [`basic::ClassStruct`] for an basic instance struct that should be /// used in most cases. 
/// /// [`basic::ClassStruct`]: ../basic/struct.ClassStruct.html // TODO: Should default to basic::ClassStruct<Self> once associated // type defaults are stabilized https://github.com/rust-lang/rust/issues/29661 type Class: ClassStruct<Type = Self>; /// Additional type initialization. /// /// This is called right after the type was registered and allows /// subclasses to do additional type-specific initialization, e.g. /// for implementing `GObject` interfaces. /// /// Optional fn type_init(_type_: &mut InitializingType<Self>) {} /// Class initialization. /// /// This is called after `type_init` and before the first instance /// of the subclass is created. Subclasses can use this to do class- /// specific initialization, e.g. for registering signals on the class /// or calling class methods. /// /// Optional fn class_init(_klass: &mut Self::Class) {} /// Constructor. /// /// This is called during object instantiation before further subclasses /// are initialized, and should return a new instance of the subclass /// private struct. /// /// Optional, either implement this or `with_class()`. fn new() -> Self { unimplemented!(); } /// Constructor. /// /// This is called during object instantiation before further subclasses /// are initialized, and should return a new instance of the subclass /// private struct. /// /// Different to `new()` above it also gets the class of this type passed /// to itself for providing additional context. /// /// Optional, either implement this or `new()`. fn with_class(_klass: &Self::Class) -> Self { Self::new() } /// Performs additional instance initialization. /// /// Called just after `with_class()`. At this point the initialization has not completed yet, so /// only a limited set of operations is safe (see `InitializingObject`). fn instance_init(_obj: &InitializingObject<Self>) {} } /// Extension methods for all `ObjectSubclass` impls. pub trait ObjectSubclassExt: ObjectSubclass { /// Returns the corresponding object instance. 
fn get_instance(&self) -> Self::Type; /// Returns the implementation from an instance. fn from_instance(obj: &Self::Type) -> &Self; /// Returns a pointer to the instance implementation specific data. /// /// This is used for the subclassing infrastructure to store additional instance data. fn get_instance_data<U: Any + Send + Sync + 'static>(&self, type_: Type) -> Option<&U>; } impl<T: ObjectSubclass> ObjectSubclassExt for T { fn get_instance(&self) -> Self::Type { unsafe { let data = Self::type_data(); let type_ = data.as_ref().get_type(); assert!(type_.is_valid()); let offset = -data.as_ref().get_impl_offset(); let ptr = self as *const Self as *const u8; let ptr = ptr.offset(offset); let ptr = ptr as *mut u8 as *mut <Self::Type as ObjectType>::GlibType; // The object might just be finalized, and in that case it's unsafe to access // it and use any API on it. This can only happen from inside the Drop impl // of Self. assert_ne!((*(ptr as *mut gobject_ffi::GObject)).ref_count, 0); // Don't steal floating reference here via from_glib_none() but // preserve it if needed by reffing manually. gobject_ffi::g_object_ref(ptr as *mut gobject_ffi::GObject); from_glib_full(ptr) } } fn from_instance(obj: &Self::Type) -> &Self { unsafe { let ptr = obj.as_ptr() as *const Self::Instance; (*ptr).get_impl() } } /// Returns a pointer to the instance implementation specific data. /// /// This is used for the subclassing infrastructure to store additional instance data. 
fn get_instance_data<U: Any + Send + Sync + 'static>(&self, type_: Type) -> Option<&U> { unsafe { let type_data = Self::type_data(); let self_type_ = type_data.as_ref().get_type(); assert!(self_type_.is_valid()); let offset = -type_data.as_ref().private_imp_offset; let ptr = self as *const Self as *const u8; let ptr = ptr.offset(offset); let ptr = ptr as *const PrivateStruct<Self>; let priv_ = &*ptr; match priv_.instance_data { None => None, Some(ref data) => data.get(&type_).and_then(|ptr| ptr.downcast_ref()), } } } } /// An object that is currently being initialized. /// /// Binding crates should use traits for adding methods to this struct. Only methods explicitly safe /// to call during `instance_init()` should be added. pub struct InitializingObject<T: ObjectSubclass>(Borrowed<T::Type>); impl<T: ObjectSubclass> InitializingObject<T> { /// Returns a reference to the object. /// /// # Safety /// /// The returned object has not been completely initialized at this point. Use of the object /// should be restricted to methods that are explicitly documented to be safe to call during /// `instance_init()`. pub unsafe fn as_ref(&self) -> &T::Type { &self.0 } /// Returns a pointer to the object. /// /// # Safety /// /// The returned object has not been completely initialized at this point. Use of the object /// should be restricted to methods that are explicitly documented to be safe to call during /// `instance_init()`. pub unsafe fn as_ptr(&self) -> *mut T::Type { self.0.as_ptr() as *const T::Type as *mut T::Type } /// Sets instance specific implementation data. /// /// # Panics /// /// If the instance_data already contains a data for the specified `type_`. 
pub fn set_instance_data<U: Any + Send + Sync + 'static>(&mut self, type_: Type, data: U) { unsafe { let type_data = T::type_data(); let self_type_ = type_data.as_ref().get_type(); assert!(self_type_.is_valid()); let offset = type_data.as_ref().private_offset; let ptr = self.0.as_ptr() as *mut u8; let ptr = ptr.offset(offset); let ptr = ptr as *mut PrivateStruct<T>; let priv_ = &mut *ptr; if priv_.instance_data.is_none() { priv_.instance_data = Some(HashMap::new()); } if let Some(ref mut instance_data) = priv_.instance_data { if instance_data.get(&type_).is_some() { panic!("The class_data already contains a key for {}", type_); } instance_data.insert(type_, Box::new(data)); } } } } unsafe extern "C" fn class_init<T: ObjectSubclass>( klass: ffi::gpointer, _klass_data: ffi::gpointer, ) { let mut data = T::type_data(); // We have to update the private struct offset once the class is actually // being initialized. let mut private_offset = data.as_ref().private_offset as i32; gobject_ffi::g_type_class_adjust_private_offset(klass, &mut private_offset); (*data.as_mut()).private_offset = private_offset as isize; // Set trampolines for the basic GObject virtual methods. { let gobject_klass = &mut *(klass as *mut gobject_ffi::GObjectClass); gobject_klass.finalize = Some(finalize::<T>); } // And finally peek the parent class struct (containing the parent class' // implementations of virtual methods for chaining up), and call the subclass' // class initialization function. 
{ let klass = &mut *(klass as *mut T::Class); let parent_class = gobject_ffi::g_type_class_peek_parent(klass as *mut _ as ffi::gpointer) as *mut <T::ParentType as ObjectType>::GlibClassType; assert!(!parent_class.is_null()); (*data.as_mut()).parent_class = parent_class as ffi::gpointer; klass.class_init(); T::class_init(klass); } } unsafe extern "C" fn instance_init<T: ObjectSubclass>( obj: *mut gobject_ffi::GTypeInstance, klass: ffi::gpointer, ) { // Get offset to the storage of our private struct, create it // and actually store it in that place. let mut data = T::type_data(); let private_offset = (*data.as_mut()).private_offset; let ptr = obj as *mut u8; let priv_ptr = ptr.offset(private_offset); let priv_storage = priv_ptr as *mut PrivateStruct<T>; let klass = &*(klass as *const T::Class); let imp = T::with_class(klass); ptr::write( priv_storage, PrivateStruct { imp, instance_data: None, }, ); // Any additional instance initialization. T::Instance::instance_init(&mut *(obj as *mut _)); let obj = from_glib_borrow::<_, Object>(obj.cast()); let obj = Borrowed::new(obj.into_inner().unsafe_cast()); let mut obj = InitializingObject(obj); T::Interfaces::instance_init(&mut obj); T::instance_init(&obj); } unsafe extern "C" fn finalize<T: ObjectSubclass>(obj: *mut gobject_ffi::GObject) { // Retrieve the private struct and drop it for freeing all associated memory. let mut data = T::type_data(); let private_offset = (*data.as_mut()).private_offset; let ptr = obj as *mut u8; let priv_ptr = ptr.offset(private_offset); let priv_storage = &mut *(priv_ptr as *mut PrivateStruct<T>); ptr::drop_in_place(&mut priv_storage.imp); if let Some(instance_data) = priv_storage.instance_data.take() { drop(instance_data); } // Chain up to the parent class' finalize implementation, if any. let parent_class = &*(data.as_ref().get_parent_class() as *const gobject_ffi::GObjectClass); if let Some(ref func) = parent_class.finalize { func(obj); } } /// Register a `glib::Type` ID for `T`. 
/// /// This must be called only once and will panic on a second call. /// /// The [`object_subclass!`] macro will create a `get_type()` function around this, which will /// ensure that it's only ever called once. /// /// [`object_subclass!`]: ../../macro.object_subclass.html pub fn register_type<T: ObjectSubclass>() -> Type { // GLib aligns the type private data to two gsizes so we can't safely store any type there that // requires a bigger alignment. if mem::align_of::<T>() > 2 * mem::size_of::<usize>() { panic!( "Alignment {} of type not supported, bigger than {}", mem::align_of::<T>(), 2 * mem::size_of::<usize>(), ); } unsafe { use std::ffi::CString; let type_name = CString::new(T::NAME).unwrap(); if gobject_ffi::g_type_from_name(type_name.as_ptr()) != gobject_ffi::G_TYPE_INVALID { panic!( "Type {} has already been registered", type_name.to_str().unwrap() ); } let type_ = from_glib(gobject_ffi::g_type_register_static_simple( <T::ParentType as StaticType>::static_type().to_glib(), type_name.as_ptr(), mem::size_of::<T::Class>() as u32, Some(class_init::<T>), mem::size_of::<T::Instance>() as u32, Some(instance_init::<T>), if T::ABSTRACT { gobject_ffi::G_TYPE_FLAG_ABSTRACT } else { 0 }, )); let mut data = T::type_data(); (*data.as_mut()).type_ = type_; let private_offset = gobject_ffi::g_type_add_instance_private( type_.to_glib(), mem::size_of::<PrivateStruct<T>>(), ); (*data.as_mut()).private_offset = private_offset as isize; // Get the offset from PrivateStruct<T> to the imp field in it. This has to go through // some hoops because Rust doesn't have an offsetof operator yet. 
(*data.as_mut()).private_imp_offset = { // Must not be a dangling pointer so let's create some uninitialized memory let priv_ = std::mem::MaybeUninit::<PrivateStruct<T>>::uninit(); let ptr = priv_.as_ptr(); // FIXME: Technically UB but we'd need std::ptr::raw_const for this let imp_ptr = &(*ptr).imp as *const _ as *const u8; let ptr = ptr as *const u8; imp_ptr as isize - ptr as isize }; let iface_types = T::Interfaces::iface_infos(); for (iface_type, iface_info) in iface_types { gobject_ffi::g_type_add_interface_static(type_.to_glib(), iface_type, &iface_info); } T::type_init(&mut InitializingType::<T>(type_, marker::PhantomData)); type_ } } pub(crate) unsafe fn signal_override_class_handler<F>( name: &str, type_: ffi::GType, class_handler: F, ) where F: Fn(&super::SignalClassHandlerToken, &[Value]) -> Option<Value> + Send + Sync + 'static, { let class_handler = Closure::new(move |values| { let instance = gobject_ffi::g_value_get_object(values[0].to_glib_none().0); class_handler(&super::SignalClassHandlerToken(instance as *mut _), values) }); if let Some((signal_id, _)) = SignalId::parse_name(name, from_glib(type_), false) { gobject_ffi::g_signal_override_class_closure( signal_id.to_glib(), type_, class_handler.to_glib_none().0, ); } else { panic!("Signal '{}' not found", name); } } pub(crate) unsafe fn signal_chain_from_overridden( instance: *mut gobject_ffi::GTypeInstance, token: &super::SignalClassHandlerToken, values: &[Value], ) -> Option<Value> { assert_eq!(instance, token.0); let mut result = Value::uninitialized(); gobject_ffi::g_signal_chain_from_overridden( values.as_ptr() as *mut Value as *mut gobject_ffi::GValue, result.to_glib_none_mut().0, ); Some(result).filter(|r| r.type_().is_valid() && r.type_() != Type::UNIT) }
36.124396
103
0.621544
71361aa937bf4bb00a27558a22d0f148f707edf6
3,563
// Copyright 2018 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. extern crate env_logger; extern crate grin_chain as chain; extern crate grin_core as core; extern crate grin_keychain as keychain; extern crate grin_store as store; extern crate grin_wallet as wallet; extern crate rand; use std::fs; use std::sync::Arc; use chain::Tip; use core::core::hash::Hashed; use core::core::{Block, BlockHeader}; use core::global::{self, ChainTypes}; use core::pow::{self, Difficulty}; use keychain::{ExtKeychain, Keychain}; use wallet::libtx; fn clean_output_dir(dir_name: &str) { let _ = fs::remove_dir_all(dir_name); } #[test] fn test_various_store_indices() { match env_logger::try_init() { Ok(_) => println!("Initializing env logger"), Err(e) => println!("env logger already initialized: {:?}", e), }; let chain_dir = ".grin_idx_1"; clean_output_dir(chain_dir); let keychain = ExtKeychain::from_random_seed().unwrap(); let key_id = keychain.derive_key_id(1).unwrap(); let db_env = Arc::new(store::new_env(chain_dir.to_string())); let chain_store = chain::store::ChainStore::new(db_env).unwrap(); global::set_mining_mode(ChainTypes::AutomatedTesting); let genesis = pow::mine_genesis_block().unwrap(); let reward = libtx::reward::output(&keychain, &key_id, 0, 1).unwrap(); let block = Block::new(&genesis.header, vec![], Difficulty::one(), reward).unwrap(); let block_hash = block.hash(); { let batch = chain_store.batch().unwrap(); batch.save_block(&genesis).unwrap(); batch 
.setup_height(&genesis.header, &Tip::new(genesis.hash())) .unwrap(); batch.commit().unwrap(); } { let batch = chain_store.batch().unwrap(); batch.save_block(&block).unwrap(); batch .setup_height(&block.header, &Tip::from_block(&block.header)) .unwrap(); batch.commit().unwrap(); } let block_header = chain_store.get_block_header(&block_hash).unwrap(); assert_eq!(block_header.hash(), block_hash); let block_header = chain_store.get_header_by_height(1).unwrap(); assert_eq!(block_header.hash(), block_hash); } #[test] fn test_store_header_height() { match env_logger::try_init() { Ok(_) => println!("Initializing env logger"), Err(e) => println!("env logger already initialized: {:?}", e), }; let chain_dir = ".grin_idx_2"; clean_output_dir(chain_dir); let db_env = Arc::new(store::new_env(chain_dir.to_string())); let chain_store = chain::store::ChainStore::new(db_env).unwrap(); let mut block_header = BlockHeader::default(); block_header.height = 1; { let batch = chain_store.batch().unwrap(); batch.save_block_header(&block_header).unwrap(); batch.save_header_height(&block_header).unwrap(); batch.commit().unwrap(); } let stored_block_header = chain_store.get_header_by_height(1).unwrap(); assert_eq!(block_header.hash(), stored_block_header.hash()); { let batch = chain_store.batch().unwrap(); batch.delete_header_by_height(1).unwrap(); batch.commit().unwrap(); } let result = chain_store.get_header_by_height(1); assert_eq!(result.is_err(), true); }
30.194915
85
0.718776
8ab03ea8066ca1756ae3c294e17483a56f82535a
2,011
use std::path::PathBuf;

use gtk;
use gtk::prelude::*;

/// Application menu bar: currently a single "File" menu containing an
/// "Add files" entry that opens a multi-selection file chooser.
pub struct MenuBar {
    widget: gtk::MenuBar,
}

impl MenuBar {
    /// Build the menu bar.
    ///
    /// `on_files` is invoked with the paths picked by the user whenever
    /// "File > Add files" is activated; the list is empty when the dialog
    /// is cancelled or closed.
    pub fn new<F: Fn(Vec<PathBuf>) + 'static>(parent: &gtk::Window, on_files: F) -> MenuBar {
        let menubar = gtk::MenuBar::new();

        let menu_file = {
            let menu = gtk::Menu::new();
            let item = menu_item("_File", |_| ());
            item.set_submenu(Some(&menu));

            // The chooser dialog needs a parent window; clone the GTK handle
            // so the 'static activate closure can own it.
            let parent = parent.clone();
            let item_open = menu_item("_Add files", move |_| on_files(prompt_files_add(&parent)));
            menu.add(&item_open);
            item
        };
        menubar.append(&menu_file);

        MenuBar { widget: menubar }
    }

    /// Borrow the underlying GTK widget for packing into a container.
    pub fn get_gtk_menubar(&self) -> &gtk::MenuBar {
        &self.widget
    }
}

/// Create a menu item with a mnemonic label and attach an `activate` handler.
pub fn menu_item<F: Fn(&gtk::MenuItem) + 'static>(label: &'static str, action: F) -> gtk::MenuItem {
    let menuitem = gtk::MenuItem::new_with_mnemonic(label);
    menuitem.connect_activate(action);
    menuitem
}

/// Show a modal "add files" chooser and return the selected local paths.
/// Returns an empty list when the user cancels or closes the dialog.
///
/// # Panics
///
/// Panics if the dialog reports a response code other than Ok, Cancel or
/// DeleteEvent (which cannot happen with the buttons added here).
pub fn prompt_files_add(parent: &gtk::Window) -> Vec<PathBuf> {
    let response_ok: i32 = gtk::ResponseType::Ok.into();
    let response_cancel: i32 = gtk::ResponseType::Cancel.into();
    let response_delete_event: i32 = gtk::ResponseType::DeleteEvent.into();

    let dialog = gtk::FileChooserDialog::new(None, Some(parent), gtk::FileChooserAction::Open);
    dialog.set_local_only(true);
    dialog.set_select_multiple(true);
    dialog.add_button("_Cancel", response_cancel);
    dialog.add_button("_Add", response_ok);
    dialog.set_default_response(response_ok);

    match dialog.run() {
        n if n == response_ok => {
            let filenames = dialog.get_filenames();
            dialog.destroy();
            filenames
        }
        n if n == response_cancel => {
            dialog.destroy();
            vec![]
        }
        // DeleteEvent means the window manager already closed the dialog;
        // there is nothing left to destroy.
        n if n == response_delete_event => vec![],
        n => panic!("Unexpected response type in prompt_files_add {}", n),
    }
}
30.014925
100
0.596718
db4c56e5f9f956085ec75485d8375bc740e106fe
33,751
#[cfg(not(feature = "std"))] use alloc::{string::String, vec::Vec}; #[cfg(not(feature = "std"))] use core::{convert, fmt, iter, ops, str}; #[cfg(feature = "std")] use std::{convert, fmt, iter, ops, str}; /// Size of an account address when serialized in binary. /// NB: This is different from the Base58 representation. pub const ACCOUNT_ADDRESS_SIZE: usize = 32; #[cfg(feature = "derive-serde")] use serde::{Deserialize as SerdeDeserialize, Serialize as SerdeSerialize}; /// The type of amounts on the chain #[repr(transparent)] #[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] pub struct Amount { pub micro_gtu: u64, } #[cfg(feature = "derive-serde")] impl SerdeSerialize for Amount { fn serialize<S: serde::Serializer>(&self, ser: S) -> Result<S::Ok, S::Error> { ser.serialize_str(&self.micro_gtu.to_string()) } } #[cfg(feature = "derive-serde")] impl<'de> SerdeDeserialize<'de> for Amount { fn deserialize<D: serde::de::Deserializer<'de>>(des: D) -> Result<Self, D::Error> { let s = String::deserialize(des)?; let micro_gtu = s.parse::<u64>().map_err(|e| serde::de::Error::custom(format!("{}", e)))?; Ok(Amount { micro_gtu, }) } } #[derive(Clone, Copy, Debug, Eq, PartialEq)] /// An error indicating why parsing of an amount failed. /// Since amount parsing is typically a user-facing activity /// this is fairly precise, so we can notify the user why we failed, and what /// they can do to fix it. 
pub enum AmountParseError { Overflow, ExpectedDot, ExpectedDigit, ExpectedMore, ExpectedDigitOrDot, AtMostSixDecimals, } impl fmt::Display for AmountParseError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { use AmountParseError::*; match self { Overflow => write!(f, "Amount overflow."), ExpectedDot => write!(f, "Expected dot."), ExpectedDigit => write!(f, "Expected digit."), ExpectedMore => write!(f, "Expected more input."), ExpectedDigitOrDot => write!(f, "Expected digit or dot."), AtMostSixDecimals => write!(f, "Amounts can have at most six decimals."), } } } /// Parse from string in GTU units. The input string must be of the form /// `n[.m]` where `n` and `m` are both digits. The notation `[.m]` indicates /// that that part is optional. /// /// - if `n` starts with 0 then it must be 0l /// - `m` can have at most 6 digits, and must have at least 1 /// - both `n` and `m` must be non-negative. impl str::FromStr for Amount { type Err = AmountParseError; fn from_str(v: &str) -> Result<Self, Self::Err> { let mut micro_gtu: u64 = 0; let mut after_dot = 0; let mut state = 0; for c in v.chars() { match state { 0 => { // looking at the first character. if let Some(d) = c.to_digit(10) { if d == 0 { state = 1; } else { micro_gtu = u64::from(d); state = 2; } } else { return Err(AmountParseError::ExpectedDigit); } } 1 => { // we want to be looking at a dot now (unless we reached the end, in which case // this is not reachable anyhow) if c != '.' { return Err(AmountParseError::ExpectedDot); } else { state = 3; } } 2 => { // we are reading a normal number until we hit the dot. if let Some(d) = c.to_digit(10) { micro_gtu = micro_gtu.checked_mul(10).ok_or(AmountParseError::Overflow)?; micro_gtu = micro_gtu .checked_add(u64::from(d)) .ok_or(AmountParseError::Overflow)?; } else if c == '.' { state = 3; } else { return Err(AmountParseError::ExpectedDigitOrDot); } } 3 => { // we're reading after the dot. 
if after_dot >= 6 { return Err(AmountParseError::AtMostSixDecimals); } if let Some(d) = c.to_digit(10) { micro_gtu = micro_gtu.checked_mul(10).ok_or(AmountParseError::Overflow)?; micro_gtu = micro_gtu .checked_add(u64::from(d)) .ok_or(AmountParseError::Overflow)?; after_dot += 1; } else { return Err(AmountParseError::ExpectedDigit); } } _ => unreachable!(), } } if state == 0 || state >= 3 && after_dot == 0 { return Err(AmountParseError::ExpectedMore); } for _ in 0..6 - after_dot { micro_gtu = micro_gtu.checked_mul(10).ok_or(AmountParseError::Overflow)?; } Ok(Amount { micro_gtu, }) } } impl Amount { /// Create amount from a number of microGTU #[inline(always)] pub fn from_micro_gtu(micro_gtu: u64) -> Amount { Amount { micro_gtu, } } /// Create amount from a number of GTU #[inline(always)] pub fn from_gtu(gtu: u64) -> Amount { Amount { micro_gtu: gtu * 1000000, } } /// Create zero amount #[inline(always)] pub fn zero() -> Amount { Amount { micro_gtu: 0, } } /// Add a number of micro GTU to an amount #[inline(always)] pub fn add_micro_gtu(self, micro_gtu: u64) -> Amount { Amount { micro_gtu: self.micro_gtu + micro_gtu, } } /// Checked addition. 
Adds another amount and return None if overflow /// occurred #[inline(always)] pub fn checked_add(self, other: Amount) -> Option<Amount> { self.micro_gtu.checked_add(other.micro_gtu).map(Amount::from_micro_gtu) } /// Add a number of GTU to an amount #[inline(always)] pub fn add_gtu(self, gtu: u64) -> Amount { Amount { micro_gtu: self.micro_gtu + gtu * 1000000, } } /// Subtract a number of micro GTU to an amount #[inline(always)] pub fn subtract_micro_gtu(self, micro_gtu: u64) -> Amount { Amount { micro_gtu: self.micro_gtu - micro_gtu, } } /// Subtract a number of GTU to an amount #[inline(always)] pub fn subtract_gtu(self, gtu: u64) -> Amount { Amount { micro_gtu: self.micro_gtu - gtu * 1000000, } } /// Calculates the quotient and remainder of integer division #[inline(always)] pub fn quotient_remainder(self, denominator: u64) -> (Amount, Amount) { let div = Amount { micro_gtu: self.micro_gtu / denominator, }; let rem = self % denominator; (div, rem) } } impl ops::Mul<u64> for Amount { type Output = Self; #[inline(always)] fn mul(self, other: u64) -> Self::Output { Amount { micro_gtu: self.micro_gtu * other, } } } impl ops::Mul<Amount> for u64 { type Output = Amount; #[inline(always)] fn mul(self, other: Amount) -> Self::Output { Amount { micro_gtu: self * other.micro_gtu, } } } impl ops::Add<Amount> for Amount { type Output = Self; #[inline(always)] fn add(self, other: Amount) -> Self::Output { Amount { micro_gtu: self.micro_gtu + other.micro_gtu, } } } impl ops::Sub<Amount> for Amount { type Output = Self; #[inline(always)] fn sub(self, other: Amount) -> Self::Output { Amount { micro_gtu: self.micro_gtu - other.micro_gtu, } } } impl ops::Rem<u64> for Amount { type Output = Self; #[inline(always)] fn rem(self, other: u64) -> Self::Output { Amount { micro_gtu: self.micro_gtu % other, } } } impl iter::Sum for Amount { fn sum<I: Iterator<Item = Self>>(iter: I) -> Self { iter.fold(Amount::from_micro_gtu(0), ops::Add::add) } } impl ops::AddAssign for Amount { 
#[inline(always)] fn add_assign(&mut self, other: Amount) { *self = *self + other; } } impl ops::SubAssign for Amount { #[inline(always)] fn sub_assign(&mut self, other: Amount) { *self = *self - other; } } impl ops::MulAssign<u64> for Amount { #[inline(always)] fn mul_assign(&mut self, other: u64) { *self = *self * other; } } impl ops::RemAssign<u64> for Amount { #[inline(always)] fn rem_assign(&mut self, other: u64) { *self = *self % other; } } /// Timestamp represented as milliseconds since unix epoch. /// /// Timestamps from before January 1st 1970 at 00:00 are not supported. #[repr(transparent)] #[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] pub struct Timestamp { /// Milliseconds since unix epoch. pub(crate) milliseconds: u64, } impl Timestamp { /// Construct timestamp from milliseconds since unix epoch. #[inline(always)] pub fn from_timestamp_millis(milliseconds: u64) -> Self { Self { milliseconds, } } /// Milliseconds since the UNIX epoch. #[inline(always)] pub fn timestamp_millis(&self) -> u64 { self.milliseconds } /// Add duration to the timestamp. Returns `None` if the resulting timestamp /// is not representable, i.e., too far in the future. #[inline(always)] pub fn checked_add(self, duration: Duration) -> Option<Self> { self.milliseconds.checked_add(duration.milliseconds).map(Self::from_timestamp_millis) } /// Subtract duration from the timestamp. Returns `None` instead of /// overflowing if the resulting timestamp would be before the Unix /// epoch. #[inline(always)] pub fn checked_sub(self, duration: Duration) -> Option<Self> { self.milliseconds.checked_sub(duration.milliseconds).map(Self::from_timestamp_millis) } /// Compute the duration between the self and another timestamp. /// The duration is always positive, and is the difference between /// the the more recent timestamp and the one further in the past. 
#[inline(always)] pub fn duration_between(self, other: Timestamp) -> Duration { let millis = if self >= other { self.milliseconds - other.milliseconds } else { other.milliseconds - self.milliseconds }; Duration::from_millis(millis) } /// Compute duration since a given timestamp. Returns `None` if given time /// is in the future compared to self. #[inline(always)] pub fn duration_since(self, before: Timestamp) -> Option<Duration> { self.milliseconds.checked_sub(before.milliseconds).map(Duration::from_millis) } } #[cfg(feature = "derive-serde")] #[derive(Clone, Debug, Eq, PartialEq)] pub enum ParseTimestampError { ParseError(chrono::format::ParseError), BeforeUnixEpoch, } #[cfg(feature = "derive-serde")] impl fmt::Display for ParseTimestampError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { use ParseTimestampError::*; match self { ParseError(err) => err.fmt(f), BeforeUnixEpoch => write!(f, "Timestamp is before January 1st 1970 00:00."), } } } #[cfg(feature = "derive-serde")] /// The FromStr parses the time according to RFC3339. impl str::FromStr for Timestamp { type Err = ParseTimestampError; fn from_str(s: &str) -> Result<Self, Self::Err> { use convert::TryInto; let datetime = chrono::DateTime::parse_from_rfc3339(s).map_err(ParseTimestampError::ParseError)?; let millis = datetime .timestamp_millis() .try_into() .map_err(|_| ParseTimestampError::BeforeUnixEpoch)?; Ok(Timestamp::from_timestamp_millis(millis)) } } #[cfg(feature = "derive-serde")] /// The display implementation displays the timestamp according to RFC3339 /// format in the UTC time zone. impl fmt::Display for Timestamp { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { use chrono::offset::TimeZone; let time = self.timestamp_millis() as i64; let date = chrono::Utc.timestamp_millis(time); write!(f, "{}", date.to_rfc3339()) } } #[cfg(feature = "derive-serde")] /// The JSON serialization serialized the string obtained by using the Display /// implementation of the Timestamp. 
impl SerdeSerialize for Timestamp { fn serialize<S: serde::Serializer>(&self, ser: S) -> Result<S::Ok, S::Error> { ser.serialize_str(&self.to_string()) } } #[cfg(feature = "derive-serde")] /// Deserialize from a string via the RFC3339 format. impl<'de> SerdeDeserialize<'de> for Timestamp { fn deserialize<D: serde::de::Deserializer<'de>>(des: D) -> Result<Self, D::Error> { let s = String::deserialize(des)?; let t = str::FromStr::from_str(&s).map_err(serde::de::Error::custom)?; Ok(t) } } /// Duration of time in milliseconds. /// /// Negative durations are not allowed. #[repr(transparent)] #[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] pub struct Duration { pub(crate) milliseconds: u64, } impl Duration { /// Construct duration from milliseconds. #[inline(always)] pub fn from_millis(milliseconds: u64) -> Self { Self { milliseconds, } } /// Construct duration from seconds. #[inline(always)] pub fn from_seconds(seconds: u64) -> Self { Self::from_millis(seconds * 1000) } /// Construct duration from minutes. #[inline(always)] pub fn from_minutes(minutes: u64) -> Self { Self::from_millis(minutes * 1000 * 60) } /// Construct duration from hours. #[inline(always)] pub fn from_hours(hours: u64) -> Self { Self::from_millis(hours * 1000 * 60 * 60) } /// Construct duration from days. #[inline(always)] pub fn from_days(days: u64) -> Self { Self::from_millis(days * 1000 * 60 * 60 * 24) } /// Get number of milliseconds in the duration. #[inline(always)] pub fn millis(&self) -> u64 { self.milliseconds } /// Get number of seconds in the duration. #[inline(always)] pub fn seconds(&self) -> u64 { self.milliseconds / 1000 } /// Get number of minutes in the duration. #[inline(always)] pub fn minutes(&self) -> u64 { self.milliseconds / (1000 * 60) } /// Get number of hours in the duration. #[inline(always)] pub fn hours(&self) -> u64 { self.milliseconds / (1000 * 60 * 60) } /// Get number of days in the duration. 
#[inline(always)] pub fn days(&self) -> u64 { self.milliseconds / (1000 * 60 * 60 * 24) } /// Add duration. Returns `None` instead of overflowing. #[inline(always)] pub fn checked_add(self, other: Duration) -> Option<Self> { self.milliseconds.checked_add(other.milliseconds).map(Self::from_millis) } /// Subtract duration. Returns `None` instead of overflowing. #[inline(always)] pub fn checked_sub(self, other: Duration) -> Option<Self> { self.milliseconds.checked_sub(other.milliseconds).map(Self::from_millis) } } #[derive(Debug, Clone, PartialEq, Eq)] pub enum ParseDurationError { MissingUnit, FailedParsingNumber, InvalidUnit(String), } impl fmt::Display for ParseDurationError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { use ParseDurationError::*; match self { MissingUnit => write!(f, "Missing unit on duration measure."), FailedParsingNumber => write!(f, "Failed parsing number"), InvalidUnit(s) => write!(f, "Unknown unit \"{}\".", s), } } } /// Parse a string containing a list of duration measures separated by /// whitespaces. A measure is a number followed by the unit (no whitespace /// between is allowed). Every measure is accumulated into a duration. The /// string is allowed to contain any number of measures with the same unit in no /// particular order. 
/// /// The supported units are: /// - `ms` for milliseconds /// - `s` for seconds /// - `m` for minutes /// - `h` for hours /// - `d` for days /// /// # Example /// The duration of 10 days, 1 hour, 2minutes and 7 seconds is: /// ```ignore /// "10d 1h 2m 3s 4s" /// ``` impl str::FromStr for Duration { type Err = ParseDurationError; fn from_str(s: &str) -> Result<Self, Self::Err> { use ParseDurationError::*; let mut duration = 0; for measure in s.split_whitespace() { let split_index = measure.find(|c: char| !c.is_ascii_digit()).ok_or(MissingUnit)?; let (n, unit) = measure.split_at(split_index); let n: u64 = n.parse().map_err(|_| FailedParsingNumber)?; let unit: u64 = match unit { "ms" => 1, "s" => 1000, "m" => 1000 * 60, "h" => 1000 * 60 * 60, "d" => 1000 * 60 * 60 * 24, other => return Err(InvalidUnit(String::from(other))), }; duration += n * unit; } Ok(Duration::from_millis(duration)) } } impl fmt::Display for Duration { fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { let days = self.days(); let hours = Duration::from_millis(self.millis() % (1000 * 60 * 60 * 24)).hours(); let minutes = Duration::from_millis(self.millis() % (1000 * 60 * 60)).minutes(); let seconds = Duration::from_millis(self.millis() % (1000 * 60)).seconds(); let milliseconds = Duration::from_millis(self.millis() % 1000).millis(); write!(formatter, "{}d {}h {}m {}s {}ms", days, hours, minutes, seconds, milliseconds) } } /// Address of an account, as raw bytes. #[derive(Eq, PartialEq, Copy, Clone, PartialOrd, Ord, Debug)] pub struct AccountAddress(pub [u8; ACCOUNT_ADDRESS_SIZE]); impl convert::AsRef<[u8; 32]> for AccountAddress { fn as_ref(&self) -> &[u8; 32] { &self.0 } } impl convert::AsRef<[u8]> for AccountAddress { fn as_ref(&self) -> &[u8] { &self.0 } } /// Address of a contract. 
#[derive(Eq, PartialEq, Copy, Clone, Debug)] #[cfg_attr(feature = "derive-serde", derive(SerdeSerialize, SerdeDeserialize))] pub struct ContractAddress { pub index: u64, pub subindex: u64, } /// Either an address of an account, or contract. #[cfg_attr( feature = "derive-serde", derive(SerdeSerialize, SerdeDeserialize), serde(tag = "type", content = "address", rename_all = "lowercase") )] #[derive(PartialEq, Eq, Copy, Clone)] pub enum Address { Account(AccountAddress), Contract(ContractAddress), } /// Genesis block has slot number 0, and otherwise it is always the case that a /// parent of a block has a slot number strictly less than the block itself. /// However in contrast to `BlockHeight`, slot numbers are not strictly /// sequential, there will be gaps. pub type SlotNumber = u64; /// Height of the block. Height of the genesis block is 0, and otherwise it is /// always the case that a block has height one more than its parent. pub type BlockHeight = u64; /// Finalized height. In the context of chain metadata this is the height of the /// block which is explicitly recorded as the last finalized block in the block /// under consideration. pub type FinalizedHeight = u64; /// Time at the beginning of the current slot, in miliseconds since unix epoch. pub type SlotTime = Timestamp; /// Chain metadata accessible to both receive and init methods. #[cfg_attr( feature = "derive-serde", derive(SerdeSerialize, SerdeDeserialize), serde(rename_all = "camelCase") )] pub struct ChainMetadata { pub slot_time: SlotTime, } /// Add offset tracking inside a data structure. pub struct Cursor<T> { pub offset: usize, pub data: T, } /// Tag of an attribute. See the module [attributes](./attributes/index.html) /// for the currently supported attributes. #[repr(transparent)] #[derive(Clone, Copy, Debug, Ord, PartialOrd, Eq, PartialEq)] pub struct AttributeTag(pub u8); /// A borrowed attribute value. The slice will have at most 31 bytes. 
/// The meaning of the bytes is dependent on the type of the attribute. pub type AttributeValue<'a> = &'a [u8]; /// An owned counterpart of `AttributeValue`, more convenient for testing. pub type OwnedAttributeValue = Vec<u8>; /// A policy with a vector of attributes, fully allocated and owned. /// This is in contrast to a policy which is lazily read from a read source. /// The latter is useful for efficiency, this type is more useful for testing /// since the values are easier to construct. pub type OwnedPolicy = Policy<Vec<(AttributeTag, OwnedAttributeValue)>>; /// Index of the identity provider on the chain. /// An identity provider with the given index will not be replaced, /// so this is a stable identifier. pub type IdentityProvider = u32; /// Policy on the credential of the account. /// /// This is one of the key features of the Concordium blockchain. Each account /// on the chain is backed by an identity. The policy is verified and signed by /// the identity provider before an account can be created on the chain. /// /// The type is parameterized by the choice of `Attributes`. These are either /// borrowed or owned, in the form of an iterator over key-value pairs or a /// vector of such. This flexibility is needed so that attributes can be /// accessed efficiently, as well as constructed conveniently for testing. #[derive(Debug, Clone)] pub struct Policy<Attributes> { /// Identity of the identity provider who signed the identity object that /// this policy is derived from. pub identity_provider: IdentityProvider, /// Timestamp at the beginning of the month when the identity object backing /// this policy was created. This timestamp has very coarse granularity /// in order for the identity provider to not be able to link identities /// they have created with accounts that users created on the chain. /// as a timestamp (which has millisecond granularity) in order to make it /// easier to compare with, e.g., `slot_time`. 
pub created_at: Timestamp, /// Beginning of the month where the identity is __no longer valid__. pub valid_to: Timestamp, /// List of attributes, in ascending order of the tag. pub items: Attributes, } /// This implementation of deserialize is only useful when used /// to deserialize JSON. Other formats could be implemented in the future. #[cfg(feature = "derive-serde")] impl<'de> SerdeDeserialize<'de> for OwnedPolicy { fn deserialize<D>(deserializer: D) -> Result<OwnedPolicy, D::Error> where D: serde::Deserializer<'de>, { deserializer.deserialize_map(policy_json::OwnedPolicyVisitor) } } #[cfg(feature = "derive-serde")] mod policy_json { use super::*; use convert::{TryFrom, TryInto}; pub(crate) struct OwnedPolicyVisitor; impl<'de> serde::de::Visitor<'de> for OwnedPolicyVisitor { type Value = OwnedPolicy; fn visit_map<A: serde::de::MapAccess<'de>>( self, mut map: A, ) -> Result<Self::Value, A::Error> { let mut idp = None; let mut ca = None; let mut vt = None; let mut items = Vec::new(); let parse_date = |s: &str| { if !s.chars().all(|c| c.is_numeric() && c.is_ascii()) || s.len() != 6 { return Err(serde::de::Error::custom("Incorrect YYYYMM format.")); } let (s_year, s_month) = s.split_at(4); let year = s_year.parse::<u16>().map_err(|_| serde::de::Error::custom("Invalid year."))?; let month = s_month .parse::<u8>() .map_err(|_| serde::de::Error::custom("Invalid month."))?; if month > 12 { return Err(serde::de::Error::custom("Month out of range.")); } if year < 1000 { return Err(serde::de::Error::custom("Year out of range.")); } let dt = chrono::naive::NaiveDate::from_ymd(i32::from(year), u32::from(month), 1) .and_hms(0, 0, 0); let timestamp: u64 = dt.timestamp_millis().try_into().map_err(|_| { serde::de::Error::custom("Times before 1970 are not supported.") })?; Ok(timestamp) }; while let Some((k, v)) = map.next_entry::<String, serde_json::Value>()? 
{ match k.as_str() { "identityProvider" => { idp = Some(serde_json::from_value(v).map_err(|_| { serde::de::Error::custom("Unsupported identity provider value.") })?) } "createdAt" => { if let Some(s) = v.as_str() { ca = Some(parse_date(s)?); } else { return Err(serde::de::Error::custom("Unsupported creation format.")); } } "validTo" => { if let Some(s) = v.as_str() { vt = Some(parse_date(s)?); } else { return Err(serde::de::Error::custom("Unsupported valid to format.")); } } s => { if let Ok(tag) = AttributeTag::try_from(s) { match v { serde_json::Value::String(value_string) if value_string.as_bytes().len() <= 31 => { items.push((tag, value_string.into_bytes())) } _ => { return Err(serde::de::Error::custom( "Invalid attribute value. Attributes must be at most 31 \ characters in utf8 encoding.", )) } } } // ignore this value otherwise. } } } let identity_provider = idp.ok_or_else(|| serde::de::Error::custom("Missing field 'identityProvider'"))?; let created_at = ca.ok_or_else(|| serde::de::Error::custom("Missing field 'createdAt'"))?; let valid_to = vt.ok_or_else(|| serde::de::Error::custom("Missing field 'validTo'"))?; Ok(Policy { identity_provider, created_at: Timestamp::from_timestamp_millis(created_at), valid_to: Timestamp::from_timestamp_millis(valid_to), items, }) } fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { formatter.write_str("an object representing a policy.") } } } /// Currently defined attributes possible in a policy. pub mod attributes { // NB: These names and values must match the rest of the Concordium ecosystem. 
use super::{convert, AttributeTag}; pub const FIRST_NAME: AttributeTag = AttributeTag(0u8); pub const LAST_NAME: AttributeTag = AttributeTag(1u8); pub const SEX: AttributeTag = AttributeTag(2u8); pub const DOB: AttributeTag = AttributeTag(3u8); pub const COUNTRY_OF_RESIDENCE: AttributeTag = AttributeTag(4u8); pub const NATIONALITY: AttributeTag = AttributeTag(5u8); pub const ID_DOC_TYPE: AttributeTag = AttributeTag(6u8); pub const ID_DOC_NUMBER: AttributeTag = AttributeTag(7u8); pub const ID_DOC_ISSUER: AttributeTag = AttributeTag(8u8); pub const ID_DOC_ISSUED_AT: AttributeTag = AttributeTag(9u8); pub const ID_DOC_EXPIRES_AT: AttributeTag = AttributeTag(10u8); pub const NATIONAL_ID_NO: AttributeTag = AttributeTag(11u8); pub const TAX_ID_NO: AttributeTag = AttributeTag(12u8); // NB: These names must match the rest of the Concordium ecosystem. impl<'a> convert::TryFrom<&'a str> for AttributeTag { type Error = super::ParseError; fn try_from(v: &'a str) -> Result<Self, Self::Error> { match v { "firstName" => Ok(FIRST_NAME), "lastName" => Ok(LAST_NAME), "sex" => Ok(SEX), "dob" => Ok(DOB), "countryOfResidence" => Ok(COUNTRY_OF_RESIDENCE), "nationality" => Ok(NATIONALITY), "idDocType" => Ok(ID_DOC_TYPE), "idDocNo" => Ok(ID_DOC_NUMBER), "idDocIssuer" => Ok(ID_DOC_ISSUER), "idDocIssuedAt" => Ok(ID_DOC_ISSUED_AT), "idDocExpiresAt" => Ok(ID_DOC_EXPIRES_AT), "nationalIdNo" => Ok(NATIONAL_ID_NO), "taxIdNo" => Ok(TAX_ID_NO), _ => Err(super::ParseError {}), } } } } /// Zero-sized type to represent an error when reading bytes and deserializing. /// /// When using custom error types in your smart contract, it is convenient to /// implement the trait `From<ParseError>` for you custom error type, to allow /// using the `?` operator when deserializing bytes, such as the contract state /// or parameters. 
/// /// ```ignore /// enum MyCustomReceiveError { /// Parsing /// } /// /// impl From<ParseError> for MyCustomReceiveError { /// fn from(_: ParseError) -> Self { MyCustomReceiveError::ParseParams } /// } /// /// #[receive(contract = "mycontract", name="some_receive_name")] /// fn contract_receive<R: HasReceiveContext<()>, L: HasLogger, A: HasActions>( /// ctx: &R, /// receive_amount: Amount, /// logger: &mut L, /// state: &mut State, /// ) -> Result<A, MyCustomReceiveError> { /// ... /// let msg: MyParameterType = ctx.parameter_cursor().get()?; /// ... /// } /// ``` #[derive(Debug, Default, PartialEq, Eq)] pub struct ParseError {} /// A type alias used to indicate that the value is a result /// of parsing from binary via the `Serial` instance. pub type ParseResult<A> = Result<A, ParseError>; #[cfg(feature = "derive-serde")] mod serde_impl { // FIXME: This is duplicated from crypto/id/types. use super::*; use base58check::*; use serde::{de, de::Visitor, Deserializer, Serializer}; use std::fmt; // Parse from string assuming base58 check encoding. 
impl str::FromStr for AccountAddress { type Err = (); fn from_str(v: &str) -> Result<Self, Self::Err> { let (version, body) = v.from_base58check().map_err(|_| ())?; if version == 1 && body.len() == ACCOUNT_ADDRESS_SIZE { let mut buf = [0u8; ACCOUNT_ADDRESS_SIZE]; buf.copy_from_slice(&body); Ok(AccountAddress(buf)) } else { Err(()) } } } impl fmt::Display for AccountAddress { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", self.0.to_base58check(1)) } } impl SerdeSerialize for AccountAddress { fn serialize<S: Serializer>(&self, ser: S) -> Result<S::Ok, S::Error> { let b58_str = self.to_string(); ser.serialize_str(&b58_str) } } impl<'de> SerdeDeserialize<'de> for AccountAddress { fn deserialize<D: Deserializer<'de>>(des: D) -> Result<Self, D::Error> { des.deserialize_str(Base58Visitor) } } impl fmt::Display for ContractAddress { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "<{},{}>", self.index, self.subindex) } } impl fmt::Display for Amount { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let q = self.micro_gtu / 1000000; let r = self.micro_gtu % 1000000; if r == 0 { write!(f, "{}.0", q) } else { write!(f, "{}.{:06}", q, r) } } } struct Base58Visitor; impl<'de> Visitor<'de> for Base58Visitor { type Value = AccountAddress; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { write!(formatter, "A base58 string, version 1.") } fn visit_str<E: de::Error>(self, v: &str) -> Result<Self::Value, E> { v.parse::<AccountAddress>().map_err(|_| de::Error::custom("Wrong Base58 version.")) } } } #[cfg(test)] mod test { use super::*; use std::str::FromStr; #[test] fn test_duration_from_string_simple() { let duration = Duration::from_str("12d 1h 39s 3m 2h").unwrap(); assert_eq!( duration.millis(), 1000 * 60 * 60 * 24 * 12 // 12d + 1000 * 60 * 60 // 1h + 1000 * 39 // 39s + 1000 * 60 * 3 // 3m + 1000 * 60 * 60 * 2 // 2h ) } }
34.723251
99
0.575094
09801769a15e7585c70df449ec81d6f426997fbc
14,485
// This file is part of the uutils coreutils package. // // (c) Jian Zeng <[email protected]> // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // spell-checker:ignore (ToDO) COMFOLLOW Chowner Passwd RFILE RFILE's derefer dgid duid #[macro_use] extern crate uucore; pub use uucore::entries::{self, Group, Locate, Passwd}; use uucore::fs::resolve_relative_path; use uucore::libc::{self, gid_t, lchown, uid_t}; extern crate walkdir; use walkdir::WalkDir; use std::fs::{self, Metadata}; use std::os::unix::fs::MetadataExt; use std::io; use std::io::Result as IOResult; use std::convert::AsRef; use std::path::Path; use std::ffi::CString; use std::os::unix::ffi::OsStrExt; static SYNTAX: &str = "[OPTION]... [OWNER][:[GROUP]] FILE...\n chown [OPTION]... --reference=RFILE FILE..."; static SUMMARY: &str = "change file owner and group"; const FTS_COMFOLLOW: u8 = 1; const FTS_PHYSICAL: u8 = 1 << 1; const FTS_LOGICAL: u8 = 1 << 2; pub fn uumain(args: impl uucore::Args) -> i32 { let args = args.collect_str(); let mut opts = app!(SYNTAX, SUMMARY, ""); opts.optflag("c", "changes", "like verbose but report only when a change is made") .optflag("f", "silent", "") .optflag("", "quiet", "suppress most error messages") .optflag("v", "verbose", "output a diagnostic for every file processed") .optflag("", "dereference", "affect the referent of each symbolic link (this is the default), rather than the symbolic link itself") .optflag("h", "no-dereference", "affect symbolic links instead of any referenced file (useful only on systems that can change the ownership of a symlink)") .optopt("", "from", "change the owner and/or group of each file only if its current owner and/or group match those specified here. 
Either may be omitted, in which case a match is not required for the omitted attribute", "CURRENT_OWNER:CURRENT_GROUP") .optopt("", "reference", "use RFILE's owner and group rather than specifying OWNER:GROUP values", "RFILE") .optflag("", "no-preserve-root", "do not treat '/' specially (the default)") .optflag("", "preserve-root", "fail to operate recursively on '/'") .optflag("R", "recursive", "operate on files and directories recursively") .optflag("H", "", "if a command line argument is a symbolic link to a directory, traverse it") .optflag("L", "", "traverse every symbolic link to a directory encountered") .optflag("P", "", "do not traverse any symbolic links (default)"); let mut bit_flag = FTS_PHYSICAL; let mut preserve_root = false; let mut derefer = -1; let flags: &[char] = &['H', 'L', 'P']; for opt in &args { match opt.as_str() { // If more than one is specified, only the final one takes effect. s if s.contains(flags) => { if let Some(idx) = s.rfind(flags) { match s.chars().nth(idx).unwrap() { 'H' => bit_flag = FTS_COMFOLLOW | FTS_PHYSICAL, 'L' => bit_flag = FTS_LOGICAL, 'P' => bit_flag = FTS_PHYSICAL, _ => (), } } } "--no-preserve-root" => preserve_root = false, "--preserve-root" => preserve_root = true, "--dereference" => derefer = 1, "--no-dereference" => derefer = 0, _ => (), } } let matches = opts.parse(args); let recursive = matches.opt_present("recursive"); if recursive { if bit_flag == FTS_PHYSICAL { if derefer == 1 { show_info!("-R --dereference requires -H or -L"); return 1; } derefer = 0; } } else { bit_flag = FTS_PHYSICAL; } let verbosity = if matches.opt_present("changes") { Verbosity::Changes } else if matches.opt_present("silent") || matches.opt_present("quiet") { Verbosity::Silent } else if matches.opt_present("verbose") { Verbosity::Verbose } else { Verbosity::Normal }; let filter = if let Some(spec) = matches.opt_str("from") { match parse_spec(&spec) { Ok((Some(uid), None)) => IfFrom::User(uid), Ok((None, Some(gid))) => 
IfFrom::Group(gid), Ok((Some(uid), Some(gid))) => IfFrom::UserGroup(uid, gid), Ok((None, None)) => IfFrom::All, Err(e) => { show_info!("{}", e); return 1; } } } else { IfFrom::All }; if matches.free.is_empty() { show_usage_error!("missing operand"); return 1; } else if matches.free.len() < 2 && !matches.opt_present("reference") { show_usage_error!("missing operand after ‘{}’", matches.free[0]); return 1; } let mut files; let dest_uid: Option<u32>; let dest_gid: Option<u32>; if let Some(file) = matches.opt_str("reference") { match fs::metadata(&file) { Ok(meta) => { dest_gid = Some(meta.gid()); dest_uid = Some(meta.uid()); } Err(e) => { show_info!("failed to get attributes of '{}': {}", file, e); return 1; } } files = matches.free; } else { match parse_spec(&matches.free[0]) { Ok((u, g)) => { dest_uid = u; dest_gid = g; } Err(e) => { show_info!("{}", e); return 1; } } files = matches.free; files.remove(0); } let executor = Chowner { bit_flag, dest_uid, dest_gid, verbosity, recursive, dereference: derefer != 0, filter, preserve_root, files, }; executor.exec() } fn parse_spec(spec: &str) -> Result<(Option<u32>, Option<u32>), String> { let args = spec.split(':').collect::<Vec<_>>(); let usr_only = args.len() == 1; let grp_only = args.len() == 2 && args[0].is_empty() && !args[1].is_empty(); let usr_grp = args.len() == 2 && !args[0].is_empty() && !args[1].is_empty(); if usr_only { Ok(( Some(match Passwd::locate(args[0]) { Ok(v) => v.uid(), _ => return Err(format!("invalid user: ‘{}’", spec)), }), None, )) } else if grp_only { Ok(( None, Some(match Group::locate(args[1]) { Ok(v) => v.gid(), _ => return Err(format!("invalid group: ‘{}’", spec)), }), )) } else if usr_grp { Ok(( Some(match Passwd::locate(args[0]) { Ok(v) => v.uid(), _ => return Err(format!("invalid user: ‘{}’", spec)), }), Some(match Group::locate(args[1]) { Ok(v) => v.gid(), _ => return Err(format!("invalid group: ‘{}’", spec)), }), )) } else { Ok((None, None)) } } #[derive(PartialEq, Debug)] enum Verbosity 
{ Silent, Changes, Verbose, Normal, } enum IfFrom { All, User(u32), Group(u32), UserGroup(u32, u32), } struct Chowner { dest_uid: Option<u32>, dest_gid: Option<u32>, bit_flag: u8, verbosity: Verbosity, filter: IfFrom, files: Vec<String>, recursive: bool, preserve_root: bool, dereference: bool, } macro_rules! unwrap { ($m:expr, $e:ident, $err:block) => { match $m { Ok(meta) => meta, Err($e) => $err, } }; } impl Chowner { fn exec(&self) -> i32 { let mut ret = 0; for f in &self.files { ret |= self.traverse(f); } ret } fn chown<P: AsRef<Path>>( &self, path: P, duid: uid_t, dgid: gid_t, follow: bool, ) -> IOResult<()> { let path = path.as_ref(); let s = CString::new(path.as_os_str().as_bytes()).unwrap(); let ret = unsafe { if follow { libc::chown(s.as_ptr(), duid, dgid) } else { lchown(s.as_ptr(), duid, dgid) } }; if ret == 0 { Ok(()) } else { Err(io::Error::last_os_error()) } } fn traverse<P: AsRef<Path>>(&self, root: P) -> i32 { let follow_arg = self.dereference || self.bit_flag != FTS_PHYSICAL; let path = root.as_ref(); let meta = match self.obtain_meta(path, follow_arg) { Some(m) => m, _ => return 1, }; // Prohibit only if: // (--preserve-root and -R present) && // ( // (argument is not symlink && resolved to be '/') || // (argument is symlink && should follow argument && resolved to be '/') // ) if self.recursive && self.preserve_root { let may_exist = if follow_arg { path.canonicalize().ok() } else { let real = resolve_relative_path(path); if real.is_dir() { Some(real.canonicalize().expect("failed to get real path")) } else { Some(real.into_owned()) } }; if let Some(p) = may_exist { if p.parent().is_none() { show_info!("it is dangerous to operate recursively on '/'"); show_info!("use --no-preserve-root to override this failsafe"); return 1; } } } let ret = if self.matched(meta.uid(), meta.gid()) { self.wrap_chown(path, &meta, follow_arg) } else { 0 }; if !self.recursive { ret } else { ret | self.dive_into(&root) } } fn dive_into<P: AsRef<Path>>(&self, root: P) -> 
i32 { let mut ret = 0; let root = root.as_ref(); let follow = self.dereference || self.bit_flag & FTS_LOGICAL != 0; for entry in WalkDir::new(root).follow_links(follow).min_depth(1) { let entry = unwrap!(entry, e, { ret = 1; show_info!("{}", e); continue; }); let path = entry.path(); let meta = match self.obtain_meta(path, follow) { Some(m) => m, _ => { ret = 1; continue; } }; if !self.matched(meta.uid(), meta.gid()) { continue; } ret = self.wrap_chown(path, &meta, follow); } ret } fn obtain_meta<P: AsRef<Path>>(&self, path: P, follow: bool) -> Option<Metadata> { use self::Verbosity::*; let path = path.as_ref(); let meta = if follow { unwrap!(path.metadata(), e, { match self.verbosity { Silent => (), _ => show_info!("cannot access '{}': {}", path.display(), e), } return None; }) } else { unwrap!(path.symlink_metadata(), e, { match self.verbosity { Silent => (), _ => show_info!("cannot dereference '{}': {}", path.display(), e), } return None; }) }; Some(meta) } fn wrap_chown<P: AsRef<Path>>(&self, path: P, meta: &Metadata, follow: bool) -> i32 { use self::Verbosity::*; let mut ret = 0; let dest_uid = self.dest_uid.unwrap_or_else(|| meta.uid()); let dest_gid = self.dest_gid.unwrap_or_else(|| meta.gid()); let path = path.as_ref(); if let Err(e) = self.chown(path, dest_uid, dest_gid, follow) { match self.verbosity { Silent => (), _ => { show_info!("changing ownership of '{}': {}", path.display(), e); if self.verbosity == Verbose { println!( "failed to change ownership of {} from {}:{} to {}:{}", path.display(), entries::uid2usr(meta.uid()).unwrap(), entries::gid2grp(meta.gid()).unwrap(), entries::uid2usr(dest_uid).unwrap(), entries::gid2grp(dest_gid).unwrap() ); }; } } ret = 1; } else { let changed = dest_uid != meta.uid() || dest_gid != meta.gid(); if changed { match self.verbosity { Changes | Verbose => { println!( "changed ownership of {} from {}:{} to {}:{}", path.display(), entries::uid2usr(meta.uid()).unwrap(), entries::gid2grp(meta.gid()).unwrap(), 
entries::uid2usr(dest_uid).unwrap(), entries::gid2grp(dest_gid).unwrap() ); } _ => (), }; } else if self.verbosity == Verbose { println!( "ownership of {} retained as {}:{}", path.display(), entries::uid2usr(dest_uid).unwrap(), entries::gid2grp(dest_gid).unwrap() ); } } ret } #[inline] fn matched(&self, uid: uid_t, gid: gid_t) -> bool { match self.filter { IfFrom::All => true, IfFrom::User(u) => u == uid, IfFrom::Group(g) => g == gid, IfFrom::UserGroup(u, g) => u == uid && g == gid, } } }
31.695842
258
0.466621
72c0d7913e5252699412ba41843dcf94b9bc400d
6,689
// run-pass // ignore-wasm32-bare compiled with panic=abort by default // revisions: mir thir // [thir]compile-flags: -Zthir-unsafeck // This test checks panic emitted from `mem::{uninitialized,zeroed}`. #![feature(never_type, arbitrary_enum_discriminant)] #![allow(deprecated, invalid_value)] use std::{ mem::{self, MaybeUninit, ManuallyDrop}, panic, ptr::NonNull, num, }; #[allow(dead_code)] struct Foo { x: u8, y: !, } enum Bar {} #[allow(dead_code)] enum OneVariant { Variant(i32) } #[allow(dead_code, non_camel_case_types)] enum OneVariant_NonZero { Variant(i32, i32, num::NonZeroI32), DeadVariant(Bar), } // An `Aggregate` abi enum where 0 is not a valid discriminant. #[allow(dead_code)] #[repr(i32)] enum NoNullVariant { Variant1(i32, i32) = 1, Variant2(i32, i32) = 2, } // An enum with ScalarPair layout #[allow(dead_code)] enum LR { Left(i64), Right(i64), } #[allow(dead_code, non_camel_case_types)] enum LR_NonZero { Left(num::NonZeroI64), Right(num::NonZeroI64), } fn test_panic_msg<T>(op: impl (FnOnce() -> T) + panic::UnwindSafe, msg: &str) { let err = panic::catch_unwind(op).err(); assert_eq!( err.as_ref().and_then(|a| a.downcast_ref::<&str>()), Some(&msg) ); } fn main() { unsafe { // Uninhabited types test_panic_msg( || mem::uninitialized::<!>(), "attempted to instantiate uninhabited type `!`" ); test_panic_msg( || mem::zeroed::<!>(), "attempted to instantiate uninhabited type `!`" ); test_panic_msg( || MaybeUninit::<!>::uninit().assume_init(), "attempted to instantiate uninhabited type `!`" ); test_panic_msg( || mem::uninitialized::<Foo>(), "attempted to instantiate uninhabited type `Foo`" ); test_panic_msg( || mem::zeroed::<Foo>(), "attempted to instantiate uninhabited type `Foo`" ); test_panic_msg( || MaybeUninit::<Foo>::uninit().assume_init(), "attempted to instantiate uninhabited type `Foo`" ); test_panic_msg( || mem::uninitialized::<Bar>(), "attempted to instantiate uninhabited type `Bar`" ); test_panic_msg( || mem::zeroed::<Bar>(), "attempted to instantiate 
uninhabited type `Bar`" ); test_panic_msg( || MaybeUninit::<Bar>::uninit().assume_init(), "attempted to instantiate uninhabited type `Bar`" ); // Types that do not like zero-initialziation test_panic_msg( || mem::uninitialized::<fn()>(), "attempted to leave type `fn()` uninitialized, which is invalid" ); test_panic_msg( || mem::zeroed::<fn()>(), "attempted to zero-initialize type `fn()`, which is invalid" ); test_panic_msg( || mem::uninitialized::<*const dyn Send>(), "attempted to leave type `*const dyn std::marker::Send` uninitialized, which is invalid" ); test_panic_msg( || mem::zeroed::<*const dyn Send>(), "attempted to zero-initialize type `*const dyn std::marker::Send`, which is invalid" ); /* FIXME(#66151) we conservatively do not error here yet. test_panic_msg( || mem::uninitialized::<LR_NonZero>(), "attempted to leave type `LR_NonZero` uninitialized, which is invalid" ); test_panic_msg( || mem::zeroed::<LR_NonZero>(), "attempted to zero-initialize type `LR_NonZero`, which is invalid" ); test_panic_msg( || mem::uninitialized::<ManuallyDrop<LR_NonZero>>(), "attempted to leave type `std::mem::ManuallyDrop<LR_NonZero>` uninitialized, \ which is invalid" ); test_panic_msg( || mem::zeroed::<ManuallyDrop<LR_NonZero>>(), "attempted to zero-initialize type `std::mem::ManuallyDrop<LR_NonZero>`, \ which is invalid" ); */ test_panic_msg( || mem::uninitialized::<(NonNull<u32>, u32, u32)>(), "attempted to leave type `(std::ptr::NonNull<u32>, u32, u32)` uninitialized, \ which is invalid" ); test_panic_msg( || mem::zeroed::<(NonNull<u32>, u32, u32)>(), "attempted to zero-initialize type `(std::ptr::NonNull<u32>, u32, u32)`, \ which is invalid" ); test_panic_msg( || mem::uninitialized::<OneVariant_NonZero>(), "attempted to leave type `OneVariant_NonZero` uninitialized, \ which is invalid" ); test_panic_msg( || mem::zeroed::<OneVariant_NonZero>(), "attempted to zero-initialize type `OneVariant_NonZero`, \ which is invalid" ); test_panic_msg( || 
mem::uninitialized::<NoNullVariant>(), "attempted to leave type `NoNullVariant` uninitialized, \ which is invalid" ); test_panic_msg( || mem::zeroed::<NoNullVariant>(), "attempted to zero-initialize type `NoNullVariant`, \ which is invalid" ); // Types that can be zero, but not uninit. test_panic_msg( || mem::uninitialized::<bool>(), "attempted to leave type `bool` uninitialized, which is invalid" ); test_panic_msg( || mem::uninitialized::<LR>(), "attempted to leave type `LR` uninitialized, which is invalid" ); test_panic_msg( || mem::uninitialized::<ManuallyDrop<LR>>(), "attempted to leave type `std::mem::ManuallyDrop<LR>` uninitialized, which is invalid" ); // Some things that should work. let _val = mem::zeroed::<bool>(); let _val = mem::zeroed::<LR>(); let _val = mem::zeroed::<ManuallyDrop<LR>>(); let _val = mem::zeroed::<OneVariant>(); let _val = mem::zeroed::<Option<&'static i32>>(); let _val = mem::zeroed::<MaybeUninit<NonNull<u32>>>(); let _val = mem::uninitialized::<MaybeUninit<bool>>(); // These are UB because they have not been officially blessed, but we await the resolution // of <https://github.com/rust-lang/unsafe-code-guidelines/issues/71> before doing // anything about that. let _val = mem::uninitialized::<i32>(); let _val = mem::uninitialized::<*const ()>(); } }
32.004785
100
0.560024
2fc43dfe8e0189d3eb31d725614d65fa6c3ddeea
5,468
use crate::ffi; use crate::types::descriptor::ReflectDescriptorSet; use crate::types::image::ReflectFormat; use crate::types::op::{ReflectBuiltIn, ReflectOp}; use crate::types::traits::*; bitflags! { #[cfg_attr(feature = "serde", derive(serde::Serialize))] pub struct ReflectDecorationFlags: u32 { const NONE = 0; const BLOCK = 1; const BUFFER_BLOCK = 2; const ROW_MAJOR = 4; const COLUMN_MAJOR = 8; const BUILT_IN = 16; const NO_PERSPECTIVE = 32; const FLAT = 64; const NON_WRITABLE = 128; } } impl Default for ReflectDecorationFlags { fn default() -> Self { ReflectDecorationFlags::NONE } } bitflags! { #[cfg_attr(feature = "serde", derive(serde::Serialize))] pub struct ReflectTypeFlags: u32 { const UNDEFINED = 0; const VOID = 1; const BOOL = 2; const INT = 4; const FLOAT = 8; const VECTOR = 256; const MATRIX = 512; const EXTERNAL_IMAGE = 65536; const EXTERNAL_SAMPLER = 131_072; const EXTERNAL_SAMPLED_IMAGE = 262_144; const EXTERNAL_BLOCK = 524_288; const EXTERNAL_ACCELERATION_STRUCTURE_NV = 1_048_576; const EXTERNAL_MASK = 2_031_616; const STRUCT = 268_435_456; const ARRAY = 536_870_912; } } impl Default for ReflectTypeFlags { fn default() -> Self { ReflectTypeFlags::UNDEFINED } } bitflags! 
{ #[cfg_attr(feature = "serde", derive(serde::Serialize))] pub struct ReflectShaderStageFlags: u32 { const UNDEFINED = 0x0000_0000; const VERTEX = 0x0000_0001; const TESSELLATION_CONTROL = 0x0000_0002; const TESSELLATION_EVALUATION = 0x0000_0004; const GEOMETRY = 0x0000_0008; const FRAGMENT = 0x0000_0010; const COMPUTE = 0x0000_0020; const RAYGEN_BIT_NV = 256; const ANY_HIT_BIT_NV = 512; const CLOSEST_HIT_BIT_NV = 1024; const MISS_BIT_NV = 2048; const INTERSECTION_BIT_NV = 4096; const CALLABLE_BIT_NV = 8192; } } impl Default for ReflectShaderStageFlags { fn default() -> Self { ReflectShaderStageFlags::UNDEFINED } } #[derive(Debug, Copy, Clone, PartialEq)] #[cfg_attr(feature = "serde", derive(serde::Serialize))] pub enum ReflectDimension { Undefined, Type1d, Type2d, Type3d, Cube, Rect, Buffer, SubPassData, } impl Default for ReflectDimension { fn default() -> Self { ReflectDimension::Undefined } } #[derive(Default, Debug, Clone, PartialEq)] #[cfg_attr(feature = "serde", derive(serde::Serialize))] pub struct ReflectTypeDescription { pub id: u32, #[cfg_attr(feature = "serde", serde(skip_serializing))] pub op: ReflectOp, // TODO: Serialization support pub type_name: String, pub struct_member_name: String, pub storage_class: ReflectStorageClass, pub type_flags: ReflectTypeFlags, pub decoration_flags: ReflectDecorationFlags, pub traits: ReflectTypeDescriptionTraits, pub members: Vec<ReflectTypeDescription>, } #[derive(Default, Debug, Clone, PartialEq)] #[cfg_attr(feature = "serde", derive(serde::Serialize))] pub struct ReflectBlockVariable { pub spirv_id: u32, pub name: String, pub offset: u32, pub absolute_offset: u32, pub size: u32, pub padded_size: u32, pub decoration_flags: ReflectDecorationFlags, pub numeric: ReflectNumericTraits, pub array: ReflectArrayTraits, pub members: Vec<ReflectBlockVariable>, pub type_description: Option<ReflectTypeDescription>, } #[derive(Debug, Copy, Clone, PartialEq)] #[cfg_attr(feature = "serde", derive(serde::Serialize))] pub enum 
ReflectStorageClass { Undefined, UniformConstant, Input, Uniform, Output, WorkGroup, CrossWorkGroup, Private, Function, Generic, PushConstant, AtomicCounter, Image, StorageBuffer, } impl Default for ReflectStorageClass { fn default() -> Self { ReflectStorageClass::Undefined } } #[derive(Debug, Clone)] #[cfg_attr(feature = "serde", derive(serde::Serialize))] pub struct ReflectInterfaceVariable { pub spirv_id: u32, pub name: String, pub location: u32, pub storage_class: ReflectStorageClass, pub semantic: String, pub decoration_flags: ReflectDecorationFlags, #[cfg_attr(feature = "serde", serde(skip_serializing))] pub built_in: ReflectBuiltIn, // TODO: Serialization support pub numeric: ReflectNumericTraits, pub array: ReflectArrayTraits, pub members: Vec<ReflectInterfaceVariable>, pub format: ReflectFormat, pub type_description: Option<ReflectTypeDescription>, pub word_offset: u32, #[cfg_attr(feature = "serde", serde(skip_serializing))] pub(crate) internal_data: *const ffi::SpvReflectInterfaceVariable, } #[derive(Debug, Clone)] #[cfg_attr(feature = "serde", derive(serde::Serialize))] pub struct ReflectEntryPoint { pub name: String, pub id: u32, #[cfg_attr(feature = "serde", serde(skip_serializing))] pub spirv_execution_model: spirv_headers::ExecutionModel, // TODO: Serialization support pub shader_stage: ReflectShaderStageFlags, pub input_variables: Vec<ReflectInterfaceVariable>, pub output_variables: Vec<ReflectInterfaceVariable>, pub descriptor_sets: Vec<ReflectDescriptorSet>, pub used_uniforms: Vec<u32>, pub used_push_constants: Vec<u32>, }
28.778947
92
0.678676
b9befe7b887408686606703b25801b0407ae0655
7,964
// Copyright 2016 Joe Wilm, The Alacritty Project Contributors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // //! Compatibility layer for different font engines //! //! CoreText is used on Mac OS. //! FreeType is used on everything that's not Mac OS. //! Eventually, ClearType support will be available for windows #![deny(clippy::all, clippy::if_not_else, clippy::enum_glob_use, clippy::wrong_pub_self_convention)] #[cfg(not(any(target_os = "macos", windows)))] extern crate fontconfig; #[cfg(not(any(target_os = "macos", windows)))] extern crate freetype; #[cfg(target_os = "macos")] extern crate core_foundation; #[cfg(target_os = "macos")] extern crate core_foundation_sys; #[cfg(target_os = "macos")] extern crate core_graphics; #[cfg(target_os = "macos")] extern crate core_text; #[cfg(target_os = "macos")] extern crate euclid; extern crate libc; #[cfg(not(any(target_os = "macos", windows)))] #[macro_use] extern crate foreign_types; #[cfg(not(any(target_os = "macos", windows)))] extern crate harfbuzz_rs; #[cfg_attr(not(windows), macro_use)] extern crate log; use std::fmt; use std::hash::{Hash, Hasher}; use std::ops::{Add, Mul}; use std::sync::atomic::{AtomicUsize, Ordering}; // If target isn't macos or windows, reexport everything from ft #[cfg(not(any(target_os = "macos", windows)))] pub mod ft; #[cfg(not(any(target_os = "macos", windows)))] pub use ft::{Error, FreeTypeRasterizer as Rasterizer}; #[cfg(windows)] pub mod directwrite; #[cfg(windows)] pub use 
crate::directwrite::{DirectWriteRasterizer as Rasterizer, Error}; // If target is macos, reexport everything from darwin #[cfg(target_os = "macos")] mod darwin; #[cfg(target_os = "macos")] pub use darwin::*; /// Placeholder glyph key that represents a blank gylph pub const PLACEHOLDER_GLYPH: KeyType = KeyType::Placeholder; #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct FontDesc { name: String, style: Style, } #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub enum Slant { Normal, Italic, Oblique, } #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub enum Weight { Normal, Bold, } /// Style of font #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub enum Style { Specific(String), Description { slant: Slant, weight: Weight }, } impl fmt::Display for Style { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { Style::Specific(ref s) => f.write_str(&s), Style::Description { slant, weight } => { write!(f, "slant={:?}, weight={:?}", slant, weight) }, } } } impl FontDesc { pub fn new<S>(name: S, style: Style) -> FontDesc where S: Into<String>, { FontDesc { name: name.into(), style } } } impl fmt::Display for FontDesc { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "name {} and style {}", self.name, self.style) } } /// Identifier for a Font for use in maps/etc #[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)] pub struct FontKey { token: u16, } impl FontKey { /// Get next font key for given size /// /// The generated key will be globally unique pub fn next() -> FontKey { static TOKEN: AtomicUsize = AtomicUsize::new(0); FontKey { token: TOKEN.fetch_add(1, Ordering::SeqCst) as _ } } } /// Captures possible outcomes of shaping, if shaping succeeded it will return a `GlyphIndex`. /// If shaping failed or did not occur, `Fallback` will be returned. 
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)] pub enum KeyType { /// A valid glyph index from Font face to be rasterized to a glyph GlyphIndex(u32), /// A character that has not been converted to an index before rasterizing Char(char), /// Placeholder glyph useful when we need a glyph but it shouldn't ever render as anything /// (cursors, wide_char_spacers, etc.) Placeholder, } impl Default for KeyType { fn default() -> Self { PLACEHOLDER_GLYPH } } impl From<u32> for KeyType { fn from(val: u32) -> Self { KeyType::GlyphIndex(val) } } impl From<char> for KeyType { fn from(val: char) -> Self { KeyType::Char(val) } } #[derive(Debug, Copy, Clone, Eq, Hash, PartialEq)] pub struct GlyphKey { pub id: KeyType, pub font_key: FontKey, pub size: Size, } /// Font size stored as integer #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)] pub struct Size(i16); impl Size { /// Create a new `Size` from a f32 size in points pub fn new(size: f32) -> Size { Size((size * Size::factor()) as i16) } /// Scale factor between font "Size" type and point size #[inline] pub fn factor() -> f32 { 2.0 } /// Get the f32 size in points pub fn as_f32_pts(self) -> f32 { f32::from(self.0) / Size::factor() } } impl<T: Into<Size>> Add<T> for Size { type Output = Size; fn add(self, other: T) -> Size { Size(self.0.saturating_add(other.into().0)) } } impl<T: Into<Size>> Mul<T> for Size { type Output = Size; fn mul(self, other: T) -> Size { Size(self.0 * other.into().0) } } impl From<f32> for Size { fn from(float: f32) -> Size { Size::new(float) } } #[derive(Clone, Default)] pub struct RasterizedGlyph { pub c: KeyType, pub width: i32, pub height: i32, pub top: i32, pub left: i32, pub buf: Vec<u8>, } struct BufDebugger<'a>(&'a [u8]); impl<'a> fmt::Debug for BufDebugger<'a> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("GlyphBuffer").field("len", &self.0.len()).field("bytes", &self.0).finish() } } impl fmt::Debug for RasterizedGlyph { fn fmt(&self, f: &mut 
fmt::Formatter) -> fmt::Result { f.debug_struct("RasterizedGlyph") .field("c", &self.c) .field("width", &self.width) .field("height", &self.height) .field("top", &self.top) .field("left", &self.left) .field("buf", &BufDebugger(&self.buf[..])) .finish() } } #[derive(Copy, Clone)] pub struct Metrics { pub average_advance: f64, pub line_height: f64, pub descent: f32, pub underline_position: f32, pub underline_thickness: f32, pub strikeout_position: f32, pub strikeout_thickness: f32, } pub trait Rasterize { /// Errors occurring in Rasterize methods type Err: ::std::error::Error + Send + Sync + 'static; fn new( device_pixel_ratio: f32, use_thin_strokes: bool, ligatures: bool, ) -> Result<Self, Self::Err> where Self: Sized; /// Get `Metrics` for the given `FontKey` fn metrics(&self, _: FontKey, _: Size) -> Result<Metrics, Self::Err>; /// Load the font described by `FontDesc` and `Size` fn load_font(&mut self, _: &FontDesc, _: Size) -> Result<FontKey, Self::Err>; /// Rasterize the glyph described by `GlyphKey`. fn get_glyph(&mut self, _: GlyphKey) -> Result<RasterizedGlyph, Self::Err>; /// Update the Rasterizer's DPI factor fn update_dpr(&mut self, device_pixel_ratio: f32); } /// Extends the Rasterizer with Harfbuzz specific functionality. #[cfg(not(any(target_os = "macos", windows)))] pub trait HbFtExt { /// Shape the provided text into a set of glyphs. fn shape(&mut self, text: &str, font_key: FontKey) -> harfbuzz_rs::GlyphBuffer; }
26.546667
100
0.638875
2fa34d19cca332664920473da207ae785884a2b3
2,664
#![allow(missing_docs)] // https://docs.live2d.com/cubism-editor-manual/standard-parametor-list/# /// Common Group IDs pub mod groups { pub static EYE_BLINK: &str = "EyeBlink"; pub static LIP_SYNC: &str = "LipSync"; } /// Standard Part IDs pub mod parts { pub static HIT_AREA_PREFIX: &str = "HitArea"; pub static HIT_AREA_HEAD: &str = "Head"; pub static HIT_AREA_BODY: &str = "Body"; pub static CORE: &str = "Parts01Core"; pub static ARM_PREFIX: &str = "Parts01Arm_"; pub static ARM_L_PREFIX: &str = "Parts01ArmL_"; pub static ARM_R_PREFIX: &str = "Parts01ArmR_"; } /// Standard Parameter IDs pub mod param { pub static ANGLE_X: &str = "ParamAngleX"; pub static ANGLE_Y: &str = "ParamAngleY"; pub static ANGLE_Z: &str = "ParamAngleZ"; pub static EYE_L_OPEN: &str = "ParamEyeLOpen"; pub static EYE_L_SMILE: &str = "ParamEyeLSmile"; pub static EYE_R_OPEN: &str = "ParamEyeROpen"; pub static EYE_R_SMILE: &str = "ParamEyeRSmile"; pub static EYE_BALL_X: &str = "ParamEyeBallX"; pub static EYE_BALL_Y: &str = "ParamEyeBallY"; pub static EYE_BALL_FORM: &str = "ParamEyeBallForm"; pub static BROW_LY: &str = "ParamBrowLY"; pub static BROW_RY: &str = "ParamBrowRY"; pub static BROW_LX: &str = "ParamBrowLX"; pub static BROW_RX: &str = "ParamBrowRX"; pub static BROW_L_ANGLE: &str = "ParamBrowLAngle"; pub static BROW_R_ANGLE: &str = "ParamBrowRAngle"; pub static BROW_L_FORM: &str = "ParamBrowLForm"; pub static BROW_R_FORM: &str = "ParamBrowRForm"; pub static MOUTH_FORM: &str = "ParamMouthForm"; pub static MOUTH_OPEN_Y: &str = "ParamMouthOpenY"; pub static CHEEK: &str = "ParamCheek"; pub static BODY_ANGLE_X: &str = "ParamBodyAngleX"; pub static BODY_ANGLE_Y: &str = "ParamBodyAngleY"; pub static BODY_ANGLE_Z: &str = "ParamBodyAngleZ"; pub static BREATH: &str = "ParamBreath"; pub static ARM_LA: &str = "ParamArmLA"; pub static ARM_RA: &str = "ParamArmRA"; pub static ARM_LB: &str = "ParamArmLB"; pub static ARM_RB: &str = "ParamArmRB"; pub static HAND_L: &str = "ParamHandL"; pub static HAND_R: &str = 
"ParamHandR"; pub static HAIR_FRONT: &str = "ParamHairFront"; pub static HAIR_SIDE: &str = "ParamHairSide"; pub static HAIR_BACK: &str = "ParamHairBack"; pub static HAIR_FLUFFY: &str = "ParamHairFluffy"; pub static SHOULDER_Y: &str = "ParamShoulderY"; pub static BUST_X: &str = "ParamBustX"; pub static BUST_Y: &str = "ParamBustY"; pub static BASE_X: &str = "ParamBaseX"; pub static BASE_Y: &str = "ParamBaseY"; pub static NONE: &str = "NONE:"; }
33.721519
73
0.668168
f9e71b6ba29dbe0fde836f162d4a22b64fc5e086
1,511
// TODO: add .gitignore (and other ignore files) parsing functinality // TODO: add "create-module" functionality (so generated types can be under a specified namespace like Rust.MyType) extern crate syn; pub mod hook; pub mod params; pub mod processor; use std::path::PathBuf; use structopt::StructOpt; pub use processor::process; const DESCRIPTION: &'static str = env!("CARGO_PKG_DESCRIPTION"); #[derive(Debug, StructOpt, Clone)] #[structopt(about = DESCRIPTION, after_help = "This command helps generate type information for other languages. Currently, only typescript is supported.")] struct Args { /// Activate debug mode #[structopt(long, help = "Dry-run, prints to stdout", short = "d", long = "debug")] debug: bool, // #[structopt( // long = "use-ignore-file", // help = "Optionally ignore files with a .gitignore (or similar file); for example: --use-ignore-file=.gitignore" // )] // use_ignore_file: Option<PathBuf>, /// Input file #[structopt( short = "i", long = "input", help = "Required; rust file(s) to read type information from", required = true )] input: Vec<PathBuf>, /// Output file, stdout if not present #[structopt( parse(from_os_str), short = "o", long = "output", help = "Required; file to write generated types to" )] output: PathBuf, } fn main() { let args: Args = Args::from_args(); process(args.input, args.output, args.debug); }
29.057692
156
0.643944
e61601ff2e23370c50cac456270d3496f61a7091
3,159
//! Types which represent various database backends use byteorder::ByteOrder; use query_builder::bind_collector::BindCollector; use query_builder::QueryBuilder; use sql_types::{self, HasSqlType}; /// A database backend /// /// This trait represents the concept of a backend (e.g. "MySQL" vs "SQLite"). /// It is separate from a [`Connection`](../connection/trait.Connection.html) /// to that backend. /// One backend may have multiple concrete connection implementations. /// /// Implementations of this trait should not assume details about how the /// connection is implemented. /// For example, the `Pg` backend does not assume that `libpq` is being used. /// Implementations of this trait can and should care about details of the wire /// protocol used to communicated with the database. pub trait Backend where Self: Sized, Self: HasSqlType<sql_types::SmallInt>, Self: HasSqlType<sql_types::Integer>, Self: HasSqlType<sql_types::BigInt>, Self: HasSqlType<sql_types::Float>, Self: HasSqlType<sql_types::Double>, Self: HasSqlType<sql_types::VarChar>, Self: HasSqlType<sql_types::Text>, Self: HasSqlType<sql_types::Binary>, Self: HasSqlType<sql_types::Date>, Self: HasSqlType<sql_types::Time>, Self: HasSqlType<sql_types::Timestamp>, Self: for<'a> HasRawValue<'a>, { /// The concrete `QueryBuilder` implementation for this backend. type QueryBuilder: QueryBuilder<Self>; /// The concrete `BindCollector` implementation for this backend. /// /// Most backends should use [`RawBytesBindCollector`]. /// /// [`RawBytesBindCollector`]: ../query_builder/bind_collector/struct.RawBytesBindCollector.html type BindCollector: BindCollector<Self>; /// What byte order is used to transmit integers? /// /// This type is only used if `RawValue` is `[u8]`. type ByteOrder: ByteOrder; } /// The raw representation of a database value given to `FromSql`. /// /// This trait is separate from `Backend` to imitate `type RawValue<'a>`. It /// should only be referenced directly by implementors. 
Users of this type /// should instead use the [`RawValue`](type.RawValue.html) helper type instead. pub trait HasRawValue<'a> { /// The actual type given to `FromSql`, with lifetimes applied. This type /// should not be used directly. Use the [`RawValue`](type.RawValue.html) /// helper type instead. type RawValue; } /// A trait indicating that the provided raw value uses a binary representation internally pub trait BinaryRawValue<'a>: HasRawValue<'a> { /// Get the underlying binary representation of the raw value fn as_bytes(value: Self::RawValue) -> &'a [u8]; } /// A helper type to get the raw representation of a database type given to /// `FromSql`. Equivalent to `<DB as Backend>::RawValue<'a>`. pub type RawValue<'a, DB> = <DB as HasRawValue<'a>>::RawValue; /// Does this backend support `RETURNING` clauses? pub trait SupportsReturningClause {} /// Does this backend support the bare `DEFAULT` keyword? pub trait SupportsDefaultKeyword {} /// Does this backend use the standard `SAVEPOINT` syntax? pub trait UsesAnsiSavepointSyntax {}
39.987342
100
0.718898
ff9dc9d33a8c1044c12d24d7be855b16fc0c4343
598
// move_semantics2.rs // Make me compile without changing line 13! // Execute `rustlings hint move_semantics2` for hints :) fn main() { let vec0 = Vec::new(); let mut vec1 = fill_vec(&vec0); // Do not change the following line! println!("{} has length {} content `{:?}`", "vec0", vec0.len(), vec0); vec1.push(88); println!("{} has length {} content `{:?}`", "vec1", vec1.len(), vec1); } fn fill_vec(vec: &Vec<i32>) -> Vec<i32> { let mut vec = vec.to_vec(); // convert reference to the actual array vec.push(22); vec.push(44); vec.push(66); vec }
22.148148
74
0.590301
8a29a3bc1aa7feb40ed31029cf56209871e714ed
1,104
#![cfg_attr(not(feature = "std"), no_std)] use frame_system::Config; //use sp_std::prelude::*; pub trait GeneticAnalystQualificationInfo<T: Config> { fn get_id(&self) -> &T::Hash; fn get_owner_id(&self) -> &T::AccountId; } pub trait GeneticAnalystQualificationsProvider<T: Config> { type Error; type GeneticAnalystQualification: GeneticAnalystQualificationInfo<T> + sp_std::fmt::Debug; fn delete_qualification( owner_id: &T::AccountId, id: &T::Hash, ) -> Result<Self::GeneticAnalystQualification, Self::Error>; fn qualification_by_id(id: &T::Hash) -> Option<Self::GeneticAnalystQualification>; } pub trait GeneticAnalystQualificationOwnerInfo<T: Config> { fn get_owner_id(&self) -> &T::AccountId; } pub trait GeneticAnalystQualificationOwner<T: Config> { type Owner: GeneticAnalystQualificationOwnerInfo<T> + sp_std::fmt::Debug; fn can_create_qualification(id: &T::AccountId) -> bool; fn get_owner(id: &T::AccountId) -> Option<Self::Owner>; fn associate(owner_id: &T::AccountId, qualification_id: &T::Hash); fn disassociate(owner_id: &T::AccountId, qualification_id: &T::Hash); }
32.470588
91
0.741848
093143737042945143f3b6fcab9e8ce2cdc1b053
4,068
#![stable(feature = "metadata_ext", since = "1.1.0")] use crate::fs::Metadata; use crate::sys_common::AsInner; #[allow(deprecated)] use crate::os::solaris::raw; /// OS-specific extensions to [`fs::Metadata`]. /// /// [`fs::Metadata`]: crate::fs::Metadata #[stable(feature = "metadata_ext", since = "1.1.0")] pub trait MetadataExt { /// Gain a reference to the underlying `stat` structure which contains /// the raw information returned by the OS. /// /// The contents of the returned `stat` are **not** consistent across /// Unix platforms. The `os::unix::fs::MetadataExt` trait contains the /// cross-Unix abstractions contained within the raw stat. #[stable(feature = "metadata_ext", since = "1.1.0")] #[deprecated( since = "1.8.0", note = "deprecated in favor of the accessor \ methods of this trait" )] #[allow(deprecated)] fn as_raw_stat(&self) -> &raw::stat; #[stable(feature = "metadata_ext2", since = "1.8.0")] fn st_dev(&self) -> u64; #[stable(feature = "metadata_ext2", since = "1.8.0")] fn st_ino(&self) -> u64; #[stable(feature = "metadata_ext2", since = "1.8.0")] fn st_mode(&self) -> u32; #[stable(feature = "metadata_ext2", since = "1.8.0")] fn st_nlink(&self) -> u64; #[stable(feature = "metadata_ext2", since = "1.8.0")] fn st_uid(&self) -> u32; #[stable(feature = "metadata_ext2", since = "1.8.0")] fn st_gid(&self) -> u32; #[stable(feature = "metadata_ext2", since = "1.8.0")] fn st_rdev(&self) -> u64; #[stable(feature = "metadata_ext2", since = "1.8.0")] fn st_size(&self) -> u64; #[stable(feature = "metadata_ext2", since = "1.8.0")] fn st_atime(&self) -> i64; #[stable(feature = "metadata_ext2", since = "1.8.0")] fn st_atime_nsec(&self) -> i64; #[stable(feature = "metadata_ext2", since = "1.8.0")] fn st_mtime(&self) -> i64; #[stable(feature = "metadata_ext2", since = "1.8.0")] fn st_mtime_nsec(&self) -> i64; #[stable(feature = "metadata_ext2", since = "1.8.0")] fn st_ctime(&self) -> i64; #[stable(feature = "metadata_ext2", since = "1.8.0")] fn st_ctime_nsec(&self) -> 
i64; #[stable(feature = "metadata_ext2", since = "1.8.0")] fn st_blksize(&self) -> u64; #[stable(feature = "metadata_ext2", since = "1.8.0")] fn st_blocks(&self) -> u64; } #[stable(feature = "metadata_ext", since = "1.1.0")] impl MetadataExt for Metadata { #[allow(deprecated)] fn as_raw_stat(&self) -> &raw::stat { unsafe { &*(self.as_inner().as_inner() as *const libc::stat as *const raw::stat) } } fn st_dev(&self) -> u64 { self.as_inner().as_inner().st_dev as u64 } fn st_ino(&self) -> u64 { self.as_inner().as_inner().st_ino as u64 } fn st_mode(&self) -> u32 { self.as_inner().as_inner().st_mode as u32 } fn st_nlink(&self) -> u64 { self.as_inner().as_inner().st_nlink as u64 } fn st_uid(&self) -> u32 { self.as_inner().as_inner().st_uid as u32 } fn st_gid(&self) -> u32 { self.as_inner().as_inner().st_gid as u32 } fn st_rdev(&self) -> u64 { self.as_inner().as_inner().st_rdev as u64 } fn st_size(&self) -> u64 { self.as_inner().as_inner().st_size as u64 } fn st_atime(&self) -> i64 { self.as_inner().as_inner().st_atime as i64 } fn st_atime_nsec(&self) -> i64 { self.as_inner().as_inner().st_atime_nsec as i64 } fn st_mtime(&self) -> i64 { self.as_inner().as_inner().st_mtime as i64 } fn st_mtime_nsec(&self) -> i64 { self.as_inner().as_inner().st_mtime_nsec as i64 } fn st_ctime(&self) -> i64 { self.as_inner().as_inner().st_ctime as i64 } fn st_ctime_nsec(&self) -> i64 { self.as_inner().as_inner().st_ctime_nsec as i64 } fn st_blksize(&self) -> u64 { self.as_inner().as_inner().st_blksize as u64 } fn st_blocks(&self) -> u64 { self.as_inner().as_inner().st_blocks as u64 } }
34.474576
90
0.586529
e61dc081e0eda2568ed787a1ab079b25d78e25d7
78,992
/// FeatureMap represents extra features that customers want to include in the /// recommendation model for catalogs/user events as categorical/numerical /// features. #[derive(Clone, PartialEq, ::prost::Message)] pub struct FeatureMap { /// Categorical features that can take on one of a limited number of possible /// values. Some examples would be the brand/maker of a product, or country of /// a customer. /// /// Feature names and values must be UTF-8 encoded strings. /// /// For example: `{ "colors": {"value": ["yellow", "green"]}, /// "sizes": {"value":["S", "M"]}` #[prost(map = "string, message", tag = "1")] pub categorical_features: ::std::collections::HashMap<::prost::alloc::string::String, feature_map::StringList>, /// Numerical features. Some examples would be the height/weight of a product, /// or age of a customer. /// /// Feature names must be UTF-8 encoded strings. /// /// For example: `{ "lengths_cm": {"value":[2.3, 15.4]}, /// "heights_cm": {"value":[8.1, 6.4]} }` #[prost(map = "string, message", tag = "2")] pub numerical_features: ::std::collections::HashMap<::prost::alloc::string::String, feature_map::FloatList>, } /// Nested message and enum types in `FeatureMap`. pub mod feature_map { /// A list of string features. #[derive(Clone, PartialEq, ::prost::Message)] pub struct StringList { /// String feature value with a length limit of 128 bytes. #[prost(string, repeated, tag = "1")] pub value: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, } /// A list of float features. #[derive(Clone, PartialEq, ::prost::Message)] pub struct FloatList { /// Float feature value. #[prost(float, repeated, tag = "1")] pub value: ::prost::alloc::vec::Vec<f32>, } } /// CatalogItem captures all metadata information of items to be recommended. #[derive(Clone, PartialEq, ::prost::Message)] pub struct CatalogItem { /// Required. Catalog item identifier. UTF-8 encoded string with a length limit /// of 128 bytes. 
/// /// This id must be unique among all catalog items within the same catalog. It /// should also be used when logging user events in order for the user events /// to be joined with the Catalog. #[prost(string, tag = "1")] pub id: ::prost::alloc::string::String, /// Required. Catalog item categories. This field is repeated for supporting /// one catalog item belonging to several parallel category hierarchies. /// /// For example, if a shoes product belongs to both /// ["Shoes & Accessories" -> "Shoes"] and /// ["Sports & Fitness" -> "Athletic Clothing" -> "Shoes"], it could be /// represented as: /// /// "categoryHierarchies": [ /// { "categories": ["Shoes & Accessories", "Shoes"]}, /// { "categories": ["Sports & Fitness", "Athletic Clothing", "Shoes"] } /// ] #[prost(message, repeated, tag = "2")] pub category_hierarchies: ::prost::alloc::vec::Vec<catalog_item::CategoryHierarchy>, /// Required. Catalog item title. UTF-8 encoded string with a length limit of 1 /// KiB. #[prost(string, tag = "3")] pub title: ::prost::alloc::string::String, /// Optional. Catalog item description. UTF-8 encoded string with a length /// limit of 5 KiB. #[prost(string, tag = "4")] pub description: ::prost::alloc::string::String, /// Optional. Highly encouraged. Extra catalog item attributes to be /// included in the recommendation model. For example, for retail products, /// this could include the store name, vendor, style, color, etc. These are /// very strong signals for recommendation model, thus we highly recommend /// providing the item attributes here. #[prost(message, optional, tag = "5")] pub item_attributes: ::core::option::Option<FeatureMap>, /// Optional. Language of the title/description/item_attributes. Use language /// tags defined by BCP 47. https://www.rfc-editor.org/rfc/bcp/bcp47.txt. Our /// supported language codes include 'en', 'es', 'fr', 'de', 'ar', 'fa', 'zh', /// 'ja', 'ko', 'sv', 'ro', 'nl'. For other languages, contact /// your Google account manager. 
#[prost(string, tag = "6")] pub language_code: ::prost::alloc::string::String, /// Optional. Filtering tags associated with the catalog item. Each tag should /// be a UTF-8 encoded string with a length limit of 1 KiB. /// /// This tag can be used for filtering recommendation results by passing the /// tag as part of the predict request filter. #[prost(string, repeated, tag = "8")] pub tags: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, /// Optional. Variant group identifier for prediction results. UTF-8 encoded /// string with a length limit of 128 bytes. /// /// This field must be enabled before it can be used. [Learn /// more](/recommendations-ai/docs/catalog#item-group-id). #[prost(string, tag = "9")] pub item_group_id: ::prost::alloc::string::String, /// Extra catalog item metadata for different recommendation types. #[prost(oneof = "catalog_item::RecommendationType", tags = "10")] pub recommendation_type: ::core::option::Option<catalog_item::RecommendationType>, } /// Nested message and enum types in `CatalogItem`. pub mod catalog_item { /// Category represents catalog item category hierarchy. #[derive(Clone, PartialEq, ::prost::Message)] pub struct CategoryHierarchy { /// Required. Catalog item categories. Each category should be a UTF-8 /// encoded string with a length limit of 2 KiB. /// /// Note that the order in the list denotes the specificity (from least to /// most specific). #[prost(string, repeated, tag = "1")] pub categories: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, } /// Extra catalog item metadata for different recommendation types. #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum RecommendationType { /// Optional. Metadata specific to retail products. #[prost(message, tag = "10")] ProductMetadata(super::ProductCatalogItem), } } /// ProductCatalogItem captures item metadata specific to retail products. #[derive(Clone, PartialEq, ::prost::Message)] pub struct ProductCatalogItem { /// Optional. 
A map to pass the costs associated with the product. /// /// For example: /// {"manufacturing": 45.5} The profit of selling this item is computed like /// so: /// /// * If 'exactPrice' is provided, profit = displayPrice - sum(costs) /// * If 'priceRange' is provided, profit = minPrice - sum(costs) #[prost(map = "string, float", tag = "3")] pub costs: ::std::collections::HashMap<::prost::alloc::string::String, f32>, /// Optional. Only required if the price is set. Currency code for price/costs. Use /// three-character ISO-4217 code. #[prost(string, tag = "4")] pub currency_code: ::prost::alloc::string::String, /// Optional. Online stock state of the catalog item. Default is `IN_STOCK`. #[prost(enumeration = "product_catalog_item::StockState", tag = "5")] pub stock_state: i32, /// Optional. The available quantity of the item. #[prost(int64, tag = "6")] pub available_quantity: i64, /// Optional. Canonical URL directly linking to the item detail page with a /// length limit of 5 KiB.. #[prost(string, tag = "7")] pub canonical_product_uri: ::prost::alloc::string::String, /// Optional. Product images for the catalog item. #[prost(message, repeated, tag = "8")] pub images: ::prost::alloc::vec::Vec<Image>, /// Product price. Only one of 'exactPrice'/'priceRange' can be provided. #[prost(oneof = "product_catalog_item::Price", tags = "1, 2")] pub price: ::core::option::Option<product_catalog_item::Price>, } /// Nested message and enum types in `ProductCatalogItem`. pub mod product_catalog_item { /// Exact product price. #[derive(Clone, PartialEq, ::prost::Message)] pub struct ExactPrice { /// Optional. Display price of the product. #[prost(float, tag = "1")] pub display_price: f32, /// Optional. Price of the product without any discount. If zero, by default /// set to be the 'displayPrice'. #[prost(float, tag = "2")] pub original_price: f32, } /// Product price range when there are a range of prices for different /// variations of the same product. 
#[derive(Clone, PartialEq, ::prost::Message)] pub struct PriceRange { /// Required. The minimum product price. #[prost(float, tag = "1")] pub min: f32, /// Required. The maximum product price. #[prost(float, tag = "2")] pub max: f32, } /// Item stock state. If this field is unspecified, the item is /// assumed to be in stock. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum StockState { /// Default item stock status. Should never be used. Unspecified = 0, /// Item out of stock. OutOfStock = 1, /// Item that is in pre-order state. Preorder = 2, /// Item that is back-ordered (i.e. temporarily out of stock). Backorder = 3, } /// Product price. Only one of 'exactPrice'/'priceRange' can be provided. #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Price { /// Optional. The exact product price. #[prost(message, tag = "1")] ExactPrice(ExactPrice), /// Optional. The product price range. #[prost(message, tag = "2")] PriceRange(PriceRange), } } /// Catalog item thumbnail/detail image. #[derive(Clone, PartialEq, ::prost::Message)] pub struct Image { /// Required. URL of the image with a length limit of 5 KiB. #[prost(string, tag = "1")] pub uri: ::prost::alloc::string::String, /// Optional. Height of the image in number of pixels. #[prost(int32, tag = "2")] pub height: i32, /// Optional. Width of the image in number of pixels. #[prost(int32, tag = "3")] pub width: i32, } /// UserEvent captures all metadata information recommendation engine needs to /// know about how end users interact with customers' website. #[derive(Clone, PartialEq, ::prost::Message)] pub struct UserEvent { /// Required. User event type. Allowed values are: /// /// * `add-to-cart` Products being added to cart. /// * `add-to-list` Items being added to a list (shopping list, favorites /// etc). /// * `category-page-view` Special pages such as sale or promotion pages /// viewed. /// * `checkout-start` User starting a checkout process. 
/// * `detail-page-view` Products detail page viewed. /// * `home-page-view` Homepage viewed. /// * `page-visit` Generic page visits not included in the event types above. /// * `purchase-complete` User finishing a purchase. /// * `refund` Purchased items being refunded or returned. /// * `remove-from-cart` Products being removed from cart. /// * `remove-from-list` Items being removed from a list. /// * `search` Product search. /// * `shopping-cart-page-view` User viewing a shopping cart. /// * `impression` List of items displayed. Used by Google Tag Manager. #[prost(string, tag = "1")] pub event_type: ::prost::alloc::string::String, /// Required. User information. #[prost(message, optional, tag = "2")] pub user_info: ::core::option::Option<UserInfo>, /// Optional. User event detailed information common across different /// recommendation types. #[prost(message, optional, tag = "3")] pub event_detail: ::core::option::Option<EventDetail>, /// Optional. Retail product specific user event metadata. /// /// This field is required for the following event types: /// /// * `add-to-cart` /// * `add-to-list` /// * `category-page-view` /// * `checkout-start` /// * `detail-page-view` /// * `purchase-complete` /// * `refund` /// * `remove-from-cart` /// * `remove-from-list` /// * `search` /// /// This field is optional for the following event types: /// /// * `page-visit` /// * `shopping-cart-page-view` - note that 'product_event_detail' should be /// set for this unless the shopping cart is empty. /// /// This field is not allowed for the following event types: /// /// * `home-page-view` #[prost(message, optional, tag = "4")] pub product_event_detail: ::core::option::Option<ProductEventDetail>, /// Optional. Only required for ImportUserEvents method. Timestamp of user /// event created. #[prost(message, optional, tag = "5")] pub event_time: ::core::option::Option<::prost_types::Timestamp>, /// Optional. 
This field should *not* be set when using JavaScript pixel /// or the Recommendations AI Tag. Defaults to `EVENT_SOURCE_UNSPECIFIED`. #[prost(enumeration = "user_event::EventSource", tag = "6")] pub event_source: i32, } /// Nested message and enum types in `UserEvent`. pub mod user_event { /// User event source. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum EventSource { /// Unspecified event source. Unspecified = 0, /// The event is ingested via a javascript pixel or Recommendations AI Tag /// through automl datalayer or JS Macros. Automl = 1, /// The event is ingested via Recommendations AI Tag through Enhanced /// Ecommerce datalayer. Ecommerce = 2, /// The event is ingested via Import user events API. BatchUpload = 3, } } /// Information of end users. #[derive(Clone, PartialEq, ::prost::Message)] pub struct UserInfo { /// Required. A unique identifier for tracking visitors with a length limit of /// 128 bytes. /// /// For example, this could be implemented with a http cookie, which should be /// able to uniquely identify a visitor on a single device. This unique /// identifier should not change if the visitor log in/out of the website. /// Maximum length 128 bytes. Cannot be empty. #[prost(string, tag = "1")] pub visitor_id: ::prost::alloc::string::String, /// Optional. Unique identifier for logged-in user with a length limit of 128 /// bytes. Required only for logged-in users. #[prost(string, tag = "2")] pub user_id: ::prost::alloc::string::String, /// Optional. IP address of the user. This could be either IPv4 (e.g. 104.133.9.80) or /// IPv6 (e.g. 2001:0db8:85a3:0000:0000:8a2e:0370:7334). This should *not* be /// set when using the javascript pixel or if `direct_user_request` is set. /// Used to extract location information for personalization. #[prost(string, tag = "3")] pub ip_address: ::prost::alloc::string::String, /// Optional. User agent as included in the HTTP header. 
UTF-8 encoded string /// with a length limit of 1 KiB. /// /// This should *not* be set when using the JavaScript pixel or if /// `directUserRequest` is set. #[prost(string, tag = "4")] pub user_agent: ::prost::alloc::string::String, /// Optional. Indicates if the request is made directly from the end user /// in which case the user_agent and ip_address fields can be populated /// from the HTTP request. This should *not* be set when using the javascript /// pixel. This flag should be set only if the API request is made directly /// from the end user such as a mobile app (and not if a gateway or a server is /// processing and pushing the user events). #[prost(bool, tag = "5")] pub direct_user_request: bool, } /// User event details shared by all recommendation types. #[derive(Clone, PartialEq, ::prost::Message)] pub struct EventDetail { /// Optional. Complete url (window.location.href) of the user's current page. /// When using the JavaScript pixel, this value is filled in automatically. /// Maximum length 5KB. #[prost(string, tag = "1")] pub uri: ::prost::alloc::string::String, /// Optional. The referrer url of the current page. When using /// the JavaScript pixel, this value is filled in automatically. #[prost(string, tag = "6")] pub referrer_uri: ::prost::alloc::string::String, /// Optional. A unique id of a web page view. /// This should be kept the same for all user events triggered from the same /// pageview. For example, an item detail page view could trigger multiple /// events as the user is browsing the page. /// The `pageViewId` property should be kept the same for all these events so /// that they can be grouped together properly. This `pageViewId` will be /// automatically generated if using the JavaScript pixel. #[prost(string, tag = "2")] pub page_view_id: ::prost::alloc::string::String, /// Optional. A list of identifiers for the independent experiment groups /// this user event belongs to. 
This is used to distinguish between user events /// associated with different experiment setups (e.g. using Recommendation /// Engine system, using different recommendation models). #[prost(string, repeated, tag = "3")] pub experiment_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, /// Optional. Recommendation token included in the recommendation prediction /// response. /// /// This field enables accurate attribution of recommendation model /// performance. /// /// This token enables us to accurately attribute page view or purchase back to /// the event and the particular predict response containing this /// clicked/purchased item. If user clicks on product K in the recommendation /// results, pass the `PredictResponse.recommendationToken` property as a url /// parameter to product K's page. When recording events on product K's page, /// log the PredictResponse.recommendation_token to this field. /// /// Optional, but highly encouraged for user events that are the result of a /// recommendation prediction query. #[prost(string, tag = "4")] pub recommendation_token: ::prost::alloc::string::String, /// Optional. Extra user event features to include in the recommendation /// model. /// /// For product recommendation, an example of extra user information is /// traffic_channel, i.e. how user arrives at the site. Users can arrive /// at the site by coming to the site directly, or coming through Google /// search, and etc. #[prost(message, optional, tag = "5")] pub event_attributes: ::core::option::Option<FeatureMap>, } /// ProductEventDetail captures user event information specific to retail /// products. #[derive(Clone, PartialEq, ::prost::Message)] pub struct ProductEventDetail { /// Required for `search` events. Other event types should not set this field. /// The user's search query as UTF-8 encoded text with a length limit of 5 KiB. 
#[prost(string, tag = "1")] pub search_query: ::prost::alloc::string::String, /// Required for `category-page-view` events. Other event types should not set /// this field. /// The categories associated with a category page. /// Category pages include special pages such as sales or promotions. For /// instance, a special sale page may have the category hierarchy: /// categories : ["Sales", "2017 Black Friday Deals"]. #[prost(message, repeated, tag = "2")] pub page_categories: ::prost::alloc::vec::Vec<catalog_item::CategoryHierarchy>, /// The main product details related to the event. /// /// This field is required for the following event types: /// /// * `add-to-cart` /// * `add-to-list` /// * `checkout-start` /// * `detail-page-view` /// * `purchase-complete` /// * `refund` /// * `remove-from-cart` /// * `remove-from-list` /// /// This field is optional for the following event types: /// /// * `page-visit` /// * `shopping-cart-page-view` - note that 'product_details' should be set for /// this unless the shopping cart is empty. /// /// This field is not allowed for the following event types: /// /// * `category-page-view` /// * `home-page-view` /// * `search` #[prost(message, repeated, tag = "3")] pub product_details: ::prost::alloc::vec::Vec<ProductDetail>, /// Required for `add-to-list` and `remove-from-list` events. The id or name of /// the list that the item is being added to or removed from. Other event types /// should not set this field. #[prost(string, tag = "4")] pub list_id: ::prost::alloc::string::String, /// Optional. The id or name of the associated shopping cart. This id is used /// to associate multiple items added or present in the cart before purchase. /// /// This can only be set for `add-to-cart`, `remove-from-cart`, /// `checkout-start`, `purchase-complete`, or `shopping-cart-page-view` events. #[prost(string, tag = "5")] pub cart_id: ::prost::alloc::string::String, /// Optional. A transaction represents the entire purchase transaction. 
/// Required for `purchase-complete` events. Optional for `checkout-start` /// events. Other event types should not set this field. #[prost(message, optional, tag = "6")] pub purchase_transaction: ::core::option::Option<PurchaseTransaction>, } /// A transaction represents the entire purchase transaction. #[derive(Clone, PartialEq, ::prost::Message)] pub struct PurchaseTransaction { /// Optional. The transaction ID with a length limit of 128 bytes. #[prost(string, tag = "1")] pub id: ::prost::alloc::string::String, /// Required. Total revenue or grand total associated with the transaction. /// This value include shipping, tax, or other adjustments to total revenue /// that you want to include as part of your revenue calculations. This field /// is not required if the event type is `refund`. #[prost(float, tag = "2")] pub revenue: f32, /// Optional. All the taxes associated with the transaction. #[prost(map = "string, float", tag = "3")] pub taxes: ::std::collections::HashMap<::prost::alloc::string::String, f32>, /// Optional. All the costs associated with the product. These can be /// manufacturing costs, shipping expenses not borne by the end user, or any /// other costs. /// /// Total product cost such that /// profit = revenue - (sum(taxes) + sum(costs)) /// If product_cost is not set, then /// profit = revenue - tax - shipping - sum(CatalogItem.costs). /// /// If CatalogItem.cost is not specified for one of the items, CatalogItem.cost /// based profit *cannot* be calculated for this Transaction. #[prost(map = "string, float", tag = "4")] pub costs: ::std::collections::HashMap<::prost::alloc::string::String, f32>, /// Required. Currency code. Use three-character ISO-4217 code. This field /// is not required if the event type is `refund`. #[prost(string, tag = "6")] pub currency_code: ::prost::alloc::string::String, } /// Detailed product information associated with a user event. 
#[derive(Clone, PartialEq, ::prost::Message)] pub struct ProductDetail { /// Required. Catalog item ID. UTF-8 encoded string with a length limit of 128 /// characters. #[prost(string, tag = "1")] pub id: ::prost::alloc::string::String, /// Optional. Currency code for price/costs. Use three-character ISO-4217 /// code. Required only if originalPrice or displayPrice is set. #[prost(string, tag = "2")] pub currency_code: ::prost::alloc::string::String, /// Optional. Original price of the product. If provided, this will override /// the original price in Catalog for this product. #[prost(float, tag = "3")] pub original_price: f32, /// Optional. Display price of the product (e.g. discounted price). If /// provided, this will override the display price in Catalog for this product. #[prost(float, tag = "4")] pub display_price: f32, /// Optional. Item stock state. If provided, this overrides the stock state /// in Catalog for items in this event. #[prost(enumeration = "product_catalog_item::StockState", tag = "5")] pub stock_state: i32, /// Optional. Quantity of the product associated with the user event. For /// example, this field will be 2 if two products are added to the shopping /// cart for `add-to-cart` event. Required for `add-to-cart`, `add-to-list`, /// `remove-from-cart`, `checkout-start`, `purchase-complete`, `refund` event /// types. #[prost(int32, tag = "6")] pub quantity: i32, /// Optional. Quantity of the products in stock when a user event happens. /// Optional. If provided, this overrides the available quantity in Catalog for /// this event. and can only be set if `stock_status` is set to `IN_STOCK`. /// /// Note that if an item is out of stock, you must set the `stock_state` field /// to be `OUT_OF_STOCK`. Leaving this field unspecified / as zero is not /// sufficient to mark the item out of stock. #[prost(int32, tag = "7")] pub available_quantity: i32, /// Optional. Extra features associated with a product in the user event. 
#[prost(message, optional, tag = "8")] pub item_attributes: ::core::option::Option<FeatureMap>, } /// Google Cloud Storage location for input content. /// format. #[derive(Clone, PartialEq, ::prost::Message)] pub struct GcsSource { /// Required. Google Cloud Storage URIs to input files. URI can be up to /// 2000 characters long. URIs can match the full object path (for example, /// `gs://bucket/directory/object.json`) or a pattern matching one or more /// files, such as `gs://bucket/directory/*.json`. A request can /// contain at most 100 files, and each file can be up to 2 GB. See /// [Importing catalog information](/recommendations-ai/docs/upload-catalog) /// for the expected file format and setup instructions. #[prost(string, repeated, tag = "1")] pub input_uris: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, } /// The inline source for the input config for ImportCatalogItems method. #[derive(Clone, PartialEq, ::prost::Message)] pub struct CatalogInlineSource { /// Optional. A list of catalog items to update/create. Recommended max of 10k /// items. #[prost(message, repeated, tag = "1")] pub catalog_items: ::prost::alloc::vec::Vec<CatalogItem>, } /// The inline source for the input config for ImportUserEvents method. #[derive(Clone, PartialEq, ::prost::Message)] pub struct UserEventInlineSource { /// Optional. A list of user events to import. Recommended max of 10k items. #[prost(message, repeated, tag = "1")] pub user_events: ::prost::alloc::vec::Vec<UserEvent>, } /// Configuration of destination for Import related errors. #[derive(Clone, PartialEq, ::prost::Message)] pub struct ImportErrorsConfig { /// Required. Errors destination. #[prost(oneof = "import_errors_config::Destination", tags = "1")] pub destination: ::core::option::Option<import_errors_config::Destination>, } /// Nested message and enum types in `ImportErrorsConfig`. pub mod import_errors_config { /// Required. Errors destination. 
#[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Destination { /// Google Cloud Storage path for import errors. This must be an empty, /// existing Cloud Storage bucket. Import errors will be written to a file in /// this bucket, one per line, as a JSON-encoded /// `google.rpc.Status` message. #[prost(string, tag = "1")] GcsPrefix(::prost::alloc::string::String), } } /// Request message for Import methods. #[derive(Clone, PartialEq, ::prost::Message)] pub struct ImportCatalogItemsRequest { /// Required. `projects/1234/locations/global/catalogs/default_catalog` #[prost(string, tag = "1")] pub parent: ::prost::alloc::string::String, /// Optional. Unique identifier provided by client, within the ancestor /// dataset scope. Ensures idempotency and used for request deduplication. /// Server-generated if unspecified. Up to 128 characters long. This is /// returned as google.longrunning.Operation.name in the response. #[prost(string, tag = "2")] pub request_id: ::prost::alloc::string::String, /// Required. The desired input location of the data. #[prost(message, optional, tag = "3")] pub input_config: ::core::option::Option<InputConfig>, /// Optional. The desired location of errors incurred during the Import. #[prost(message, optional, tag = "4")] pub errors_config: ::core::option::Option<ImportErrorsConfig>, } /// Request message for the ImportUserEvents request. #[derive(Clone, PartialEq, ::prost::Message)] pub struct ImportUserEventsRequest { /// Required. /// `projects/1234/locations/global/catalogs/default_catalog/eventStores/default_event_store` #[prost(string, tag = "1")] pub parent: ::prost::alloc::string::String, /// Optional. Unique identifier provided by client, within the ancestor /// dataset scope. Ensures idempotency for expensive long running operations. /// Server-generated if unspecified. Up to 128 characters long. This is /// returned as google.longrunning.Operation.name in the response. 
Note that /// this field must not be set if the desired input config is /// catalog_inline_source. #[prost(string, tag = "2")] pub request_id: ::prost::alloc::string::String, /// Required. The desired input location of the data. #[prost(message, optional, tag = "3")] pub input_config: ::core::option::Option<InputConfig>, /// Optional. The desired location of errors incurred during the Import. #[prost(message, optional, tag = "4")] pub errors_config: ::core::option::Option<ImportErrorsConfig>, } /// The input config source. #[derive(Clone, PartialEq, ::prost::Message)] pub struct InputConfig { /// Required. The source of the input. #[prost(oneof = "input_config::Source", tags = "1, 2, 3")] pub source: ::core::option::Option<input_config::Source>, } /// Nested message and enum types in `InputConfig`. pub mod input_config { /// Required. The source of the input. #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Source { /// The Inline source for the input content for Catalog items. #[prost(message, tag = "1")] CatalogInlineSource(super::CatalogInlineSource), /// Google Cloud Storage location for the input content. #[prost(message, tag = "2")] GcsSource(super::GcsSource), /// The Inline source for the input content for UserEvents. #[prost(message, tag = "3")] UserEventInlineSource(super::UserEventInlineSource), } } /// Metadata related to the progress of the Import operation. This will be /// returned by the google.longrunning.Operation.metadata field. #[derive(Clone, PartialEq, ::prost::Message)] pub struct ImportMetadata { /// Name of the operation. #[prost(string, tag = "5")] pub operation_name: ::prost::alloc::string::String, /// Id of the request / operation. This is parroting back the requestId that /// was passed in the request. #[prost(string, tag = "3")] pub request_id: ::prost::alloc::string::String, /// Operation create time. 
#[prost(message, optional, tag = "4")] pub create_time: ::core::option::Option<::prost_types::Timestamp>, /// Count of entries that were processed successfully. #[prost(int64, tag = "1")] pub success_count: i64, /// Count of entries that encountered errors while processing. #[prost(int64, tag = "2")] pub failure_count: i64, /// Operation last update time. If the operation is done, this is also the /// finish time. #[prost(message, optional, tag = "6")] pub update_time: ::core::option::Option<::prost_types::Timestamp>, } /// Response of the ImportCatalogItemsRequest. If the long running /// operation is done, then this message is returned by the /// google.longrunning.Operations.response field if the operation was successful. #[derive(Clone, PartialEq, ::prost::Message)] pub struct ImportCatalogItemsResponse { /// A sample of errors encountered while processing the request. #[prost(message, repeated, tag = "1")] pub error_samples: ::prost::alloc::vec::Vec<super::super::super::rpc::Status>, /// Echoes the destination for the complete errors in the request if set. #[prost(message, optional, tag = "2")] pub errors_config: ::core::option::Option<ImportErrorsConfig>, } /// Response of the ImportUserEventsRequest. If the long running /// operation was successful, then this message is returned by the /// google.longrunning.Operations.response field if the operation was successful. #[derive(Clone, PartialEq, ::prost::Message)] pub struct ImportUserEventsResponse { /// A sample of errors encountered while processing the request. #[prost(message, repeated, tag = "1")] pub error_samples: ::prost::alloc::vec::Vec<super::super::super::rpc::Status>, /// Echoes the destination for the complete errors if this field was set in /// the request. #[prost(message, optional, tag = "2")] pub errors_config: ::core::option::Option<ImportErrorsConfig>, /// Aggregated statistics of user event import status. 
#[prost(message, optional, tag = "3")] pub import_summary: ::core::option::Option<UserEventImportSummary>, } /// A summary of import result. The UserEventImportSummary summarizes /// the import status for user events. #[derive(Clone, PartialEq, ::prost::Message)] pub struct UserEventImportSummary { /// Count of user events imported with complete existing catalog information. #[prost(int64, tag = "1")] pub joined_events_count: i64, /// Count of user events imported, but with catalog information not found /// in the imported catalog. #[prost(int64, tag = "2")] pub unjoined_events_count: i64, } /// Request message for CreateCatalogItem method. #[derive(Clone, PartialEq, ::prost::Message)] pub struct CreateCatalogItemRequest { /// Required. The parent catalog resource name, such as /// `projects/*/locations/global/catalogs/default_catalog`. #[prost(string, tag = "1")] pub parent: ::prost::alloc::string::String, /// Required. The catalog item to create. #[prost(message, optional, tag = "2")] pub catalog_item: ::core::option::Option<CatalogItem>, } /// Request message for GetCatalogItem method. #[derive(Clone, PartialEq, ::prost::Message)] pub struct GetCatalogItemRequest { /// Required. Full resource name of catalog item, such as /// `projects/*/locations/global/catalogs/default_catalog/catalogitems/some_catalog_item_id`. #[prost(string, tag = "1")] pub name: ::prost::alloc::string::String, } /// Request message for ListCatalogItems method. #[derive(Clone, PartialEq, ::prost::Message)] pub struct ListCatalogItemsRequest { /// Required. The parent catalog resource name, such as /// `projects/*/locations/global/catalogs/default_catalog`. #[prost(string, tag = "1")] pub parent: ::prost::alloc::string::String, /// Optional. Maximum number of results to return per page. If zero, the /// service will choose a reasonable default. #[prost(int32, tag = "2")] pub page_size: i32, /// Optional. The previous ListCatalogItemsResponse.next_page_token. 
#[prost(string, tag = "3")] pub page_token: ::prost::alloc::string::String, /// Optional. A filter to apply on the list results. #[prost(string, tag = "4")] pub filter: ::prost::alloc::string::String, } /// Response message for ListCatalogItems method. #[derive(Clone, PartialEq, ::prost::Message)] pub struct ListCatalogItemsResponse { /// The catalog items. #[prost(message, repeated, tag = "1")] pub catalog_items: ::prost::alloc::vec::Vec<CatalogItem>, /// If empty, the list is complete. If nonempty, the token to pass to the next /// request's ListCatalogItemRequest.page_token. #[prost(string, tag = "2")] pub next_page_token: ::prost::alloc::string::String, } /// Request message for UpdateCatalogItem method. #[derive(Clone, PartialEq, ::prost::Message)] pub struct UpdateCatalogItemRequest { /// Required. Full resource name of catalog item, such as /// "projects/*/locations/global/catalogs/default_catalog/catalogItems/some_catalog_item_id". #[prost(string, tag = "1")] pub name: ::prost::alloc::string::String, /// Required. The catalog item to update/create. The 'catalog_item_id' field /// has to match that in the 'name'. #[prost(message, optional, tag = "2")] pub catalog_item: ::core::option::Option<CatalogItem>, /// Optional. Indicates which fields in the provided 'item' to update. If not /// set, will by default update all fields. #[prost(message, optional, tag = "3")] pub update_mask: ::core::option::Option<::prost_types::FieldMask>, } /// Request message for DeleteCatalogItem method. #[derive(Clone, PartialEq, ::prost::Message)] pub struct DeleteCatalogItemRequest { /// Required. Full resource name of catalog item, such as /// `projects/*/locations/global/catalogs/default_catalog/catalogItems/some_catalog_item_id`. 
#[prost(string, tag = "1")] pub name: ::prost::alloc::string::String, } #[doc = r" Generated client implementations."] pub mod catalog_service_client { #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] use tonic::codegen::*; #[doc = " Service for ingesting catalog information of the customer's website."] #[derive(Debug, Clone)] pub struct CatalogServiceClient<T> { inner: tonic::client::Grpc<T>, } impl<T> CatalogServiceClient<T> where T: tonic::client::GrpcService<tonic::body::BoxBody>, T::ResponseBody: Body + Send + Sync + 'static, T::Error: Into<StdError>, <T::ResponseBody as Body>::Error: Into<StdError> + Send, { pub fn new(inner: T) -> Self { let inner = tonic::client::Grpc::new(inner); Self { inner } } pub fn with_interceptor<F>( inner: T, interceptor: F, ) -> CatalogServiceClient<InterceptedService<T, F>> where F: FnMut(tonic::Request<()>) -> Result<tonic::Request<()>, tonic::Status>, T: tonic::codegen::Service< http::Request<tonic::body::BoxBody>, Response = http::Response< <T as tonic::client::GrpcService<tonic::body::BoxBody>>::ResponseBody, >, >, <T as tonic::codegen::Service<http::Request<tonic::body::BoxBody>>>::Error: Into<StdError> + Send + Sync, { CatalogServiceClient::new(InterceptedService::new(inner, interceptor)) } #[doc = r" Compress requests with `gzip`."] #[doc = r""] #[doc = r" This requires the server to support it otherwise it might respond with an"] #[doc = r" error."] pub fn send_gzip(mut self) -> Self { self.inner = self.inner.send_gzip(); self } #[doc = r" Enable decompressing responses with `gzip`."] pub fn accept_gzip(mut self) -> Self { self.inner = self.inner.accept_gzip(); self } #[doc = " Creates a catalog item."] pub async fn create_catalog_item( &mut self, request: impl tonic::IntoRequest<super::CreateCatalogItemRequest>, ) -> Result<tonic::Response<super::CatalogItem>, tonic::Status> { self.inner.ready().await.map_err(|e| { tonic::Status::new( tonic::Code::Unknown, format!("Service was not ready: {}", 
e.into()), ) })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/google.cloud.recommendationengine.v1beta1.CatalogService/CreateCatalogItem", ); self.inner.unary(request.into_request(), path, codec).await } #[doc = " Gets a specific catalog item."] pub async fn get_catalog_item( &mut self, request: impl tonic::IntoRequest<super::GetCatalogItemRequest>, ) -> Result<tonic::Response<super::CatalogItem>, tonic::Status> { self.inner.ready().await.map_err(|e| { tonic::Status::new( tonic::Code::Unknown, format!("Service was not ready: {}", e.into()), ) })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/google.cloud.recommendationengine.v1beta1.CatalogService/GetCatalogItem", ); self.inner.unary(request.into_request(), path, codec).await } #[doc = " Gets a list of catalog items."] pub async fn list_catalog_items( &mut self, request: impl tonic::IntoRequest<super::ListCatalogItemsRequest>, ) -> Result<tonic::Response<super::ListCatalogItemsResponse>, tonic::Status> { self.inner.ready().await.map_err(|e| { tonic::Status::new( tonic::Code::Unknown, format!("Service was not ready: {}", e.into()), ) })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/google.cloud.recommendationengine.v1beta1.CatalogService/ListCatalogItems", ); self.inner.unary(request.into_request(), path, codec).await } #[doc = " Updates a catalog item. Partial updating is supported. 
Non-existing"] #[doc = " items will be created."] pub async fn update_catalog_item( &mut self, request: impl tonic::IntoRequest<super::UpdateCatalogItemRequest>, ) -> Result<tonic::Response<super::CatalogItem>, tonic::Status> { self.inner.ready().await.map_err(|e| { tonic::Status::new( tonic::Code::Unknown, format!("Service was not ready: {}", e.into()), ) })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/google.cloud.recommendationengine.v1beta1.CatalogService/UpdateCatalogItem", ); self.inner.unary(request.into_request(), path, codec).await } #[doc = " Deletes a catalog item."] pub async fn delete_catalog_item( &mut self, request: impl tonic::IntoRequest<super::DeleteCatalogItemRequest>, ) -> Result<tonic::Response<()>, tonic::Status> { self.inner.ready().await.map_err(|e| { tonic::Status::new( tonic::Code::Unknown, format!("Service was not ready: {}", e.into()), ) })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/google.cloud.recommendationengine.v1beta1.CatalogService/DeleteCatalogItem", ); self.inner.unary(request.into_request(), path, codec).await } #[doc = " Bulk import of multiple catalog items. Request processing may be"] #[doc = " synchronous. No partial updating supported. Non-existing items will be"] #[doc = " created."] #[doc = ""] #[doc = " Operation.response is of type ImportResponse. 
Note that it is"] #[doc = " possible for a subset of the items to be successfully updated."] pub async fn import_catalog_items( &mut self, request: impl tonic::IntoRequest<super::ImportCatalogItemsRequest>, ) -> Result< tonic::Response<super::super::super::super::longrunning::Operation>, tonic::Status, > { self.inner.ready().await.map_err(|e| { tonic::Status::new( tonic::Code::Unknown, format!("Service was not ready: {}", e.into()), ) })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/google.cloud.recommendationengine.v1beta1.CatalogService/ImportCatalogItems", ); self.inner.unary(request.into_request(), path, codec).await } } } /// Registered Api Key. #[derive(Clone, PartialEq, ::prost::Message)] pub struct PredictionApiKeyRegistration { /// The API key. #[prost(string, tag = "1")] pub api_key: ::prost::alloc::string::String, } /// Request message for the `CreatePredictionApiKeyRegistration` method. #[derive(Clone, PartialEq, ::prost::Message)] pub struct CreatePredictionApiKeyRegistrationRequest { /// Required. The parent resource path. /// `projects/*/locations/global/catalogs/default_catalog/eventStores/default_event_store`. #[prost(string, tag = "1")] pub parent: ::prost::alloc::string::String, /// Required. The prediction API key registration. #[prost(message, optional, tag = "2")] pub prediction_api_key_registration: ::core::option::Option<PredictionApiKeyRegistration>, } /// Request message for the `ListPredictionApiKeyRegistrations`. #[derive(Clone, PartialEq, ::prost::Message)] pub struct ListPredictionApiKeyRegistrationsRequest { /// Required. The parent placement resource name such as /// `projects/1234/locations/global/catalogs/default_catalog/eventStores/default_event_store` #[prost(string, tag = "1")] pub parent: ::prost::alloc::string::String, /// Optional. Maximum number of results to return per page. If unset, the /// service will choose a reasonable default. 
#[prost(int32, tag = "2")] pub page_size: i32, /// Optional. The previous `ListPredictionApiKeyRegistration.nextPageToken`. #[prost(string, tag = "3")] pub page_token: ::prost::alloc::string::String, } /// Response message for the `ListPredictionApiKeyRegistrations`. #[derive(Clone, PartialEq, ::prost::Message)] pub struct ListPredictionApiKeyRegistrationsResponse { /// The list of registered API keys. #[prost(message, repeated, tag = "1")] pub prediction_api_key_registrations: ::prost::alloc::vec::Vec<PredictionApiKeyRegistration>, /// If empty, the list is complete. If nonempty, pass the token to the next /// request's `ListPredictionApiKeysRegistrationsRequest.pageToken`. #[prost(string, tag = "2")] pub next_page_token: ::prost::alloc::string::String, } /// Request message for `DeletePredictionApiKeyRegistration` method. #[derive(Clone, PartialEq, ::prost::Message)] pub struct DeletePredictionApiKeyRegistrationRequest { /// Required. The API key to unregister including full resource path. /// `projects/*/locations/global/catalogs/default_catalog/eventStores/default_event_store/predictionApiKeyRegistrations/<YOUR_API_KEY>` #[prost(string, tag = "1")] pub name: ::prost::alloc::string::String, } #[doc = r" Generated client implementations."] pub mod prediction_api_key_registry_client { #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] use tonic::codegen::*; #[doc = " Service for registering API keys for use with the `predict` method. If you"] #[doc = " use an API key to request predictions, you must first register the API key."] #[doc = " Otherwise, your prediction request is rejected. If you use OAuth to"] #[doc = " authenticate your `predict` method call, you do not need to register an API"] #[doc = " key. 
You can register up to 20 API keys per project."] #[derive(Debug, Clone)] pub struct PredictionApiKeyRegistryClient<T> { inner: tonic::client::Grpc<T>, } impl<T> PredictionApiKeyRegistryClient<T> where T: tonic::client::GrpcService<tonic::body::BoxBody>, T::ResponseBody: Body + Send + Sync + 'static, T::Error: Into<StdError>, <T::ResponseBody as Body>::Error: Into<StdError> + Send, { pub fn new(inner: T) -> Self { let inner = tonic::client::Grpc::new(inner); Self { inner } } pub fn with_interceptor<F>( inner: T, interceptor: F, ) -> PredictionApiKeyRegistryClient<InterceptedService<T, F>> where F: FnMut(tonic::Request<()>) -> Result<tonic::Request<()>, tonic::Status>, T: tonic::codegen::Service< http::Request<tonic::body::BoxBody>, Response = http::Response< <T as tonic::client::GrpcService<tonic::body::BoxBody>>::ResponseBody, >, >, <T as tonic::codegen::Service<http::Request<tonic::body::BoxBody>>>::Error: Into<StdError> + Send + Sync, { PredictionApiKeyRegistryClient::new(InterceptedService::new(inner, interceptor)) } #[doc = r" Compress requests with `gzip`."] #[doc = r""] #[doc = r" This requires the server to support it otherwise it might respond with an"] #[doc = r" error."] pub fn send_gzip(mut self) -> Self { self.inner = self.inner.send_gzip(); self } #[doc = r" Enable decompressing responses with `gzip`."] pub fn accept_gzip(mut self) -> Self { self.inner = self.inner.accept_gzip(); self } #[doc = " Register an API key for use with predict method."] pub async fn create_prediction_api_key_registration( &mut self, request: impl tonic::IntoRequest<super::CreatePredictionApiKeyRegistrationRequest>, ) -> Result<tonic::Response<super::PredictionApiKeyRegistration>, tonic::Status> { self.inner.ready().await.map_err(|e| { tonic::Status::new( tonic::Code::Unknown, format!("Service was not ready: {}", e.into()), ) })?; let codec = tonic::codec::ProstCodec::default(); let path = http :: uri :: PathAndQuery :: from_static 
("/google.cloud.recommendationengine.v1beta1.PredictionApiKeyRegistry/CreatePredictionApiKeyRegistration") ; self.inner.unary(request.into_request(), path, codec).await } #[doc = " List the registered apiKeys for use with predict method."] pub async fn list_prediction_api_key_registrations( &mut self, request: impl tonic::IntoRequest<super::ListPredictionApiKeyRegistrationsRequest>, ) -> Result<tonic::Response<super::ListPredictionApiKeyRegistrationsResponse>, tonic::Status> { self.inner.ready().await.map_err(|e| { tonic::Status::new( tonic::Code::Unknown, format!("Service was not ready: {}", e.into()), ) })?; let codec = tonic::codec::ProstCodec::default(); let path = http :: uri :: PathAndQuery :: from_static ("/google.cloud.recommendationengine.v1beta1.PredictionApiKeyRegistry/ListPredictionApiKeyRegistrations") ; self.inner.unary(request.into_request(), path, codec).await } #[doc = " Unregister an apiKey from using for predict method."] pub async fn delete_prediction_api_key_registration( &mut self, request: impl tonic::IntoRequest<super::DeletePredictionApiKeyRegistrationRequest>, ) -> Result<tonic::Response<()>, tonic::Status> { self.inner.ready().await.map_err(|e| { tonic::Status::new( tonic::Code::Unknown, format!("Service was not ready: {}", e.into()), ) })?; let codec = tonic::codec::ProstCodec::default(); let path = http :: uri :: PathAndQuery :: from_static ("/google.cloud.recommendationengine.v1beta1.PredictionApiKeyRegistry/DeletePredictionApiKeyRegistration") ; self.inner.unary(request.into_request(), path, codec).await } } } /// Request message for Predict method. #[derive(Clone, PartialEq, ::prost::Message)] pub struct PredictRequest { /// Required. Full resource name of the format: /// `{name=projects/*/locations/global/catalogs/default_catalog/eventStores/default_event_store/placements/*}` /// The id of the recommendation engine placement. This id is used to identify /// the set of models that will be used to make the prediction. 
/// /// We currently support three placements with the following IDs by default: /// /// * `shopping_cart`: Predicts items frequently bought together with one or /// more catalog items in the same shopping session. Commonly displayed after /// `add-to-cart` events, on product detail pages, or on the shopping cart /// page. /// /// * `home_page`: Predicts the next product that a user will most likely /// engage with or purchase based on the shopping or viewing history of the /// specified `userId` or `visitorId`. For example - Recommendations for you. /// /// * `product_detail`: Predicts the next product that a user will most likely /// engage with or purchase. The prediction is based on the shopping or /// viewing history of the specified `userId` or `visitorId` and its /// relevance to a specified `CatalogItem`. Typically used on product detail /// pages. For example - More items like this. /// /// * `recently_viewed_default`: Returns up to 75 items recently viewed by the /// specified `userId` or `visitorId`, most recent ones first. Returns /// nothing if neither of them has viewed any items yet. For example - /// Recently viewed. /// /// The full list of available placements can be seen at /// https://console.cloud.google.com/recommendation/datafeeds/default_catalog/dashboard #[prost(string, tag = "1")] pub name: ::prost::alloc::string::String, /// Required. Context about the user, what they are looking at and what action /// they took to trigger the predict request. Note that this user event detail /// won't be ingested to userEvent logs. Thus, a separate userEvent write /// request is required for event logging. #[prost(message, optional, tag = "2")] pub user_event: ::core::option::Option<UserEvent>, /// Optional. Maximum number of results to return per page. Set this property /// to the number of prediction results required. If zero, the service will /// choose a reasonable default. #[prost(int32, tag = "7")] pub page_size: i32, /// Optional. 
The previous PredictResponse.next_page_token. #[prost(string, tag = "8")] pub page_token: ::prost::alloc::string::String, /// Optional. Filter for restricting prediction results. Accepts values for /// tags and the `filterOutOfStockItems` flag. /// /// * Tag expressions. Restricts predictions to items that match all of the /// specified tags. Boolean operators `OR` and `NOT` are supported if the /// expression is enclosed in parentheses, and must be separated from the /// tag values by a space. `-"tagA"` is also supported and is equivalent to /// `NOT "tagA"`. Tag values must be double quoted UTF-8 encoded strings /// with a size limit of 1 KiB. /// /// * filterOutOfStockItems. Restricts predictions to items that do not have a /// stockState value of OUT_OF_STOCK. /// /// Examples: /// /// * tag=("Red" OR "Blue") tag="New-Arrival" tag=(NOT "promotional") /// * filterOutOfStockItems tag=(-"promotional") /// * filterOutOfStockItems #[prost(string, tag = "3")] pub filter: ::prost::alloc::string::String, /// Optional. Use dryRun mode for this prediction query. If set to true, a /// dummy model will be used that returns arbitrary catalog items. /// Note that the dryRun mode should only be used for testing the API, or if /// the model is not ready. #[prost(bool, tag = "4")] pub dry_run: bool, /// Optional. Additional domain specific parameters for the predictions. /// /// Allowed values: /// /// * `returnCatalogItem`: Boolean. If set to true, the associated catalogItem /// object will be returned in the /// `PredictResponse.PredictionResult.itemMetadata` object in the method /// response. /// * `returnItemScore`: Boolean. If set to true, the prediction 'score' /// corresponding to each returned item will be set in the `metadata` /// field in the prediction response. The given 'score' indicates the /// probability of an item being clicked/purchased given the user's context /// and history. 
#[prost(map = "string, message", tag = "6")] pub params: ::std::collections::HashMap<::prost::alloc::string::String, ::prost_types::Value>, /// Optional. The labels for the predict request. /// /// * Label keys can contain lowercase letters, digits and hyphens, must start /// with a letter, and must end with a letter or digit. /// * Non-zero label values can contain lowercase letters, digits and hyphens, /// must start with a letter, and must end with a letter or digit. /// * No more than 64 labels can be associated with a given request. /// /// See https://goo.gl/xmQnxf for more information on and examples of labels. #[prost(map = "string, string", tag = "9")] pub labels: ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, } /// Response message for predict method. #[derive(Clone, PartialEq, ::prost::Message)] pub struct PredictResponse { /// A list of recommended items. The order represents the ranking (from the /// most relevant item to the least). #[prost(message, repeated, tag = "1")] pub results: ::prost::alloc::vec::Vec<predict_response::PredictionResult>, /// A unique recommendation token. This should be included in the user event /// logs resulting from this recommendation, which enables accurate attribution /// of recommendation model performance. #[prost(string, tag = "2")] pub recommendation_token: ::prost::alloc::string::String, /// IDs of items in the request that were missing from the catalog. #[prost(string, repeated, tag = "3")] pub items_missing_in_catalog: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, /// True if the dryRun property was set in the request. #[prost(bool, tag = "4")] pub dry_run: bool, /// Additional domain specific prediction response metadata. #[prost(map = "string, message", tag = "5")] pub metadata: ::std::collections::HashMap<::prost::alloc::string::String, ::prost_types::Value>, /// If empty, the list is complete. 
If nonempty, the token to pass to the next /// request's PredictRequest.page_token. #[prost(string, tag = "6")] pub next_page_token: ::prost::alloc::string::String, } /// Nested message and enum types in `PredictResponse`. pub mod predict_response { /// PredictionResult represents the recommendation prediction results. #[derive(Clone, PartialEq, ::prost::Message)] pub struct PredictionResult { /// ID of the recommended catalog item #[prost(string, tag = "1")] pub id: ::prost::alloc::string::String, /// Additional item metadata / annotations. /// /// Possible values: /// /// * `catalogItem`: JSON representation of the catalogItem. Will be set if /// `returnCatalogItem` is set to true in `PredictRequest.params`. /// * `score`: Prediction score in double value. Will be set if /// `returnItemScore` is set to true in `PredictRequest.params`. #[prost(map = "string, message", tag = "2")] pub item_metadata: ::std::collections::HashMap<::prost::alloc::string::String, ::prost_types::Value>, } } #[doc = r" Generated client implementations."] pub mod prediction_service_client { #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] use tonic::codegen::*; #[doc = " Service for making recommendation prediction."] #[derive(Debug, Clone)] pub struct PredictionServiceClient<T> { inner: tonic::client::Grpc<T>, } impl<T> PredictionServiceClient<T> where T: tonic::client::GrpcService<tonic::body::BoxBody>, T::ResponseBody: Body + Send + Sync + 'static, T::Error: Into<StdError>, <T::ResponseBody as Body>::Error: Into<StdError> + Send, { pub fn new(inner: T) -> Self { let inner = tonic::client::Grpc::new(inner); Self { inner } } pub fn with_interceptor<F>( inner: T, interceptor: F, ) -> PredictionServiceClient<InterceptedService<T, F>> where F: FnMut(tonic::Request<()>) -> Result<tonic::Request<()>, tonic::Status>, T: tonic::codegen::Service< http::Request<tonic::body::BoxBody>, Response = http::Response< <T as 
tonic::client::GrpcService<tonic::body::BoxBody>>::ResponseBody, >, >, <T as tonic::codegen::Service<http::Request<tonic::body::BoxBody>>>::Error: Into<StdError> + Send + Sync, { PredictionServiceClient::new(InterceptedService::new(inner, interceptor)) } #[doc = r" Compress requests with `gzip`."] #[doc = r""] #[doc = r" This requires the server to support it otherwise it might respond with an"] #[doc = r" error."] pub fn send_gzip(mut self) -> Self { self.inner = self.inner.send_gzip(); self } #[doc = r" Enable decompressing responses with `gzip`."] pub fn accept_gzip(mut self) -> Self { self.inner = self.inner.accept_gzip(); self } #[doc = " Makes a recommendation prediction. If using API Key based authentication,"] #[doc = " the API Key must be registered using the"] #[doc = " [PredictionApiKeyRegistry][google.cloud.recommendationengine.v1beta1.PredictionApiKeyRegistry]"] #[doc = " service. [Learn more](/recommendations-ai/docs/setting-up#register-key)."] pub async fn predict( &mut self, request: impl tonic::IntoRequest<super::PredictRequest>, ) -> Result<tonic::Response<super::PredictResponse>, tonic::Status> { self.inner.ready().await.map_err(|e| { tonic::Status::new( tonic::Code::Unknown, format!("Service was not ready: {}", e.into()), ) })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/google.cloud.recommendationengine.v1beta1.PredictionService/Predict", ); self.inner.unary(request.into_request(), path, codec).await } } } /// Request message for PurgeUserEvents method. #[derive(Clone, PartialEq, ::prost::Message)] pub struct PurgeUserEventsRequest { /// Required. The resource name of the event_store under which the events are /// created. The format is /// `projects/${projectId}/locations/global/catalogs/${catalogId}/eventStores/${eventStoreId}` #[prost(string, tag = "1")] pub parent: ::prost::alloc::string::String, /// Required. The filter string to specify the events to be deleted. 
Empty /// string filter is not allowed. This filter can also be used with /// ListUserEvents API to list events that will be deleted. The eligible fields /// for filtering are: /// * eventType - UserEvent.eventType field of type string. /// * eventTime - in ISO 8601 "zulu" format. /// * visitorId - field of type string. Specifying this will delete all events /// associated with a visitor. /// * userId - field of type string. Specifying this will delete all events /// associated with a user. /// Example 1: Deleting all events in a time range. /// `eventTime > "2012-04-23T18:25:43.511Z" eventTime < /// "2012-04-23T18:30:43.511Z"` /// Example 2: Deleting specific eventType in time range. /// `eventTime > "2012-04-23T18:25:43.511Z" eventType = "detail-page-view"` /// Example 3: Deleting all events for a specific visitor /// `visitorId = visitor1024` /// The filtering fields are assumed to have an implicit AND. #[prost(string, tag = "2")] pub filter: ::prost::alloc::string::String, /// Optional. The default value is false. Override this flag to true to /// actually perform the purge. If the field is not set to true, a sampling of /// events to be deleted will be returned. #[prost(bool, tag = "3")] pub force: bool, } /// Metadata related to the progress of the PurgeUserEvents operation. /// This will be returned by the google.longrunning.Operation.metadata field. #[derive(Clone, PartialEq, ::prost::Message)] pub struct PurgeUserEventsMetadata { /// The ID of the request / operation. #[prost(string, tag = "1")] pub operation_name: ::prost::alloc::string::String, /// Operation create time. #[prost(message, optional, tag = "2")] pub create_time: ::core::option::Option<::prost_types::Timestamp>, } /// Response of the PurgeUserEventsRequest. If the long running operation is /// successfully done, then this message is returned by the /// google.longrunning.Operations.response field. 
#[derive(Clone, PartialEq, ::prost::Message)] pub struct PurgeUserEventsResponse { /// The total count of events purged as a result of the operation. #[prost(int64, tag = "1")] pub purged_events_count: i64, /// A sampling of events deleted (or will be deleted) depending on the `force` /// property in the request. Max of 500 items will be returned. #[prost(message, repeated, tag = "2")] pub user_events_sample: ::prost::alloc::vec::Vec<UserEvent>, } /// Request message for WriteUserEvent method. #[derive(Clone, PartialEq, ::prost::Message)] pub struct WriteUserEventRequest { /// Required. The parent eventStore resource name, such as /// `projects/1234/locations/global/catalogs/default_catalog/eventStores/default_event_store`. #[prost(string, tag = "1")] pub parent: ::prost::alloc::string::String, /// Required. User event to write. #[prost(message, optional, tag = "2")] pub user_event: ::core::option::Option<UserEvent>, } /// Request message for CollectUserEvent method. #[derive(Clone, PartialEq, ::prost::Message)] pub struct CollectUserEventRequest { /// Required. The parent eventStore name, such as /// `projects/1234/locations/global/catalogs/default_catalog/eventStores/default_event_store`. #[prost(string, tag = "1")] pub parent: ::prost::alloc::string::String, /// Required. URL encoded UserEvent proto. #[prost(string, tag = "2")] pub user_event: ::prost::alloc::string::String, /// Optional. The url including cgi-parameters but excluding the hash fragment. /// The URL must be truncated to 1.5K bytes to conservatively be under the 2K /// bytes. This is often more useful than the referer url, because many /// browsers only send the domain for 3rd party requests. #[prost(string, tag = "3")] pub uri: ::prost::alloc::string::String, /// Optional. The event timestamp in milliseconds. This prevents browser /// caching of otherwise identical get requests. The name is abbreviated to /// reduce the payload bytes. 
#[prost(int64, tag = "4")] pub ets: i64, } /// Request message for ListUserEvents method. #[derive(Clone, PartialEq, ::prost::Message)] pub struct ListUserEventsRequest { /// Required. The parent eventStore resource name, such as /// `projects/*/locations/*/catalogs/default_catalog/eventStores/default_event_store`. #[prost(string, tag = "1")] pub parent: ::prost::alloc::string::String, /// Optional. Maximum number of results to return per page. If zero, the /// service will choose a reasonable default. #[prost(int32, tag = "2")] pub page_size: i32, /// Optional. The previous ListUserEventsResponse.next_page_token. #[prost(string, tag = "3")] pub page_token: ::prost::alloc::string::String, /// Optional. Filtering expression to specify restrictions over /// returned events. This is a sequence of terms, where each term applies some /// kind of a restriction to the returned user events. Use this expression to /// restrict results to a specific time range, or filter events by eventType. /// eg: eventTime > "2012-04-23T18:25:43.511Z" eventsMissingCatalogItems /// eventTime<"2012-04-23T18:25:43.511Z" eventType=search /// /// We expect only 3 types of fields: /// /// * eventTime: this can be specified a maximum of 2 times, once with a /// less than operator and once with a greater than operator. The /// eventTime restrict should result in one contiguous valid eventTime /// range. /// /// * eventType: only 1 eventType restriction can be specified. /// /// * eventsMissingCatalogItems: specififying this will restrict results /// to events for which catalog items were not found in the catalog. The /// default behavior is to return only those events for which catalog /// items were found. 
/// /// Some examples of valid filters expressions: /// /// * Example 1: eventTime > "2012-04-23T18:25:43.511Z" /// eventTime < "2012-04-23T18:30:43.511Z" /// * Example 2: eventTime > "2012-04-23T18:25:43.511Z" /// eventType = detail-page-view /// * Example 3: eventsMissingCatalogItems /// eventType = search eventTime < "2018-04-23T18:30:43.511Z" /// * Example 4: eventTime > "2012-04-23T18:25:43.511Z" /// * Example 5: eventType = search /// * Example 6: eventsMissingCatalogItems #[prost(string, tag = "4")] pub filter: ::prost::alloc::string::String, } /// Response message for ListUserEvents method. #[derive(Clone, PartialEq, ::prost::Message)] pub struct ListUserEventsResponse { /// The user events. #[prost(message, repeated, tag = "1")] pub user_events: ::prost::alloc::vec::Vec<UserEvent>, /// If empty, the list is complete. If nonempty, the token to pass to the next /// request's ListUserEvents.page_token. #[prost(string, tag = "2")] pub next_page_token: ::prost::alloc::string::String, } #[doc = r" Generated client implementations."] pub mod user_event_service_client { #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] use tonic::codegen::*; #[doc = " Service for ingesting end user actions on the customer website."] #[derive(Debug, Clone)] pub struct UserEventServiceClient<T> { inner: tonic::client::Grpc<T>, } impl<T> UserEventServiceClient<T> where T: tonic::client::GrpcService<tonic::body::BoxBody>, T::ResponseBody: Body + Send + Sync + 'static, T::Error: Into<StdError>, <T::ResponseBody as Body>::Error: Into<StdError> + Send, { pub fn new(inner: T) -> Self { let inner = tonic::client::Grpc::new(inner); Self { inner } } pub fn with_interceptor<F>( inner: T, interceptor: F, ) -> UserEventServiceClient<InterceptedService<T, F>> where F: FnMut(tonic::Request<()>) -> Result<tonic::Request<()>, tonic::Status>, T: tonic::codegen::Service< http::Request<tonic::body::BoxBody>, Response = http::Response< <T as 
tonic::client::GrpcService<tonic::body::BoxBody>>::ResponseBody, >, >, <T as tonic::codegen::Service<http::Request<tonic::body::BoxBody>>>::Error: Into<StdError> + Send + Sync, { UserEventServiceClient::new(InterceptedService::new(inner, interceptor)) } #[doc = r" Compress requests with `gzip`."] #[doc = r""] #[doc = r" This requires the server to support it otherwise it might respond with an"] #[doc = r" error."] pub fn send_gzip(mut self) -> Self { self.inner = self.inner.send_gzip(); self } #[doc = r" Enable decompressing responses with `gzip`."] pub fn accept_gzip(mut self) -> Self { self.inner = self.inner.accept_gzip(); self } #[doc = " Writes a single user event."] pub async fn write_user_event( &mut self, request: impl tonic::IntoRequest<super::WriteUserEventRequest>, ) -> Result<tonic::Response<super::UserEvent>, tonic::Status> { self.inner.ready().await.map_err(|e| { tonic::Status::new( tonic::Code::Unknown, format!("Service was not ready: {}", e.into()), ) })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/google.cloud.recommendationengine.v1beta1.UserEventService/WriteUserEvent", ); self.inner.unary(request.into_request(), path, codec).await } #[doc = " Writes a single user event from the browser. 
This uses a GET request to"] #[doc = " due to browser restriction of POST-ing to a 3rd party domain."] #[doc = ""] #[doc = " This method is used only by the Recommendations AI JavaScript pixel."] #[doc = " Users should not call this method directly."] pub async fn collect_user_event( &mut self, request: impl tonic::IntoRequest<super::CollectUserEventRequest>, ) -> Result<tonic::Response<super::super::super::super::api::HttpBody>, tonic::Status> { self.inner.ready().await.map_err(|e| { tonic::Status::new( tonic::Code::Unknown, format!("Service was not ready: {}", e.into()), ) })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/google.cloud.recommendationengine.v1beta1.UserEventService/CollectUserEvent", ); self.inner.unary(request.into_request(), path, codec).await } #[doc = " Gets a list of user events within a time range, with potential filtering."] pub async fn list_user_events( &mut self, request: impl tonic::IntoRequest<super::ListUserEventsRequest>, ) -> Result<tonic::Response<super::ListUserEventsResponse>, tonic::Status> { self.inner.ready().await.map_err(|e| { tonic::Status::new( tonic::Code::Unknown, format!("Service was not ready: {}", e.into()), ) })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/google.cloud.recommendationengine.v1beta1.UserEventService/ListUserEvents", ); self.inner.unary(request.into_request(), path, codec).await } #[doc = " Deletes permanently all user events specified by the filter provided."] #[doc = " Depending on the number of events specified by the filter, this operation"] #[doc = " could take hours or days to complete. 
To test a filter, use the list"] #[doc = " command first."] pub async fn purge_user_events( &mut self, request: impl tonic::IntoRequest<super::PurgeUserEventsRequest>, ) -> Result< tonic::Response<super::super::super::super::longrunning::Operation>, tonic::Status, > { self.inner.ready().await.map_err(|e| { tonic::Status::new( tonic::Code::Unknown, format!("Service was not ready: {}", e.into()), ) })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/google.cloud.recommendationengine.v1beta1.UserEventService/PurgeUserEvents", ); self.inner.unary(request.into_request(), path, codec).await } #[doc = " Bulk import of User events. Request processing might be"] #[doc = " synchronous. Events that already exist are skipped."] #[doc = " Use this method for backfilling historical user events."] #[doc = ""] #[doc = " Operation.response is of type ImportResponse. Note that it is"] #[doc = " possible for a subset of the items to be successfully inserted."] #[doc = " Operation.metadata is of type ImportMetadata."] pub async fn import_user_events( &mut self, request: impl tonic::IntoRequest<super::ImportUserEventsRequest>, ) -> Result< tonic::Response<super::super::super::super::longrunning::Operation>, tonic::Status, > { self.inner.ready().await.map_err(|e| { tonic::Status::new( tonic::Code::Unknown, format!("Service was not ready: {}", e.into()), ) })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/google.cloud.recommendationengine.v1beta1.UserEventService/ImportUserEvents", ); self.inner.unary(request.into_request(), path, codec).await } } }
48.31315
174
0.636216
484593dcf4d785073feb7427d64f22c47b900714
95,086
//! [Flexible target specification.](https://github.com/rust-lang/rfcs/pull/131) //! //! Rust targets a wide variety of usecases, and in the interest of flexibility, //! allows new target triples to be defined in configuration files. Most users //! will not need to care about these, but this is invaluable when porting Rust //! to a new platform, and allows for an unprecedented level of control over how //! the compiler works. //! //! # Using custom targets //! //! A target triple, as passed via `rustc --target=TRIPLE`, will first be //! compared against the list of built-in targets. This is to ease distributing //! rustc (no need for configuration files) and also to hold these built-in //! targets as immutable and sacred. If `TRIPLE` is not one of the built-in //! targets, rustc will check if a file named `TRIPLE` exists. If it does, it //! will be loaded as the target configuration. If the file does not exist, //! rustc will search each directory in the environment variable //! `RUST_TARGET_PATH` for a file named `TRIPLE.json`. The first one found will //! be loaded. If no file is found in any of those directories, a fatal error //! will be given. //! //! Projects defining their own targets should use //! `--target=path/to/my-awesome-platform.json` instead of adding to //! `RUST_TARGET_PATH`. //! //! # Defining a new target //! //! Targets are defined using [JSON](https://json.org/). The `Target` struct in //! this module defines the format the JSON file should take, though each //! underscore in the field names should be replaced with a hyphen (`-`) in the //! JSON file. Some fields are required in every target specification, such as //! `llvm-target`, `target-endian`, `target-pointer-width`, `data-layout`, //! `arch`, and `os`. In general, options passed to rustc with `-C` override //! the target's settings, though `target-feature` and `link-args` will *add* //! to the list specified by the target, rather than replace. 
use crate::abi::Endian; use crate::spec::abi::{lookup as lookup_abi, Abi}; use crate::spec::crt_objects::{CrtObjects, CrtObjectsFallback}; use rustc_data_structures::stable_hasher::{HashStable, StableHasher}; use rustc_serialize::json::{Json, ToJson}; use rustc_span::symbol::{sym, Symbol}; use std::collections::BTreeMap; use std::convert::TryFrom; use std::ops::{Deref, DerefMut}; use std::path::{Path, PathBuf}; use std::str::FromStr; use std::{fmt, io}; use rustc_macros::HashStable_Generic; pub mod abi; pub mod crt_objects; mod android_base; mod apple_base; mod apple_sdk_base; mod avr_gnu_base; mod bpf_base; mod dragonfly_base; mod freebsd_base; mod fuchsia_base; mod haiku_base; mod hermit_base; mod hermit_kernel_base; mod illumos_base; mod l4re_base; mod linux_base; mod linux_gnu_base; mod linux_kernel_base; mod linux_musl_base; mod linux_uclibc_base; mod msvc_base; mod netbsd_base; mod openbsd_base; mod redox_base; mod solaris_base; mod solid_base; mod thumb_base; mod uefi_msvc_base; mod vxworks_base; mod wasm_base; mod windows_gnu_base; mod windows_msvc_base; mod windows_uwp_gnu_base; mod windows_uwp_msvc_base; #[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)] pub enum LinkerFlavor { Em, Gcc, Ld, Msvc, Lld(LldFlavor), PtxLinker, BpfLinker, } #[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)] pub enum LldFlavor { Wasm, Ld64, Ld, Link, } impl LldFlavor { fn from_str(s: &str) -> Option<Self> { Some(match s { "darwin" => LldFlavor::Ld64, "gnu" => LldFlavor::Ld, "link" => LldFlavor::Link, "wasm" => LldFlavor::Wasm, _ => return None, }) } } impl ToJson for LldFlavor { fn to_json(&self) -> Json { match *self { LldFlavor::Ld64 => "darwin", LldFlavor::Ld => "gnu", LldFlavor::Link => "link", LldFlavor::Wasm => "wasm", } .to_json() } } impl ToJson for LinkerFlavor { fn to_json(&self) -> Json { self.desc().to_json() } } macro_rules! 
flavor_mappings { ($((($($flavor:tt)*), $string:expr),)*) => ( impl LinkerFlavor { pub const fn one_of() -> &'static str { concat!("one of: ", $($string, " ",)*) } pub fn from_str(s: &str) -> Option<Self> { Some(match s { $($string => $($flavor)*,)* _ => return None, }) } pub fn desc(&self) -> &str { match *self { $($($flavor)* => $string,)* } } } ) } flavor_mappings! { ((LinkerFlavor::Em), "em"), ((LinkerFlavor::Gcc), "gcc"), ((LinkerFlavor::Ld), "ld"), ((LinkerFlavor::Msvc), "msvc"), ((LinkerFlavor::PtxLinker), "ptx-linker"), ((LinkerFlavor::BpfLinker), "bpf-linker"), ((LinkerFlavor::Lld(LldFlavor::Wasm)), "wasm-ld"), ((LinkerFlavor::Lld(LldFlavor::Ld64)), "ld64.lld"), ((LinkerFlavor::Lld(LldFlavor::Ld)), "ld.lld"), ((LinkerFlavor::Lld(LldFlavor::Link)), "lld-link"), } #[derive(Clone, Copy, Debug, PartialEq, Hash, Encodable, Decodable, HashStable_Generic)] pub enum PanicStrategy { Unwind, Abort, } impl PanicStrategy { pub fn desc(&self) -> &str { match *self { PanicStrategy::Unwind => "unwind", PanicStrategy::Abort => "abort", } } pub fn desc_symbol(&self) -> Symbol { match *self { PanicStrategy::Unwind => sym::unwind, PanicStrategy::Abort => sym::abort, } } } impl ToJson for PanicStrategy { fn to_json(&self) -> Json { match *self { PanicStrategy::Abort => "abort".to_json(), PanicStrategy::Unwind => "unwind".to_json(), } } } #[derive(Clone, Copy, Debug, PartialEq, Hash, Encodable, Decodable)] pub enum RelroLevel { Full, Partial, Off, None, } impl RelroLevel { pub fn desc(&self) -> &str { match *self { RelroLevel::Full => "full", RelroLevel::Partial => "partial", RelroLevel::Off => "off", RelroLevel::None => "none", } } } impl FromStr for RelroLevel { type Err = (); fn from_str(s: &str) -> Result<RelroLevel, ()> { match s { "full" => Ok(RelroLevel::Full), "partial" => Ok(RelroLevel::Partial), "off" => Ok(RelroLevel::Off), "none" => Ok(RelroLevel::None), _ => Err(()), } } } impl ToJson for RelroLevel { fn to_json(&self) -> Json { match *self { RelroLevel::Full => 
"full".to_json(), RelroLevel::Partial => "partial".to_json(), RelroLevel::Off => "off".to_json(), RelroLevel::None => "None".to_json(), } } } #[derive(Clone, Copy, Debug, PartialEq, Hash, Encodable, Decodable)] pub enum MergeFunctions { Disabled, Trampolines, Aliases, } impl MergeFunctions { pub fn desc(&self) -> &str { match *self { MergeFunctions::Disabled => "disabled", MergeFunctions::Trampolines => "trampolines", MergeFunctions::Aliases => "aliases", } } } impl FromStr for MergeFunctions { type Err = (); fn from_str(s: &str) -> Result<MergeFunctions, ()> { match s { "disabled" => Ok(MergeFunctions::Disabled), "trampolines" => Ok(MergeFunctions::Trampolines), "aliases" => Ok(MergeFunctions::Aliases), _ => Err(()), } } } impl ToJson for MergeFunctions { fn to_json(&self) -> Json { match *self { MergeFunctions::Disabled => "disabled".to_json(), MergeFunctions::Trampolines => "trampolines".to_json(), MergeFunctions::Aliases => "aliases".to_json(), } } } #[derive(Clone, Copy, PartialEq, Hash, Debug)] pub enum RelocModel { Static, Pic, Pie, DynamicNoPic, Ropi, Rwpi, RopiRwpi, } impl FromStr for RelocModel { type Err = (); fn from_str(s: &str) -> Result<RelocModel, ()> { Ok(match s { "static" => RelocModel::Static, "pic" => RelocModel::Pic, "pie" => RelocModel::Pie, "dynamic-no-pic" => RelocModel::DynamicNoPic, "ropi" => RelocModel::Ropi, "rwpi" => RelocModel::Rwpi, "ropi-rwpi" => RelocModel::RopiRwpi, _ => return Err(()), }) } } impl ToJson for RelocModel { fn to_json(&self) -> Json { match *self { RelocModel::Static => "static", RelocModel::Pic => "pic", RelocModel::Pie => "pie", RelocModel::DynamicNoPic => "dynamic-no-pic", RelocModel::Ropi => "ropi", RelocModel::Rwpi => "rwpi", RelocModel::RopiRwpi => "ropi-rwpi", } .to_json() } } #[derive(Clone, Copy, PartialEq, Hash, Debug)] pub enum CodeModel { Tiny, Small, Kernel, Medium, Large, } impl FromStr for CodeModel { type Err = (); fn from_str(s: &str) -> Result<CodeModel, ()> { Ok(match s { "tiny" => 
            CodeModel::Tiny,
            "small" => CodeModel::Small,
            "kernel" => CodeModel::Kernel,
            "medium" => CodeModel::Medium,
            "large" => CodeModel::Large,
            _ => return Err(()),
        })
    }
}

impl ToJson for CodeModel {
    fn to_json(&self) -> Json {
        match *self {
            CodeModel::Tiny => "tiny",
            CodeModel::Small => "small",
            CodeModel::Kernel => "kernel",
            CodeModel::Medium => "medium",
            CodeModel::Large => "large",
        }
        .to_json()
    }
}

/// TLS (thread-local storage) model; similar to the `-ftls-model` option in GCC/Clang.
#[derive(Clone, Copy, PartialEq, Hash, Debug)]
pub enum TlsModel {
    GeneralDynamic,
    LocalDynamic,
    InitialExec,
    LocalExec,
}

impl FromStr for TlsModel {
    type Err = ();

    fn from_str(s: &str) -> Result<TlsModel, ()> {
        Ok(match s {
            // Note the "general" vs "global" difference. The model name is "general",
            // but the user-facing option name is "global" for consistency with other compilers.
            "global-dynamic" => TlsModel::GeneralDynamic,
            "local-dynamic" => TlsModel::LocalDynamic,
            "initial-exec" => TlsModel::InitialExec,
            "local-exec" => TlsModel::LocalExec,
            _ => return Err(()),
        })
    }
}

impl ToJson for TlsModel {
    fn to_json(&self) -> Json {
        match *self {
            // Serialized with the user-facing "global-dynamic" spelling so it
            // round-trips through `FromStr` above.
            TlsModel::GeneralDynamic => "global-dynamic",
            TlsModel::LocalDynamic => "local-dynamic",
            TlsModel::InitialExec => "initial-exec",
            TlsModel::LocalExec => "local-exec",
        }
        .to_json()
    }
}

/// Everything is flattened to a single enum to make the json encoding/decoding less annoying.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
pub enum LinkOutputKind {
    /// Dynamically linked non position-independent executable.
    DynamicNoPicExe,
    /// Dynamically linked position-independent executable.
    DynamicPicExe,
    /// Statically linked non position-independent executable.
    StaticNoPicExe,
    /// Statically linked position-independent executable.
    StaticPicExe,
    /// Regular dynamic library ("dynamically linked").
    DynamicDylib,
    /// Dynamic library with bundled libc ("statically linked").
    ///
    /// * Windows - not supported
    /// * macOS - supported, scattered object files
    /// * ELF - supported, scattered `*.dwo` files
    Unpacked,
}

impl SplitDebuginfo {
    /// Stable string form shared by `ToJson` and `Display`.
    fn as_str(&self) -> &'static str {
        match self {
            SplitDebuginfo::Off => "off",
            SplitDebuginfo::Packed => "packed",
            SplitDebuginfo::Unpacked => "unpacked",
        }
    }
}

impl FromStr for SplitDebuginfo {
    type Err = ();

    fn from_str(s: &str) -> Result<SplitDebuginfo, ()> {
        Ok(match s {
            "off" => SplitDebuginfo::Off,
            "unpacked" => SplitDebuginfo::Unpacked,
            "packed" => SplitDebuginfo::Packed,
            _ => return Err(()),
        })
    }
}

impl ToJson for SplitDebuginfo {
    fn to_json(&self) -> Json {
        self.as_str().to_json()
    }
}

impl fmt::Display for SplitDebuginfo {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(self.as_str())
    }
}

#[derive(Clone, Debug, PartialEq, Eq)]
pub enum StackProbeType {
    /// Don't emit any stack probes.
    None,
    /// It is harmless to use this option even on targets that do not have backend support for
    /// stack probes as the failure mode is the same as if no stack-probe option was specified in
    /// the first place.
    Inline,
    /// Call `__rust_probestack` whenever stack needs to be probed.
    Call,
    /// Use inline option for LLVM versions later than specified in `min_llvm_version_for_inline`
    /// and call `__rust_probestack` otherwise.
    InlineOrCall { min_llvm_version_for_inline: (u32, u32, u32) },
}

impl StackProbeType {
    // Deserializes from a JSON object of the shape
    //   { "kind": "...", "min-llvm-version-for-inline": [major, minor, patch] }
    // The version array is only consulted for kind == "inline-or-call"; missing
    // components default to 11.0.0. All error messages are `&str`s converted to
    // `String` by the `?` operator.
    fn from_json(json: &Json) -> Result<Self, String> {
        let object = json.as_object().ok_or_else(|| "expected a JSON object")?;
        let kind = object
            .get("kind")
            .and_then(|o| o.as_string())
            .ok_or_else(|| "expected `kind` to be a string")?;
        match kind {
            "none" => Ok(StackProbeType::None),
            "inline" => Ok(StackProbeType::Inline),
            "call" => Ok(StackProbeType::Call),
            "inline-or-call" => {
                let min_version = object
                    .get("min-llvm-version-for-inline")
                    .and_then(|o| o.as_array())
                    .ok_or_else(|| "expected `min-llvm-version-for-inline` to be an array")?;
                // Lazily validate each component: must be a JSON integer that fits in u32.
                let mut iter = min_version.into_iter().map(|v| {
                    let int = v.as_u64().ok_or_else(
                        || "expected `min-llvm-version-for-inline` values to be integers",
                    )?;
                    u32::try_from(int)
                        .map_err(|_| "`min-llvm-version-for-inline` values don't convert to u32")
                });
                // Absent components default to LLVM 11.0.0; present-but-invalid ones error out.
                let min_llvm_version_for_inline = (
                    iter.next().unwrap_or(Ok(11))?,
                    iter.next().unwrap_or(Ok(0))?,
                    iter.next().unwrap_or(Ok(0))?,
                );
                Ok(StackProbeType::InlineOrCall { min_llvm_version_for_inline })
            }
            _ => Err(String::from(
                "`kind` expected to be one of `none`, `inline`, `call` or `inline-or-call`",
            )),
        }
    }
}

impl ToJson for StackProbeType {
    // Inverse of `from_json`: always emits a "kind" key, plus the version triple
    // for the `InlineOrCall` variant.
    fn to_json(&self) -> Json {
        Json::Object(match self {
            StackProbeType::None => {
                vec![(String::from("kind"), "none".to_json())].into_iter().collect()
            }
            StackProbeType::Inline => {
                vec![(String::from("kind"), "inline".to_json())].into_iter().collect()
            }
            StackProbeType::Call => {
                vec![(String::from("kind"), "call".to_json())].into_iter().collect()
            }
            StackProbeType::InlineOrCall { min_llvm_version_for_inline } => vec![
                (String::from("kind"), "inline-or-call".to_json()),
                (
                    String::from("min-llvm-version-for-inline"),
                    min_llvm_version_for_inline.to_json(),
                ),
            ]
            .into_iter()
            .collect(),
        })
    }
}

bitflags::bitflags!
{
    /// Set of sanitizers, stored as a bitset so a target can support several at once.
    #[derive(Default, Encodable, Decodable)]
    pub struct SanitizerSet: u8 {
        const ADDRESS = 1 << 0;
        const LEAK = 1 << 1;
        const MEMORY = 1 << 2;
        const THREAD = 1 << 3;
        const HWADDRESS = 1 << 4;
        const CFI = 1 << 5;
    }
}

impl SanitizerSet {
    /// Return sanitizer's name.
    ///
    /// Returns `None` unless the set contains exactly one sanitizer flag.
    fn as_str(self) -> Option<&'static str> {
        Some(match self {
            SanitizerSet::ADDRESS => "address",
            SanitizerSet::CFI => "cfi",
            SanitizerSet::LEAK => "leak",
            SanitizerSet::MEMORY => "memory",
            SanitizerSet::THREAD => "thread",
            SanitizerSet::HWADDRESS => "hwaddress",
            _ => return None,
        })
    }
}

/// Formats a sanitizer set as a comma separated list of sanitizers' names.
impl fmt::Display for SanitizerSet {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut first = true;
        for s in *self {
            // `IntoIterator` below only yields known single flags, so `as_str`
            // can only fail if a new flag is added without updating it.
            let name = s.as_str().unwrap_or_else(|| panic!("unrecognized sanitizer {:?}", s));
            if !first {
                f.write_str(", ")?;
            }
            f.write_str(name)?;
            first = false;
        }
        Ok(())
    }
}

impl IntoIterator for SanitizerSet {
    type Item = SanitizerSet;
    type IntoIter = std::vec::IntoIter<SanitizerSet>;

    // Yields each single-sanitizer flag contained in the set, in a fixed order.
    fn into_iter(self) -> Self::IntoIter {
        [
            SanitizerSet::ADDRESS,
            SanitizerSet::CFI,
            SanitizerSet::LEAK,
            SanitizerSet::MEMORY,
            SanitizerSet::THREAD,
            SanitizerSet::HWADDRESS,
        ]
        .iter()
        .copied()
        .filter(|&s| self.contains(s))
        .collect::<Vec<_>>()
        .into_iter()
    }
}

impl<CTX> HashStable<CTX> for SanitizerSet {
    fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
        self.bits().hash_stable(ctx, hasher);
    }
}

impl ToJson for SanitizerSet {
    // Serializes as a JSON array of sanitizer names; an unrecognized flag
    // collapses the whole array to empty via `unwrap_or_default`.
    fn to_json(&self) -> Json {
        self.into_iter()
            .map(|v| Some(v.as_str()?.to_json()))
            .collect::<Option<Vec<_>>>()
            .unwrap_or_default()
            .to_json()
    }
}

#[derive(Clone, Copy, PartialEq, Hash, Debug)]
pub enum FramePointer {
    /// Forces the machine code generator to always preserve the frame pointers.
    Always,
    /// Forces the machine code generator to preserve the frame pointers except for the leaf
    /// functions (i.e. those that don't call other functions).
    NonLeaf,
    /// Allows the machine code generator to omit the frame pointers.
    ///
    /// This option does not guarantee that the frame pointers will be omitted.
    MayOmit,
}

impl FromStr for FramePointer {
    type Err = ();
    fn from_str(s: &str) -> Result<Self, ()> {
        Ok(match s {
            "always" => Self::Always,
            "non-leaf" => Self::NonLeaf,
            "may-omit" => Self::MayOmit,
            _ => return Err(()),
        })
    }
}

impl ToJson for FramePointer {
    fn to_json(&self) -> Json {
        match *self {
            Self::Always => "always",
            Self::NonLeaf => "non-leaf",
            Self::MayOmit => "may-omit",
        }
        .to_json()
    }
}

// Declares one `mod` per built-in target, the `TARGETS` list of triples, a
// `load_builtin` lookup from triple to `Target`, and one unit test per target.
macro_rules! supported_targets {
    ( $(($( $triple:literal, )+ $module:ident ),)+ ) => {
        $(mod $module;)+

        /// List of supported targets
        pub const TARGETS: &[&str] = &[$($($triple),+),+];

        fn load_builtin(target: &str) -> Option<Target> {
            let mut t = match target {
                $( $($triple)|+ => $module::target(), )+
                _ => return None,
            };
            t.is_builtin = true;
            debug!("got builtin target: {:?}", t);
            Some(t)
        }

        #[cfg(test)]
        mod tests {
            mod tests_impl;

            // Cannot put this into a separate file without duplication, make an exception.
            $(
                #[test] // `#[test]`
                fn $module() {
                    tests_impl::test_target(super::$module::target());
                }
            )+
        }
    };
}

supported_targets!
{
    // Linux (GNU libc, musl, uClibc) and Android
    ("x86_64-unknown-linux-gnu", x86_64_unknown_linux_gnu),
    ("x86_64-unknown-linux-gnux32", x86_64_unknown_linux_gnux32),
    ("i686-unknown-linux-gnu", i686_unknown_linux_gnu),
    ("i586-unknown-linux-gnu", i586_unknown_linux_gnu),
    ("m68k-unknown-linux-gnu", m68k_unknown_linux_gnu),
    ("mips-unknown-linux-gnu", mips_unknown_linux_gnu),
    ("mips64-unknown-linux-gnuabi64", mips64_unknown_linux_gnuabi64),
    ("mips64el-unknown-linux-gnuabi64", mips64el_unknown_linux_gnuabi64),
    ("mipsisa32r6-unknown-linux-gnu", mipsisa32r6_unknown_linux_gnu),
    ("mipsisa32r6el-unknown-linux-gnu", mipsisa32r6el_unknown_linux_gnu),
    ("mipsisa64r6-unknown-linux-gnuabi64", mipsisa64r6_unknown_linux_gnuabi64),
    ("mipsisa64r6el-unknown-linux-gnuabi64", mipsisa64r6el_unknown_linux_gnuabi64),
    ("mipsel-unknown-linux-gnu", mipsel_unknown_linux_gnu),
    ("powerpc-unknown-linux-gnu", powerpc_unknown_linux_gnu),
    ("powerpc-unknown-linux-gnuspe", powerpc_unknown_linux_gnuspe),
    ("powerpc-unknown-linux-musl", powerpc_unknown_linux_musl),
    ("powerpc64-unknown-linux-gnu", powerpc64_unknown_linux_gnu),
    ("powerpc64-unknown-linux-musl", powerpc64_unknown_linux_musl),
    ("powerpc64le-unknown-linux-gnu", powerpc64le_unknown_linux_gnu),
    ("powerpc64le-unknown-linux-musl", powerpc64le_unknown_linux_musl),
    ("s390x-unknown-linux-gnu", s390x_unknown_linux_gnu),
    ("s390x-unknown-linux-musl", s390x_unknown_linux_musl),
    ("sparc-unknown-linux-gnu", sparc_unknown_linux_gnu),
    ("sparc64-unknown-linux-gnu", sparc64_unknown_linux_gnu),
    ("arm-unknown-linux-gnueabi", arm_unknown_linux_gnueabi),
    ("arm-unknown-linux-gnueabihf", arm_unknown_linux_gnueabihf),
    ("arm-unknown-linux-musleabi", arm_unknown_linux_musleabi),
    ("arm-unknown-linux-musleabihf", arm_unknown_linux_musleabihf),
    ("armv4t-unknown-linux-gnueabi", armv4t_unknown_linux_gnueabi),
    ("armv5te-unknown-linux-gnueabi", armv5te_unknown_linux_gnueabi),
    ("armv5te-unknown-linux-musleabi", armv5te_unknown_linux_musleabi),
    ("armv5te-unknown-linux-uclibceabi", armv5te_unknown_linux_uclibceabi),
    ("armv7-unknown-linux-gnueabi", armv7_unknown_linux_gnueabi),
    ("armv7-unknown-linux-gnueabihf", armv7_unknown_linux_gnueabihf),
    ("thumbv7neon-unknown-linux-gnueabihf", thumbv7neon_unknown_linux_gnueabihf),
    ("thumbv7neon-unknown-linux-musleabihf", thumbv7neon_unknown_linux_musleabihf),
    ("armv7-unknown-linux-musleabi", armv7_unknown_linux_musleabi),
    ("armv7-unknown-linux-musleabihf", armv7_unknown_linux_musleabihf),
    ("aarch64-unknown-linux-gnu", aarch64_unknown_linux_gnu),
    ("aarch64-unknown-linux-musl", aarch64_unknown_linux_musl),
    ("x86_64-unknown-linux-musl", x86_64_unknown_linux_musl),
    ("i686-unknown-linux-musl", i686_unknown_linux_musl),
    ("i586-unknown-linux-musl", i586_unknown_linux_musl),
    ("mips-unknown-linux-musl", mips_unknown_linux_musl),
    ("mipsel-unknown-linux-musl", mipsel_unknown_linux_musl),
    ("mips64-unknown-linux-muslabi64", mips64_unknown_linux_muslabi64),
    ("mips64el-unknown-linux-muslabi64", mips64el_unknown_linux_muslabi64),
    ("hexagon-unknown-linux-musl", hexagon_unknown_linux_musl),
    ("mips-unknown-linux-uclibc", mips_unknown_linux_uclibc),
    ("mipsel-unknown-linux-uclibc", mipsel_unknown_linux_uclibc),
    ("i686-linux-android", i686_linux_android),
    ("x86_64-linux-android", x86_64_linux_android),
    ("arm-linux-androideabi", arm_linux_androideabi),
    ("armv7-linux-androideabi", armv7_linux_androideabi),
    ("thumbv7neon-linux-androideabi", thumbv7neon_linux_androideabi),
    ("aarch64-linux-android", aarch64_linux_android),
    ("x86_64-unknown-none-linuxkernel", x86_64_unknown_none_linuxkernel),
    // BSD family
    ("aarch64-unknown-freebsd", aarch64_unknown_freebsd),
    ("armv6-unknown-freebsd", armv6_unknown_freebsd),
    ("armv7-unknown-freebsd", armv7_unknown_freebsd),
    ("i686-unknown-freebsd", i686_unknown_freebsd),
    ("powerpc-unknown-freebsd", powerpc_unknown_freebsd),
    ("powerpc64-unknown-freebsd", powerpc64_unknown_freebsd),
    ("powerpc64le-unknown-freebsd", powerpc64le_unknown_freebsd),
    ("x86_64-unknown-freebsd", x86_64_unknown_freebsd),
    ("x86_64-unknown-dragonfly", x86_64_unknown_dragonfly),
    ("aarch64-unknown-openbsd", aarch64_unknown_openbsd),
    ("i686-unknown-openbsd", i686_unknown_openbsd),
    ("sparc64-unknown-openbsd", sparc64_unknown_openbsd),
    ("x86_64-unknown-openbsd", x86_64_unknown_openbsd),
    ("powerpc-unknown-openbsd", powerpc_unknown_openbsd),
    ("aarch64-unknown-netbsd", aarch64_unknown_netbsd),
    ("armv6-unknown-netbsd-eabihf", armv6_unknown_netbsd_eabihf),
    ("armv7-unknown-netbsd-eabihf", armv7_unknown_netbsd_eabihf),
    ("i686-unknown-netbsd", i686_unknown_netbsd),
    ("powerpc-unknown-netbsd", powerpc_unknown_netbsd),
    ("sparc64-unknown-netbsd", sparc64_unknown_netbsd),
    ("x86_64-unknown-netbsd", x86_64_unknown_netbsd),
    // Haiku
    ("i686-unknown-haiku", i686_unknown_haiku),
    ("x86_64-unknown-haiku", x86_64_unknown_haiku),
    // Apple platforms (desktop, iOS/tvOS, Catalyst)
    ("aarch64-apple-darwin", aarch64_apple_darwin),
    ("x86_64-apple-darwin", x86_64_apple_darwin),
    ("i686-apple-darwin", i686_apple_darwin),
    // Fuchsia
    ("aarch64-fuchsia", aarch64_fuchsia),
    ("x86_64-fuchsia", x86_64_fuchsia),
    ("avr-unknown-gnu-atmega328", avr_unknown_gnu_atmega328),
    ("x86_64-unknown-l4re-uclibc", x86_64_unknown_l4re_uclibc),
    // Redox
    ("aarch64-unknown-redox", aarch64_unknown_redox),
    ("x86_64-unknown-redox", x86_64_unknown_redox),
    ("i386-apple-ios", i386_apple_ios),
    ("x86_64-apple-ios", x86_64_apple_ios),
    ("aarch64-apple-ios", aarch64_apple_ios),
    ("armv7-apple-ios", armv7_apple_ios),
    ("armv7s-apple-ios", armv7s_apple_ios),
    ("x86_64-apple-ios-macabi", x86_64_apple_ios_macabi),
    ("aarch64-apple-ios-macabi", aarch64_apple_ios_macabi),
    ("aarch64-apple-ios-sim", aarch64_apple_ios_sim),
    ("aarch64-apple-tvos", aarch64_apple_tvos),
    ("x86_64-apple-tvos", x86_64_apple_tvos),
    // Bare-metal ARM (R profile)
    ("armebv7r-none-eabi", armebv7r_none_eabi),
    ("armebv7r-none-eabihf", armebv7r_none_eabihf),
    ("armv7r-none-eabi", armv7r_none_eabi),
    ("armv7r-none-eabihf", armv7r_none_eabihf),
    // Solaris / illumos
    ("x86_64-pc-solaris", x86_64_pc_solaris),
    ("x86_64-sun-solaris", x86_64_sun_solaris),
    ("sparcv9-sun-solaris", sparcv9_sun_solaris),
    ("x86_64-unknown-illumos", x86_64_unknown_illumos),
    // Windows (GNU and MSVC toolchains, incl. UWP)
    ("x86_64-pc-windows-gnu", x86_64_pc_windows_gnu),
    ("i686-pc-windows-gnu", i686_pc_windows_gnu),
    ("i686-uwp-windows-gnu", i686_uwp_windows_gnu),
    ("x86_64-uwp-windows-gnu", x86_64_uwp_windows_gnu),
    ("aarch64-pc-windows-msvc", aarch64_pc_windows_msvc),
    ("aarch64-uwp-windows-msvc", aarch64_uwp_windows_msvc),
    ("x86_64-pc-windows-msvc", x86_64_pc_windows_msvc),
    ("x86_64-uwp-windows-msvc", x86_64_uwp_windows_msvc),
    ("i686-pc-windows-msvc", i686_pc_windows_msvc),
    ("i686-uwp-windows-msvc", i686_uwp_windows_msvc),
    ("i586-pc-windows-msvc", i586_pc_windows_msvc),
    ("thumbv7a-pc-windows-msvc", thumbv7a_pc_windows_msvc),
    ("thumbv7a-uwp-windows-msvc", thumbv7a_uwp_windows_msvc),
    // WebAssembly / Emscripten
    ("asmjs-unknown-emscripten", asmjs_unknown_emscripten),
    ("wasm32-unknown-emscripten", wasm32_unknown_emscripten),
    ("wasm32-unknown-unknown", wasm32_unknown_unknown),
    ("wasm32-wasi", wasm32_wasi),
    ("wasm64-unknown-unknown", wasm64_unknown_unknown),
    // Bare-metal ARM (M and A profiles) and MSP430
    ("thumbv6m-none-eabi", thumbv6m_none_eabi),
    ("thumbv7m-none-eabi", thumbv7m_none_eabi),
    ("thumbv7em-none-eabi", thumbv7em_none_eabi),
    ("thumbv7em-none-eabihf", thumbv7em_none_eabihf),
    ("thumbv8m.base-none-eabi", thumbv8m_base_none_eabi),
    ("thumbv8m.main-none-eabi", thumbv8m_main_none_eabi),
    ("thumbv8m.main-none-eabihf", thumbv8m_main_none_eabihf),
    ("armv7a-none-eabi", armv7a_none_eabi),
    ("armv7a-none-eabihf", armv7a_none_eabihf),
    ("msp430-none-elf", msp430_none_elf),
    // Hermit
    ("aarch64-unknown-hermit", aarch64_unknown_hermit),
    ("x86_64-unknown-hermit", x86_64_unknown_hermit),
    ("x86_64-unknown-none-hermitkernel", x86_64_unknown_none_hermitkernel),
    // RISC-V
    ("riscv32i-unknown-none-elf", riscv32i_unknown_none_elf),
    ("riscv32imc-unknown-none-elf", riscv32imc_unknown_none_elf),
    ("riscv32imc-esp-espidf", riscv32imc_esp_espidf),
    ("riscv32imac-unknown-none-elf", riscv32imac_unknown_none_elf),
    ("riscv32gc-unknown-linux-gnu", riscv32gc_unknown_linux_gnu),
    ("riscv32gc-unknown-linux-musl", riscv32gc_unknown_linux_musl),
    ("riscv64imac-unknown-none-elf", riscv64imac_unknown_none_elf),
    ("riscv64gc-unknown-none-elf", riscv64gc_unknown_none_elf),
    ("riscv64gc-unknown-linux-gnu", riscv64gc_unknown_linux_gnu),
    ("riscv64gc-unknown-linux-musl", riscv64gc_unknown_linux_musl),
    // Bare-metal AArch64
    ("aarch64-unknown-none", aarch64_unknown_none),
    ("aarch64-unknown-none-softfloat", aarch64_unknown_none_softfloat),
    // SGX and UEFI
    ("x86_64-fortanix-unknown-sgx", x86_64_fortanix_unknown_sgx),
    ("x86_64-unknown-uefi", x86_64_unknown_uefi),
    ("i686-unknown-uefi", i686_unknown_uefi),
    ("aarch64-unknown-uefi", aarch64_unknown_uefi),
    // NVPTX (CUDA)
    ("nvptx64-nvidia-cuda", nvptx64_nvidia_cuda),
    // VxWorks
    ("i686-wrs-vxworks", i686_wrs_vxworks),
    ("x86_64-wrs-vxworks", x86_64_wrs_vxworks),
    ("armv7-wrs-vxworks-eabihf", armv7_wrs_vxworks_eabihf),
    ("aarch64-wrs-vxworks", aarch64_wrs_vxworks),
    ("powerpc-wrs-vxworks", powerpc_wrs_vxworks),
    ("powerpc-wrs-vxworks-spe", powerpc_wrs_vxworks_spe),
    ("powerpc64-wrs-vxworks", powerpc64_wrs_vxworks),
    // SOLID (KMC)
    ("aarch64-kmc-solid_asp3", aarch64_kmc_solid_asp3),
    ("armv7a-kmc-solid_asp3-eabi", armv7a_kmc_solid_asp3_eabi),
    ("armv7a-kmc-solid_asp3-eabihf", armv7a_kmc_solid_asp3_eabihf),
    // Miscellaneous embedded / experimental
    ("mipsel-sony-psp", mipsel_sony_psp),
    ("mipsel-unknown-none", mipsel_unknown_none),
    ("thumbv4t-none-eabi", thumbv4t_none_eabi),
    ("aarch64_be-unknown-linux-gnu", aarch64_be_unknown_linux_gnu),
    ("aarch64-unknown-linux-gnu_ilp32", aarch64_unknown_linux_gnu_ilp32),
    ("aarch64_be-unknown-linux-gnu_ilp32", aarch64_be_unknown_linux_gnu_ilp32),
    ("bpfeb-unknown-none", bpfeb_unknown_none),
    ("bpfel-unknown-none", bpfel_unknown_none),
    ("armv6k-nintendo-3ds", armv6k_nintendo_3ds),
    ("armv7-unknown-linux-uclibceabihf", armv7_unknown_linux_uclibceabihf),
}

/// Warnings encountered when parsing the target `json`.
///
/// Includes fields that weren't recognized and fields that don't have the expected type.
#[derive(Debug, PartialEq)] pub struct TargetWarnings { unused_fields: Vec<String>, incorrect_type: Vec<String>, } impl TargetWarnings { pub fn empty() -> Self { Self { unused_fields: Vec::new(), incorrect_type: Vec::new() } } pub fn warning_messages(&self) -> Vec<String> { let mut warnings = vec![]; if !self.unused_fields.is_empty() { warnings.push(format!( "target json file contains unused fields: {}", self.unused_fields.join(", ") )); } if !self.incorrect_type.is_empty() { warnings.push(format!( "target json file contains fields whose value doesn't have the correct json type: {}", self.incorrect_type.join(", ") )); } warnings } } /// Everything `rustc` knows about how to compile for a specific target. /// /// Every field here must be specified, and has no default value. #[derive(PartialEq, Clone, Debug)] pub struct Target { /// Target triple to pass to LLVM. pub llvm_target: String, /// Number of bits in a pointer. Influences the `target_pointer_width` `cfg` variable. pub pointer_width: u32, /// Architecture to use for ABI considerations. Valid options include: "x86", /// "x86_64", "arm", "aarch64", "mips", "powerpc", "powerpc64", and others. pub arch: String, /// [Data layout](https://llvm.org/docs/LangRef.html#data-layout) to pass to LLVM. pub data_layout: String, /// Optional settings with defaults. pub options: TargetOptions, } pub trait HasTargetSpec { fn target_spec(&self) -> &Target; } impl HasTargetSpec for Target { #[inline] fn target_spec(&self) -> &Target { self } } /// Optional aspects of a target specification. /// /// This has an implementation of `Default`, see each field for what the default is. In general, /// these try to take "minimal defaults" that don't assume anything about the runtime they run in. /// /// `TargetOptions` as a separate structure is mostly an implementation detail of `Target` /// construction, all its fields logically belong to `Target` and available from `Target` /// through `Deref` impls. 
#[derive(PartialEq, Clone, Debug)] pub struct TargetOptions { /// Whether the target is built-in or loaded from a custom target specification. pub is_builtin: bool, /// Used as the `target_endian` `cfg` variable. Defaults to little endian. pub endian: Endian, /// Width of c_int type. Defaults to "32". pub c_int_width: String, /// OS name to use for conditional compilation (`target_os`). Defaults to "none". /// "none" implies a bare metal target without `std` library. /// A couple of targets having `std` also use "unknown" as an `os` value, /// but they are exceptions. pub os: String, /// Environment name to use for conditional compilation (`target_env`). Defaults to "". pub env: String, /// ABI name to distinguish multiple ABIs on the same OS and architecture. For instance, `"eabi"` /// or `"eabihf"`. Defaults to "". pub abi: String, /// Vendor name to use for conditional compilation (`target_vendor`). Defaults to "unknown". pub vendor: String, /// Default linker flavor used if `-C linker-flavor` or `-C linker` are not passed /// on the command line. Defaults to `LinkerFlavor::Gcc`. pub linker_flavor: LinkerFlavor, /// Linker to invoke pub linker: Option<String>, /// LLD flavor used if `lld` (or `rust-lld`) is specified as a linker /// without clarifying its flavor in any way. pub lld_flavor: LldFlavor, /// Linker arguments that are passed *before* any user-defined libraries. pub pre_link_args: LinkArgs, /// Objects to link before and after all other object code. pub pre_link_objects: CrtObjects, pub post_link_objects: CrtObjects, /// Same as `(pre|post)_link_objects`, but when we fail to pull the objects with help of the /// target's native gcc and fall back to the "self-contained" mode and pull them manually. /// See `crt_objects.rs` for some more detailed documentation. pub pre_link_objects_fallback: CrtObjects, pub post_link_objects_fallback: CrtObjects, /// Which logic to use to determine whether to fall back to the "self-contained" mode or not. 
pub crt_objects_fallback: Option<CrtObjectsFallback>, /// Linker arguments that are unconditionally passed after any /// user-defined but before post-link objects. Standard platform /// libraries that should be always be linked to, usually go here. pub late_link_args: LinkArgs, /// Linker arguments used in addition to `late_link_args` if at least one /// Rust dependency is dynamically linked. pub late_link_args_dynamic: LinkArgs, /// Linker arguments used in addition to `late_link_args` if aall Rust /// dependencies are statically linked. pub late_link_args_static: LinkArgs, /// Linker arguments that are unconditionally passed *after* any /// user-defined libraries. pub post_link_args: LinkArgs, /// Optional link script applied to `dylib` and `executable` crate types. /// This is a string containing the script, not a path. Can only be applied /// to linkers where `linker_is_gnu` is true. pub link_script: Option<String>, /// Environment variables to be set for the linker invocation. pub link_env: Vec<(String, String)>, /// Environment variables to be removed for the linker invocation. pub link_env_remove: Vec<String>, /// Extra arguments to pass to the external assembler (when used) pub asm_args: Vec<String>, /// Default CPU to pass to LLVM. Corresponds to `llc -mcpu=$cpu`. Defaults /// to "generic". pub cpu: String, /// Default target features to pass to LLVM. These features will *always* be /// passed, and cannot be disabled even via `-C`. Corresponds to `llc /// -mattr=$features`. pub features: String, /// Whether dynamic linking is available on this target. Defaults to false. pub dynamic_linking: bool, /// If dynamic linking is available, whether only cdylibs are supported. pub only_cdylib: bool, /// Whether executables are available on this target. iOS, for example, only allows static /// libraries. Defaults to false. pub executables: bool, /// Relocation model to use in object file. Corresponds to `llc /// -relocation-model=$relocation_model`. 
Defaults to `Pic`. pub relocation_model: RelocModel, /// Code model to use. Corresponds to `llc -code-model=$code_model`. /// Defaults to `None` which means "inherited from the base LLVM target". pub code_model: Option<CodeModel>, /// TLS model to use. Options are "global-dynamic" (default), "local-dynamic", "initial-exec" /// and "local-exec". This is similar to the -ftls-model option in GCC/Clang. pub tls_model: TlsModel, /// Do not emit code that uses the "red zone", if the ABI has one. Defaults to false. pub disable_redzone: bool, /// Frame pointer mode for this target. Defaults to `MayOmit`. pub frame_pointer: FramePointer, /// Emit each function in its own section. Defaults to true. pub function_sections: bool, /// String to prepend to the name of every dynamic library. Defaults to "lib". pub dll_prefix: String, /// String to append to the name of every dynamic library. Defaults to ".so". pub dll_suffix: String, /// String to append to the name of every executable. pub exe_suffix: String, /// String to prepend to the name of every static library. Defaults to "lib". pub staticlib_prefix: String, /// String to append to the name of every static library. Defaults to ".a". pub staticlib_suffix: String, /// Values of the `target_family` cfg set for this target. /// /// Common options are: "unix", "windows". Defaults to no families. /// /// See <https://doc.rust-lang.org/reference/conditional-compilation.html#target_family>. pub families: Vec<String>, /// Whether the target toolchain's ABI supports returning small structs as an integer. pub abi_return_struct_as_int: bool, /// Whether the target toolchain is like macOS's. Only useful for compiling against iOS/macOS, /// in particular running dsymutil and some other stuff like `-dead_strip`. Defaults to false. pub is_like_osx: bool, /// Whether the target toolchain is like Solaris's. /// Only useful for compiling against Illumos/Solaris, /// as they have a different set of linker flags. Defaults to false. 
pub is_like_solaris: bool, /// Whether the target is like Windows. /// This is a combination of several more specific properties represented as a single flag: /// - The target uses a Windows ABI, /// - uses PE/COFF as a format for object code, /// - uses Windows-style dllexport/dllimport for shared libraries, /// - uses import libraries and .def files for symbol exports, /// - executables support setting a subsystem. pub is_like_windows: bool, /// Whether the target is like MSVC. /// This is a combination of several more specific properties represented as a single flag: /// - The target has all the properties from `is_like_windows` /// (for in-tree targets "is_like_msvc ⇒ is_like_windows" is ensured by a unit test), /// - has some MSVC-specific Windows ABI properties, /// - uses a link.exe-like linker, /// - uses CodeView/PDB for debuginfo and natvis for its visualization, /// - uses SEH-based unwinding, /// - supports control flow guard mechanism. pub is_like_msvc: bool, /// Whether the target toolchain is like Emscripten's. Only useful for compiling with /// Emscripten toolchain. /// Defaults to false. pub is_like_emscripten: bool, /// Whether the target toolchain is like Fuchsia's. pub is_like_fuchsia: bool, /// Whether a target toolchain is like WASM. pub is_like_wasm: bool, /// Version of DWARF to use if not using the default. /// Useful because some platforms (osx, bsd) only want up to DWARF2. pub dwarf_version: Option<u32>, /// Whether the linker support GNU-like arguments such as -O. Defaults to true. pub linker_is_gnu: bool, /// The MinGW toolchain has a known issue that prevents it from correctly /// handling COFF object files with more than 2<sup>15</sup> sections. Since each weak /// symbol needs its own COMDAT section, weak linkage implies a large /// number sections that easily exceeds the given limit for larger /// codebases. Consequently we want a way to disallow weak linkage on some /// platforms. 
pub allows_weak_linkage: bool, /// Whether the linker support rpaths or not. Defaults to false. pub has_rpath: bool, /// Whether to disable linking to the default libraries, typically corresponds /// to `-nodefaultlibs`. Defaults to true. pub no_default_libraries: bool, /// Dynamically linked executables can be compiled as position independent /// if the default relocation model of position independent code is not /// changed. This is a requirement to take advantage of ASLR, as otherwise /// the functions in the executable are not randomized and can be used /// during an exploit of a vulnerability in any code. pub position_independent_executables: bool, /// Executables that are both statically linked and position-independent are supported. pub static_position_independent_executables: bool, /// Determines if the target always requires using the PLT for indirect /// library calls or not. This controls the default value of the `-Z plt` flag. pub needs_plt: bool, /// Either partial, full, or off. Full RELRO makes the dynamic linker /// resolve all symbols at startup and marks the GOT read-only before /// starting the program, preventing overwriting the GOT. pub relro_level: RelroLevel, /// Format that archives should be emitted in. This affects whether we use /// LLVM to assemble an archive or fall back to the system linker, and /// currently only "gnu" is used to fall into LLVM. Unknown strings cause /// the system linker to be used. pub archive_format: String, /// Is asm!() allowed? Defaults to true. pub allow_asm: bool, /// Whether the runtime startup code requires the `main` function be passed /// `argc` and `argv` values. pub main_needs_argc_argv: bool, /// Flag indicating whether ELF TLS (e.g., #[thread_local]) is available for /// this target. pub has_elf_tls: bool, // This is mainly for easy compatibility with emscripten. // If we give emcc .o files that are actually .bc files it // will 'just work'. 
pub obj_is_bitcode: bool, /// Whether the target requires that emitted object code includes bitcode. pub forces_embed_bitcode: bool, /// Content of the LLVM cmdline section associated with embedded bitcode. pub bitcode_llvm_cmdline: String, /// Don't use this field; instead use the `.min_atomic_width()` method. pub min_atomic_width: Option<u64>, /// Don't use this field; instead use the `.max_atomic_width()` method. pub max_atomic_width: Option<u64>, /// Whether the target supports atomic CAS operations natively pub atomic_cas: bool, /// Panic strategy: "unwind" or "abort" pub panic_strategy: PanicStrategy, /// Whether or not linking dylibs to a static CRT is allowed. pub crt_static_allows_dylibs: bool, /// Whether or not the CRT is statically linked by default. pub crt_static_default: bool, /// Whether or not crt-static is respected by the compiler (or is a no-op). pub crt_static_respected: bool, /// The implementation of stack probes to use. pub stack_probes: StackProbeType, /// The minimum alignment for global symbols. pub min_global_align: Option<u64>, /// Default number of codegen units to use in debug mode pub default_codegen_units: Option<u64>, /// Whether to generate trap instructions in places where optimization would /// otherwise produce control flow that falls through into unrelated memory. pub trap_unreachable: bool, /// This target requires everything to be compiled with LTO to emit a final /// executable, aka there is no native linker for this target. pub requires_lto: bool, /// This target has no support for threads. pub singlethread: bool, /// Whether library functions call lowering/optimization is disabled in LLVM /// for this target unconditionally. 
pub no_builtins: bool, /// The default visibility for symbols in this target should be "hidden" /// rather than "default" pub default_hidden_visibility: bool, /// Whether a .debug_gdb_scripts section will be added to the output object file pub emit_debug_gdb_scripts: bool, /// Whether or not to unconditionally `uwtable` attributes on functions, /// typically because the platform needs to unwind for things like stack /// unwinders. pub requires_uwtable: bool, /// Whether or not to emit `uwtable` attributes on functions if `-C force-unwind-tables` /// is not specified and `uwtable` is not required on this target. pub default_uwtable: bool, /// Whether or not SIMD types are passed by reference in the Rust ABI, /// typically required if a target can be compiled with a mixed set of /// target features. This is `true` by default, and `false` for targets like /// wasm32 where the whole program either has simd or not. pub simd_types_indirect: bool, /// Pass a list of symbol which should be exported in the dylib to the linker. pub limit_rdylib_exports: bool, /// If set, have the linker export exactly these symbols, instead of using /// the usual logic to figure this out from the crate itself. pub override_export_symbols: Option<Vec<String>>, /// Determines how or whether the MergeFunctions LLVM pass should run for /// this target. Either "disabled", "trampolines", or "aliases". /// The MergeFunctions pass is generally useful, but some targets may need /// to opt out. The default is "aliases". /// /// Workaround for: <https://github.com/rust-lang/rust/issues/57356> pub merge_functions: MergeFunctions, /// Use platform dependent mcount function pub mcount: String, /// LLVM ABI name, corresponds to the '-mabi' parameter available in multilib C compilers pub llvm_abiname: String, /// Whether or not RelaxElfRelocation flag will be passed to the linker pub relax_elf_relocations: bool, /// Additional arguments to pass to LLVM, similar to the `-C llvm-args` codegen option. 
pub llvm_args: Vec<String>, /// Whether to use legacy .ctors initialization hooks rather than .init_array. Defaults /// to false (uses .init_array). pub use_ctors_section: bool, /// Whether the linker is instructed to add a `GNU_EH_FRAME` ELF header /// used to locate unwinding information is passed /// (only has effect if the linker is `ld`-like). pub eh_frame_header: bool, /// Is true if the target is an ARM architecture using thumb v1 which allows for /// thumb and arm interworking. pub has_thumb_interworking: bool, /// How to handle split debug information, if at all. Specifying `None` has /// target-specific meaning. pub split_debuginfo: SplitDebuginfo, /// The sanitizers supported by this target /// /// Note that the support here is at a codegen level. If the machine code with sanitizer /// enabled can generated on this target, but the necessary supporting libraries are not /// distributed with the target, the sanitizer should still appear in this list for the target. pub supported_sanitizers: SanitizerSet, /// If present it's a default value to use for adjusting the C ABI. pub default_adjusted_cabi: Option<Abi>, /// Minimum number of bits in #[repr(C)] enum. Defaults to 32. pub c_enum_min_bits: u64, } impl Default for TargetOptions { /// Creates a set of "sane defaults" for any target. This is still /// incomplete, and if used for compilation, will certainly not work. 
fn default() -> TargetOptions { TargetOptions { is_builtin: false, endian: Endian::Little, c_int_width: "32".to_string(), os: "none".to_string(), env: String::new(), abi: String::new(), vendor: "unknown".to_string(), linker_flavor: LinkerFlavor::Gcc, linker: option_env!("CFG_DEFAULT_LINKER").map(|s| s.to_string()), lld_flavor: LldFlavor::Ld, pre_link_args: LinkArgs::new(), post_link_args: LinkArgs::new(), link_script: None, asm_args: Vec::new(), cpu: "generic".to_string(), features: String::new(), dynamic_linking: false, only_cdylib: false, executables: false, relocation_model: RelocModel::Pic, code_model: None, tls_model: TlsModel::GeneralDynamic, disable_redzone: false, frame_pointer: FramePointer::MayOmit, function_sections: true, dll_prefix: "lib".to_string(), dll_suffix: ".so".to_string(), exe_suffix: String::new(), staticlib_prefix: "lib".to_string(), staticlib_suffix: ".a".to_string(), families: Vec::new(), abi_return_struct_as_int: false, is_like_osx: false, is_like_solaris: false, is_like_windows: false, is_like_emscripten: false, is_like_msvc: false, is_like_fuchsia: false, is_like_wasm: false, dwarf_version: None, linker_is_gnu: true, allows_weak_linkage: true, has_rpath: false, no_default_libraries: true, position_independent_executables: false, static_position_independent_executables: false, needs_plt: false, relro_level: RelroLevel::None, pre_link_objects: Default::default(), post_link_objects: Default::default(), pre_link_objects_fallback: Default::default(), post_link_objects_fallback: Default::default(), crt_objects_fallback: None, late_link_args: LinkArgs::new(), late_link_args_dynamic: LinkArgs::new(), late_link_args_static: LinkArgs::new(), link_env: Vec::new(), link_env_remove: Vec::new(), archive_format: "gnu".to_string(), main_needs_argc_argv: true, allow_asm: true, has_elf_tls: false, obj_is_bitcode: false, forces_embed_bitcode: false, bitcode_llvm_cmdline: String::new(), min_atomic_width: None, max_atomic_width: None, atomic_cas: true, 
panic_strategy: PanicStrategy::Unwind, crt_static_allows_dylibs: false, crt_static_default: false, crt_static_respected: false, stack_probes: StackProbeType::None, min_global_align: None, default_codegen_units: None, trap_unreachable: true, requires_lto: false, singlethread: false, no_builtins: false, default_hidden_visibility: false, emit_debug_gdb_scripts: true, requires_uwtable: false, default_uwtable: false, simd_types_indirect: true, limit_rdylib_exports: true, override_export_symbols: None, merge_functions: MergeFunctions::Aliases, mcount: "mcount".to_string(), llvm_abiname: "".to_string(), relax_elf_relocations: false, llvm_args: vec![], use_ctors_section: false, eh_frame_header: true, has_thumb_interworking: false, split_debuginfo: SplitDebuginfo::Off, supported_sanitizers: SanitizerSet::empty(), default_adjusted_cabi: None, c_enum_min_bits: 32, } } } /// `TargetOptions` being a separate type is basically an implementation detail of `Target` that is /// used for providing defaults. Perhaps there's a way to merge `TargetOptions` into `Target` so /// this `Deref` implementation is no longer necessary. impl Deref for Target { type Target = TargetOptions; fn deref(&self) -> &Self::Target { &self.options } } impl DerefMut for Target { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.options } } impl Target { /// Given a function ABI, turn it into the correct ABI for this target. pub fn adjust_abi(&self, abi: Abi) -> Abi { match abi { Abi::C { .. } => self.default_adjusted_cabi.unwrap_or(abi), Abi::System { unwind } if self.is_like_windows && self.arch == "x86" => { Abi::Stdcall { unwind } } Abi::System { unwind } => Abi::C { unwind }, Abi::EfiApi if self.arch == "x86_64" => Abi::Win64, Abi::EfiApi => Abi::C { unwind: false }, // See commentary in `is_abi_supported`. Abi::Stdcall { .. } | Abi::Thiscall { .. 
} if self.arch == "x86" => abi, Abi::Stdcall { unwind } | Abi::Thiscall { unwind } => Abi::C { unwind }, Abi::Fastcall if self.arch == "x86" => abi, Abi::Vectorcall if ["x86", "x86_64"].contains(&&self.arch[..]) => abi, Abi::Fastcall | Abi::Vectorcall => Abi::C { unwind: false }, abi => abi, } } /// Returns a None if the UNSUPPORTED_CALLING_CONVENTIONS lint should be emitted pub fn is_abi_supported(&self, abi: Abi) -> Option<bool> { use Abi::*; Some(match abi { Rust | C { .. } | System { .. } | RustIntrinsic | RustCall | PlatformIntrinsic | Unadjusted | Cdecl | EfiApi => true, X86Interrupt => ["x86", "x86_64"].contains(&&self.arch[..]), Aapcs => "arm" == self.arch, CCmseNonSecureCall => ["arm", "aarch64"].contains(&&self.arch[..]), Win64 | SysV64 => self.arch == "x86_64", PtxKernel => self.arch == "nvptx64", Msp430Interrupt => self.arch == "msp430", AmdGpuKernel => self.arch == "amdgcn", AvrInterrupt | AvrNonBlockingInterrupt => self.arch == "avr", Wasm => ["wasm32", "wasm64"].contains(&&self.arch[..]), Thiscall { .. } => self.arch == "x86", // On windows these fall-back to platform native calling convention (C) when the // architecture is not supported. // // This is I believe a historical accident that has occurred as part of Microsoft // striving to allow most of the code to "just" compile when support for 64-bit x86 // was added and then later again, when support for ARM architectures was added. // // This is well documented across MSDN. Support for this in Rust has been added in // #54576. This makes much more sense in context of Microsoft's C++ than it does in // Rust, but there isn't much leeway remaining here to change it back at the time this // comment has been written. // // Following are the relevant excerpts from the MSDN documentation. // // > The __vectorcall calling convention is only supported in native code on x86 and // x64 processors that include Streaming SIMD Extensions 2 (SSE2) and above. // > ... 
// > On ARM machines, __vectorcall is accepted and ignored by the compiler. // // -- https://docs.microsoft.com/en-us/cpp/cpp/vectorcall?view=msvc-160 // // > On ARM and x64 processors, __stdcall is accepted and ignored by the compiler; // // -- https://docs.microsoft.com/en-us/cpp/cpp/stdcall?view=msvc-160 // // > In most cases, keywords or compiler switches that specify an unsupported // > convention on a particular platform are ignored, and the platform default // > convention is used. // // -- https://docs.microsoft.com/en-us/cpp/cpp/argument-passing-and-naming-conventions Stdcall { .. } | Fastcall | Vectorcall if self.is_like_windows => true, // Outside of Windows we want to only support these calling conventions for the // architectures for which these calling conventions are actually well defined. Stdcall { .. } | Fastcall if self.arch == "x86" => true, Vectorcall if ["x86", "x86_64"].contains(&&self.arch[..]) => true, // Return a `None` for other cases so that we know to emit a future compat lint. Stdcall { .. } | Fastcall | Vectorcall => return None, }) } /// Minimum integer size in bits that this target can perform atomic /// operations on. pub fn min_atomic_width(&self) -> u64 { self.min_atomic_width.unwrap_or(8) } /// Maximum integer size in bits that this target can perform atomic /// operations on. pub fn max_atomic_width(&self) -> u64 { self.max_atomic_width.unwrap_or_else(|| self.pointer_width.into()) } /// Loads a target descriptor from a JSON object. pub fn from_json(mut obj: Json) -> Result<(Target, TargetWarnings), String> { // While ugly, this code must remain this way to retain // compatibility with existing JSON fields and the internal // expected naming of the Target and TargetOptions structs. // To ensure compatibility is retained, the built-in targets // are round-tripped through this code to catch cases where // the JSON parser is not updated to match the structs. 
let mut get_req_field = |name: &str| { obj.remove_key(name) .and_then(|j| Json::as_string(&j).map(str::to_string)) .ok_or_else(|| format!("Field {} in target specification is required", name)) }; let mut base = Target { llvm_target: get_req_field("llvm-target")?, pointer_width: get_req_field("target-pointer-width")? .parse::<u32>() .map_err(|_| "target-pointer-width must be an integer".to_string())?, data_layout: get_req_field("data-layout")?, arch: get_req_field("arch")?, options: Default::default(), }; let mut incorrect_type = vec![]; macro_rules! key { ($key_name:ident) => ( { let name = (stringify!($key_name)).replace("_", "-"); if let Some(s) = obj.remove_key(&name).and_then(|j| Json::as_string(&j).map(str::to_string)) { base.$key_name = s; } } ); ($key_name:ident = $json_name:expr) => ( { let name = $json_name; if let Some(s) = obj.remove_key(&name).and_then(|j| Json::as_string(&j).map(str::to_string)) { base.$key_name = s; } } ); ($key_name:ident, bool) => ( { let name = (stringify!($key_name)).replace("_", "-"); if let Some(s) = obj.remove_key(&name).and_then(|j| Json::as_boolean(&j)) { base.$key_name = s; } } ); ($key_name:ident, u64) => ( { let name = (stringify!($key_name)).replace("_", "-"); if let Some(s) = obj.remove_key(&name).and_then(|j| Json::as_u64(&j)) { base.$key_name = s; } } ); ($key_name:ident, Option<u32>) => ( { let name = (stringify!($key_name)).replace("_", "-"); if let Some(s) = obj.remove_key(&name).and_then(|j| Json::as_u64(&j)) { if s < 1 || s > 5 { return Err("Not a valid DWARF version number".to_string()); } base.$key_name = Some(s as u32); } } ); ($key_name:ident, Option<u64>) => ( { let name = (stringify!($key_name)).replace("_", "-"); if let Some(s) = obj.remove_key(&name).and_then(|j| Json::as_u64(&j)) { base.$key_name = Some(s); } } ); ($key_name:ident, MergeFunctions) => ( { let name = (stringify!($key_name)).replace("_", "-"); obj.remove_key(&name[..]).and_then(|o| o.as_string().and_then(|s| { match 
s.parse::<MergeFunctions>() { Ok(mergefunc) => base.$key_name = mergefunc, _ => return Some(Err(format!("'{}' is not a valid value for \ merge-functions. Use 'disabled', \ 'trampolines', or 'aliases'.", s))), } Some(Ok(())) })).unwrap_or(Ok(())) } ); ($key_name:ident, RelocModel) => ( { let name = (stringify!($key_name)).replace("_", "-"); obj.remove_key(&name[..]).and_then(|o| o.as_string().and_then(|s| { match s.parse::<RelocModel>() { Ok(relocation_model) => base.$key_name = relocation_model, _ => return Some(Err(format!("'{}' is not a valid relocation model. \ Run `rustc --print relocation-models` to \ see the list of supported values.", s))), } Some(Ok(())) })).unwrap_or(Ok(())) } ); ($key_name:ident, CodeModel) => ( { let name = (stringify!($key_name)).replace("_", "-"); obj.remove_key(&name[..]).and_then(|o| o.as_string().and_then(|s| { match s.parse::<CodeModel>() { Ok(code_model) => base.$key_name = Some(code_model), _ => return Some(Err(format!("'{}' is not a valid code model. \ Run `rustc --print code-models` to \ see the list of supported values.", s))), } Some(Ok(())) })).unwrap_or(Ok(())) } ); ($key_name:ident, TlsModel) => ( { let name = (stringify!($key_name)).replace("_", "-"); obj.remove_key(&name[..]).and_then(|o| o.as_string().and_then(|s| { match s.parse::<TlsModel>() { Ok(tls_model) => base.$key_name = tls_model, _ => return Some(Err(format!("'{}' is not a valid TLS model. \ Run `rustc --print tls-models` to \ see the list of supported values.", s))), } Some(Ok(())) })).unwrap_or(Ok(())) } ); ($key_name:ident, PanicStrategy) => ( { let name = (stringify!($key_name)).replace("_", "-"); obj.remove_key(&name[..]).and_then(|o| o.as_string().and_then(|s| { match s { "unwind" => base.$key_name = PanicStrategy::Unwind, "abort" => base.$key_name = PanicStrategy::Abort, _ => return Some(Err(format!("'{}' is not a valid value for \ panic-strategy. 
Use 'unwind' or 'abort'.", s))), } Some(Ok(())) })).unwrap_or(Ok(())) } ); ($key_name:ident, RelroLevel) => ( { let name = (stringify!($key_name)).replace("_", "-"); obj.remove_key(&name[..]).and_then(|o| o.as_string().and_then(|s| { match s.parse::<RelroLevel>() { Ok(level) => base.$key_name = level, _ => return Some(Err(format!("'{}' is not a valid value for \ relro-level. Use 'full', 'partial, or 'off'.", s))), } Some(Ok(())) })).unwrap_or(Ok(())) } ); ($key_name:ident, SplitDebuginfo) => ( { let name = (stringify!($key_name)).replace("_", "-"); obj.remove_key(&name[..]).and_then(|o| o.as_string().and_then(|s| { match s.parse::<SplitDebuginfo>() { Ok(level) => base.$key_name = level, _ => return Some(Err(format!("'{}' is not a valid value for \ split-debuginfo. Use 'off' or 'dsymutil'.", s))), } Some(Ok(())) })).unwrap_or(Ok(())) } ); ($key_name:ident, list) => ( { let name = (stringify!($key_name)).replace("_", "-"); if let Some(j) = obj.remove_key(&name){ if let Some(v) = Json::as_array(&j) { base.$key_name = v.iter() .map(|a| a.as_string().unwrap().to_string()) .collect(); } else { incorrect_type.push(name) } } } ); ($key_name:ident, opt_list) => ( { let name = (stringify!($key_name)).replace("_", "-"); if let Some(j) = obj.remove_key(&name) { if let Some(v) = Json::as_array(&j) { base.$key_name = Some(v.iter() .map(|a| a.as_string().unwrap().to_string()) .collect()); } else { incorrect_type.push(name) } } } ); ($key_name:ident, optional) => ( { let name = (stringify!($key_name)).replace("_", "-"); if let Some(o) = obj.remove_key(&name[..]) { base.$key_name = o .as_string() .map(|s| s.to_string() ); } } ); ($key_name:ident, LldFlavor) => ( { let name = (stringify!($key_name)).replace("_", "-"); obj.remove_key(&name[..]).and_then(|o| o.as_string().and_then(|s| { if let Some(flavor) = LldFlavor::from_str(&s) { base.$key_name = flavor; } else { return Some(Err(format!( "'{}' is not a valid value for lld-flavor. 
\ Use 'darwin', 'gnu', 'link' or 'wasm.", s))) } Some(Ok(())) })).unwrap_or(Ok(())) } ); ($key_name:ident, LinkerFlavor) => ( { let name = (stringify!($key_name)).replace("_", "-"); obj.remove_key(&name[..]).and_then(|o| o.as_string().and_then(|s| { match LinkerFlavor::from_str(s) { Some(linker_flavor) => base.$key_name = linker_flavor, _ => return Some(Err(format!("'{}' is not a valid value for linker-flavor. \ Use {}", s, LinkerFlavor::one_of()))), } Some(Ok(())) })).unwrap_or(Ok(())) } ); ($key_name:ident, StackProbeType) => ( { let name = (stringify!($key_name)).replace("_", "-"); obj.remove_key(&name[..]).and_then(|o| match StackProbeType::from_json(&o) { Ok(v) => { base.$key_name = v; Some(Ok(())) }, Err(s) => Some(Err( format!("`{:?}` is not a valid value for `{}`: {}", o, name, s) )), }).unwrap_or(Ok(())) } ); ($key_name:ident, SanitizerSet) => ( { let name = (stringify!($key_name)).replace("_", "-"); if let Some(o) = obj.remove_key(&name[..]) { if let Some(a) = o.as_array() { for s in a { base.$key_name |= match s.as_string() { Some("address") => SanitizerSet::ADDRESS, Some("cfi") => SanitizerSet::CFI, Some("leak") => SanitizerSet::LEAK, Some("memory") => SanitizerSet::MEMORY, Some("thread") => SanitizerSet::THREAD, Some("hwaddress") => SanitizerSet::HWADDRESS, Some(s) => return Err(format!("unknown sanitizer {}", s)), _ => return Err(format!("not a string: {:?}", s)), }; } } else { incorrect_type.push(name) } } Ok::<(), String>(()) } ); ($key_name:ident, crt_objects_fallback) => ( { let name = (stringify!($key_name)).replace("_", "-"); obj.remove_key(&name[..]).and_then(|o| o.as_string().and_then(|s| { match s.parse::<CrtObjectsFallback>() { Ok(fallback) => base.$key_name = Some(fallback), _ => return Some(Err(format!("'{}' is not a valid CRT objects fallback. 
\ Use 'musl', 'mingw' or 'wasm'", s))), } Some(Ok(())) })).unwrap_or(Ok(())) } ); ($key_name:ident, link_objects) => ( { let name = (stringify!($key_name)).replace("_", "-"); if let Some(val) = obj.remove_key(&name[..]) { let obj = val.as_object().ok_or_else(|| format!("{}: expected a \ JSON object with fields per CRT object kind.", name))?; let mut args = CrtObjects::new(); for (k, v) in obj { let kind = LinkOutputKind::from_str(&k).ok_or_else(|| { format!("{}: '{}' is not a valid value for CRT object kind. \ Use '(dynamic,static)-(nopic,pic)-exe' or \ '(dynamic,static)-dylib' or 'wasi-reactor-exe'", name, k) })?; let v = v.as_array().ok_or_else(|| format!("{}.{}: expected a JSON array", name, k) )?.iter().enumerate() .map(|(i,s)| { let s = s.as_string().ok_or_else(|| format!("{}.{}[{}]: expected a JSON string", name, k, i))?; Ok(s.to_owned()) }) .collect::<Result<Vec<_>, String>>()?; args.insert(kind, v); } base.$key_name = args; } } ); ($key_name:ident, link_args) => ( { let name = (stringify!($key_name)).replace("_", "-"); if let Some(val) = obj.remove_key(&name[..]) { let obj = val.as_object().ok_or_else(|| format!("{}: expected a \ JSON object with fields per linker-flavor.", name))?; let mut args = LinkArgs::new(); for (k, v) in obj { let flavor = LinkerFlavor::from_str(&k).ok_or_else(|| { format!("{}: '{}' is not a valid value for linker-flavor. 
\ Use 'em', 'gcc', 'ld' or 'msvc'", name, k) })?; let v = v.as_array().ok_or_else(|| format!("{}.{}: expected a JSON array", name, k) )?.iter().enumerate() .map(|(i,s)| { let s = s.as_string().ok_or_else(|| format!("{}.{}[{}]: expected a JSON string", name, k, i))?; Ok(s.to_owned()) }) .collect::<Result<Vec<_>, String>>()?; args.insert(flavor, v); } base.$key_name = args; } } ); ($key_name:ident, env) => ( { let name = (stringify!($key_name)).replace("_", "-"); if let Some(o) = obj.remove_key(&name[..]) { if let Some(a) = o.as_array() { for o in a { if let Some(s) = o.as_string() { let p = s.split('=').collect::<Vec<_>>(); if p.len() == 2 { let k = p[0].to_string(); let v = p[1].to_string(); base.$key_name.push((k, v)); } } } } else { incorrect_type.push(name) } } } ); ($key_name:ident, Option<Abi>) => ( { let name = (stringify!($key_name)).replace("_", "-"); obj.remove_key(&name[..]).and_then(|o| o.as_string().and_then(|s| { match lookup_abi(s) { Some(abi) => base.$key_name = Some(abi), _ => return Some(Err(format!("'{}' is not a valid value for abi", s))), } Some(Ok(())) })).unwrap_or(Ok(())) } ); ($key_name:ident, TargetFamilies) => ( { if let Some(value) = obj.remove_key("target-family") { if let Some(v) = Json::as_array(&value) { base.$key_name = v.iter() .map(|a| a.as_string().unwrap().to_string()) .collect(); } else if let Some(v) = Json::as_string(&value) { base.$key_name = vec![v.to_string()]; } } } ); } if let Some(j) = obj.remove_key("target-endian") { if let Some(s) = Json::as_string(&j) { base.endian = s.parse()?; } else { incorrect_type.push("target-endian".to_string()) } } if let Some(fp) = obj.remove_key("frame-pointer") { if let Some(s) = Json::as_string(&fp) { base.frame_pointer = s .parse() .map_err(|()| format!("'{}' is not a valid value for frame-pointer", s))?; } else { incorrect_type.push("frame-pointer".to_string()) } } key!(is_builtin, bool); key!(c_int_width = "target-c-int-width"); key!(os); key!(env); key!(abi); key!(vendor); 
key!(linker_flavor, LinkerFlavor)?; key!(linker, optional); key!(lld_flavor, LldFlavor)?; key!(pre_link_objects, link_objects); key!(post_link_objects, link_objects); key!(pre_link_objects_fallback, link_objects); key!(post_link_objects_fallback, link_objects); key!(crt_objects_fallback, crt_objects_fallback)?; key!(pre_link_args, link_args); key!(late_link_args, link_args); key!(late_link_args_dynamic, link_args); key!(late_link_args_static, link_args); key!(post_link_args, link_args); key!(link_script, optional); key!(link_env, env); key!(link_env_remove, list); key!(asm_args, list); key!(cpu); key!(features); key!(dynamic_linking, bool); key!(only_cdylib, bool); key!(executables, bool); key!(relocation_model, RelocModel)?; key!(code_model, CodeModel)?; key!(tls_model, TlsModel)?; key!(disable_redzone, bool); key!(function_sections, bool); key!(dll_prefix); key!(dll_suffix); key!(exe_suffix); key!(staticlib_prefix); key!(staticlib_suffix); key!(families, TargetFamilies); key!(abi_return_struct_as_int, bool); key!(is_like_osx, bool); key!(is_like_solaris, bool); key!(is_like_windows, bool); key!(is_like_msvc, bool); key!(is_like_emscripten, bool); key!(is_like_fuchsia, bool); key!(is_like_wasm, bool); key!(dwarf_version, Option<u32>); key!(linker_is_gnu, bool); key!(allows_weak_linkage, bool); key!(has_rpath, bool); key!(no_default_libraries, bool); key!(position_independent_executables, bool); key!(static_position_independent_executables, bool); key!(needs_plt, bool); key!(relro_level, RelroLevel)?; key!(archive_format); key!(allow_asm, bool); key!(main_needs_argc_argv, bool); key!(has_elf_tls, bool); key!(obj_is_bitcode, bool); key!(forces_embed_bitcode, bool); key!(bitcode_llvm_cmdline); key!(max_atomic_width, Option<u64>); key!(min_atomic_width, Option<u64>); key!(atomic_cas, bool); key!(panic_strategy, PanicStrategy)?; key!(crt_static_allows_dylibs, bool); key!(crt_static_default, bool); key!(crt_static_respected, bool); key!(stack_probes, StackProbeType)?; 
key!(min_global_align, Option<u64>); key!(default_codegen_units, Option<u64>); key!(trap_unreachable, bool); key!(requires_lto, bool); key!(singlethread, bool); key!(no_builtins, bool); key!(default_hidden_visibility, bool); key!(emit_debug_gdb_scripts, bool); key!(requires_uwtable, bool); key!(default_uwtable, bool); key!(simd_types_indirect, bool); key!(limit_rdylib_exports, bool); key!(override_export_symbols, opt_list); key!(merge_functions, MergeFunctions)?; key!(mcount = "target-mcount"); key!(llvm_abiname); key!(relax_elf_relocations, bool); key!(llvm_args, list); key!(use_ctors_section, bool); key!(eh_frame_header, bool); key!(has_thumb_interworking, bool); key!(split_debuginfo, SplitDebuginfo)?; key!(supported_sanitizers, SanitizerSet)?; key!(default_adjusted_cabi, Option<Abi>)?; key!(c_enum_min_bits, u64); if base.is_builtin { // This can cause unfortunate ICEs later down the line. return Err("may not set is_builtin for targets not built-in".to_string()); } // Each field should have been read using `Json::remove_key` so any keys remaining are unused. let remaining_keys = obj.as_object().ok_or("Expected JSON object for target")?.keys(); Ok(( base, TargetWarnings { unused_fields: remaining_keys.cloned().collect(), incorrect_type }, )) } /// Search for a JSON file specifying the given target triple. /// /// If none is found in `$RUST_TARGET_PATH`, look for a file called `target.json` inside the /// sysroot under the target-triple's `rustlib` directory. Note that it could also just be a /// bare filename already, so also check for that. If one of the hardcoded targets we know /// about, just return it directly. /// /// The error string could come from any of the APIs called, including filesystem access and /// JSON decoding. 
pub fn search( target_triple: &TargetTriple, sysroot: &PathBuf, ) -> Result<(Target, TargetWarnings), String> { use rustc_serialize::json; use std::env; use std::fs; fn load_file(path: &Path) -> Result<(Target, TargetWarnings), String> { let contents = fs::read(path).map_err(|e| e.to_string())?; let obj = json::from_reader(&mut &contents[..]).map_err(|e| e.to_string())?; Target::from_json(obj) } match *target_triple { TargetTriple::TargetTriple(ref target_triple) => { // check if triple is in list of built-in targets if let Some(t) = load_builtin(target_triple) { return Ok((t, TargetWarnings::empty())); } // search for a file named `target_triple`.json in RUST_TARGET_PATH let path = { let mut target = target_triple.to_string(); target.push_str(".json"); PathBuf::from(target) }; let target_path = env::var_os("RUST_TARGET_PATH").unwrap_or_default(); for dir in env::split_paths(&target_path) { let p = dir.join(&path); if p.is_file() { return load_file(&p); } } // Additionally look in the sysroot under `lib/rustlib/<triple>/target.json` // as a fallback. let rustlib_path = crate::target_rustlib_path(&sysroot, &target_triple); let p = std::array::IntoIter::new([ Path::new(sysroot), Path::new(&rustlib_path), Path::new("target.json"), ]) .collect::<PathBuf>(); if p.is_file() { return load_file(&p); } Err(format!("Could not find specification for target {:?}", target_triple)) } TargetTriple::TargetPath(ref target_path) => { if target_path.is_file() { return load_file(&target_path); } Err(format!("Target path {:?} is not a valid file", target_path)) } } } } impl ToJson for Target { fn to_json(&self) -> Json { let mut d = BTreeMap::new(); let default: TargetOptions = Default::default(); macro_rules! target_val { ($attr:ident) => {{ let name = (stringify!($attr)).replace("_", "-"); d.insert(name, self.$attr.to_json()); }}; ($attr:ident, $key_name:expr) => {{ let name = $key_name; d.insert(name.to_string(), self.$attr.to_json()); }}; } macro_rules! 
target_option_val { ($attr:ident) => {{ let name = (stringify!($attr)).replace("_", "-"); if default.$attr != self.$attr { d.insert(name, self.$attr.to_json()); } }}; ($attr:ident, $key_name:expr) => {{ let name = $key_name; if default.$attr != self.$attr { d.insert(name.to_string(), self.$attr.to_json()); } }}; (link_args - $attr:ident) => {{ let name = (stringify!($attr)).replace("_", "-"); if default.$attr != self.$attr { let obj = self .$attr .iter() .map(|(k, v)| (k.desc().to_owned(), v.clone())) .collect::<BTreeMap<_, _>>(); d.insert(name, obj.to_json()); } }}; (env - $attr:ident) => {{ let name = (stringify!($attr)).replace("_", "-"); if default.$attr != self.$attr { let obj = self .$attr .iter() .map(|&(ref k, ref v)| k.clone() + "=" + &v) .collect::<Vec<_>>(); d.insert(name, obj.to_json()); } }}; } target_val!(llvm_target); d.insert("target-pointer-width".to_string(), self.pointer_width.to_string().to_json()); target_val!(arch); target_val!(data_layout); target_option_val!(is_builtin); target_option_val!(endian, "target-endian"); target_option_val!(c_int_width, "target-c-int-width"); target_option_val!(os); target_option_val!(env); target_option_val!(abi); target_option_val!(vendor); target_option_val!(linker_flavor); target_option_val!(linker); target_option_val!(lld_flavor); target_option_val!(pre_link_objects); target_option_val!(post_link_objects); target_option_val!(pre_link_objects_fallback); target_option_val!(post_link_objects_fallback); target_option_val!(crt_objects_fallback); target_option_val!(link_args - pre_link_args); target_option_val!(link_args - late_link_args); target_option_val!(link_args - late_link_args_dynamic); target_option_val!(link_args - late_link_args_static); target_option_val!(link_args - post_link_args); target_option_val!(link_script); target_option_val!(env - link_env); target_option_val!(link_env_remove); target_option_val!(asm_args); target_option_val!(cpu); target_option_val!(features); 
target_option_val!(dynamic_linking); target_option_val!(only_cdylib); target_option_val!(executables); target_option_val!(relocation_model); target_option_val!(code_model); target_option_val!(tls_model); target_option_val!(disable_redzone); target_option_val!(frame_pointer); target_option_val!(function_sections); target_option_val!(dll_prefix); target_option_val!(dll_suffix); target_option_val!(exe_suffix); target_option_val!(staticlib_prefix); target_option_val!(staticlib_suffix); target_option_val!(families, "target-family"); target_option_val!(abi_return_struct_as_int); target_option_val!(is_like_osx); target_option_val!(is_like_solaris); target_option_val!(is_like_windows); target_option_val!(is_like_msvc); target_option_val!(is_like_emscripten); target_option_val!(is_like_fuchsia); target_option_val!(is_like_wasm); target_option_val!(dwarf_version); target_option_val!(linker_is_gnu); target_option_val!(allows_weak_linkage); target_option_val!(has_rpath); target_option_val!(no_default_libraries); target_option_val!(position_independent_executables); target_option_val!(static_position_independent_executables); target_option_val!(needs_plt); target_option_val!(relro_level); target_option_val!(archive_format); target_option_val!(allow_asm); target_option_val!(main_needs_argc_argv); target_option_val!(has_elf_tls); target_option_val!(obj_is_bitcode); target_option_val!(forces_embed_bitcode); target_option_val!(bitcode_llvm_cmdline); target_option_val!(min_atomic_width); target_option_val!(max_atomic_width); target_option_val!(atomic_cas); target_option_val!(panic_strategy); target_option_val!(crt_static_allows_dylibs); target_option_val!(crt_static_default); target_option_val!(crt_static_respected); target_option_val!(stack_probes); target_option_val!(min_global_align); target_option_val!(default_codegen_units); target_option_val!(trap_unreachable); target_option_val!(requires_lto); target_option_val!(singlethread); target_option_val!(no_builtins); 
target_option_val!(default_hidden_visibility); target_option_val!(emit_debug_gdb_scripts); target_option_val!(requires_uwtable); target_option_val!(default_uwtable); target_option_val!(simd_types_indirect); target_option_val!(limit_rdylib_exports); target_option_val!(override_export_symbols); target_option_val!(merge_functions); target_option_val!(mcount, "target-mcount"); target_option_val!(llvm_abiname); target_option_val!(relax_elf_relocations); target_option_val!(llvm_args); target_option_val!(use_ctors_section); target_option_val!(eh_frame_header); target_option_val!(has_thumb_interworking); target_option_val!(split_debuginfo); target_option_val!(supported_sanitizers); target_option_val!(c_enum_min_bits); if let Some(abi) = self.default_adjusted_cabi { d.insert("default-adjusted-cabi".to_string(), Abi::name(abi).to_json()); } Json::Object(d) } } /// Either a target triple string or a path to a JSON file. #[derive(PartialEq, Clone, Debug, Hash, Encodable, Decodable)] pub enum TargetTriple { TargetTriple(String), TargetPath(PathBuf), } impl TargetTriple { /// Creates a target triple from the passed target triple string. pub fn from_triple(triple: &str) -> Self { TargetTriple::TargetTriple(triple.to_string()) } /// Creates a target triple from the passed target path. pub fn from_path(path: &Path) -> Result<Self, io::Error> { let canonicalized_path = path.canonicalize()?; Ok(TargetTriple::TargetPath(canonicalized_path)) } /// Returns a string triple for this target. /// /// If this target is a path, the file name (without extension) is returned. pub fn triple(&self) -> &str { match *self { TargetTriple::TargetTriple(ref triple) => triple, TargetTriple::TargetPath(ref path) => path .file_stem() .expect("target path must not be empty") .to_str() .expect("target path must be valid unicode"), } } /// Returns an extended string triple for this target. /// /// If this target is a path, a hash of the path is appended to the triple returned /// by `triple()`. 
pub fn debug_triple(&self) -> String { use std::collections::hash_map::DefaultHasher; use std::hash::{Hash, Hasher}; let triple = self.triple(); if let TargetTriple::TargetPath(ref path) = *self { let mut hasher = DefaultHasher::new(); path.hash(&mut hasher); let hash = hasher.finish(); format!("{}-{}", triple, hash) } else { triple.to_owned() } } } impl fmt::Display for TargetTriple { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", self.debug_triple()) } }
40.393373
110
0.577036
bb5cf6060398d2b00918931d8d8912e63d4d0a0a
607
#[test] fn ui() { if !version_check::is_nightly().unwrap() { return; } let mut config = compiletest::Config { mode: compiletest::common::Mode::Ui, src_base: std::path::PathBuf::from("tests/ui"), target_rustcflags: Some(String::from( "\ --edition=2018 \ -Z unstable-options \ --extern serde_repr \ ", )), build_base: std::path::PathBuf::from("target/ui"), ..Default::default() }; config.link_deps(); config.clean_rmeta(); compiletest::run_tests(&config); }
23.346154
58
0.520593
cc81f32b5967de7edc2806c809ab8e7cfbc837a0
2,718
use necsim_core::cogs::{Backup, PrimeableRng, RngCore}; use aes::{ cipher::{generic_array::GenericArray, BlockEncrypt, NewBlockCipher}, Aes128, }; #[allow(clippy::module_name_repetitions)] #[derive(Clone, Debug)] pub struct AesRng { cipher: Aes128, state: [u8; 16], cached: bool, } #[contract_trait] impl Backup for AesRng { unsafe fn backup_unchecked(&self) -> Self { self.clone() } } impl RngCore for AesRng { type Seed = [u8; 16]; #[must_use] #[inline] fn from_seed(seed: Self::Seed) -> Self { Self { cipher: Aes128::new(GenericArray::from_slice(&seed)), state: [0_u8; 16], cached: false, } } #[must_use] #[inline] fn sample_u64(&mut self) -> u64 { self.cached ^= true; if self.cached { // one more u64 will be cached self.cipher .encrypt_block(GenericArray::from_mut_slice(&mut self.state)); u64::from_le_bytes([ self.state[0], self.state[1], self.state[2], self.state[3], self.state[4], self.state[5], self.state[6], self.state[7], ]) } else { // one more u64 was cached let rand_u64 = u64::from_le_bytes([ self.state[8], self.state[9], self.state[10], self.state[11], self.state[12], self.state[13], self.state[14], self.state[15], ]); self.state[9] = self.state[9].wrapping_add(1); rand_u64 } } } impl PrimeableRng for AesRng { fn prime_with(&mut self, location_index: u64, time_index: u64) { let location_bytes = location_index.to_le_bytes(); self.state[0] = location_bytes[0]; self.state[1] = location_bytes[1]; self.state[2] = location_bytes[2]; self.state[3] = location_bytes[3]; self.state[4] = location_bytes[4]; self.state[5] = location_bytes[5]; self.state[6] = location_bytes[6]; self.state[7] = location_bytes[7]; let time_index_bytes = time_index.to_le_bytes(); self.state[8] = time_index_bytes[0]; self.state[9] = time_index_bytes[1]; self.state[10] = time_index_bytes[2]; self.state[11] = time_index_bytes[3]; self.state[12] = time_index_bytes[4]; self.state[13] = time_index_bytes[5]; self.state[14] = time_index_bytes[6]; self.state[15] = time_index_bytes[7]; self.cached = 
false; } }
26.38835
78
0.527226
e83399a238ad14f4e8c4589321a9f696b760619c
8,445
mod krate; pub mod package; pub mod preview; mod route; mod upload_form; pub use package::Package; use crate::settings::target::KvNamespace; use route::Route; use upload_form::build_script_and_upload_form; use std::path::Path; use crate::commands::kv; use crate::commands::kv::bucket::AssetManifest; use crate::commands::subdomain::Subdomain; use crate::commands::validate_worker_name; use crate::http; use crate::settings::global_user::GlobalUser; use crate::settings::target::{Site, Target}; use crate::terminal::{emoji, message}; pub fn publish( user: &GlobalUser, target: &mut Target, verbose: bool, ) -> Result<(), failure::Error> { let msg = match &target.route { Some(route) => &route, None => "workers_dev", }; log::info!("{}", msg); validate_target_required_fields_present(target)?; validate_worker_name(&target.name)?; if let Some(site_config) = target.site.clone() { bind_static_site_contents(user, target, &site_config, false)?; } let asset_manifest = upload_buckets(target, user, verbose)?; build_and_publish_script(&user, &target, asset_manifest)?; Ok(()) } // Updates given Target with kv_namespace binding for a static site assets KV namespace. 
pub fn bind_static_site_contents( user: &GlobalUser, target: &mut Target, site_config: &Site, preview: bool, ) -> Result<(), failure::Error> { let site_namespace = kv::namespace::site(target, &user, preview)?; // Check if namespace already is in namespace list for namespace in target.kv_namespaces() { if namespace.id == site_namespace.id { return Ok(()); // Sites binding already exists; ignore } } target.add_kv_namespace(KvNamespace { binding: "__STATIC_CONTENT".to_string(), id: site_namespace.id, bucket: Some(site_config.bucket.to_owned()), }); Ok(()) } fn build_and_publish_script( user: &GlobalUser, target: &Target, asset_manifest: Option<AssetManifest>, ) -> Result<(), failure::Error> { let worker_addr = format!( "https://api.cloudflare.com/client/v4/accounts/{}/workers/scripts/{}", target.account_id, target.name, ); let client = if target.site.is_some() { http::auth_client(Some("site"), user) } else { http::auth_client(None, user) }; let script_upload_form = build_script_and_upload_form(target, asset_manifest)?; let mut res = client .put(&worker_addr) .multipart(script_upload_form) .send()?; let res_status = res.status(); let res_text = res.text()?; if !res_status.is_success() { failure::bail!(error_msg(res_status, res_text)) } let pattern = if target.route.is_some() { let route = Route::new(&target)?; Route::publish(&user, &target, &route)?; log::info!("publishing to route"); route.pattern } else { log::info!("publishing to subdomain"); publish_to_subdomain(target, user)? }; log::info!("{}", &pattern); message::success(&format!( "Successfully published your script to {}", &pattern )); Ok(()) } fn error_msg(status: reqwest::StatusCode, text: String) -> String { if text.contains("\"code\": 10034,") { "You need to verify your account's email address before you can publish. You can do this by checking your email or logging in to https://dash.cloudflare.com.".to_string() } else { format!("Something went wrong! 
Status: {}, Details {}", status, text) } } #[test] fn fails_with_good_error_msg_on_verify_email_err() { let status = reqwest::StatusCode::FORBIDDEN; let text = r#"{ "result": null, "success": false, "errors": [ { "code": 10034, "message": "workers.api.error.email_verification_required" } ], "messages": [] }"# .to_string(); let result = error_msg(status, text); assert!(result.contains("https://dash.cloudflare.com")); } pub fn upload_buckets( target: &Target, user: &GlobalUser, verbose: bool, ) -> Result<Option<AssetManifest>, failure::Error> { let mut asset_manifest = None; for namespace in &target.kv_namespaces() { if let Some(bucket) = &namespace.bucket { if bucket.is_empty() { failure::bail!( "{} You need to specify a bucket directory in your wrangler.toml", emoji::WARN ) } let path = Path::new(&bucket); if !path.exists() { failure::bail!( "{} bucket directory \"{}\" does not exist", emoji::WARN, path.display() ) } else if !path.is_dir() { failure::bail!( "{} bucket \"{}\" is not a directory", emoji::WARN, path.display() ) } let manifest_result = kv::bucket::sync(target, user, &namespace.id, path, verbose)?; if target.site.is_some() { if asset_manifest.is_none() { asset_manifest = Some(manifest_result) } else { // only site manifest should be returned unreachable!() } } } } Ok(asset_manifest) } fn build_subdomain_request() -> String { serde_json::json!({ "enabled": true }).to_string() } fn publish_to_subdomain(target: &Target, user: &GlobalUser) -> Result<String, failure::Error> { log::info!("checking that subdomain is registered"); let subdomain = Subdomain::get(&target.account_id, user)?; let subdomain = match subdomain { Some(subdomain) => subdomain, None => failure::bail!("Before publishing to workers.dev, you must register a subdomain. 
Please choose a name for your subdomain and run `wrangler subdomain <name>`.") }; let sd_worker_addr = format!( "https://api.cloudflare.com/client/v4/accounts/{}/workers/scripts/{}/subdomain", target.account_id, target.name, ); let client = http::auth_client(None, user); log::info!("Making public on subdomain..."); let mut res = client .post(&sd_worker_addr) .header("Content-type", "application/json") .body(build_subdomain_request()) .send()?; if !res.status().is_success() { failure::bail!( "Something went wrong! Status: {}, Details {}", res.status(), res.text()? ) } Ok(format!("https://{}.{}.workers.dev", target.name, subdomain)) } fn validate_target_required_fields_present(target: &Target) -> Result<(), failure::Error> { let mut missing_fields = Vec::new(); if target.account_id.is_empty() { missing_fields.push("account_id") }; if target.name.is_empty() { missing_fields.push("name") }; match &target.kv_namespaces { Some(kv_namespaces) => { for kv in kv_namespaces { if kv.binding.is_empty() { missing_fields.push("kv-namespace binding") } if kv.id.is_empty() { missing_fields.push("kv-namespace id") } } } None => {} } let destination = if target.route.is_some() { // check required fields for publishing to a route if target .zone_id .as_ref() .unwrap_or(&"".to_string()) .is_empty() { missing_fields.push("zone_id") }; if target.route.as_ref().unwrap_or(&"".to_string()).is_empty() { missing_fields.push("route") }; // zoned deploy destination "a route" } else { // zoneless deploy destination "your subdomain" }; let (field_pluralization, is_are) = match missing_fields.len() { n if n >= 2 => ("fields", "are"), 1 => ("field", "is"), _ => ("", ""), }; if !missing_fields.is_empty() { failure::bail!( "{} Your wrangler.toml is missing the {} {:?} which {} required to publish to {}!", emoji::WARN, field_pluralization, missing_fields, is_are, destination ); }; Ok(()) }
28.72449
178
0.573949
0104ad0614d42c9da18e12ef4dfb97f8c01bdd3d
7,191
// Copyright 2018 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. //! System service for snoop cellular modems use { anyhow::{format_err, Context as _, Error}, argh::FromArgs, fidl::endpoints::{Proxy, RequestStream, ServerEnd}, fidl_fuchsia_io as fio, fidl_fuchsia_telephony_snoop::{ PublisherMarker as QmiSnoopMarker, PublisherRequest as QmiSnoopRequest, PublisherRequestStream as QmiSnoopRequestStream, SnooperControlHandle, SnooperRequest, SnooperRequestStream, }, fuchsia_async as fasync, fuchsia_component::server::ServiceFs, fuchsia_syslog::{self as syslog, macros::*}, fuchsia_vfs_watcher::{WatchEvent, Watcher}, futures::{future, StreamExt, TryFutureExt, TryStreamExt}, parking_lot::Mutex, std::{ fs::File, path::{Path, PathBuf}, sync::Arc, vec::Vec, }, tel_dev::isolated_devmgr, }; const QMI_TRANSPORT: &str = "/dev/class/qmi-transport"; #[derive(Default)] pub struct Snooper { control_handles: Vec<SnooperControlHandle>, device_num: u32, } #[derive(FromArgs, Debug)] #[argh(description = "Snoop configs")] pub struct Args { /// snoop driver loaded in Isolated Devmgr component #[argh(switch, short = 't')] pub use_isolated_devmgr: bool, } async fn watch_new_devices( snooper: Arc<Mutex<Snooper>>, path_in_dev: &Path, use_isolated_devmgr: bool, ) -> Result<(), Error> { // TODO(jiamingw): make more generic to support non-qmi devices let (protocol_path, dir) = if use_isolated_devmgr { ( path_in_dev.strip_prefix("/dev")?, isolated_devmgr::open_dir_in_isolated_devmgr(path_in_dev.strip_prefix("/dev")?) .context("Opening dir in IsolatedDevmgr failed")?, ) } else { (path_in_dev, File::open(path_in_dev).context("Opening dir in devmgr failed")?) 
}; let channel = fdio::clone_channel(&dir).unwrap(); let async_channel = fasync::Channel::from_channel(channel).unwrap(); let directory = fio::DirectoryProxy::from_channel(async_channel); let mut watcher = Watcher::new(directory).await.with_context(|| format!("could not watch {:?}", &dir))?; while let Some(msg) = watcher.try_next().await? { match msg.event { WatchEvent::IDLE => { fx_log_info!("watch_new_devices: all devices enumerated"); } WatchEvent::REMOVE_FILE => { snooper.lock().device_num -= 1; fx_log_info!("watch_new_devices: device removed"); } WatchEvent::EXISTING | WatchEvent::ADD_FILE => { let device_path: PathBuf = protocol_path.join(msg.filename); fx_log_info!("watch_new_devices: connecting to {}", device_path.display()); let file: File = if use_isolated_devmgr { isolated_devmgr::open_file_in_isolated_devmgr(device_path)? } else { File::open(device_path)? }; let snoop_endpoint_server_side: ServerEnd<QmiSnoopMarker> = qmi::connect_snoop_channel(&file).await?; let snooper_cloned = snooper.clone(); let mut request_stream: QmiSnoopRequestStream = snoop_endpoint_server_side.into_stream()?; snooper.lock().device_num += 1; fasync::Task::spawn(async move { fx_log_info!("watch_new_devices: spawn async block for forwarding msg"); while let Ok(Some(QmiSnoopRequest::SendMessage { mut msg, control_handle: _, })) = request_stream.try_next().await { let mut snooper_locked = snooper_cloned.lock(); fx_log_info!( "watch_new_devices: qmi msg rcvd, forwarding to {} client...", snooper_locked.control_handles.len() ); // try to send message to all clients connected to snooper // remove client's control handle if there is any error let mut removed_reason = Vec::<fidl::Error>::new(); snooper_locked.control_handles.retain(|ctl_hdl| { if let Err(e) = ctl_hdl.send_on_message(&mut msg) { removed_reason.push(e); false } else { true } }); if removed_reason.len() > 0 { fx_log_info!( "watch_new_devices: removed {} hdl with reason {:?}", removed_reason.len(), removed_reason ); } } 
fx_log_info!("watch_new_devices: stop forwarding msg"); }) .detach(); } _ => { return Err(format_err!("watch_new_devices: unknown watcher event")); } } } fx_log_err!("watch new devices terminated"); Ok(()) } // forwarding QMI messages from driver to snooper client #[fasync::run_singlethreaded] async fn main() { syslog::init_with_tags(&["tel-snooper"]).expect("Can't init logger"); fx_log_info!("Starting telephony snoop service..."); let args: Args = argh::from_env(); let snooper = Arc::new(Mutex::new(Snooper { control_handles: vec![], device_num: 0 })); let qmi_device_path: &Path = Path::new(QMI_TRANSPORT); let qmi_device_watcher = watch_new_devices(snooper.clone(), qmi_device_path, args.use_isolated_devmgr) .unwrap_or_else(|e| fx_log_err!("Failed to watch new devices: {:?}", e)); let mut fs = ServiceFs::new(); fs.dir("svc").add_fidl_service(move |mut stream: SnooperRequestStream| { fx_log_info!("new client connect to Snooper"); snooper.lock().control_handles.push(stream.control_handle()); let snooper_clone = snooper.clone(); fasync::Task::spawn( async move { while let Some(req) = (stream.try_next()).await? { match req { SnooperRequest::GetDeviceNum { responder } => { if let Err(e) = responder.send(snooper_clone.lock().device_num) { fx_log_err!("failed to respond with device number {:?}", e); } } } } Ok(()) } .unwrap_or_else(|e: anyhow::Error| fx_log_err!("{:?}", e)), ) .detach(); }); fs.take_and_serve_directory_handle().expect("ServiceFs failed to serve directory"); future::join(fs.collect::<()>(), qmi_device_watcher).await; }
42.052632
94
0.551662
2fb49ee73d095e6a15ac9e5db98ce1a15392956b
4,252
use rand::Rng; use std::sync::Arc; use crate::aabb::AABB; use crate::ray::Ray; use crate::traceable::{RayHit, Traceable}; use crate::vec3::Vec3; use crate::world::{World, WorldObjectsList}; /** Bounding Volume Hierarchy node. Used to split World space into groups, to easier discard a lot of objects at once. Form of acceleration structure. */ pub struct BVHNode { aabb: AABB, child_left: Arc<dyn Traceable>, child_right: Arc<dyn Traceable>, } impl BVHNode { pub fn build(world: &World) -> BVHNode { BVHNode::build_impl(&world.objects, 0, world.objects.len()) } fn build_impl(world_objects: &WorldObjectsList, start_idx: usize, end_idx: usize) -> BVHNode { let mut node = BVHNode::subdivide_into_bvh(world_objects, start_idx, end_idx); // recalc aabb match ( node.child_left.bounding_box(), node.child_right.bounding_box(), ) { (Some(bb_left), Some(bb_right)) => { node.aabb = AABB::merge(&bb_left, &bb_right); } _ => { panic!("Tried to create BVH, but some objects do not have bounding box"); } }; node } /** Given object list, subdivide into recursive BVH nodes. This fn returns left/right children, please recalc AABB after! */ fn subdivide_into_bvh( world_objects: &WorldObjectsList, start_idx: usize, end_idx: usize, ) -> BVHNode { let objects_count = end_idx - start_idx; let mock_aabb = AABB { min: Vec3::zero(), max: Vec3::one(), }; if objects_count < 1 { panic!("Tried to create BVHNode from empty objects list"); } else if objects_count == 1 { // single object node. A bit of waste actually.. return BVHNode { aabb: mock_aabb, child_left: world_objects[start_idx].clone(), child_right: world_objects[start_idx].clone(), }; } else if objects_count == 2 { // 2 objects - assign as left/right children return BVHNode { aabb: mock_aabb, child_left: world_objects[start_idx].clone(), child_right: world_objects[start_idx + 1].clone(), }; } // many objects - pick X/Y/Z axis at random and split in half objects wrt. 
to that axis let mut objects_copy = world_objects[start_idx..end_idx].to_vec(); let axis_to_sort_by: u32 = rand::thread_rng().gen_range(0..3); // represents x/y/z axis sort_by_axis_distance(&mut objects_copy, axis_to_sort_by as usize); let mid = objects_copy.len() / 2; BVHNode { aabb: mock_aabb, child_left: Arc::new(BVHNode::build_impl(&objects_copy, 0, mid)), // first half child_right: Arc::new(BVHNode::build_impl(&objects_copy, mid, objects_copy.len())), // second half } } } /** Sort objects on a selected axis */ fn sort_by_axis_distance(objects: &mut WorldObjectsList, axis_to_sort_by: usize) { objects.sort_by(|a, b| match (a.bounding_box(), b.bounding_box()) { (Some(bb_a), Some(bb_b)) => { let val_a = bb_a.min[axis_to_sort_by]; let val_b = bb_b.min[axis_to_sort_by]; val_a.partial_cmp(&val_b).unwrap() } _ => { panic!("Tried to create BVH, but some objects do not have bounding box"); } }); } impl Traceable for BVHNode { fn bounding_box(&self) -> Option<AABB> { Some(self.aabb) } /** Check ray agains BVH. This is the fn that makes use of AABBs */ fn check_intersection(&self, r: &Ray, t_min: f32, t_max: f32) -> Option<RayHit> { if !self.aabb.check_intersection(r, t_min, t_max) { // this node was not hit by the ray, skip entirely return None; } match self.child_left.check_intersection(r, t_min, t_max) { None => { // left missed, return right that maybe hit return self.child_right.check_intersection(r, t_min, t_max); } Some(left_hit_data) => { // check if right hit closer than left let hit_right = self .child_right .check_intersection(r, t_min, left_hit_data.t); // NOTE: we changed max to `left_hit_data.t` match hit_right { None => return Some(left_hit_data), // left hit, right missed (or was farther) Some(right_hit_data) => return Some(right_hit_data), // both hit, but right was closer } } } } }
31.496296
104
0.642521
4a76d683d6e5033de72cef177c0e62674f5be8b1
743
// Copyright 2018 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // Regression test for #51008 -- the anonymous lifetime in `&i32` was // being incorrectly considered part of the "elided lifetimes" from // the impl. // // run-pass #![feature(rust_2018_preview)] trait A { } impl<F> A for F where F: PartialEq<fn(&i32)> { } fn main() {}
28.576923
69
0.716016
e54332c9492404176267cc741c414fcd38cbcb48
20,422
// Copyright 2018-2022 Cargill Incorporated // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use std::convert::{TryFrom, TryInto}; use std::sync::mpsc::{channel, Sender}; use std::sync::{Arc, Mutex}; use std::thread::{Builder, JoinHandle}; use std::time::Duration; use protobuf::Message; use splinter::consensus::{ error::{ConsensusSendError, ProposalManagerError}, two_phase::v1::TwoPhaseEngine as TwoPhaseEngineV1, two_phase::v2::TwoPhaseEngine as TwoPhaseEngineV2, ConsensusEngine, ConsensusMessage, ConsensusNetworkSender, PeerId, Proposal, ProposalId, ProposalManager, ProposalUpdate, StartupState, }; use transact::protos::IntoBytes; use crate::protos::scabbard::{ProposedBatch, ScabbardMessage, ScabbardMessage_Type}; use super::error::{ScabbardConsensusManagerError, ScabbardError}; use super::shared::ScabbardShared; use super::state::ScabbardState; use super::ScabbardVersion; /// Component used by the service to manage and interact with consenus pub struct ScabbardConsensusManager { consensus_msg_tx: Sender<ConsensusMessage>, proposal_update_tx: Sender<ProposalUpdate>, thread_handle: JoinHandle<()>, } impl ScabbardConsensusManager { /// Create the proposal manager, network sender, and channels used to communicate with /// consensus, and start consensus in a separate thread. 
pub fn new( service_id: String, version: ScabbardVersion, shared: Arc<Mutex<ScabbardShared>>, state: Arc<Mutex<ScabbardState>>, // The coordinator timeout for the two-phase commit consensus engine coordinator_timeout: Duration, ) -> Result<Self, ScabbardConsensusManagerError> { let peer_ids = shared .lock() .map_err(|_| ScabbardConsensusManagerError(Box::new(ScabbardError::LockPoisoned)))? .peer_services() .iter() .map(|id| id.as_bytes().into()) .collect(); let (consensus_msg_tx, consensus_msg_rx) = channel(); let (proposal_update_tx, proposal_update_rx) = channel(); let proposal_manager = ScabbardProposalManager::new( service_id.clone(), version, proposal_update_tx.clone(), shared.clone(), state, ); let consensus_network_sender = ScabbardConsensusNetworkSender::new(service_id.clone(), shared); let startup_state = StartupState { id: service_id.as_bytes().into(), peer_ids, last_proposal: None, }; let thread_handle = Builder::new() .name(format!("consensus-{}", service_id)) .spawn(move || match version { ScabbardVersion::V1 => { let mut two_phase_engine = TwoPhaseEngineV1::new(coordinator_timeout); if let Err(err) = two_phase_engine.run( consensus_msg_rx, proposal_update_rx, Box::new(consensus_network_sender), Box::new(proposal_manager), startup_state, ) { error!("two phase consensus exited with an error: {}", err) } } ScabbardVersion::V2 => { let mut two_phase_engine = TwoPhaseEngineV2::new(coordinator_timeout); if let Err(err) = two_phase_engine.run( consensus_msg_rx, proposal_update_rx, Box::new(consensus_network_sender), Box::new(proposal_manager), startup_state, ) { error!("two phase consensus exited with an error: {}", err) } } }) .map_err(|err| ScabbardConsensusManagerError(Box::new(err)))?; Ok(ScabbardConsensusManager { consensus_msg_tx, proposal_update_tx, thread_handle, }) } /// Consumes self and shuts down the consensus thread. 
pub fn shutdown(self) -> Result<(), ScabbardConsensusManagerError> { self.send_update(ProposalUpdate::Shutdown)?; self.thread_handle .join() .unwrap_or_else(|err| error!("consensus thread failed: {:?}", err)); Ok(()) } pub fn handle_message( &self, message_bytes: &[u8], ) -> Result<(), ScabbardConsensusManagerError> { let consensus_message = ConsensusMessage::try_from(message_bytes) .map_err(|err| ScabbardConsensusManagerError(Box::new(err)))?; self.consensus_msg_tx .send(consensus_message) .map_err(|err| ScabbardConsensusManagerError(Box::new(err)))?; Ok(()) } pub fn send_update(&self, update: ProposalUpdate) -> Result<(), ScabbardConsensusManagerError> { self.proposal_update_tx .send(update) .map_err(|err| ScabbardConsensusManagerError(Box::new(err))) } } pub struct ScabbardProposalManager { service_id: String, version: ScabbardVersion, proposal_update_sender: Sender<ProposalUpdate>, shared: Arc<Mutex<ScabbardShared>>, state: Arc<Mutex<ScabbardState>>, } impl ScabbardProposalManager { pub fn new( service_id: String, version: ScabbardVersion, proposal_update_sender: Sender<ProposalUpdate>, shared: Arc<Mutex<ScabbardShared>>, state: Arc<Mutex<ScabbardState>>, ) -> Self { ScabbardProposalManager { service_id, version, proposal_update_sender, shared, state, } } } impl ProposalManager for ScabbardProposalManager { fn create_proposal( &self, // Ignoring previous proposal ID and consensus data, because this service and two phase // consensus don't care about it. _previous_proposal_id: Option<ProposalId>, _consensus_data: Vec<u8>, ) -> Result<(), ProposalManagerError> { let mut shared = self .shared .lock() .map_err(|_| ProposalManagerError::Internal(Box::new(ScabbardError::LockPoisoned)))?; if let Some(batch) = shared .pop_batch_from_queue() .map_err(|err| ProposalManagerError::Internal(Box::new(err)))? { let expected_hash = self .state .lock() .map_err(|_| ProposalManagerError::Internal(Box::new(ScabbardError::LockPoisoned)))? 
.prepare_change(batch.clone()) .map_err(|err| ProposalManagerError::Internal(Box::new(err)))?; // Intentionally leaving out the previous_id and proposal_height fields, since this // service and two phase consensus don't use them. This means the proposal ID can just // be the summary (in v1) or batch ID (in v2). let id = match self.version { ScabbardVersion::V1 => expected_hash.as_bytes().into(), ScabbardVersion::V2 => batch.batch().header_signature().as_bytes().into(), }; let proposal = Proposal { id, summary: expected_hash.as_bytes().into(), ..Default::default() }; shared.add_open_proposal(proposal.clone(), batch.clone()); // Send the proposal to the other services let mut proposed_batch = ProposedBatch::new(); proposed_batch.set_proposal( proposal .clone() .try_into() .map_err(|err| ProposalManagerError::Internal(Box::new(err)))?, ); proposed_batch.set_batch( batch .into_bytes() .map_err(|err| ProposalManagerError::Internal(Box::new(err)))?, ); proposed_batch.set_service_id(self.service_id.clone()); let mut msg = ScabbardMessage::new(); msg.set_message_type(ScabbardMessage_Type::PROPOSED_BATCH); msg.set_proposed_batch(proposed_batch); let msg_bytes = msg .write_to_bytes() .map_err(|err| ProposalManagerError::Internal(Box::new(err)))?; let sender = shared .network_sender() .ok_or(ProposalManagerError::NotReady)?; for service in shared.peer_services() { sender .send(service, msg_bytes.as_slice()) .map_err(|err| ProposalManagerError::Internal(Box::new(err)))?; } self.proposal_update_sender .send(ProposalUpdate::ProposalCreated(Some(proposal)))?; } else { self.proposal_update_sender .send(ProposalUpdate::ProposalCreated(None))?; } Ok(()) } fn check_proposal(&self, id: &ProposalId) -> Result<(), ProposalManagerError> { let (proposal, batch) = self .shared .lock() .map_err(|_| ProposalManagerError::Internal(Box::new(ScabbardError::LockPoisoned)))? .get_open_proposal(id) .ok_or_else(|| ProposalManagerError::UnknownProposal(id.clone()))? 
.clone(); let hash = self .state .lock() .map_err(|_| ProposalManagerError::Internal(Box::new(ScabbardError::LockPoisoned)))? .prepare_change(batch) .map_err(|err| ProposalManagerError::Internal(Box::new(err)))?; if hash.as_bytes() != proposal.summary { warn!("Hash mismatch: expected {} but was {}", id, hash); self.proposal_update_sender .send(ProposalUpdate::ProposalInvalid(id.clone()))?; } else { self.proposal_update_sender .send(ProposalUpdate::ProposalValid(id.clone()))?; } Ok(()) } fn accept_proposal( &self, id: &ProposalId, // Ignoring consensus data, because this service and two phase consensus don't care about // it. _consensus_data: Option<Vec<u8>>, ) -> Result<(), ProposalManagerError> { let mut shared = self .shared .lock() .map_err(|_| ProposalManagerError::Internal(Box::new(ScabbardError::LockPoisoned)))?; shared.remove_open_proposal(id); self.state .lock() .map_err(|_| ProposalManagerError::Internal(Box::new(ScabbardError::LockPoisoned)))? .commit() .map_err(|err| ProposalManagerError::Internal(Box::new(err)))?; self.proposal_update_sender .send(ProposalUpdate::ProposalAccepted(id.clone()))?; info!("Committed proposal {}", id); Ok(()) } fn reject_proposal(&self, id: &ProposalId) -> Result<(), ProposalManagerError> { let mut shared = self .shared .lock() .map_err(|_| ProposalManagerError::Internal(Box::new(ScabbardError::LockPoisoned)))?; shared.remove_open_proposal(id); self.state .lock() .map_err(|_| ProposalManagerError::Internal(Box::new(ScabbardError::LockPoisoned)))? 
.rollback() .map_err(|err| ProposalManagerError::Internal(Box::new(err)))?; info!("Rolled back proposal {}", id); Ok(()) } } pub struct ScabbardConsensusNetworkSender { service_id: String, shared: Arc<Mutex<ScabbardShared>>, } impl ScabbardConsensusNetworkSender { pub fn new(service_id: String, shared: Arc<Mutex<ScabbardShared>>) -> Self { ScabbardConsensusNetworkSender { service_id, shared } } } impl ConsensusNetworkSender for ScabbardConsensusNetworkSender { fn send_to(&self, peer_id: &PeerId, message: Vec<u8>) -> Result<(), ConsensusSendError> { let peer_id_string = String::from_utf8(peer_id.clone().into()) .map_err(|err| ConsensusSendError::Internal(Box::new(err)))?; let consensus_message = ConsensusMessage::new(message, self.service_id.as_bytes().into()); let mut msg = ScabbardMessage::new(); msg.set_message_type(ScabbardMessage_Type::CONSENSUS_MESSAGE); msg.set_consensus_message(consensus_message.try_into()?); let shared = self .shared .lock() .map_err(|_| ConsensusSendError::Internal(Box::new(ScabbardError::LockPoisoned)))?; if !shared.peer_services().contains(&peer_id_string) { return Err(ConsensusSendError::UnknownPeer(peer_id.clone())); } let network_sender = shared .network_sender() .ok_or(ConsensusSendError::NotReady)?; network_sender .send(&peer_id_string, msg.write_to_bytes()?.as_slice()) .map_err(|err| ConsensusSendError::Internal(Box::new(err)))?; Ok(()) } fn broadcast(&self, message: Vec<u8>) -> Result<(), ConsensusSendError> { let consensus_message = ConsensusMessage::new(message, self.service_id.as_bytes().into()); let mut msg = ScabbardMessage::new(); msg.set_message_type(ScabbardMessage_Type::CONSENSUS_MESSAGE); msg.set_consensus_message(consensus_message.try_into()?); let shared = self .shared .lock() .map_err(|_| ConsensusSendError::Internal(Box::new(ScabbardError::LockPoisoned)))?; let network_sender = shared .network_sender() .ok_or(ConsensusSendError::NotReady)?; for service in shared.peer_services() { network_sender .send(service, 
msg.write_to_bytes()?.as_slice()) .map_err(|err| ConsensusSendError::Internal(Box::new(err)))?; } Ok(()) } } #[cfg(test)] mod tests { use super::*; use std::collections::{HashSet, VecDeque}; use cylinder::{secp256k1::Secp256k1Context, VerifierFactory}; use splinter::service::{ServiceMessageContext, ServiceNetworkSender, ServiceSendError}; /// Tests that the network sender properly creates messages and sends them using the /// `ServiceNetworkSender`. #[test] fn network_sender() { let service_sender = MockServiceNetworkSender::new(); let mut peer_services = HashSet::new(); peer_services.insert("svc1".to_string()); peer_services.insert("svc2".to_string()); let shared = Arc::new(Mutex::new(ScabbardShared::new( VecDeque::new(), Some(Box::new(service_sender.clone())), peer_services.clone(), "svc0".to_string(), #[cfg(feature = "metrics")] "vzrQS-rvwf4".to_string(), Secp256k1Context::new().new_verifier(), ScabbardVersion::V2, ))); let consensus_sender = ScabbardConsensusNetworkSender::new("svc0".into(), shared); // Test send_to consensus_sender .send_to(&"svc1".as_bytes().into(), vec![0]) .expect("failed to send"); let (recipient, message) = service_sender .sent .lock() .expect("sent lock poisoned") .get(0) .expect("1st message not sent") .clone(); assert_eq!(recipient, "svc1".to_string()); let scabbard_message: ScabbardMessage = Message::parse_from_bytes(&message).expect("failed to parse 1st scabbard message"); assert_eq!( scabbard_message.get_message_type(), ScabbardMessage_Type::CONSENSUS_MESSAGE ); let consensus_message = ConsensusMessage::try_from(scabbard_message.get_consensus_message()) .expect("failed to parse 1st consensus message"); assert_eq!(consensus_message.message, vec![0]); assert_eq!(consensus_message.origin_id, "svc0".as_bytes().into()); // Test broadcast consensus_sender.broadcast(vec![1]).expect("failed to send"); // First broadcast message let (recipient, message) = service_sender .sent .lock() .expect("sent lock poisoned") .get(1) .expect("2nd message 
not sent") .clone(); assert!(peer_services.remove(&recipient)); let scabbard_message: ScabbardMessage = Message::parse_from_bytes(&message).expect("failed to parse 2nd scabbard message"); assert_eq!( scabbard_message.get_message_type(), ScabbardMessage_Type::CONSENSUS_MESSAGE ); let consensus_message = ConsensusMessage::try_from(scabbard_message.get_consensus_message()) .expect("failed to parse 2nd consensus message"); assert_eq!(consensus_message.message, vec![1]); assert_eq!(consensus_message.origin_id, "svc0".as_bytes().into()); // Second broadcast message let (recipient, message) = service_sender .sent .lock() .expect("sent lock poisoned") .get(2) .expect("3rd message not sent") .clone(); assert!(peer_services.remove(&recipient)); let scabbard_message: ScabbardMessage = Message::parse_from_bytes(&message).expect("failed to parse 3rd scabbard message"); assert_eq!( scabbard_message.get_message_type(), ScabbardMessage_Type::CONSENSUS_MESSAGE ); let consensus_message = ConsensusMessage::try_from(scabbard_message.get_consensus_message()) .expect("failed to parse 3rd consensus message"); assert_eq!(consensus_message.message, vec![1]); assert_eq!(consensus_message.origin_id, "svc0".as_bytes().into()); } #[derive(Clone, Debug)] pub struct MockServiceNetworkSender { pub sent: Arc<Mutex<Vec<(String, Vec<u8>)>>>, pub sent_and_awaited: Arc<Mutex<Vec<(String, Vec<u8>)>>>, pub replied: Arc<Mutex<Vec<(ServiceMessageContext, Vec<u8>)>>>, } impl MockServiceNetworkSender { pub fn new() -> Self { MockServiceNetworkSender { sent: Arc::new(Mutex::new(vec![])), sent_and_awaited: Arc::new(Mutex::new(vec![])), replied: Arc::new(Mutex::new(vec![])), } } } impl ServiceNetworkSender for MockServiceNetworkSender { fn send(&self, recipient: &str, message: &[u8]) -> Result<(), ServiceSendError> { self.sent .lock() .expect("sent lock poisoned") .push((recipient.to_string(), message.to_vec())); Ok(()) } fn send_and_await( &self, recipient: &str, message: &[u8], ) -> Result<Vec<u8>, 
ServiceSendError> { self.sent_and_awaited .lock() .expect("sent_and_awaited lock poisoned") .push((recipient.to_string(), message.to_vec())); Ok(vec![]) } fn reply( &self, message_origin: &ServiceMessageContext, message: &[u8], ) -> Result<(), ServiceSendError> { self.replied .lock() .expect("replied lock poisoned") .push((message_origin.clone(), message.to_vec())); Ok(()) } fn clone_box(&self) -> Box<dyn ServiceNetworkSender> { Box::new(self.clone()) } fn send_with_sender( &mut self, _recipient: &str, _message: &[u8], _sender: &str, ) -> Result<(), ServiceSendError> { Ok(()) } } }
35.702797
100
0.584713
e81022ced14268359f6672156df3a53efd596b99
4,828
use std::env; use std::io::prelude::*; use std::fs::File; use std::path::Path; extern crate toml; #[macro_use] extern crate serde_derive; // error in rust compiler. Bugfix requested in Sept. 2017 // these are used, but the compiler is not seeing it for // some reason #[allow(unused_imports)] #[macro_use] extern crate serde_json; // error in rust compiler. Bugfix has been submitted in Sept. 2017 #[allow(unused_imports)] #[macro_use] extern crate serde; // used in formatting the Cargo.toml file #[derive(Deserialize, Debug)] struct Tomlfile { contents: Contents, } // used in formatting the Cargo.toml file #[derive(Deserialize, Debug)] struct Metadata { deb: Deb, } // used in formatting the Cargo.toml file #[derive(Deserialize, Debug)] struct Deb { revision: Option<String>, } // used in formatting the Cargo.toml file #[derive(Deserialize, Debug)] struct Package { name: String, version: Option<String>, metadata: Metadata, } // used in formatting the Cargo.toml file #[derive(Deserialize, Debug)] struct Contents { package: Package, dependencies: Option<toml::Value>, } fn main() { let target = env::var("TARGET").unwrap(); println!("target={}", target); if let Ok(_mode) = env::var("LIBINDY_STATIC") { let libindy_lib_path = env::var("LIBINDY_DIR").unwrap(); println!("cargo:rustc-link-search=native={}",libindy_lib_path); println!("cargo:rustc-link-lib=static=indy"); }else if target.contains("aarch64-linux-android") || target.contains("armv7-linux-androideabi") || target.contains("arm-linux-androideabi") || target.contains("i686-linux-android") || target.contains("x86_64-linux-android") || target.contains("aarch64-apple-ios") || target.contains("armv7-apple-ios") || target.contains("armv7s-apple-ios") || target.contains("i386-apple-ios") || target.contains("x86_64-apple-ios") { let libindy_lib_path = match env::var("LIBINDY_DIR"){ Ok(val) => val, Err(..) 
=> panic!("Missing required environment variable LIBINDY_DIR") }; let openssl = match env::var("OPENSSL_LIB_DIR") { Ok(val) => val, Err(..) => match env::var("OPENSSL_DIR") { Ok(dir) => Path::new(&dir[..]).join("/lib").to_string_lossy().into_owned(), Err(..) => panic!("Missing required environment variables OPENSSL_DIR or OPENSSL_LIB_DIR") } }; println!("cargo:rustc-link-search=native={}",libindy_lib_path); println!("cargo:rustc-link-lib=static=indy"); println!("cargo:rustc-link-search=native={}", openssl); println!("cargo:rustc-link-lib=static=crypto"); println!("cargo:rustc-link-lib=static=ssl"); }else if target.contains("darwin"){ //OSX specific logic println!("cargo:rustc-link-lib=indy"); //OSX does not allow 3rd party libs to be installed in /usr/lib. Instead install it in /usr/local/lib println!("cargo:rustc-link-search=native=/usr/local/lib"); }else if target.contains("-linux-"){ //Linux specific logic println!("cargo:rustc-link-lib=indy"); println!("cargo:rustc-link-search=native=/usr/lib"); } match env::var("CARGO_FEATURE_CI") { Ok(_) => { println!("injecting version information"); // Leaving as unwrap, this is in the build script. let revision = get_revision().unwrap(); write_variables(&revision); }, Err(_) => {println!("NOT injecting version information"); }, }; } // Writes to the file 'src/utils/version_constants.rs' for use // in outputing the version dynamically. fn write_variables(revision:&str) { let out_dir = "src/utils/"; let dest_path = Path::new(&out_dir).join("version_constants.rs"); let mut f = File::create(&dest_path).unwrap(); let s = format!("pub const VERSION: &'static str = env!(\"CARGO_PKG_VERSION\");\npub const REVISION: &'static str = \"{}\";\n", revision); if let Err(e) = f.write_all(s.as_bytes()) { panic!("Error creating version_constants.rs: {}", e); }; } // Gets the revision number from the Cargo.toml file. 
pub fn get_revision() -> Option<String> { let dir = match env::var("CARGO_MANIFEST_DIR"){ Ok(d) => d, Err(_) => panic!("Couldn't Manifest Directory"), }; let filename = "Cargo.toml"; let p = format!("{}/{}",dir,filename); let mut input = String::new(); File::open(p).and_then(|mut f| { f.read_to_string(&mut input)}).unwrap(); let tomlfile:Contents = toml::from_str(&input).unwrap(); let revision:String = match tomlfile.package.metadata.deb.revision { Some(v) => v, None => String::from(""), }; Some(format!("+{}", revision)) }
33.527778
142
0.630075
0889aa5a19940fe68897220c795a33b846d7ad1f
665
/* * Copyright 2021 Fluence Labs Limited * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ mod instructions_tracker; pub use instructions_tracker::*;
33.25
75
0.745865
08d59b6fbc36f195058666ce5305a43d74ea720a
123
// compile-flags: -C overflow-checks=on // EMIT_MIR checked_add.main.ConstProp.diff fn main() { let x: u32 = 1 + 1; }
17.571429
43
0.650407
e536d3c95ba3d0d9e686571cb56b23498a8a39d4
18,859
//! This crate aims to provide a common set of tools that can be used to create a "environment" to //! run Lighthouse services like the `beacon_node` or `validator_client`. This allows for the //! unification of creating tokio runtimes, loggers and eth2 specifications in production and in //! testing. //! //! The idea is that the main thread creates an `Environment`, which is then used to spawn a //! `Context` which can be handed to any service that wishes to start async tasks or perform //! logging. use eth2_config::Eth2Config; use eth2_network_config::Eth2NetworkConfig; use futures::channel::mpsc::{channel, Receiver, Sender}; use futures::{future, StreamExt}; use slog::{error, info, o, warn, Drain, Duplicate, Level, Logger}; use sloggers::{ file::FileLoggerBuilder, null::NullLoggerBuilder, types::Format, types::Severity, Build, }; use std::fs::create_dir_all; use std::path::PathBuf; use std::sync::Arc; use task_executor::{ShutdownReason, TaskExecutor}; use tokio::runtime::{Builder as RuntimeBuilder, Runtime}; use types::{EthSpec, MainnetEthSpec, MinimalEthSpec}; #[cfg(target_family = "unix")] use { futures::Future, std::{pin::Pin, task::Context, task::Poll}, tokio::signal::unix::{signal, Signal, SignalKind}, }; #[cfg(not(target_family = "unix"))] use {futures::channel::oneshot, std::cell::RefCell}; const LOG_CHANNEL_SIZE: usize = 2048; /// The maximum time in seconds the client will wait for all internal tasks to shutdown. const MAXIMUM_SHUTDOWN_TIME: u64 = 15; /// Configuration for logging. /// Background file logging is disabled if one of: /// - `path` == None, /// - `max_log_size` == 0, /// - `max_log_number` == 0, pub struct LoggerConfig<'a> { pub path: Option<PathBuf>, pub debug_level: &'a str, pub logfile_debug_level: &'a str, pub log_format: Option<&'a str>, pub max_log_size: u64, pub max_log_number: usize, pub compression: bool, } /// Builds an `Environment`. 
pub struct EnvironmentBuilder<E: EthSpec> { runtime: Option<Arc<Runtime>>, log: Option<Logger>, eth_spec_instance: E, eth2_config: Eth2Config, eth2_network_config: Option<Eth2NetworkConfig>, } impl EnvironmentBuilder<MinimalEthSpec> { /// Creates a new builder using the `minimal` eth2 specification. pub fn minimal() -> Self { Self { runtime: None, log: None, eth_spec_instance: MinimalEthSpec, eth2_config: Eth2Config::minimal(), eth2_network_config: None, } } } impl EnvironmentBuilder<MainnetEthSpec> { /// Creates a new builder using the `mainnet` eth2 specification. pub fn mainnet() -> Self { Self { runtime: None, log: None, eth_spec_instance: MainnetEthSpec, eth2_config: Eth2Config::mainnet(), eth2_network_config: None, } } } impl<E: EthSpec> EnvironmentBuilder<E> { /// Specifies that a multi-threaded tokio runtime should be used. Ideal for production uses. /// /// The `Runtime` used is just the standard tokio runtime. pub fn multi_threaded_tokio_runtime(mut self) -> Result<Self, String> { self.runtime = Some(Arc::new( RuntimeBuilder::new_multi_thread() .enable_all() .build() .map_err(|e| format!("Failed to start runtime: {:?}", e))?, )); Ok(self) } /// Specifies that all logs should be sent to `null` (i.e., ignored). pub fn null_logger(mut self) -> Result<Self, String> { self.log = Some(null_logger()?); Ok(self) } /// Initializes the logger using the specified configuration. /// The logger is "async" because it has a dedicated thread that accepts logs and then /// asynchronously flushes them to stdout/files/etc. This means the thread that raised the log /// does not have to wait for the logs to be flushed. /// The logger can be duplicated and more detailed logs can be output to `logfile`. /// Note that background file logging will spawn a new thread. pub fn initialize_logger(mut self, config: LoggerConfig) -> Result<Self, String> { // Setting up the initial logger format and build it. 
let stdout_drain = if let Some(format) = config.log_format { match format.to_uppercase().as_str() { "JSON" => { let stdout_drain = slog_json::Json::default(std::io::stdout()).fuse(); slog_async::Async::new(stdout_drain) .chan_size(LOG_CHANNEL_SIZE) .build() } _ => return Err("Logging format provided is not supported".to_string()), } } else { let stdout_decorator = slog_term::TermDecorator::new().build(); let stdout_decorator = logging::AlignedTermDecorator::new(stdout_decorator, logging::MAX_MESSAGE_WIDTH); let stdout_drain = slog_term::FullFormat::new(stdout_decorator).build().fuse(); slog_async::Async::new(stdout_drain) .chan_size(LOG_CHANNEL_SIZE) .build() }; let stdout_drain = match config.debug_level { "info" => stdout_drain.filter_level(Level::Info), "debug" => stdout_drain.filter_level(Level::Debug), "trace" => stdout_drain.filter_level(Level::Trace), "warn" => stdout_drain.filter_level(Level::Warning), "error" => stdout_drain.filter_level(Level::Error), "crit" => stdout_drain.filter_level(Level::Critical), unknown => return Err(format!("Unknown debug-level: {}", unknown)), }; let stdout_logger = Logger::root(stdout_drain.fuse(), o!()); // Disable file logging if values set to 0. if config.max_log_size == 0 || config.max_log_number == 0 { self.log = Some(stdout_logger); return Ok(self); } // Disable file logging if no path is specified. let path = match config.path { Some(path) => path, None => { self.log = Some(stdout_logger); return Ok(self); } }; // Ensure directories are created becfore the logfile. if !path.exists() { let mut dir = path.clone(); dir.pop(); // Create the necessary directories for the correct service and network. 
if !dir.exists() { create_dir_all(dir).map_err(|e| format!("Unable to create directory: {:?}", e))?; } } let logfile_level = match config.logfile_debug_level { "info" => Severity::Info, "debug" => Severity::Debug, "trace" => Severity::Trace, "warn" => Severity::Warning, "error" => Severity::Error, "crit" => Severity::Critical, unknown => return Err(format!("Unknown loglevel-debug-level: {}", unknown)), }; let file_logger = FileLoggerBuilder::new(&path) .level(logfile_level) .channel_size(LOG_CHANNEL_SIZE) .format(match config.log_format { Some("JSON") => Format::Json, _ => Format::default(), }) .rotate_size(config.max_log_size) .rotate_keep(config.max_log_number) .rotate_compress(config.compression) .restrict_permissions(true) .build() .map_err(|e| format!("Unable to build file logger: {}", e))?; let log = Logger::root(Duplicate::new(stdout_logger, file_logger).fuse(), o!()); info!( log, "Logging to file"; "path" => format!("{:?}", path) ); self.log = Some(log); Ok(self) } /// Adds a network configuration to the environment. pub fn eth2_network_config( mut self, eth2_network_config: Eth2NetworkConfig, ) -> Result<Self, String> { // Create a new chain spec from the default configuration. self.eth2_config.spec = eth2_network_config.chain_spec::<E>()?; self.eth2_network_config = Some(eth2_network_config); Ok(self) } /// Optionally adds a network configuration to the environment. pub fn optional_eth2_network_config( self, optional_config: Option<Eth2NetworkConfig>, ) -> Result<Self, String> { if let Some(config) = optional_config { self.eth2_network_config(config) } else { Ok(self) } } /// Consumes the builder, returning an `Environment`. 
pub fn build(self) -> Result<Environment<E>, String> { let (signal, exit) = exit_future::signal(); let (signal_tx, signal_rx) = channel(1); Ok(Environment { runtime: self .runtime .ok_or("Cannot build environment without runtime")?, signal_tx, signal_rx: Some(signal_rx), signal: Some(signal), exit, log: self.log.ok_or("Cannot build environment without log")?, eth_spec_instance: self.eth_spec_instance, eth2_config: self.eth2_config, eth2_network_config: self.eth2_network_config.map(Arc::new), }) } } /// An execution context that can be used by a service. /// /// Distinct from an `Environment` because a `Context` is not able to give a mutable reference to a /// `Runtime`, instead it only has access to a `Runtime`. #[derive(Clone)] pub struct RuntimeContext<E: EthSpec> { pub executor: TaskExecutor, pub eth_spec_instance: E, pub eth2_config: Eth2Config, pub eth2_network_config: Option<Arc<Eth2NetworkConfig>>, } impl<E: EthSpec> RuntimeContext<E> { /// Returns a sub-context of this context. /// /// The generated service will have the `service_name` in all it's logs. pub fn service_context(&self, service_name: String) -> Self { Self { executor: self.executor.clone_with_name(service_name), eth_spec_instance: self.eth_spec_instance.clone(), eth2_config: self.eth2_config.clone(), eth2_network_config: self.eth2_network_config.clone(), } } /// Returns the `eth2_config` for this service. pub fn eth2_config(&self) -> &Eth2Config { &self.eth2_config } /// Returns a reference to the logger for this service. pub fn log(&self) -> &slog::Logger { self.executor.log() } } /// An environment where Lighthouse services can run. Used to start a production beacon node or /// validator client, or to run tests that involve logging and async task execution. pub struct Environment<E: EthSpec> { runtime: Arc<Runtime>, /// Receiver side of an internal shutdown signal. signal_rx: Option<Receiver<ShutdownReason>>, /// Sender to request shutting down. 
signal_tx: Sender<ShutdownReason>, signal: Option<exit_future::Signal>, exit: exit_future::Exit, log: Logger, eth_spec_instance: E, pub eth2_config: Eth2Config, pub eth2_network_config: Option<Arc<Eth2NetworkConfig>>, } impl<E: EthSpec> Environment<E> { /// Returns a mutable reference to the `tokio` runtime. /// /// Useful in the rare scenarios where it's necessary to block the current thread until a task /// is finished (e.g., during testing). pub fn runtime(&self) -> &Arc<Runtime> { &self.runtime } /// Returns a `Context` where no "service" has been added to the logger output. pub fn core_context(&mut self) -> RuntimeContext<E> { RuntimeContext { executor: TaskExecutor::new( Arc::downgrade(self.runtime()), self.exit.clone(), self.log.clone(), self.signal_tx.clone(), ), eth_spec_instance: self.eth_spec_instance.clone(), eth2_config: self.eth2_config.clone(), eth2_network_config: self.eth2_network_config.clone(), } } /// Returns a `Context` where the `service_name` is added to the logger output. pub fn service_context(&mut self, service_name: String) -> RuntimeContext<E> { RuntimeContext { executor: TaskExecutor::new( Arc::downgrade(self.runtime()), self.exit.clone(), self.log.new(o!("service" => service_name)), self.signal_tx.clone(), ), eth_spec_instance: self.eth_spec_instance.clone(), eth2_config: self.eth2_config.clone(), eth2_network_config: self.eth2_network_config.clone(), } } /// Block the current thread until a shutdown signal is received. /// /// This can be either the user Ctrl-C'ing or a task requesting to shutdown. 
#[cfg(target_family = "unix")] pub fn block_until_shutdown_requested(&mut self) -> Result<ShutdownReason, String> { // future of a task requesting to shutdown let mut rx = self .signal_rx .take() .ok_or("Inner shutdown already received")?; let inner_shutdown = async move { rx.next().await.ok_or("Internal shutdown channel exhausted") }; futures::pin_mut!(inner_shutdown); match self.runtime().block_on(async { let mut handles = vec![]; // setup for handling SIGTERM match signal(SignalKind::terminate()) { Ok(terminate_stream) => { let terminate = SignalFuture::new(terminate_stream, "Received SIGTERM"); handles.push(terminate); } Err(e) => error!(self.log, "Could not register SIGTERM handler"; "error" => e), }; // setup for handling SIGINT match signal(SignalKind::interrupt()) { Ok(interrupt_stream) => { let interrupt = SignalFuture::new(interrupt_stream, "Received SIGINT"); handles.push(interrupt); } Err(e) => error!(self.log, "Could not register SIGINT handler"; "error" => e), } // setup for handling a SIGHUP match signal(SignalKind::hangup()) { Ok(hup_stream) => { let hup = SignalFuture::new(hup_stream, "Received SIGHUP"); handles.push(hup); } Err(e) => error!(self.log, "Could not register SIGHUP handler"; "error" => e), } future::select(inner_shutdown, future::select_all(handles.into_iter())).await }) { future::Either::Left((Ok(reason), _)) => { info!(self.log, "Internal shutdown received"; "reason" => reason.message()); Ok(reason) } future::Either::Left((Err(e), _)) => Err(e.into()), future::Either::Right(((res, _, _), _)) => { res.ok_or_else(|| "Handler channel closed".to_string()) } } } /// Block the current thread until a shutdown signal is received. /// /// This can be either the user Ctrl-C'ing or a task requesting to shutdown. 
#[cfg(not(target_family = "unix"))] pub fn block_until_shutdown_requested(&mut self) -> Result<ShutdownReason, String> { // future of a task requesting to shutdown let mut rx = self .signal_rx .take() .ok_or("Inner shutdown already received")?; let inner_shutdown = async move { rx.next().await.ok_or("Internal shutdown channel exhausted") }; futures::pin_mut!(inner_shutdown); // setup for handling a Ctrl-C let (ctrlc_send, ctrlc_oneshot) = oneshot::channel(); let ctrlc_send_c = RefCell::new(Some(ctrlc_send)); let log = self.log.clone(); ctrlc::set_handler(move || { if let Some(ctrlc_send) = ctrlc_send_c.try_borrow_mut().unwrap().take() { if let Err(e) = ctrlc_send.send(()) { error!( log, "Error sending ctrl-c message"; "error" => e ); } } }) .map_err(|e| format!("Could not set ctrlc handler: {:?}", e))?; // Block this thread until a shutdown signal is received. match self .runtime() .block_on(future::select(inner_shutdown, ctrlc_oneshot)) { future::Either::Left((Ok(reason), _)) => { info!(self.log, "Internal shutdown received"; "reason" => reason.message()); Ok(reason) } future::Either::Left((Err(e), _)) => Err(e.into()), future::Either::Right((x, _)) => x .map(|()| ShutdownReason::Success("Received Ctrl+C")) .map_err(|e| format!("Ctrlc oneshot failed: {}", e)), } } /// Shutdown the `tokio` runtime when all tasks are idle. 
pub fn shutdown_on_idle(self) { match Arc::try_unwrap(self.runtime) { Ok(runtime) => { runtime.shutdown_timeout(std::time::Duration::from_secs(MAXIMUM_SHUTDOWN_TIME)) } Err(e) => warn!( self.log, "Failed to obtain runtime access to shutdown gracefully"; "error" => ?e ), } } /// Fire exit signal which shuts down all spawned services pub fn fire_signal(&mut self) { if let Some(signal) = self.signal.take() { let _ = signal.fire(); } } pub fn eth_spec_instance(&self) -> &E { &self.eth_spec_instance } pub fn eth2_config(&self) -> &Eth2Config { &self.eth2_config } } pub fn null_logger() -> Result<Logger, String> { let log_builder = NullLoggerBuilder; log_builder .build() .map_err(|e| format!("Failed to start null logger: {:?}", e)) } #[cfg(target_family = "unix")] struct SignalFuture { signal: Signal, message: &'static str, } #[cfg(target_family = "unix")] impl SignalFuture { pub fn new(signal: Signal, message: &'static str) -> SignalFuture { SignalFuture { signal, message } } } #[cfg(target_family = "unix")] impl Future for SignalFuture { type Output = Option<ShutdownReason>; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { match self.signal.poll_recv(cx) { Poll::Pending => Poll::Pending, Poll::Ready(Some(_)) => Poll::Ready(Some(ShutdownReason::Success(self.message))), Poll::Ready(None) => Poll::Ready(None), } } }
36.690661
99
0.585662
e54844866b898c6ec87140bcfbdf90b64d2408a6
25,742
//! Oracles. //! //! Oracles take a test case and determine whether we have a bug. For example, //! one of the simplest oracles is to take a Wasm binary as our input test case, //! validate and instantiate it, and (implicitly) check that no assertions //! failed or segfaults happened. A more complicated oracle might compare the //! result of executing a Wasm file with and without optimizations enabled, and //! make sure that the two executions are observably identical. //! //! When an oracle finds a bug, it should report it to the fuzzing engine by //! panicking. pub mod dummy; use arbitrary::Arbitrary; use dummy::dummy_imports; use log::debug; use std::cell::Cell; use std::rc::Rc; use std::sync::atomic::{AtomicUsize, Ordering::SeqCst}; use std::sync::{Arc, Condvar, Mutex}; use std::time::{Duration, Instant}; use wasmtime::*; use wasmtime_wast::WastContext; static CNT: AtomicUsize = AtomicUsize::new(0); fn log_wasm(wasm: &[u8]) { if !log::log_enabled!(log::Level::Debug) { return; } let i = CNT.fetch_add(1, SeqCst); let name = format!("testcase{}.wasm", i); std::fs::write(&name, wasm).expect("failed to write wasm file"); log::debug!("wrote wasm file to `{}`", name); if let Ok(s) = wasmprinter::print_bytes(wasm) { let name = format!("testcase{}.wat", i); std::fs::write(&name, s).expect("failed to write wat file"); } } /// Methods of timing out execution of a WebAssembly module #[derive(Debug)] pub enum Timeout { /// No timeout is used, it should be guaranteed via some other means that /// the input does not infinite loop. None, /// A time-based timeout is used with a sleeping thread sending a signal /// after the specified duration. Time(Duration), /// Fuel-based timeouts are used where the specified fuel is all that the /// provided wasm module is allowed to consume. Fuel(u64), } /// Instantiate the Wasm buffer, and implicitly fail if we have an unexpected /// panic or segfault or anything else that can be detected "passively". 
/// /// Performs initial validation, and returns early if the Wasm is invalid. /// /// You can control which compiler is used via passing a `Strategy`. pub fn instantiate(wasm: &[u8], known_valid: bool, strategy: Strategy) { // Explicitly disable module linking for now since it's a breaking change to // pre-module-linking modules due to imports let mut cfg = crate::fuzz_default_config(strategy).unwrap(); cfg.wasm_module_linking(false); instantiate_with_config(wasm, known_valid, cfg, Timeout::None); } /// Instantiate the Wasm buffer, and implicitly fail if we have an unexpected /// panic or segfault or anything else that can be detected "passively". /// /// The engine will be configured using provided config. /// /// See also `instantiate` functions. pub fn instantiate_with_config( wasm: &[u8], known_valid: bool, mut config: Config, timeout: Timeout, ) { crate::init_fuzzing(); config.interruptable(match &timeout { Timeout::Time(_) => true, _ => false, }); config.consume_fuel(match &timeout { Timeout::Fuel(_) => true, _ => false, }); let engine = Engine::new(&config).unwrap(); let store = Store::new(&engine); let mut timeout_state = SignalOnDrop::default(); match timeout { Timeout::Fuel(fuel) => store.add_fuel(fuel).unwrap(), // If a timeout is requested then we spawn a helper thread to wait for // the requested time and then send us a signal to get interrupted. We // also arrange for the thread's sleep to get interrupted if we return // early (or the wasm returns within the time limit), which allows the // thread to get torn down. // // This prevents us from creating a huge number of sleeping threads if // this function is executed in a loop, like it does on nightly fuzzing // infrastructure. 
Timeout::Time(timeout) => { let handle = store.interrupt_handle().unwrap(); timeout_state.spawn_timeout(timeout, move || handle.interrupt()); } Timeout::None => {} } log_wasm(wasm); let module = match Module::new(&engine, wasm) { Ok(module) => module, Err(_) if !known_valid => return, Err(e) => panic!("failed to compile module: {:?}", e), }; let imports = dummy_imports(&store, module.imports()); match Instance::new(&store, &module, &imports) { Ok(_) => {} // Allow traps which can happen normally with `unreachable` or a timeout Err(e) if e.downcast_ref::<Trap>().is_some() => {} // Allow resource exhaustion since this is something that our wasm-smith // generator doesn't guarantee is forbidden. Err(e) if e.to_string().contains("resource limit exceeded") => {} // Also allow errors related to fuel consumption Err(e) if e.to_string().contains("all fuel consumed") => {} // Everything else should be a bug in the fuzzer Err(e) => panic!("failed to instantiate {}", e), } } /// Compile the Wasm buffer, and implicitly fail if we have an unexpected /// panic or segfault or anything else that can be detected "passively". /// /// Performs initial validation, and returns early if the Wasm is invalid. /// /// You can control which compiler is used via passing a `Strategy`. pub fn compile(wasm: &[u8], strategy: Strategy) { crate::init_fuzzing(); let engine = Engine::new(&crate::fuzz_default_config(strategy).unwrap()).unwrap(); log_wasm(wasm); let _ = Module::new(&engine, wasm); } /// Instantiate the given Wasm module with each `Config` and call all of its /// exports. Modulo OOM, non-canonical NaNs, and usage of Wasm features that are /// or aren't enabled for different configs, we should get the same results when /// we call the exported functions for all of our different configs. 
pub fn differential_execution( module: &wasm_smith::Module, configs: &[crate::generators::DifferentialConfig], ) { use std::collections::{HashMap, HashSet}; crate::init_fuzzing(); // We need at least two configs. if configs.len() < 2 // And all the configs should be unique. || configs.iter().collect::<HashSet<_>>().len() != configs.len() { return; } let configs: Vec<_> = match configs.iter().map(|c| c.to_wasmtime_config()).collect() { Ok(cs) => cs, // If the config is trying to use something that was turned off at // compile time, eg lightbeam, just continue to the next fuzz input. Err(_) => return, }; let mut export_func_results: HashMap<String, Result<Box<[Val]>, Trap>> = Default::default(); let wasm = module.to_bytes(); log_wasm(&wasm); for config in &configs { let engine = Engine::new(config).unwrap(); let store = Store::new(&engine); let module = Module::new(&engine, &wasm).unwrap(); // TODO: we should implement tracing versions of these dummy imports // that record a trace of the order that imported functions were called // in and with what values. Like the results of exported functions, // calls to imports should also yield the same values for each // configuration, and we should assert that. let imports = dummy_imports(&store, module.imports()); // Don't unwrap this: there can be instantiation-/link-time errors that // aren't caught during validation or compilation. For example, an imported // table might not have room for an element segment that we want to // initialize into it. let instance = match Instance::new(&store, &module, &imports) { Ok(instance) => instance, Err(e) => { eprintln!( "Warning: failed to instantiate `wasm-opt -ttf` module: {}", e ); continue; } }; for (name, f) in instance.exports().filter_map(|e| { let name = e.name(); e.into_func().map(|f| (name, f)) }) { // Always call the hang limit initializer first, so that we don't // infinite loop when calling another export. 
init_hang_limit(&instance); let ty = f.ty(); let params = dummy::dummy_values(ty.params()); let this_result = f.call(&params).map_err(|e| e.downcast::<Trap>().unwrap()); let existing_result = export_func_results .entry(name.to_string()) .or_insert_with(|| this_result.clone()); assert_same_export_func_result(&existing_result, &this_result, name); } } fn init_hang_limit(instance: &Instance) { match instance.get_export("hangLimitInitializer") { None => return, Some(Extern::Func(f)) => { f.call(&[]) .expect("initializing the hang limit should not fail"); } Some(_) => panic!("unexpected hangLimitInitializer export"), } } fn assert_same_export_func_result( lhs: &Result<Box<[Val]>, Trap>, rhs: &Result<Box<[Val]>, Trap>, func_name: &str, ) { let fail = || { panic!( "differential fuzzing failed: exported func {} returned two \ different results: {:?} != {:?}", func_name, lhs, rhs ) }; match (lhs, rhs) { (Err(_), Err(_)) => {} (Ok(lhs), Ok(rhs)) => { if lhs.len() != rhs.len() { fail(); } for (lhs, rhs) in lhs.iter().zip(rhs.iter()) { match (lhs, rhs) { (Val::I32(lhs), Val::I32(rhs)) if lhs == rhs => continue, (Val::I64(lhs), Val::I64(rhs)) if lhs == rhs => continue, (Val::V128(lhs), Val::V128(rhs)) if lhs == rhs => continue, (Val::F32(lhs), Val::F32(rhs)) if f32_equal(*lhs, *rhs) => continue, (Val::F64(lhs), Val::F64(rhs)) if f64_equal(*lhs, *rhs) => continue, (Val::ExternRef(_), Val::ExternRef(_)) | (Val::FuncRef(_), Val::FuncRef(_)) => continue, _ => fail(), } } } _ => fail(), } } } fn f32_equal(a: u32, b: u32) -> bool { let a = f32::from_bits(a); let b = f32::from_bits(b); a == b || (a.is_nan() && b.is_nan()) } fn f64_equal(a: u64, b: u64) -> bool { let a = f64::from_bits(a); let b = f64::from_bits(b); a == b || (a.is_nan() && b.is_nan()) } /// Invoke the given API calls. 
pub fn make_api_calls(api: crate::generators::api::ApiCalls) { use crate::generators::api::ApiCall; use std::collections::HashMap; crate::init_fuzzing(); let mut config: Option<Config> = None; let mut engine: Option<Engine> = None; let mut store: Option<Store> = None; let mut modules: HashMap<usize, Module> = Default::default(); let mut instances: HashMap<usize, Instance> = Default::default(); for call in api.calls { match call { ApiCall::ConfigNew => { log::trace!("creating config"); assert!(config.is_none()); config = Some(crate::fuzz_default_config(wasmtime::Strategy::Cranelift).unwrap()); } ApiCall::ConfigDebugInfo(b) => { log::trace!("enabling debuginfo"); config.as_mut().unwrap().debug_info(b); } ApiCall::ConfigInterruptable(b) => { log::trace!("enabling interruption"); config.as_mut().unwrap().interruptable(b); } ApiCall::EngineNew => { log::trace!("creating engine"); assert!(engine.is_none()); engine = Some(Engine::new(config.as_ref().unwrap()).unwrap()); } ApiCall::StoreNew => { log::trace!("creating store"); assert!(store.is_none()); store = Some(Store::new(engine.as_ref().unwrap())); } ApiCall::ModuleNew { id, wasm } => { log::debug!("creating module: {}", id); let wasm = wasm.to_bytes(); log_wasm(&wasm); let module = match Module::new(engine.as_ref().unwrap(), &wasm) { Ok(m) => m, Err(_) => continue, }; let old = modules.insert(id, module); assert!(old.is_none()); } ApiCall::ModuleDrop { id } => { log::trace!("dropping module: {}", id); drop(modules.remove(&id)); } ApiCall::InstanceNew { id, module } => { log::trace!("instantiating module {} as {}", module, id); let module = match modules.get(&module) { Some(m) => m, None => continue, }; let store = store.as_ref().unwrap(); let imports = dummy_imports(store, module.imports()); // Don't unwrap this: there can be instantiation-/link-time errors that // aren't caught during validation or compilation. 
For example, an imported // table might not have room for an element segment that we want to // initialize into it. if let Ok(instance) = Instance::new(store, &module, &imports) { instances.insert(id, instance); } } ApiCall::InstanceDrop { id } => { log::trace!("dropping instance {}", id); drop(instances.remove(&id)); } ApiCall::CallExportedFunc { instance, nth } => { log::trace!("calling instance export {} / {}", instance, nth); let instance = match instances.get(&instance) { Some(i) => i, None => { // Note that we aren't guaranteed to instantiate valid // modules, see comments in `InstanceNew` for details on // that. But the API call generator can't know if // instantiation failed, so we might not actually have // this instance. When that's the case, just skip the // API call and keep going. continue; } }; let funcs = instance .exports() .filter_map(|e| match e.into_extern() { Extern::Func(f) => Some(f.clone()), _ => None, }) .collect::<Vec<_>>(); if funcs.is_empty() { continue; } let nth = nth % funcs.len(); let f = &funcs[nth]; let ty = f.ty(); let params = dummy::dummy_values(ty.params()); let _ = f.call(&params); } } } } /// Executes the wast `test` spectest with the `config` specified. /// /// Ensures that spec tests pass regardless of the `Config`. pub fn spectest(fuzz_config: crate::generators::Config, test: crate::generators::SpecTest) { crate::init_fuzzing(); log::debug!("running {:?} with {:?}", test.file, fuzz_config); let mut config = fuzz_config.to_wasmtime(); config.wasm_reference_types(false); config.wasm_bulk_memory(false); let store = Store::new(&Engine::new(&config).unwrap()); if fuzz_config.consume_fuel { store.add_fuel(u64::max_value()).unwrap(); } let mut wast_context = WastContext::new(store); wast_context.register_spectest().unwrap(); wast_context .run_buffer(test.file, test.contents.as_bytes()) .unwrap(); } /// Execute a series of `table.get` and `table.set` operations. 
pub fn table_ops( fuzz_config: crate::generators::Config, ops: crate::generators::table_ops::TableOps, ) { let _ = env_logger::try_init(); let num_dropped = Rc::new(Cell::new(0)); { let mut config = fuzz_config.to_wasmtime(); config.wasm_reference_types(true); let engine = Engine::new(&config).unwrap(); let store = Store::new(&engine); if fuzz_config.consume_fuel { store.add_fuel(u64::max_value()).unwrap(); } let wasm = ops.to_wasm_binary(); log_wasm(&wasm); let module = match Module::new(&engine, &wasm) { Ok(m) => m, Err(_) => return, }; // To avoid timeouts, limit the number of explicit GCs we perform per // test case. const MAX_GCS: usize = 5; let num_gcs = Cell::new(0); let gc = Func::wrap(&store, move |caller: Caller| { if num_gcs.get() < MAX_GCS { caller.store().gc(); num_gcs.set(num_gcs.get() + 1); } }); let instance = Instance::new(&store, &module, &[gc.into()]).unwrap(); let run = instance.get_func("run").unwrap(); let args: Vec<_> = (0..ops.num_params()) .map(|_| Val::ExternRef(Some(ExternRef::new(CountDrops(num_dropped.clone()))))) .collect(); let _ = run.call(&args); } assert_eq!(num_dropped.get(), ops.num_params()); return; struct CountDrops(Rc<Cell<u8>>); impl Drop for CountDrops { fn drop(&mut self) { self.0.set(self.0.get().checked_add(1).unwrap()); } } } /// Configuration options for wasm-smith such that generated modules always /// conform to certain specifications. 
#[derive(Default, Debug, Arbitrary, Clone)] pub struct DifferentialWasmiModuleConfig; impl wasm_smith::Config for DifferentialWasmiModuleConfig { fn allow_start_export(&self) -> bool { false } fn min_funcs(&self) -> usize { 1 } fn max_funcs(&self) -> usize { 1 } fn min_memories(&self) -> u32 { 1 } fn max_memories(&self) -> usize { 1 } fn max_imports(&self) -> usize { 0 } fn min_exports(&self) -> usize { 2 } fn max_memory_pages(&self) -> u32 { 1 } fn memory_max_size_required(&self) -> bool { true } } /// Perform differential execution between Cranelift and wasmi, diffing the /// resulting memory image when execution terminates. This relies on the /// module-under-test to be instrumented to bound the execution time. Invoke /// with a module generated by `wasm-smith` using the /// `DiferentialWasmiModuleConfig` configuration type for best results. /// /// May return `None` if we early-out due to a rejected fuzz config; these /// should be rare if modules are generated appropriately. pub fn differential_wasmi_execution(wasm: &[u8], config: &crate::generators::Config) -> Option<()> { crate::init_fuzzing(); // Instantiate wasmi module and instance. let wasmi_module = wasmi::Module::from_buffer(&wasm[..]).ok()?; let wasmi_instance = wasmi::ModuleInstance::new(&wasmi_module, &wasmi::ImportsBuilder::default()).ok()?; let wasmi_instance = wasmi_instance.assert_no_start(); // TODO(paritytech/wasmi#19): wasmi does not currently canonicalize NaNs. To avoid spurious // fuzz failures, for now let's fuzz only integer Wasm programs. if wasmi_module.deny_floating_point().is_err() { return None; } // Instantiate wasmtime module and instance. 
let mut wasmtime_config = config.to_wasmtime(); wasmtime_config.cranelift_nan_canonicalization(true); let wasmtime_engine = Engine::new(&wasmtime_config).unwrap(); let wasmtime_store = Store::new(&wasmtime_engine); if config.consume_fuel { wasmtime_store.add_fuel(u64::max_value()).unwrap(); } let wasmtime_module = Module::new(&wasmtime_engine, &wasm).expect("Wasmtime can compile module"); let wasmtime_instance = Instance::new(&wasmtime_store, &wasmtime_module, &[]) .expect("Wasmtime can instantiate module"); // Introspect wasmtime module to find name of an exported function and of an // exported memory. Stop when we have one of each. (According to the config // above, there should be at most one of each.) let (func_name, memory_name) = { let mut func_name = None; let mut memory_name = None; for e in wasmtime_module.exports() { match e.ty() { wasmtime::ExternType::Func(..) => func_name = Some(e.name().to_string()), wasmtime::ExternType::Memory(..) => memory_name = Some(e.name().to_string()), _ => {} } if func_name.is_some() && memory_name.is_some() { break; } } (func_name?, memory_name?) 
}; let wasmi_mem_export = wasmi_instance.export_by_name(&memory_name[..]).unwrap(); let wasmi_mem = wasmi_mem_export.as_memory().unwrap(); let wasmi_main_export = wasmi_instance.export_by_name(&func_name[..]).unwrap(); let wasmi_main = wasmi_main_export.as_func().unwrap(); let wasmi_val = wasmi::FuncInstance::invoke(&wasmi_main, &[], &mut wasmi::NopExternals); let wasmtime_mem = wasmtime_instance .get_memory(&memory_name[..]) .expect("memory export is present"); let wasmtime_main = wasmtime_instance .get_func(&func_name[..]) .expect("function export is present"); let wasmtime_vals = wasmtime_main.call(&[]); let wasmtime_val = wasmtime_vals.map(|v| v.iter().next().cloned()); debug!( "Successful execution: wasmi returned {:?}, wasmtime returned {:?}", wasmi_val, wasmtime_val ); let show_wat = || { if let Ok(s) = wasmprinter::print_bytes(&wasm[..]) { eprintln!("wat:\n{}\n", s); } }; match (&wasmi_val, &wasmtime_val) { (&Ok(Some(wasmi::RuntimeValue::I32(a))), &Ok(Some(Val::I32(b)))) if a == b => {} (&Ok(Some(wasmi::RuntimeValue::F32(a))), &Ok(Some(Val::F32(b)))) if f32_equal(a.to_bits(), b) => {} (&Ok(Some(wasmi::RuntimeValue::I64(a))), &Ok(Some(Val::I64(b)))) if a == b => {} (&Ok(Some(wasmi::RuntimeValue::F64(a))), &Ok(Some(Val::F64(b)))) if f64_equal(a.to_bits(), b) => {} (&Ok(None), &Ok(None)) => {} (&Err(_), &Err(_)) => {} _ => { show_wat(); panic!( "Values do not match: wasmi returned {:?}; wasmtime returned {:?}", wasmi_val, wasmtime_val ); } } if wasmi_mem.current_size().0 != wasmtime_mem.size() as usize { show_wat(); panic!("resulting memories are not the same size"); } // Wasmi memory may be stored non-contiguously; copy it out to a contiguous chunk. 
let mut wasmi_buf: Vec<u8> = vec![0; wasmtime_mem.data_size()]; wasmi_mem .get_into(0, &mut wasmi_buf[..]) .expect("can access wasmi memory"); let wasmtime_slice = unsafe { wasmtime_mem.data_unchecked() }; if wasmi_buf.len() >= 64 { debug!("-> First 64 bytes of wasmi heap: {:?}", &wasmi_buf[0..64]); debug!( "-> First 64 bytes of Wasmtime heap: {:?}", &wasmtime_slice[0..64] ); } if &wasmi_buf[..] != &wasmtime_slice[..] { show_wat(); panic!("memory contents are not equal"); } Some(()) } #[derive(Default)] struct SignalOnDrop { state: Arc<(Mutex<bool>, Condvar)>, thread: Option<std::thread::JoinHandle<()>>, } impl SignalOnDrop { fn spawn_timeout(&mut self, dur: Duration, closure: impl FnOnce() + Send + 'static) { let state = self.state.clone(); let start = Instant::now(); self.thread = Some(std::thread::spawn(move || { // Using our mutex/condvar we wait here for the first of `dur` to // pass or the `SignalOnDrop` instance to get dropped. let (lock, cvar) = &*state; let mut signaled = lock.lock().unwrap(); while !*signaled { // Adjust our requested `dur` based on how much time has passed. let dur = match dur.checked_sub(start.elapsed()) { Some(dur) => dur, None => break, }; let (lock, result) = cvar.wait_timeout(signaled, dur).unwrap(); signaled = lock; // If we timed out for sure then there's no need to continue // since we'll just abort on the next `checked_sub` anyway. if result.timed_out() { break; } } drop(signaled); closure(); })); } } impl Drop for SignalOnDrop { fn drop(&mut self) { if let Some(thread) = self.thread.take() { let (lock, cvar) = &*self.state; // Signal our thread that we've been dropped and wake it up if it's // blocked. let mut g = lock.lock().unwrap(); *g = true; cvar.notify_one(); drop(g); // ... and then wait for the thread to exit to ensure we clean up // after ourselves. thread.join().unwrap(); } } }
36.154494
100
0.566156
9191e48cdf0135f69ccf3a4798474278c44fe86a
4,676
use std::{ fs::{read_to_string, File, OpenOptions, ReadDir}, io::Error as IoError, os::unix::prelude::MetadataExt, path::PathBuf, }; use goblin::elf::Elf; use crate::{ ext::{ElfExt, PathBufExt}, Gid, ProcId, Uid, VirtAddr, }; /// A newtype that references the [`/proc/<id>`](https://man7.org/linux/man-pages/man5/proc.5.html) directory. pub(crate) struct Proc(pub(crate) PathBuf); impl Proc { /// Creates a new [`Proc`] that references the host process. pub(crate) fn current() -> Self { Proc(PathBuf::root().join("proc").join("self")) } /// Creates a new [`Proc`] that references the task identified by `id`. /// /// Returns [`None`] if the path `/proc/<id>` does not exist. pub(crate) fn new(id: ProcId) -> Option<Self> { let path = PathBuf::root().join("proc").join(id.to_string()); path.exists().then(|| Proc(path)) } /// Gets the owner of the current [`Proc`]. pub(crate) fn owner(&self) -> Result<(Uid, Gid), IoError> { let metadata = self.0.metadata()?; Ok((metadata.uid(), metadata.gid())) } /// Reads `/proc/<id>/exe` of the current [`Proc`]. pub(crate) fn exe(&self) -> Result<File, IoError> { File::open(self.0.join("exe")) } /// Reads `/proc/<id>/maps` of the current [`Proc`]. pub(crate) fn maps(&self) -> Result<File, IoError> { File::open(self.0.join("maps")) } /// Reads `/proc/<id>/mem` of the current [`Proc`]. pub(crate) fn mem(&self) -> Result<File, IoError> { OpenOptions::new() .read(true) .write(true) .open(self.0.join("mem")) } /// Reads `/proc/<id>/syscall` of the current [`Proc`]. pub(crate) fn syscall(&self) -> Result<File, IoError> { File::open(self.0.join("syscall")) } /// Reads `/proc/<id>/task` of the current [`Proc`]. pub(crate) fn task(&self) -> Result<ReadDir, IoError> { std::fs::read_dir(self.0.join("task")) } } /// A struct that represents a loaded shared library. pub(crate) struct ProcLib { /// The base virtual address where the library is located at. pub(crate) base_addr: VirtAddr, /// The path where the library is located at. 
pub(crate) path: PathBuf, } impl ProcLib { /// Creates a new [`ProcLib`] that references a shared library loaded at `base_add` and located at `path`. pub(crate) fn new(base_addr: u64, path: PathBuf) -> Self { ProcLib { base_addr, path } } /// Finds a symbol with the given `name` exported by the current library. /// /// Returns [`None`] if no symbol with the given name was found. pub(crate) fn find_sym_addr(&self, name: &str) -> Option<ProcSym> { let buf = std::fs::read(&self.path).ok()?; let sym = Elf::parse(&buf).ok()?.find_sym_by_name(name)?; Some(ProcSym::new(self.base_addr + sym.st_value)) } } /// A struct that represents a library symbol. pub(crate) struct ProcSym { /// The virtual address where the symbol is located at. pub(crate) addr: VirtAddr, } impl ProcSym { /// Creates a new [`ProcSym`] that references a symbol located at `addr`. pub(crate) fn new(addr: VirtAddr) -> Self { ProcSym { addr } } } /// A enum that represents the class of a process (32 bit or 64 bit). pub(crate) enum ProcClass { /// The values used to describe 32 bit processes. #[cfg(any(target_pointer_width = "64", target_pointer_width = "32"))] ThirtyTwo, /// The values used to describe 64 bit processes. #[cfg(target_pointer_width = "64")] SixtyFour, } /// A enum that represents the content of `/proc/sys/kernel/yama/ptrace_scope`. /// /// Even if we do not use `ptrace`, `/proc/<pid>/` readability depends on this value. /// /// Source: https://man7.org/linux/man-pages/man2/ptrace.2.html pub(crate) enum PtraceScope { /// 0. All, /// 1. Restricted, /// 2. Admin, /// 3. None, } impl PtraceScope { /// Gets the content of `/proc/sys/kernel/yama/ptrace_scope`, which determines whether we can read `/proc/<pid>` or not. /// If the file doesn't exists, we assume the kernel was not built with the Yama Linux Security Module. 
pub(crate) fn current() -> Self { if let Ok(scope) = read_to_string("/proc/sys/kernel/yama/ptrace_scope") { match scope.trim() { "0" => PtraceScope::All, "1" => PtraceScope::Restricted, "2" => PtraceScope::Admin, "3" => PtraceScope::None, _ => unreachable!(), } } else { PtraceScope::All } } }
30.966887
124
0.591745
895df5808dbe16c6eb0cbb2abbeecab6d35c9598
13,026
//! Simplifying Candidates
//!
//! *Simplifying* a match pair `place @ pattern` means breaking it down
//! into bindings or other, simpler match pairs. For example:
//!
//! - `place @ (P1, P2)` can be simplified to `[place.0 @ P1, place.1 @ P2]`
//! - `place @ x` can be simplified to `[]` by binding `x` to `place`
//!
//! The `simplify_candidate` routine just repeatedly applies these
//! sort of simplifications until there is nothing left to
//! simplify. Match pairs cannot be simplified if they require some
//! sort of test: for example, testing which variant an enum is, or
//! testing a value against a constant.

use crate::build::expr::as_place::PlaceBuilder;
use crate::build::matches::{Ascription, Binding, Candidate, MatchPair};
use crate::build::Builder;

use rustc_hir::RangeEnd;
use rustc_middle::thir::{self, *};
use rustc_middle::ty;
use rustc_middle::ty::layout::IntegerExt;
use rustc_target::abi::{Integer, Size};

use std::mem;

impl<'a, 'tcx> Builder<'a, 'tcx> {
    /// Simplify a candidate so that all match pairs require a test.
    ///
    /// This method will also split a candidate, in which the only
    /// match-pair is an or-pattern, into multiple candidates.
    /// This is so that
    ///
    ///     match x {
    ///         0 | 1 => { ... },
    ///         2 | 3 => { ... },
    ///     }
    ///
    /// only generates a single switch. If this happens this method returns
    /// `true`.
    pub(super) fn simplify_candidate<'pat>(
        &mut self,
        candidate: &mut Candidate<'pat, 'tcx>,
    ) -> bool {
        // repeatedly simplify match pairs until fixed point is reached
        debug!(?candidate, "simplify_candidate");

        // existing_bindings and new_bindings exists to keep the semantics in order.
        // Reversing the binding order for bindings after `@` changes the binding order in places
        // it shouldn't be changed, for example `let (Some(a), Some(b)) = (x, y)`
        //
        // To avoid this, the binding occurs in the following manner:
        // * the bindings for one iteration of the following loop occurs in order (i.e. left to
        //   right)
        // * the bindings from the previous iteration of the loop is prepended to the bindings from
        //   the current iteration (in the implementation this is done by mem::swap and extend)
        // * after all iterations, these new bindings are then appended to the bindings that were
        //   preexisting (i.e. `candidate.binding` when the function was called).
        //
        // example:
        // candidate.bindings = [1, 2, 3]
        // binding in iter 1: [4, 5]
        // binding in iter 2: [6, 7]
        //
        // final binding: [1, 2, 3, 6, 7, 4, 5]
        let mut existing_bindings = mem::take(&mut candidate.bindings);
        let mut new_bindings = Vec::new();
        loop {
            let match_pairs = mem::take(&mut candidate.match_pairs);

            // A lone or-pattern: split into one subcandidate per alternative
            // (see `create_or_subcandidates`) and report the split via `true`.
            if let [MatchPair { pattern: Pat { kind: box PatKind::Or { pats }, .. }, place }] =
                &*match_pairs
            {
                existing_bindings.extend_from_slice(&new_bindings);
                mem::swap(&mut candidate.bindings, &mut existing_bindings);
                candidate.subcandidates =
                    self.create_or_subcandidates(candidate, place.clone(), pats);
                return true;
            }

            let mut changed = false;
            for match_pair in match_pairs {
                match self.simplify_match_pair(match_pair, candidate) {
                    Ok(()) => {
                        changed = true;
                    }
                    Err(match_pair) => {
                        // Could not be simplified further; keep it for testing.
                        candidate.match_pairs.push(match_pair);
                    }
                }
            }
            // Avoid issue #69971: the binding order should be right to left if there are more
            // bindings after `@` to please the borrow checker
            // Ex
            // struct NonCopyStruct {
            //     copy_field: u32,
            // }
            //
            // fn foo1(x: NonCopyStruct) {
            //     let y @ NonCopyStruct { copy_field: z } = x;
            //     // the above should turn into
            //     let z = x.copy_field;
            //     let y = x;
            // }
            //
            // Net effect of the next three statements: append this iteration's
            // bindings after the previously accumulated `new_bindings`, and
            // leave `candidate.bindings` empty for the next loop pass.
            candidate.bindings.extend_from_slice(&new_bindings);
            mem::swap(&mut candidate.bindings, &mut new_bindings);
            candidate.bindings.clear();

            if !changed {
                // Fixed point reached: flush accumulated bindings back onto the
                // candidate in the documented order.
                existing_bindings.extend_from_slice(&new_bindings);
                mem::swap(&mut candidate.bindings, &mut existing_bindings);
                // Move or-patterns to the end, because they can result in us
                // creating additional candidates, so we want to test them as
                // late as possible.
                candidate
                    .match_pairs
                    .sort_by_key(|pair| matches!(*pair.pattern.kind, PatKind::Or { .. }));
                debug!(simplified = ?candidate, "simplify_candidate");

                return false; // if we were not able to simplify any, done.
            }
        }
    }

    /// Given `candidate` that has a single or-pattern for its match-pairs,
    /// creates a fresh candidate for each of its input subpatterns passed via
    /// `pats`.
    fn create_or_subcandidates<'pat>(
        &mut self,
        candidate: &Candidate<'pat, 'tcx>,
        place: PlaceBuilder<'tcx>,
        pats: &'pat [Pat<'tcx>],
    ) -> Vec<Candidate<'pat, 'tcx>> {
        pats.iter()
            .map(|pat| {
                let mut candidate = Candidate::new(place.clone(), pat, candidate.has_guard);
                // Each subcandidate is recursively simplified as it is created.
                self.simplify_candidate(&mut candidate);
                candidate
            })
            .collect()
    }

    /// Tries to simplify `match_pair`, returning `Ok(())` if
    /// successful. If successful, new match pairs and bindings will
    /// have been pushed into the candidate. If no simplification is
    /// possible, `Err` is returned and no changes are made to
    /// candidate.
    fn simplify_match_pair<'pat>(
        &mut self,
        match_pair: MatchPair<'pat, 'tcx>,
        candidate: &mut Candidate<'pat, 'tcx>,
    ) -> Result<(), MatchPair<'pat, 'tcx>> {
        let tcx = self.tcx;
        match *match_pair.pattern.kind {
            PatKind::AscribeUserType {
                ref subpattern,
                ascription: thir::Ascription { variance, user_ty, user_ty_span },
            } => {
                // Apply the type ascription to the value at `match_pair.place`,
                // which is the value being matched.
                if let Ok(place_resolved) =
                    match_pair.place.clone().try_upvars_resolved(self.tcx, self.typeck_results)
                {
                    candidate.ascriptions.push(Ascription {
                        span: user_ty_span,
                        user_ty,
                        source: place_resolved.into_place(self.tcx, self.typeck_results),
                        variance,
                    });
                }

                candidate.match_pairs.push(MatchPair::new(match_pair.place, subpattern));

                Ok(())
            }

            PatKind::Wild => {
                // nothing left to do
                Ok(())
            }

            PatKind::Binding {
                name: _,
                mutability: _,
                mode,
                var,
                ty: _,
                ref subpattern,
                is_primary: _,
            } => {
                if let Ok(place_resolved) =
                    match_pair.place.clone().try_upvars_resolved(self.tcx, self.typeck_results)
                {
                    candidate.bindings.push(Binding {
                        span: match_pair.pattern.span,
                        source: place_resolved.into_place(self.tcx, self.typeck_results),
                        var_id: var,
                        binding_mode: mode,
                    });
                }

                if let Some(subpattern) = subpattern.as_ref() {
                    // this is the `x @ P` case; have to keep matching against `P` now
                    candidate.match_pairs.push(MatchPair::new(match_pair.place, subpattern));
                }

                Ok(())
            }

            PatKind::Constant { .. } => {
                // FIXME normalize patterns when possible
                Err(match_pair)
            }

            PatKind::Range(PatRange { lo, hi, end }) => {
                // Compute the full (min, max, size) domain of the scrutinee type,
                // plus the sign bias used to compare signed values bitwise.
                let (range, bias) = match *lo.ty().kind() {
                    ty::Char => {
                        (Some(('\u{0000}' as u128, '\u{10FFFF}' as u128, Size::from_bits(32))), 0)
                    }
                    ty::Int(ity) => {
                        let size = Integer::from_int_ty(&tcx, ity).size();
                        let max = size.truncate(u128::MAX);
                        let bias = 1u128 << (size.bits() - 1);
                        (Some((0, max, size)), bias)
                    }
                    ty::Uint(uty) => {
                        let size = Integer::from_uint_ty(&tcx, uty).size();
                        let max = size.truncate(u128::MAX);
                        (Some((0, max, size)), 0)
                    }
                    _ => (None, 0),
                };
                if let Some((min, max, sz)) = range {
                    if let (Some(lo), Some(hi)) = (lo.try_to_bits(sz), hi.try_to_bits(sz)) {
                        // We want to compare ranges numerically, but the order of the bitwise
                        // representation of signed integers does not match their numeric order.
                        // Thus, to correct the ordering, we need to shift the range of signed
                        // integers to correct the comparison. This is achieved by XORing with a
                        // bias (see pattern/_match.rs for another pertinent example of this
                        // pattern).
                        let (lo, hi) = (lo ^ bias, hi ^ bias);
                        if lo <= min && (hi > max || hi == max && end == RangeEnd::Included) {
                            // Irrefutable pattern match.
                            return Ok(());
                        }
                    }
                }
                Err(match_pair)
            }

            PatKind::Slice { ref prefix, ref slice, ref suffix } => {
                if prefix.is_empty() && slice.is_some() && suffix.is_empty() {
                    // irrefutable
                    self.prefix_slice_suffix(
                        &mut candidate.match_pairs,
                        &match_pair.place,
                        prefix,
                        slice.as_ref(),
                        suffix,
                    );
                    Ok(())
                } else {
                    Err(match_pair)
                }
            }

            PatKind::Variant { adt_def, substs, variant_index, ref subpatterns } => {
                // Irrefutable when every *other* variant is uninhabited (and the
                // variant list may not grow out from under us).
                let irrefutable = adt_def.variants().iter_enumerated().all(|(i, v)| {
                    i == variant_index || {
                        self.tcx.features().exhaustive_patterns
                            && !v
                                .uninhabited_from(
                                    self.tcx,
                                    substs,
                                    adt_def.adt_kind(),
                                    self.param_env,
                                )
                                .is_empty()
                    }
                }) && (adt_def.did().is_local()
                    || !adt_def.is_variant_list_non_exhaustive());
                if irrefutable {
                    let place_builder = match_pair.place.downcast(adt_def, variant_index);
                    candidate
                        .match_pairs
                        .extend(self.field_match_pairs(place_builder, subpatterns));
                    Ok(())
                } else {
                    Err(match_pair)
                }
            }

            PatKind::Array { ref prefix, ref slice, ref suffix } => {
                // Arrays have a statically-known length, so this is always irrefutable.
                self.prefix_slice_suffix(
                    &mut candidate.match_pairs,
                    &match_pair.place,
                    prefix,
                    slice.as_ref(),
                    suffix,
                );
                Ok(())
            }

            PatKind::Leaf { ref subpatterns } => {
                // tuple struct, match subpats (if any)
                candidate.match_pairs.extend(self.field_match_pairs(match_pair.place, subpatterns));
                Ok(())
            }

            PatKind::Deref { ref subpattern } => {
                let place_builder = match_pair.place.deref();
                candidate.match_pairs.push(MatchPair::new(place_builder, subpattern));
                Ok(())
            }

            PatKind::Or { .. } => Err(match_pair),
        }
    }
}
41.091483
100
0.482573
f77ba9d68530f97a4c855394717924b901f09cbc
1,385
use std::io;

/// CQL native-protocol versions this client knows about.
pub enum CqlVersion {
    V_3_0_0,
}

impl CqlVersion {
    /// Returns the version as its dotted string form.
    pub fn to_str(&self) -> &'static str {
        match self {
            CqlVersion::V_3_0_0 => "3.0.0",
        }
    }
}

/// Frame-body compression algorithms.
pub enum Compression {
    LZ4,
    Snappy,
}

impl Compression {
    /// Returns the lowercase algorithm name.
    pub fn to_str(&self) -> &'static str {
        match self {
            Compression::LZ4 => "lz4",
            Compression::Snappy => "snappy",
        }
    }
}

/// A typed CQL value.
pub enum CqlValue {
    Int(i32),
}

/// An id/value option pair.
pub struct CqlOption {
    id: i16,
    value: CqlValue,
}

/// Wire-level type codes for CQL data types.
pub enum CqlTypeCode {
    Custom = 0x0000,
    Ascii = 0x0001,
    Bigint = 0x0002,
    Blob = 0x0003,
    Boolean = 0x0004,
    Counter = 0x0005,
    Decimal = 0x0006,
    Double = 0x0007,
    Float = 0x0008,
    Int = 0x0009,
    Timestamp = 0x000B,
    Uuid = 0x000C,
    Varchar = 0x000D,
    Varint = 0x000E,
    TimeUuid = 0x000F,
    Inet = 0x0010,
    List = 0x0020,
    Map = 0x0021,
    Set = 0x0022,
    Udt = 0x0030,
    Tuple = 0x0031,
}

/// A CQL data type. Collection/UDT/tuple variants are not wired up yet.
pub enum CqlType {
    Custom(String),
    Ascii,
    Bigint,
    Blob,
    Boolean,
    Counter,
    Decimal,
    Double,
    Float,
    Int,
    Timestamp,
    Uuid,
    Varchar,
    Varint,
    Timeuuid,
    Inet,
    //List(CqlOption),
    //Map(CqlOption, CqlOption),
    //Set(CqlOption),
    //UDT(CqlUDT),
    //Tuple(CqlTuple),
}

/// Types that can serialize themselves into a CQL wire buffer.
pub trait ToCql {
    fn write(&self, buf: &mut Vec<u8>) -> io::Result<()>;
}
15.388889
57
0.54296
909409d098bf58a280620c8a0ccbaeeba5929b63
2,129
use crate::{
    expr::Expr,
    function::Function,
    ident::Ident,
    lit::{Number, Str},
    pat::Pat,
    stmt::BlockStmt,
    typescript::TsTypeAnn,
};
use swc_common::{ast_node, Span};

/// A property of an object literal.
#[ast_node]
#[derive(Eq, Hash)]
pub enum Prop {
    /// `a` in `{ a, }`
    #[tag("Identifier")]
    Shorthand(Ident),

    /// `key: value` in `{ key: value, }`
    #[tag("KeyValueProperty")]
    KeyValue(KeyValueProp),

    /// This is **invalid** for object literal.
    #[tag("AssignmentProperty")]
    Assign(AssignProp),

    /// A `get`-accessor property.
    #[tag("GetterProperty")]
    Getter(GetterProp),

    /// A `set`-accessor property.
    #[tag("SetterProperty")]
    Setter(SetterProp),

    /// A method property, e.g. `foo() {}`.
    #[tag("MethodProperty")]
    Method(MethodProp),
}

/// `key: value`. The node's span runs from the key's start to the value's end.
#[ast_node("KeyValueProperty")]
#[derive(Eq, Hash)]
pub struct KeyValueProp {
    #[span(lo)]
    pub key: PropName,

    #[span(hi)]
    pub value: Box<Expr>,
}

/// `key = value` — only valid inside patterns, not object literals.
#[ast_node("AssignmentProperty")]
#[derive(Eq, Hash)]
pub struct AssignProp {
    #[span(lo)]
    pub key: Ident,
    #[span(hi)]
    pub value: Box<Expr>,
}

/// A getter, with an optional TypeScript return-type annotation.
#[ast_node("GetterProperty")]
#[derive(Eq, Hash)]
pub struct GetterProp {
    pub span: Span,
    pub key: PropName,
    #[serde(default, rename = "typeAnnotation")]
    pub type_ann: Option<TsTypeAnn>,
    // `None` body occurs for declaration-only contexts.
    #[serde(default)]
    pub body: Option<BlockStmt>,
}

/// A setter; `param` is the single value parameter.
#[ast_node("SetterProperty")]
#[derive(Eq, Hash)]
pub struct SetterProp {
    pub span: Span,
    pub key: PropName,
    pub param: Pat,
    #[serde(default)]
    pub body: Option<BlockStmt>,
}

/// A method property; the function payload is serialized flattened into this
/// node, and the span runs from the key's start to the function's end.
#[ast_node("MethodProperty")]
#[derive(Eq, Hash)]
pub struct MethodProp {
    #[span(lo)]
    pub key: PropName,

    #[serde(flatten)]
    #[span(hi)]
    pub function: Function,
}

/// The key of a property.
#[ast_node]
#[derive(Eq, Hash)]
pub enum PropName {
    #[tag("Identifier")]
    Ident(Ident),
    /// String literal.
    #[tag("StringLiteral")]
    Str(Str),
    /// Numeric literal.
    #[tag("NumericLiteral")]
    Num(Number),
    /// A computed key, e.g. `[expr]`.
    #[tag("Computed")]
    Computed(ComputedPropName),
}

/// `[expr]` used as a property key.
#[ast_node("Computed")]
#[derive(Eq, Hash)]
pub struct ComputedPropName {
    /// Span including `[` and `]`.
    pub span: Span,
    #[serde(rename = "expression")]
    pub expr: Box<Expr>,
}
19.53211
48
0.601221
e2883d16bc681fed3f884a89ac7bcd852f9a8127
11,018
// NOTE(review): svd2rust-generated accessor module for the GPIO group-interrupt
// CTRL register. It wraps the raw 32-bit value in typed reader (`R`) and
// writer (`W`) proxies, with one `<FIELD>_R`/`<FIELD>_W` pair per bit field
// (INT @ bit 0, COMB @ bit 1, TRIG @ bit 2). Generated code — prefer
// regenerating from the SVD over hand-editing.
#[doc = "Register `CTRL` reader"]
pub struct R(crate::R<CTRL_SPEC>);
impl core::ops::Deref for R {
    type Target = crate::R<CTRL_SPEC>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl From<crate::R<CTRL_SPEC>> for R {
    #[inline(always)]
    fn from(reader: crate::R<CTRL_SPEC>) -> Self {
        R(reader)
    }
}
#[doc = "Register `CTRL` writer"]
pub struct W(crate::W<CTRL_SPEC>);
impl core::ops::Deref for W {
    type Target = crate::W<CTRL_SPEC>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl core::ops::DerefMut for W {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
impl From<crate::W<CTRL_SPEC>> for W {
    #[inline(always)]
    fn from(writer: crate::W<CTRL_SPEC>) -> Self {
        W(writer)
    }
}
// ---- INT field (bit 0): group interrupt status, write-1-to-clear ----
#[doc = "Group interrupt status. This bit is cleared by writing a one to it. Writing zero has no effect.\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum INT_A {
    #[doc = "0: No request. No interrupt request is pending."]
    NO_REQUEST = 0,
    #[doc = "1: Request active. Interrupt request is active."]
    REQUEST_ACTIVE = 1,
}
impl From<INT_A> for bool {
    #[inline(always)]
    fn from(variant: INT_A) -> Self {
        variant as u8 != 0
    }
}
#[doc = "Field `INT` reader - Group interrupt status. This bit is cleared by writing a one to it. Writing zero has no effect."]
pub struct INT_R(crate::FieldReader<bool, INT_A>);
impl INT_R {
    #[inline(always)]
    pub(crate) fn new(bits: bool) -> Self {
        INT_R(crate::FieldReader::new(bits))
    }
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> INT_A {
        match self.bits {
            false => INT_A::NO_REQUEST,
            true => INT_A::REQUEST_ACTIVE,
        }
    }
    #[doc = "Checks if the value of the field is `NO_REQUEST`"]
    #[inline(always)]
    pub fn is_no_request(&self) -> bool {
        **self == INT_A::NO_REQUEST
    }
    #[doc = "Checks if the value of the field is `REQUEST_ACTIVE`"]
    #[inline(always)]
    pub fn is_request_active(&self) -> bool {
        **self == INT_A::REQUEST_ACTIVE
    }
}
impl core::ops::Deref for INT_R {
    type Target = crate::FieldReader<bool, INT_A>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
#[doc = "Field `INT` writer - Group interrupt status. This bit is cleared by writing a one to it. Writing zero has no effect."]
pub struct INT_W<'a> {
    w: &'a mut W,
}
impl<'a> INT_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: INT_A) -> &'a mut W {
        self.bit(variant.into())
    }
    #[doc = "No request. No interrupt request is pending."]
    #[inline(always)]
    pub fn no_request(self) -> &'a mut W {
        self.variant(INT_A::NO_REQUEST)
    }
    #[doc = "Request active. Interrupt request is active."]
    #[inline(always)]
    pub fn request_active(self) -> &'a mut W {
        self.variant(INT_A::REQUEST_ACTIVE)
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Clear bit 0 of the staged value, then splice in `value`.
        self.w.bits = (self.w.bits & !0x01) | (value as u32 & 0x01);
        self.w
    }
}
// ---- COMB field (bit 1): OR/AND combination of enabled inputs ----
#[doc = "Combine enabled inputs for group interrupt\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum COMB_A {
    #[doc = "0: Or. OR functionality: A grouped interrupt is generated when any one of the enabled inputs is active (based on its programmed polarity)."]
    OR = 0,
    #[doc = "1: And. AND functionality: An interrupt is generated when all enabled bits are active (based on their programmed polarity)."]
    AND = 1,
}
impl From<COMB_A> for bool {
    #[inline(always)]
    fn from(variant: COMB_A) -> Self {
        variant as u8 != 0
    }
}
#[doc = "Field `COMB` reader - Combine enabled inputs for group interrupt"]
pub struct COMB_R(crate::FieldReader<bool, COMB_A>);
impl COMB_R {
    #[inline(always)]
    pub(crate) fn new(bits: bool) -> Self {
        COMB_R(crate::FieldReader::new(bits))
    }
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> COMB_A {
        match self.bits {
            false => COMB_A::OR,
            true => COMB_A::AND,
        }
    }
    #[doc = "Checks if the value of the field is `OR`"]
    #[inline(always)]
    pub fn is_or(&self) -> bool {
        **self == COMB_A::OR
    }
    #[doc = "Checks if the value of the field is `AND`"]
    #[inline(always)]
    pub fn is_and(&self) -> bool {
        **self == COMB_A::AND
    }
}
impl core::ops::Deref for COMB_R {
    type Target = crate::FieldReader<bool, COMB_A>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
#[doc = "Field `COMB` writer - Combine enabled inputs for group interrupt"]
pub struct COMB_W<'a> {
    w: &'a mut W,
}
impl<'a> COMB_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: COMB_A) -> &'a mut W {
        self.bit(variant.into())
    }
    #[doc = "Or. OR functionality: A grouped interrupt is generated when any one of the enabled inputs is active (based on its programmed polarity)."]
    #[inline(always)]
    pub fn or(self) -> &'a mut W {
        self.variant(COMB_A::OR)
    }
    #[doc = "And. AND functionality: An interrupt is generated when all enabled bits are active (based on their programmed polarity)."]
    #[inline(always)]
    pub fn and(self) -> &'a mut W {
        self.variant(COMB_A::AND)
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 1)) | ((value as u32 & 0x01) << 1);
        self.w
    }
}
// ---- TRIG field (bit 2): edge- vs level-triggered ----
#[doc = "Group interrupt trigger\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum TRIG_A {
    #[doc = "0: Edge-triggered."]
    EDGE_TRIGGERED = 0,
    #[doc = "1: Level-triggered."]
    LEVEL_TRIGGERED = 1,
}
impl From<TRIG_A> for bool {
    #[inline(always)]
    fn from(variant: TRIG_A) -> Self {
        variant as u8 != 0
    }
}
#[doc = "Field `TRIG` reader - Group interrupt trigger"]
pub struct TRIG_R(crate::FieldReader<bool, TRIG_A>);
impl TRIG_R {
    #[inline(always)]
    pub(crate) fn new(bits: bool) -> Self {
        TRIG_R(crate::FieldReader::new(bits))
    }
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> TRIG_A {
        match self.bits {
            false => TRIG_A::EDGE_TRIGGERED,
            true => TRIG_A::LEVEL_TRIGGERED,
        }
    }
    #[doc = "Checks if the value of the field is `EDGE_TRIGGERED`"]
    #[inline(always)]
    pub fn is_edge_triggered(&self) -> bool {
        **self == TRIG_A::EDGE_TRIGGERED
    }
    #[doc = "Checks if the value of the field is `LEVEL_TRIGGERED`"]
    #[inline(always)]
    pub fn is_level_triggered(&self) -> bool {
        **self == TRIG_A::LEVEL_TRIGGERED
    }
}
impl core::ops::Deref for TRIG_R {
    type Target = crate::FieldReader<bool, TRIG_A>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
#[doc = "Field `TRIG` writer - Group interrupt trigger"]
pub struct TRIG_W<'a> {
    w: &'a mut W,
}
impl<'a> TRIG_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: TRIG_A) -> &'a mut W {
        self.bit(variant.into())
    }
    #[doc = "Edge-triggered."]
    #[inline(always)]
    pub fn edge_triggered(self) -> &'a mut W {
        self.variant(TRIG_A::EDGE_TRIGGERED)
    }
    #[doc = "Level-triggered."]
    #[inline(always)]
    pub fn level_triggered(self) -> &'a mut W {
        self.variant(TRIG_A::LEVEL_TRIGGERED)
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 2)) | ((value as u32 & 0x01) << 2);
        self.w
    }
}
// ---- whole-register reader/writer field accessors ----
impl R {
    #[doc = "Bit 0 - Group interrupt status. This bit is cleared by writing a one to it. Writing zero has no effect."]
    #[inline(always)]
    pub fn int(&self) -> INT_R {
        INT_R::new((self.bits & 0x01) != 0)
    }
    #[doc = "Bit 1 - Combine enabled inputs for group interrupt"]
    #[inline(always)]
    pub fn comb(&self) -> COMB_R {
        COMB_R::new(((self.bits >> 1) & 0x01) != 0)
    }
    #[doc = "Bit 2 - Group interrupt trigger"]
    #[inline(always)]
    pub fn trig(&self) -> TRIG_R {
        TRIG_R::new(((self.bits >> 2) & 0x01) != 0)
    }
}
impl W {
    #[doc = "Bit 0 - Group interrupt status. This bit is cleared by writing a one to it. Writing zero has no effect."]
    #[inline(always)]
    pub fn int(&mut self) -> INT_W {
        INT_W { w: self }
    }
    #[doc = "Bit 1 - Combine enabled inputs for group interrupt"]
    #[inline(always)]
    pub fn comb(&mut self) -> COMB_W {
        COMB_W { w: self }
    }
    #[doc = "Bit 2 - Group interrupt trigger"]
    #[inline(always)]
    pub fn trig(&mut self) -> TRIG_W {
        TRIG_W { w: self }
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.0.bits(bits);
        self
    }
}
// ---- register spec: ties the typed API to the generic Reg machinery ----
#[doc = "GPIO grouped interrupt control register\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [ctrl](index.html) module"]
pub struct CTRL_SPEC;
impl crate::RegisterSpec for CTRL_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [ctrl::R](R) reader structure"]
impl crate::Readable for CTRL_SPEC {
    type Reader = R;
}
#[doc = "`write(|w| ..)` method takes [ctrl::W](W) writer structure"]
impl crate::Writable for CTRL_SPEC {
    type Writer = W;
}
#[doc = "`reset()` method sets CTRL to value 0"]
impl crate::Resettable for CTRL_SPEC {
    #[inline(always)]
    fn reset_value() -> Self::Ux {
        0
    }
}
31.570201
424
0.585315
269788a946eab5644e275e51fe22ce9ad2b31c1c
1,059
use crate::vector::{Vector2, Vector3, Vector4};

// Conversions between this crate's vectors and `nalgebra`'s fixed-size column
// vectors. `nalgebra` keeps its components in `data.0`, a one-column
// array-of-arrays, so destructuring the outer array yields the plain `[T; N]`
// our constructors take; the reverse direction hands our inner array straight
// to `nalgebra`'s `From<[T; N]>` impl.

impl<T> From<nalgebra::Vector2<T>> for Vector2<T> {
    #[inline]
    fn from(vec: nalgebra::Vector2<T>) -> Self {
        let [components] = vec.data.0;
        Vector2::new(components)
    }
}

impl<T> From<nalgebra::Vector3<T>> for Vector3<T> {
    #[inline]
    fn from(vec: nalgebra::Vector3<T>) -> Self {
        let [components] = vec.data.0;
        Vector3::new(components)
    }
}

impl<T> From<nalgebra::Vector4<T>> for Vector4<T> {
    #[inline]
    fn from(vec: nalgebra::Vector4<T>) -> Self {
        let [components] = vec.data.0;
        Vector4::new(components)
    }
}

impl<T: nalgebra::Scalar> From<Vector2<T>> for nalgebra::Vector2<T> {
    #[inline]
    fn from(vec: Vector2<T>) -> Self {
        nalgebra::Vector2::from(vec.0)
    }
}

impl<T: nalgebra::Scalar> From<Vector3<T>> for nalgebra::Vector3<T> {
    #[inline]
    fn from(vec: Vector3<T>) -> Self {
        nalgebra::Vector3::from(vec.0)
    }
}

impl<T: nalgebra::Scalar> From<Vector4<T>> for nalgebra::Vector4<T> {
    #[inline]
    fn from(vec: Vector4<T>) -> Self {
        nalgebra::Vector4::from(vec.0)
    }
}
22.531915
69
0.554297
fb99eb8b3f1a4bf42bb8b5036ca68919c8281809
1,012
/*
 *
 *
 * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
 *
 * The version of the OpenAPI document: 1.0.0
 *
 * Generated by: https://openapi-generator.tech
 */

/// A friend eligible to receive store gifts. All fields are optional and
/// omitted from serialization when unset.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LolStoreGiftingFriend {
    #[serde(rename = "friendsSince", skip_serializing_if = "Option::is_none")]
    pub friends_since: Option<String>,
    #[serde(rename = "nick", skip_serializing_if = "Option::is_none")]
    pub nick: Option<String>,
    #[serde(rename = "oldFriends", skip_serializing_if = "Option::is_none")]
    pub old_friends: Option<bool>,
    #[serde(rename = "summonerId", skip_serializing_if = "Option::is_none")]
    pub summoner_id: Option<i64>,
}

impl LolStoreGiftingFriend {
    /// Creates a record with every field unset.
    pub fn new() -> LolStoreGiftingFriend {
        LolStoreGiftingFriend {
            friends_since: None,
            nick: None,
            old_friends: None,
            summoner_id: None,
        }
    }
}

// `Default` mirrors `new()` so the type works with `..Default::default()`
// struct-update syntax and containers deriving `Default`
// (clippy::new_without_default).
impl Default for LolStoreGiftingFriend {
    fn default() -> Self {
        Self::new()
    }
}
26.631579
109
0.658103
e5c6b93c52d5525a79f6ced541303d7c7faac0ec
1,495
/// Yew component that renders the Material Design "report" icon as an inline
/// SVG, parameterised by the crate-wide icon [`crate::Props`].
pub struct IconReport {
    props: crate::Props,
}

impl yew::Component for IconReport {
    type Properties = crate::Props;
    type Message = ();

    /// Stores the initial props; the component link is unused.
    fn create(props: Self::Properties, _: yew::prelude::ComponentLink<Self>) -> Self {
        Self { props }
    }

    /// `Message` is `()`, so any update simply requests a re-render.
    fn update(&mut self, _: Self::Message) -> yew::prelude::ShouldRender {
        true
    }

    /// Always `false`: prop changes after creation are ignored by this component.
    fn change(&mut self, _: Self::Properties) -> yew::prelude::ShouldRender {
        false
    }

    /// Emits the SVG markup. Each attribute falls back to a default (24px
    /// size, `currentColor` stroke, stroke width 2, round caps/joins) when the
    /// corresponding prop is unset.
    // NOTE(review): the `unwrap_or` calls imply each `Props` field is an
    // `Option` of a `Copy` value (`&'static str` / integer) — confirm against
    // the `crate::Props` definition.
    fn view(&self) -> yew::prelude::Html {
        yew::prelude::html! {
            <svg
                class=self.props.class.unwrap_or("")
                width=self.props.size.unwrap_or(24).to_string()
                height=self.props.size.unwrap_or(24).to_string()
                viewBox="0 0 24 24"
                fill=self.props.fill.unwrap_or("none")
                stroke=self.props.color.unwrap_or("currentColor")
                stroke-width=self.props.stroke_width.unwrap_or(2).to_string()
                stroke-linecap=self.props.stroke_linecap.unwrap_or("round")
                stroke-linejoin=self.props.stroke_linejoin.unwrap_or("round")
            >
                <svg xmlns="http://www.w3.org/2000/svg" height="24" viewBox="0 0 24 24" width="24"><path d="M0 0h24v24H0V0z" fill="none"/><path d="M15.73 3H8.27L3 8.27v7.46L8.27 21h7.46L21 15.73V8.27L15.73 3zM12 17.3c-.72 0-1.3-.58-1.3-1.3s.58-1.3 1.3-1.3 1.3.58 1.3 1.3-.58 1.3-1.3 1.3zm1-4.3h-2V7h2v6z"/></svg>
            </svg>
        }
    }
}
32.5
308
0.572575
7159011567034766328c511ebe8ca0359ab7a367
908
//! [`nekolib`] の verify に関するもの。 //! //! そのうちちゃんと作ります。 //! //! [`nekolib`]: ../nekolib/index.html //! //! `library-rs` では verify をしているのですが、 //! いまいち運用しやすい形式を確立できていないので、早くなんとかしたいです。 //! //! どの問題で何を verify したかとかを見やすい形式で可視化できたらいいよね。 //! たとえば、次のような形式のドキュメントを生成しやすいように作ってみる? //! //! --- //! //! # Sample (verifier) //! //! `some_algo` の verify をします。 //! //! ## Verified by //! - ソルバへのリンク 1 (passing/failing) //! - ソルバへのリンク 2 (passing/failing) //! - ソルバへのリンク 3 (passing/failing) //! //! --- //! //! # Sample (solver for algo) //! //! `some_algo` を用いて問題 A を解きます。 //! //! ## Solves //! - 問題 A へのリンク //! //! ## Explanations //! 解法の概要などが必要であれば書く。 //! //! --- //! //! # Sample (solver for ds) //! //! トレイト `T` を実装した `some_ds` を用いて問題 B を解きます。 //! //! ## Solves //! - 問題 B へのリンク //! //! ## Explanations //! 解法の概要などが必要であれば書く。 #[cfg(test)] mod tests { #[test] fn it_works() { assert_eq!(2 + 2, 4); } }
16.509091
44
0.560573
21cf75d968ad99719476efc6e7b84a8aaeafc1b8
4,549
// Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use {
    // TODO(porce): Rename the aliases as fidl_*
    fidl_fuchsia_wlan_common as wlan_common, fidl_fuchsia_wlan_device as wlan_device,
    fidl_fuchsia_wlan_mlme as wlan_mlme, fidl_fuchsia_wlan_tap as wlantap,
};

/// Build a `WlantapPhyConfig` for a simulated PHY named "wlantap0".
///
/// The PHY advertises DSSS/CCK/OFDM/HT support, the synthetic-device
/// driver features used by wlantap-based tests, exactly one MAC role,
/// and a single 2.4 GHz band (see `create_2_4_ghz_band_info`).
///
/// * `hw_mac_address` - MAC address the fake hardware reports.
/// * `mac_role` - the one MAC role this PHY claims to support.
pub fn create_wlantap_config(
    hw_mac_address: [u8; 6],
    mac_role: wlan_device::MacRole,
) -> wlantap::WlantapPhyConfig {
    use fidl_fuchsia_wlan_common::DriverFeature;
    use fidl_fuchsia_wlan_device::SupportedPhy;
    wlantap::WlantapPhyConfig {
        phy_info: wlan_device::PhyInfo {
            id: 0,
            dev_path: None,
            hw_mac_address,
            supported_phys: vec![
                SupportedPhy::Dsss,
                SupportedPhy::Cck,
                SupportedPhy::Ofdm,
                SupportedPhy::Ht,
            ],
            driver_features: vec![
                DriverFeature::TempDirectSmeChannel,
                DriverFeature::Synth,
                DriverFeature::TxStatusReport,
            ],
            mac_roles: vec![mac_role],
            caps: vec![],
            bands: vec![create_2_4_ghz_band_info()],
        },
        name: String::from("wlantap0"),
        quiet: false,
    }
}

/// Describe a 2.4 GHz band with HT (802.11n) capabilities and no VHT.
///
/// Values are mostly conservative defaults: short guard interval and
/// greenfield enabled, one spatial stream, no beamforming or antenna
/// selection.
fn create_2_4_ghz_band_info() -> wlan_device::BandInfo {
    wlan_device::BandInfo {
        band_id: wlan_common::Band::WlanBand2Ghz,
        ht_caps: Some(Box::new(wlan_mlme::HtCapabilities {
            ht_cap_info: wlan_mlme::HtCapabilityInfo {
                ldpc_coding_cap: false,
                chan_width_set: wlan_mlme::ChanWidthSet::TwentyForty as u8,
                sm_power_save: wlan_mlme::SmPowerSave::Disabled as u8,
                greenfield: true,
                short_gi_20: true,
                short_gi_40: true,
                tx_stbc: true,
                rx_stbc: 1,
                delayed_block_ack: false,
                max_amsdu_len: wlan_mlme::MaxAmsduLen::Octets3839 as u8,
                dsss_in_40: false,
                intolerant_40: false,
                lsig_txop_protect: false,
            },
            ampdu_params: wlan_mlme::AmpduParams {
                exponent: 0,
                min_start_spacing: wlan_mlme::MinMpduStartSpacing::NoRestrict as u8,
            },
            mcs_set: wlan_mlme::SupportedMcsSet {
                // Low byte 0xff presumably marks MCS 0-7 as RX-supported
                // (single spatial stream); TODO confirm bit layout against
                // the MLME FIDL definition.
                rx_mcs_set: 0x01000000ff,
                rx_highest_rate: 0,
                tx_mcs_set_defined: true,
                tx_rx_diff: false,
                tx_max_ss: 1,
                tx_ueqm: false,
            },
            ht_ext_cap: wlan_mlme::HtExtCapabilities {
                pco: false,
                pco_transition: wlan_mlme::PcoTransitionTime::PcoReserved as u8,
                mcs_feedback: wlan_mlme::McsFeedback::McsNofeedback as u8,
                htc_ht_support: false,
                rd_responder: false,
            },
            // Transmit beamforming entirely disabled.
            txbf_cap: wlan_mlme::TxBfCapability {
                implicit_rx: false,
                rx_stag_sounding: false,
                tx_stag_sounding: false,
                rx_ndp: false,
                tx_ndp: false,
                implicit: false,
                calibration: wlan_mlme::Calibration::CalibrationNone as u8,
                csi: false,
                noncomp_steering: false,
                comp_steering: false,
                csi_feedback: wlan_mlme::Feedback::FeedbackNone as u8,
                noncomp_feedback: wlan_mlme::Feedback::FeedbackNone as u8,
                comp_feedback: wlan_mlme::Feedback::FeedbackNone as u8,
                min_grouping: wlan_mlme::MinGroup::MinGroupOne as u8,
                csi_antennas: 1,
                noncomp_steering_ants: 1,
                comp_steering_ants: 1,
                csi_rows: 1,
                chan_estimation: 1,
            },
            // Antenna selection entirely disabled.
            asel_cap: wlan_mlme::AselCapability {
                asel: false,
                csi_feedback_tx_asel: false,
                ant_idx_feedback_tx_asel: false,
                explicit_csi_feedback: false,
                antenna_idx_feedback: false,
                rx_asel: false,
                tx_sounding_ppdu: false,
            },
        })),
        vht_caps: None,
        // Rates in 802.11 supported-rates units of 0.5 Mbps:
        // 1/2/5.5/11 (DSSS/CCK) and 6/9/12/18/24/36/48/54 (OFDM).
        basic_rates: vec![2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108],
        // 2.4 GHz channels 1-14 relative to the 2407 MHz base frequency.
        supported_channels: wlan_device::ChannelList {
            base_freq: 2407,
            channels: vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14],
        },
    }
}
37.595041
84
0.547813
4a80ae9da2ed898a6fa0ee32b616b94160184585
50,134
#![cfg_attr(not(feature = "std"), no_std)] /// Edit this file to define custom logic or remove it if it is not needed. /// Learn more about FRAME and the core library of Substrate FRAME pallets: /// <https://substrate.dev/docs/en/knowledgebase/runtime/frame> use codec::{Decode, Encode}; use cumulus_pallet_xcm::{ensure_sibling_para, Origin as CumulusOrigin}; use cumulus_primitives_core::ParaId; use frame_support::{ dispatch::DispatchResultWithPostInfo, log, pallet_prelude::*, traits::{Currency, EstimateCallFee, UnixTime}, IterableStorageMap, }; use frame_system::{ self as system, offchain::{ AppCrypto, CreateSignedTransaction, SendSignedTransaction, Signer, SubmitTransaction, }, pallet_prelude::*, Config as SystemConfig, }; use hex::ToHex; use lite_json::{ json::{JsonValue, NumberValue}, Serialize as JsonSerialize, }; use scale_info::TypeInfo; use sp_std::{borrow::ToOwned, convert::TryFrom, prelude::*, str, vec, vec::Vec}; use sp_core::crypto::KeyTypeId; use sp_runtime::{ offchain::{ http, storage::{MutateStorageError, StorageRetrievalError, StorageValueRef}, Duration, }, traits::{Hash, UniqueSaturatedInto, Zero}, }; use xcm::latest::{prelude::*, Junction, OriginKind, SendXcm, Xcm}; pub use pallet::*; #[cfg(test)] mod tests; // Runtime benchmarking features #[cfg(feature = "runtime-benchmarks")] mod benchmarking; pub mod weights; pub use weights::*; type BalanceOf<T> = <<T as Config>::Currency as Currency<<T as frame_system::Config>::AccountId>>::Balance; /// Defines application identifier for crypto keys of this module. /// /// Every module that deals with signatures needs to declare its unique identifier for /// its crypto keys. /// When offchain worker is signing transactions it's going to request keys of type /// `KeyTypeId` from the keystore and use the ones it finds to sign the transaction. /// The keys can be inserted manually via RPC (see `author_insertKey`). 
/// ocpf mean off-chain worker price fetch pub const KEY_TYPE: KeyTypeId = KeyTypeId(*b"ocpf"); /// Based on the above `KeyTypeId` we need to generate a pallet-specific crypto type wrappers. /// We can use from supported crypto kinds (`sr25519`, `ed25519` and `ecdsa`) and augment /// the types with this pallet-specific identifier. pub mod crypto { use super::KEY_TYPE; use sp_core::sr25519::Signature as Sr25519Signature; use sp_runtime::{ app_crypto::{app_crypto, sr25519}, traits::Verify, }; use sp_runtime::{MultiSignature, MultiSigner}; app_crypto!(sr25519, KEY_TYPE); pub struct TestAuthId; // implemented for ocw-runtime impl frame_system::offchain::AppCrypto<MultiSigner, MultiSignature> for TestAuthId { type RuntimeAppPublic = Public; type GenericSignature = sp_core::sr25519::Signature; type GenericPublic = sp_core::sr25519::Public; } impl frame_system::offchain::AppCrypto<<Sr25519Signature as Verify>::Signer, Sr25519Signature> for TestAuthId { type RuntimeAppPublic = Public; type GenericSignature = sp_core::sr25519::Signature; type GenericPublic = sp_core::sr25519::Public; } } /// An index to a block. /// #[frame_support::pallet] pub mod pallet { use super::*; #[pallet::config] pub trait Config: CreateSignedTransaction<Call<Self>> + frame_system::Config + pallet_balances::Config where <Self as frame_system::Config>::AccountId: AsRef<[u8]> + ToHex, { /// The identifier type for an offchain worker. type AuthorityId: AppCrypto<Self::Public, Self::Signature>; type Origin: From<<Self as SystemConfig>::Origin> + Into<Result<CumulusOrigin, <Self as Config>::Origin>>; /// The overarching event type. type Event: From<Event<Self>> + IsType<<Self as frame_system::Config>::Event>; /// The overarching dispatch call type. type Call: From<Call<Self>> + Encode; type XcmSender: SendXcm; type UnixTime: UnixTime; /// A configuration for base priority of unsigned transactions. 
/// /// This is exposed so that it can be tuned for particular runtime, when /// multiple pallets send unsigned transactions. #[pallet::constant] type UnsignedPriority: Get<TransactionPriority>; /// Type representing the weight of this pallet type WeightInfo: WeightInfo; type EstimateCallFee: EstimateCallFee<Call<Self>, BalanceOf<Self>>; type Currency: frame_support::traits::Currency<Self::AccountId>; } #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] pub struct Pallet<T>(_); #[pallet::hooks] impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> where T::AccountId: AsRef<[u8]> + ToHex, { fn offchain_worker(block_number: T::BlockNumber) { // Note that having logs compiled to WASM may cause the size of the blob to increase // significantly. You can use `RuntimeDebug` custom derive to hide details of the types // in WASM. The `sp-api` crate also provides a feature `disable-logging` to disable // all logging and thus, remove any logging from the WASM. let parent_hash = <system::Pallet<T>>::block_hash(block_number - 1u32.into()); log::debug!( "Current block: {:?} (parent hash: {:?})", block_number, parent_hash ); // It's a good practice to keep `fn offchain_worker()` function minimal, and move most // of the code to separate `impl` block. // Here we call a helper function to calculate current average price. // This function reads storage entries of the current state. let should_send = Self::choose_transaction_type(block_number); let res = match should_send { TransactionType::Signed => Self::fetch_data_and_send_signed(block_number), TransactionType::Raw | TransactionType::UnsignedForAll | TransactionType::UnsignedForAny => { Self::fetch_data_and_send_raw_unsigned(block_number) } _ => Ok(()), }; if let Err(e) = res { log::error!("Error: {}", e); } } } // Dispatchable functions allows users to interact with the pallet and invoke state changes. // These functions materialize as "extrinsics", which are often compared to transactions. 
// Dispatchable functions must be annotated with a weight and must return a DispatchResult. #[pallet::call] impl<T: Config> Pallet<T> where T::AccountId: AsRef<[u8]> + ToHex, T: pallet_balances::Config, { #[pallet::weight(<T as Config>::WeightInfo::clear_api_queue_unsigned())] pub fn clear_api_queue_unsigned( origin: OriginFor<T>, _block_number: T::BlockNumber, processed_requests: Vec<u64>, ) -> DispatchResultWithPostInfo { // This ensures that the function can only be called via unsigned transaction. ensure_none(origin)?; for key in processed_requests.iter() { <ApiQueue<T>>::remove(&key); } Ok(().into()) } #[pallet::weight(0 + T::DbWeight::get().writes(1))] pub fn clear_processed_requests_unsigned( origin: OriginFor<T>, _block_number: T::BlockNumber, processed_requests: Vec<u64>, ) -> DispatchResultWithPostInfo { // This ensures that the function can only be called via unsigned transaction. ensure_none(origin)?; let block_number = <system::Pallet<T>>::block_number(); let current_timestamp = T::UnixTime::now().as_millis(); for key in processed_requests.iter() { if SavedRequests::<T>::contains_key(key.clone()) { let saved_request = Self::saved_data_requests(key); let processed_request = DataRequest { para_id: saved_request.para_id, account_id: saved_request.account_id, feed_name: saved_request.feed_name.clone(), requested_block_number: saved_request.requested_block_number, processed_block_number: Some(block_number), requested_timestamp: saved_request.requested_timestamp, processed_timestamp: Some(current_timestamp), payload: saved_request.payload.clone(), is_query: saved_request.is_query, url: saved_request.url.clone(), }; <SavedRequests<T>>::insert(key, processed_request.clone()); let encoded_hash = hex::encode( sp_runtime::traits::BlakeTwo256::hash( processed_request.clone().encode().as_slice(), ) .as_bytes(), ) .as_bytes() .to_vec(); if processed_request.is_query { Self::deposit_event(Event::ReadFromDWH( processed_request.para_id, encoded_hash.clone(), 
processed_request.feed_name.clone(), processed_request.clone(), processed_request.processed_block_number.clone().unwrap(), )); } else { //insert to api <ApiQueue<T>>::insert(key, processed_request.clone()); let feed_owner = Self::feed_account_lookup(processed_request.feed_name.clone()).0; <FeedAccountLookup<T>>::insert( processed_request.feed_name.clone(), (&feed_owner, encoded_hash.clone()), ); Self::deposit_event(Event::SavedToDWH( processed_request.para_id, encoded_hash.clone(), processed_request.feed_name.clone(), processed_request.clone(), processed_request.processed_block_number.clone().unwrap(), )); } <DataRequests<T>>::remove(&key); <NextUnsignedAt<T>>::put(block_number); } } Ok(().into()) } #[pallet::weight(<T as Config>::WeightInfo::query_data())] pub fn query_data( origin: OriginFor<T>, para_id: Option<ParaId>, feed_name: Vec<u8>, ) -> DispatchResult { ensure_signed(origin.clone())?; let submitter_account_id = ensure_signed(origin.clone())?; Self::query_feed(submitter_account_id, para_id, feed_name) } #[pallet::weight(0)] pub fn receive_response_from_parachain( origin: OriginFor<T>, feed_name: Vec<u8>, response: Vec<u8>, ) -> DispatchResult { let para_id = ensure_sibling_para(<T as Config>::Origin::from(origin))?; let block_number = <system::Pallet<T>>::block_number(); Self::deposit_event(Event::ResponseReceived( para_id, feed_name.clone(), response.clone(), block_number, )); Ok(()) } #[pallet::weight(<T as Config>::WeightInfo::submit_data_signed())] pub fn submit_data_signed( origin: OriginFor<T>, block_number: T::BlockNumber, key: u64, data: Vec<u8>, ) -> DispatchResult { // Check that the extrinsic was signed and get the signer. // This function will return an error if the extrinsic is not signed. 
// https://substrate.dev/docs/en/knowledgebase/runtime/origin ensure_signed(origin.clone())?; let submitter_account_id = ensure_signed(origin.clone())?; let data_request = Self::data_requests(key); let saved_request = DataRequest { para_id: data_request.para_id, account_id: Some(submitter_account_id), feed_name: data_request.feed_name.clone(), requested_block_number: data_request.requested_block_number, processed_block_number: Some(block_number), requested_timestamp: data_request.requested_timestamp, processed_timestamp: None, payload: data, is_query: data_request.is_query, url: data_request.url.clone(), }; Self::save_data_response_onchain(block_number, key, saved_request); Ok(()) } #[pallet::weight(<T as Config>::WeightInfo::submit_data_unsigned())] pub fn submit_data_unsigned( origin: OriginFor<T>, block_number: T::BlockNumber, key: u64, data: Vec<u8>, ) -> DispatchResult { ensure_none(origin.clone())?; let data_request = Self::data_requests(key); let saved_request = DataRequest { para_id: data_request.para_id, account_id: data_request.account_id, feed_name: data_request.feed_name.clone(), requested_block_number: data_request.requested_block_number, processed_block_number: Some(block_number), requested_timestamp: data_request.requested_timestamp, processed_timestamp: None, payload: data, is_query: data_request.is_query, url: data_request.url.clone(), }; Self::save_data_response_onchain(block_number, key, saved_request); Self::send_response_to_parachain(block_number, key) } #[pallet::weight(<T as Config>::WeightInfo::submit_data_via_api())] pub fn submit_data_via_api( origin: OriginFor<T>, para_id: Option<ParaId>, url: Vec<u8>, feed_name: Vec<u8>, ) -> DispatchResult { ensure_signed(origin.clone())?; let submitter_account_id = ensure_signed(origin.clone())?; let new_feed_name = (str::from_utf8(b"custom_").unwrap().to_owned() + str::from_utf8(&feed_name).unwrap()) .as_bytes() .to_vec(); let result = Self::ensure_account_owns_table( submitter_account_id.clone(), 
new_feed_name.clone(), ); match result { Ok(()) => Self::add_data_request( Some(submitter_account_id), para_id, Some(url), new_feed_name, Vec::new(), false, ), _ => result, } } #[pallet::weight(<T as Config>::WeightInfo::submit_price_feed())] pub fn submit_price_feed( origin: OriginFor<T>, para_id: Option<ParaId>, requested_currencies: Vec<u8>, ) -> DispatchResult { let submitter_account_id = ensure_signed(origin.clone())?; let feed_name = "price_feeding".as_bytes().to_vec(); let result = Self::ensure_account_owns_table(submitter_account_id.clone(), feed_name.clone()); match result { Ok(()) => { let currencies = str::from_utf8(&requested_currencies).unwrap(); let api_url = str::from_utf8(b"https://api.kylin-node.co.uk/prices?currency_pairs=") .unwrap(); let url = api_url.clone().to_owned() + currencies.clone(); Self::add_data_request( Some(submitter_account_id), para_id, Some(url.as_bytes().to_vec()), "price_feeding".as_bytes().to_vec(), Vec::new(), false, ) } _ => result, } } #[pallet::weight(<T as Config>::WeightInfo::sudo_remove_feed_account())] pub fn sudo_remove_feed_account( origin: OriginFor<T>, feed_name: Vec<u8>, ) -> DispatchResult { ensure_root(origin)?; let feed_exists = FeedAccountLookup::<T>::contains_key(feed_name.clone()); if feed_exists { <FeedAccountLookup<T>>::remove(&feed_name); Self::deposit_event(Event::RemovedFeedAccount(feed_name.clone())) } Ok(()) } #[pallet::weight(<T as Config>::WeightInfo::write_data_onchain())] pub fn write_data_onchain( origin: OriginFor<T>, feed_name: Vec<u8>, data: Vec<u8>, ) -> DispatchResult { ensure_signed(origin.clone())?; let submitter_account_id = ensure_signed(origin.clone())?; let new_feed_name = (str::from_utf8(b"custom_").unwrap().to_owned() + str::from_utf8(&feed_name).unwrap()) .as_bytes() .to_vec(); let result = Self::ensure_account_owns_table( submitter_account_id.clone(), new_feed_name.clone(), ); match result { Ok(()) => Self::add_data_request( Some(submitter_account_id), None, None, new_feed_name, 
data, false, ), _ => result, } } #[pallet::weight(10_000 + T::DbWeight::get().writes(1))] pub fn xcm_submit_data_via_api( origin: OriginFor<T>, url: Vec<u8>, feed_name: Vec<u8>, ) -> DispatchResult { let requester_para_id = ensure_sibling_para(<T as Config>::Origin::from(origin.clone()))?; let submitter_account_id = ensure_signed(origin.clone())?; let new_feed_name = (str::from_utf8(b"custom_").unwrap().to_owned() + str::from_utf8(&feed_name).unwrap()) .as_bytes() .to_vec(); let result = Self::ensure_account_owns_table( submitter_account_id.clone(), new_feed_name.clone(), ); match result { Ok(()) => Self::add_data_request( Some(submitter_account_id), Some(requester_para_id), Some(url), new_feed_name, Vec::new(), false, ), _ => result, } } } // #[pallet::event where <T as frame_system::Config>:: AccountId: AsRef<[u8]> + ToHex + Decode + Serialize] #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] // #[pallet::metadata(T::AccountId = "AccountId")] pub enum Event<T: Config> where <T as frame_system::Config>::AccountId: AsRef<[u8]> + ToHex, { RemovedFeedAccount(Vec<u8>), SubmitNewData( Option<ParaId>, Vec<u8>, Option<Vec<u8>>, Option<T::AccountId>, T::BlockNumber, ), SavedToDWH( Option<ParaId>, Vec<u8>, Vec<u8>, DataRequest<ParaId, T::BlockNumber, T::AccountId>, T::BlockNumber, ), ReadFromDWH( Option<ParaId>, Vec<u8>, Vec<u8>, DataRequest<ParaId, T::BlockNumber, T::AccountId>, T::BlockNumber, ), ResponseSent( ParaId, DataRequest<ParaId, T::BlockNumber, T::AccountId>, T::BlockNumber, ), ErrorSendingResponse( SendError, ParaId, DataRequest<ParaId, T::BlockNumber, T::AccountId>, ), ResponseReceived(ParaId, Vec<u8>, Vec<u8>, T::BlockNumber), QueryFeeAwarded( T::AccountId, <<T as pallet::Config>::Currency as Currency< <T as frame_system::Config>::AccountId, >>::Balance, Vec<u8>, ), } #[pallet::validate_unsigned] impl<T: Config> ValidateUnsigned for Pallet<T> where T::AccountId: AsRef<[u8]> + ToHex, T: pallet_balances::Config, { type Call = Call<T>; 
/// Validate unsigned call to this module. /// /// By default unsigned transactions are disallowed, but implementing the validator /// here we make sure that some particular calls (the ones produced by offchain worker) /// are being whitelisted and marked as valid. fn validate_unsigned(_source: TransactionSource, call: &Self::Call) -> TransactionValidity { if let Call::submit_data_unsigned { block_number, key: _, data: _, } = call { Self::validate_transaction(block_number) } else if let Call::clear_processed_requests_unsigned { block_number, processed_requests: _, } = call { Self::validate_transaction(block_number) } else if let Call::clear_api_queue_unsigned { block_number, processed_requests: _, } = call { Self::validate_transaction(block_number) } else { InvalidTransaction::Call.into() } } } #[pallet::type_value] pub fn InitialDataId<T: Config>() -> u64 where <T as frame_system::Config>::AccountId: AsRef<[u8]> + ToHex, { 10000000u64 } #[pallet::storage] pub type DataId<T: Config> = StorageValue<_, u64, ValueQuery, InitialDataId<T>>; #[pallet::storage] #[pallet::getter(fn data_requests)] pub type DataRequests<T: Config> = StorageMap<_, Identity, u64, DataRequest<ParaId, T::BlockNumber, T::AccountId>, ValueQuery>; #[pallet::storage] #[pallet::getter(fn saved_data_requests)] pub type SavedRequests<T: Config> = StorageMap<_, Identity, u64, DataRequest<ParaId, T::BlockNumber, T::AccountId>, ValueQuery>; #[pallet::storage] #[pallet::getter(fn api_queue)] pub type ApiQueue<T: Config> = StorageMap<_, Identity, u64, DataRequest<ParaId, T::BlockNumber, T::AccountId>, ValueQuery>; #[pallet::storage] #[pallet::getter(fn next_unsigned_at)] pub(super) type NextUnsignedAt<T: Config> = StorageValue<_, T::BlockNumber, ValueQuery>; #[pallet::storage] #[pallet::getter(fn feed_account_lookup)] pub(super) type FeedAccountLookup<T: Config> = StorageMap<_, Identity, Vec<u8>, (T::AccountId, Vec<u8>), ValueQuery>; } #[derive(Clone, PartialEq, Eq, Encode, Decode, Default, Hash, 
TypeInfo)] #[cfg_attr(feature = "std", derive(Debug))] pub struct DataRequest<ParaId, BlockNumber, AccountId> { para_id: Option<ParaId>, account_id: Option<AccountId>, requested_block_number: BlockNumber, processed_block_number: Option<BlockNumber>, requested_timestamp: u128, processed_timestamp: Option<u128>, payload: Vec<u8>, feed_name: Vec<u8>, is_query: bool, url: Option<Vec<u8>>, } impl<BlockNumber, ParaId, AccountId> DataRequest<ParaId, BlockNumber, AccountId> where BlockNumber: Clone + sp_std::fmt::Debug + Default + UniqueSaturatedInto<u32>, ParaId: Copy + Default + From<u32> + Into<u32>, AccountId: Encode + Default + Encode + Clone + Eq + PartialEq + sp_std::fmt::Debug + AsRef<[u8]> + ToHex, { // fn to_json_string(&self, &mut object_elements:Vec<u8,JsonValue>) -> Result<&str, Utf8Error> { fn to_json_string(&self, encoded_value: Vec<u8>) -> Vec<u8> { let mut object_elements = Vec::new(); let para_key = str::from_utf8(b"para_id").unwrap().chars().collect(); if self.para_id.is_some() { let para_id_number_value = NumberValue { integer: self.para_id.unwrap().into() as i64, fraction: 0, fraction_length: 0, exponent: 0, }; object_elements.push((para_key, JsonValue::Number(para_id_number_value))); } else { object_elements.push((para_key, JsonValue::Null)) } let account_id_key = str::from_utf8(b"account_id").unwrap().chars().collect(); if self.account_id.is_some() { let account_id_in_hex = hex::encode(self.account_id.clone().unwrap().as_ref()); object_elements.push(( account_id_key, JsonValue::String(account_id_in_hex.chars().collect()), )); } else { object_elements.push((account_id_key, JsonValue::Null)) } let requested_block_number_key = str::from_utf8(b"requested_block_number") .unwrap() .chars() .collect(); let requested_block_number = NumberValue { integer: self.requested_block_number.clone().unique_saturated_into() as i64, fraction: 0, fraction_length: 0, exponent: 0, }; object_elements.push(( requested_block_number_key, 
JsonValue::Number(requested_block_number), )); let processed_block_number_key = str::from_utf8(b"processed_block_number") .unwrap() .chars() .collect(); let processed_block_number = NumberValue { integer: self .processed_block_number .clone() .unwrap() .unique_saturated_into() as i64, fraction: 0, fraction_length: 0, exponent: 0, }; object_elements.push(( processed_block_number_key, JsonValue::Number(processed_block_number), )); let requested_timestamp_key = str::from_utf8(b"requested_timestamp") .unwrap() .chars() .collect(); let requested_timestamp = NumberValue { integer: i64::try_from(self.requested_timestamp).unwrap(), fraction: 0, fraction_length: 0, exponent: 0, }; object_elements.push(( requested_timestamp_key, JsonValue::Number(requested_timestamp), )); let processed_timestamp_key = str::from_utf8(b"processed_timestamp") .unwrap() .chars() .collect(); let processed_timestamp = NumberValue { integer: self.processed_timestamp.clone().unwrap() as i64, fraction: 0, fraction_length: 0, exponent: 0, }; object_elements.push(( processed_timestamp_key, JsonValue::Number(processed_timestamp), )); let payload_key = str::from_utf8(b"payload").unwrap().chars().collect(); let payload = str::from_utf8(&self.payload).unwrap().chars().collect(); object_elements.push((payload_key, JsonValue::String(payload))); let feed_name_key = str::from_utf8(b"feed_name").unwrap().chars().collect(); let feed_name = str::from_utf8(&self.feed_name).unwrap().chars().collect(); object_elements.push((feed_name_key, JsonValue::String(feed_name))); let url_key = str::from_utf8(b"url").unwrap().chars().collect(); if self.url.is_some() { let url_string = self.url.as_ref().unwrap(); let url = str::from_utf8(&url_string).unwrap().chars().collect(); object_elements.push((url_key, JsonValue::String(url))); } else { object_elements.push((url_key, JsonValue::Null)); } let json = JsonValue::Object(object_elements.clone()); object_elements = Vec::new(); let data_key = 
str::from_utf8(b"data").unwrap().chars().collect(); object_elements.push((data_key, json)); let hash_key = str::from_utf8(b"hash").unwrap().chars().collect(); let encoded_hash = hex::encode(sp_runtime::traits::BlakeTwo256::hash(encoded_value.as_slice()).as_bytes()) .chars() .collect(); object_elements.push((hash_key, JsonValue::String(encoded_hash))); let final_json = JsonValue::Object(object_elements.clone()).format(4); let json_output = str::from_utf8(&final_json).unwrap().as_bytes().to_vec(); return json_output; } } enum TransactionType { Signed, UnsignedForAny, UnsignedForAll, Raw, None, } impl<T: Config> Pallet<T> where T::AccountId: AsRef<[u8]> + ToHex, { fn ensure_account_owns_table( submitter_account_id: T::AccountId, feed_name: Vec<u8>, ) -> DispatchResult { let feed_exists = FeedAccountLookup::<T>::contains_key(feed_name.clone()); if feed_exists { let feed_owner = Self::feed_account_lookup(feed_name).0; if feed_owner == submitter_account_id { Ok(()) } else { Err(DispatchError::BadOrigin) } } else { let new_hash: Vec<u8> = Vec::new(); <FeedAccountLookup<T>>::insert(feed_name, (submitter_account_id.clone(), new_hash)); Ok(()) } } fn query_feed( submitter_account_id: T::AccountId, para_id: Option<ParaId>, feed_name: Vec<u8>, ) -> DispatchResult { let feed_exists = FeedAccountLookup::<T>::contains_key(feed_name.clone()); if feed_exists { let feed = Self::feed_account_lookup(feed_name.clone()); let latest_hash = feed.1; let api_url = str::from_utf8(b"https://api.kylin-node.co.uk/query?hash=").unwrap(); let url = api_url.clone().to_owned() + str::from_utf8(&latest_hash.clone()).unwrap(); let total_reward = { let call = Call::query_data { para_id: para_id.clone(), feed_name: feed_name.clone(), }; let call_fee = T::EstimateCallFee::estimate_call_fee(&call, None.into()); call_fee }; let query_fee = total_reward * 25u32.into() / 100u32.into(); T::Currency::deposit_into_existing(&feed.0, query_fee)?; Self::deposit_event(Event::QueryFeeAwarded(feed.0, query_fee, 
feed_name.clone())); Self::add_data_request( Some(submitter_account_id), para_id, Some(url.as_bytes().to_vec()), feed_name.clone(), Vec::new(), true, ) } else { Err(DispatchError::CannotLookup) } } fn choose_transaction_type(block_number: T::BlockNumber) -> TransactionType { /// A friendlier name for the error that is going to be returned in case we are in the grace /// period. const RECENTLY_SENT: () = (); // Start off by creating a reference to Local Storage value. // Since the local storage is common for all offchain workers, it's a good practice // to prepend your entry with the module name. let val = StorageValueRef::persistent(b"kylin_oracle::last_send"); // The Local Storage is persisted and shared between runs of the offchain workers, // and offchain workers may run concurrently. We can use the `mutate` function, to // write a storage entry in an atomic fashion. Under the hood it uses `compare_and_set` // low-level method of local storage API, which means that only one worker // will be able to "acquire a lock" and send a transaction if multiple workers // happen to be executed concurrently. let res = val.mutate( |last_send: Result<Option<T::BlockNumber>, StorageRetrievalError>| { match last_send { // If we already have a value in storage and the block number is recent enough // we avoid sending another transaction at this time. Ok(Some(block)) if block_number < block => Err(RECENTLY_SENT), // In every other case we attempt to acquire the lock and send a transaction. _ => Ok(block_number), } }, ); // The result of `mutate` call will give us a nested `Result` type. // The first one matches the return of the closure passed to `mutate`, i.e. // if we return `Err` from the closure, we get an `Err` here. // In case we return `Ok`, here we will have another (inner) `Result` that indicates // if the value has been set to the storage correctly - i.e. if it wasn't // written to in the meantime. 
match res { // The value has been set correctly, which means we can safely send a transaction now. Ok(block_number) => { // Depending if the block is even or odd we will send a `Signed` or `Unsigned` // transaction. // Note that this logic doesn't really guarantee that the transactions will be sent // in an alternating fashion (i.e. fairly distributed). Depending on the execution // order and lock acquisition, we may end up for instance sending two `Signed` // transactions in a row. If a strict order is desired, it's better to use // the storage entry for that. (for instance store both block number and a flag // indicating the type of next transaction to send). let transaction_type = block_number % 3u32.into(); if transaction_type == Zero::zero() { TransactionType::Signed } else if transaction_type == T::BlockNumber::from(1u32) { TransactionType::UnsignedForAny } else if transaction_type == T::BlockNumber::from(2u32) { TransactionType::UnsignedForAll } else { TransactionType::Raw } } // We are in the grace period, we should not send a transaction this time. Err(MutateStorageError::ValueFunctionFailed(RECENTLY_SENT)) => TransactionType::None, // We wanted to send a transaction, but failed to write the block number (acquire a // lock). This indicates that another offchain worker that was running concurrently // most likely executed the same logic and succeeded at writing to storage. // Thus we don't really want to send the transaction, knowing that the other run // already did. 
Err(MutateStorageError::ConcurrentModification(_)) => TransactionType::None, } } fn add_data_request( account_id: Option<T::AccountId>, para_id: Option<ParaId>, url: Option<Vec<u8>>, feed_name: Vec<u8>, payload: Vec<u8>, is_query: bool, ) -> DispatchResult { let index = DataId::<T>::get(); let block_number = <system::Pallet<T>>::block_number(); let current_timestamp = T::UnixTime::now().as_millis(); <DataRequests<T>>::insert( index, DataRequest { para_id: para_id, account_id: account_id.clone(), feed_name: feed_name.clone(), requested_block_number: block_number, processed_block_number: None, requested_timestamp: current_timestamp, processed_timestamp: None, payload: payload, is_query: is_query, url: url.clone(), }, ); if !is_query { Self::deposit_event(Event::SubmitNewData( para_id, feed_name.clone(), url.clone(), account_id.clone(), block_number, )); } DataId::<T>::put(index + 1u64); Ok(()) } fn save_data_response_onchain( block_number: T::BlockNumber, key: u64, data_request: DataRequest<ParaId, T::BlockNumber, T::AccountId>, ) -> () { let current_timestamp = T::UnixTime::now().as_millis(); let saved_data_request = DataRequest { para_id: data_request.para_id, account_id: data_request.account_id.clone(), feed_name: data_request.feed_name.clone(), requested_block_number: data_request.requested_block_number, processed_block_number: Some(block_number), requested_timestamp: data_request.requested_timestamp, processed_timestamp: Some(current_timestamp), payload: data_request.payload.clone(), is_query: data_request.is_query, url: data_request.url.clone(), }; <SavedRequests<T>>::insert(key, saved_data_request.clone()); } fn send_response_to_parachain(block_number: T::BlockNumber, key: u64) -> DispatchResult { let saved_request = Self::saved_data_requests(key); if saved_request.para_id.is_some() { match T::XcmSender::send_xcm( ( 1, Junction::Parachain(saved_request.para_id.unwrap().into()), ), Xcm(vec![Transact { origin_type: OriginKind::Native, require_weight_at_most: 
                    // NOTE(review): a budget of 1_000 weight units is almost certainly
                    // too small for a dispatched call on the destination runtime —
                    // confirm against the receiver's benchmarked weights.
                    1_000,
                    call: <T as Config>::Call::from(Call::<T>::receive_response_from_parachain {
                        feed_name: saved_request.feed_name.clone(),
                        response: saved_request.payload.clone(),
                    })
                    .encode()
                    .into(),
                }]),
            ) {
                Ok(()) => Self::deposit_event(Event::ResponseSent(
                    saved_request.para_id.unwrap(),
                    saved_request.clone(),
                    block_number,
                )),
                Err(e) => Self::deposit_event(Event::ErrorSendingResponse(
                    e,
                    saved_request.para_id.unwrap(),
                    saved_request.clone(),
                )),
            }
        }
        Ok(())
    }

    /// Fetch data for every pending request and submit the results on-chain as
    /// signed transactions, then clear the processed entries and drain the API queue.
    ///
    /// Fails early with a `&'static str` message when no local signing keys are
    /// available.
    fn fetch_data_and_send_signed(block_number: T::BlockNumber) -> Result<(), &'static str> {
        let signer = Signer::<T, T::AuthorityId>::all_accounts();
        if !signer.can_sign() {
            return Err(
                "No local accounts available. Consider adding one via `author_insertKey` RPC.",
            )?;
        }
        let mut processed_requests: Vec<u64> = Vec::new();
        for (key, val) in <DataRequests<T> as IterableStorageMap<_, _>>::iter() {
            // Requests with a URL are fetched over HTTP; otherwise the stored
            // payload is echoed back unchanged.
            let mut response = val.payload.clone();
            if val.url.is_some() {
                response = Self::fetch_http_get_result(val.url.clone().unwrap())
                    .unwrap_or("Failed fetch data".as_bytes().to_vec());
            };
            // write data to postgres dB
            processed_requests.push(key);
            // One signed `submit_data_signed` transaction per request, from every
            // local account able to sign.
            let results = signer.send_signed_transaction(|_account| Call::submit_data_signed {
                block_number: block_number,
                key: key,
                data: response.clone(),
            });
            for (acc, res) in &results {
                match res {
                    Ok(()) => log::info!("[{:?}] Submitted data {}", acc.id, key),
                    Err(e) => log::error!("[{:?}] Failed to submit transaction: {:?}", acc.id, e),
                }
            }
        }
        if processed_requests.iter().count() > 0 {
            // Flush everything handled above in a single clearing transaction.
            let results = signer.send_signed_transaction(|_account| {
                Call::clear_processed_requests_unsigned {
                    block_number: block_number,
                    processed_requests: processed_requests.clone(),
                }
            });
            for (acc, res) in &results {
                match res {
                    Ok(()) => log::info!("[{:?}] Clearing out processed requests.", acc.id),
                    Err(e) => log::error!(
                        "[{:?}] Failed to clear out processed requests: {:?}",
                        acc.id,
                        e
                    ),
                }
            }
        }
        let mut queue_to_api: Vec<u64> = Vec::new();
        for (key, val)
            in <ApiQueue<T> as IterableStorageMap<_, _>>::iter()
        {
            // write data to postgres dB
            // Push each queued entry to the external Kylin API endpoint; the
            // response body is ignored (best-effort delivery).
            let url = str::from_utf8(b"https://api.kylin-node.co.uk/submit").unwrap();
            let _post_response = Self::submit_http_post_request(url.as_bytes().to_vec(), val)
                .unwrap_or("Failed to submit data".as_bytes().to_vec());
            queue_to_api.push(key);
        }
        if queue_to_api.iter().count() > 0 {
            // The API queue is drained via an unsigned transaction (no account needed).
            let result = SubmitTransaction::<T, Call<T>>::submit_unsigned_transaction(
                Call::clear_api_queue_unsigned {
                    block_number: block_number,
                    processed_requests: queue_to_api,
                }
                .into(),
            );
            if let Err(e) = result {
                log::error!("Error clearing api queue: {:?}", e);
            }
        }
        Ok(())
    }

    /// Like `fetch_data_and_send_signed`, but submits results as raw unsigned
    /// transactions. Rate-limited by the `NextUnsignedAt` storage value.
    fn fetch_data_and_send_raw_unsigned(block_number: T::BlockNumber) -> Result<(), &'static str> {
        // Respect the cool-down window between unsigned submissions.
        let next_unsigned_at = <NextUnsignedAt<T>>::get();
        if next_unsigned_at > block_number {
            return Err("Too early to send unsigned transaction");
        }
        let mut processed_requests: Vec<u64> = Vec::new();
        for (key, val) in <DataRequests<T> as IterableStorageMap<_, _>>::iter() {
            // As in the signed path: fetch over HTTP when a URL is present,
            // otherwise echo the stored payload.
            let mut response = val.payload.clone();
            if val.url.is_some() {
                response = Self::fetch_http_get_result(val.url.clone().unwrap())
                    .unwrap_or("Failed fetch data".as_bytes().to_vec());
            }
            processed_requests.push(key);
            let result = SubmitTransaction::<T, Call<T>>::submit_unsigned_transaction(
                Call::submit_data_unsigned {
                    block_number: block_number,
                    key: key,
                    data: response,
                }
                .into(),
            );
            if let Err(e) = result {
                log::error!("Error submitting unsigned transaction: {:?}", e);
            }
        }
        if processed_requests.iter().count() > 0 {
            let result = SubmitTransaction::<T, Call<T>>::submit_unsigned_transaction(
                Call::clear_processed_requests_unsigned {
                    block_number: block_number,
                    processed_requests: processed_requests,
                }
                .into(),
            );
            if let Err(e) = result {
                log::error!("Error clearing queue: {:?}", e);
            }
        }
        let mut queue_to_api: Vec<u64> = Vec::new();
        for (key, val) in <ApiQueue<T> as IterableStorageMap<_, _>>::iter() {
            // write data to postgres dB
            let url =
                str::from_utf8(b"https://api.kylin-node.co.uk/submit").unwrap();
            let _post_response = Self::submit_http_post_request(url.as_bytes().to_vec(), val)
                .unwrap_or("Failed to submit data".as_bytes().to_vec());
            queue_to_api.push(key);
        }
        if queue_to_api.iter().count() > 0 {
            let result = SubmitTransaction::<T, Call<T>>::submit_unsigned_transaction(
                Call::clear_api_queue_unsigned {
                    block_number: block_number,
                    processed_requests: queue_to_api,
                }
                .into(),
            );
            if let Err(e) = result {
                log::error!("Error clearing api queue: {:?}", e);
            }
        }
        Ok(())
    }

    /// Perform an HTTP GET against `url` (UTF-8 bytes) and return the response
    /// body bytes.
    ///
    /// Fails with `http::Error` on I/O problems, deadline expiry (10 s),
    /// a non-200 status code, or a body that is not valid UTF-8.
    fn fetch_http_get_result(url: Vec<u8>) -> Result<Vec<u8>, http::Error> {
        // We want to keep the offchain worker execution time reasonable, so we set a
        // hard-coded 10-second deadline to complete the external call.
        // You can also wait indefinitely for the response, however you may still get a timeout
        // coming from the host machine.
        let deadline = sp_io::offchain::timestamp().add(Duration::from_millis(10_000));
        // Initiate an external HTTP GET request.
        // This is using high-level wrappers from `sp_runtime`, for the low-level calls that
        // you can find in `sp_io`. The API is trying to be similar to `reqwest`, but
        // since we are running in a custom WASM execution environment we can't simply
        // import the library here.
        let request = http::Request::get(str::from_utf8(&url).unwrap());
        // We set the deadline for sending of the request, note that awaiting response can
        // have a separate deadline. Next we send the request, before that it's also possible
        // to alter request headers or stream body content in case of non-GET requests.
        let pending = request
            .deadline(deadline)
            .send()
            .map_err(|_| http::Error::IoError)?;
        // The request is already being processed by the host, we are free to do anything
        // else in the worker (we can send multiple concurrent requests too).
// At some point however we probably want to check the response though, // so we can block current thread and wait for it to finish. // Note that since the request is being driven by the host, we don't have to wait // for the request to have it complete, we will just not read the response. let response = pending .try_wait(deadline) .map_err(|_| http::Error::DeadlineReached)??; // Let's check the status code before we proceed to reading the response. if response.code != 200 { log::info!("Unexpected status code: {}", response.code); return Err(http::Error::Unknown); } // Next we want to fully read the response body and collect it to a vector of bytes. // Note that the return object allows you to read the body in chunks as well // with a way to control the deadline. let body = response.body().collect::<Vec<u8>>(); // Create a str slice from the body. let body_str = sp_std::str::from_utf8(&body).map_err(|_| { log::info!("No UTF8 body"); http::Error::Unknown })?; Ok(body_str.clone().as_bytes().to_vec()) } fn submit_http_post_request( url: Vec<u8>, val: DataRequest<ParaId, T::BlockNumber, T::AccountId>, ) -> Result<Vec<u8>, http::Error> { // Establish deadline let deadline = sp_io::offchain::timestamp().add(Duration::from_millis(10_000)); let encoded_hash = val.clone().encode(); let request_body = val.clone().to_json_string(encoded_hash.clone()); let request = http::Request::post(str::from_utf8(&url).unwrap(), vec![request_body.clone()]) .add_header("x-api-key", "test_api_key") .add_header("content-type", "application/json"); // Send post request let pending = request .deadline(deadline) .body(vec![request_body.clone()]) .send() .map_err(|_| http::Error::IoError)?; // Wait for response let response = pending .try_wait(deadline) .map_err(|_| http::Error::DeadlineReached)??; // Check status code if response.code != 200 { log::info!("Unexpected status code: {}", response.code); return Err(http::Error::Unknown); } // Collect body let body = 
            response.body().collect::<Vec<u8>>();
        let body_str = sp_std::str::from_utf8(&body).map_err(|_| {
            log::info!("No UTF8 body");
            http::Error::Unknown
        })?;
        Ok(body_str.as_bytes().to_vec())
    }

    /// Shared validity check for this pallet's unsigned transactions.
    ///
    /// Rejects submissions that are stale (earlier than `NextUnsignedAt`) or
    /// that claim a future block, then builds a `ValidTransaction` tagged with
    /// the "KylinOCW" prefix at the configured unsigned priority.
    fn validate_transaction(block_number: &T::BlockNumber) -> TransactionValidity {
        // Now let's check if the transaction has any chance to succeed.
        let next_unsigned_at = <NextUnsignedAt<T>>::get();
        if &next_unsigned_at > block_number {
            return InvalidTransaction::Stale.into();
        }
        // Let's make sure to reject transactions from the future.
        let current_block = <system::Pallet<T>>::block_number();
        if &current_block < block_number {
            return InvalidTransaction::Future.into();
        }
        ValidTransaction::with_tag_prefix("KylinOCW")
            .priority(T::UnsignedPriority::get())
            // Transaction remains valid for only the next 5 blocks.
            .longevity(5)
            .propagate(true)
            .build()
    }
}
40.011173
111
0.557466