hexsha
stringlengths
40
40
size
int64
4
1.05M
content
stringlengths
4
1.05M
avg_line_length
float64
1.33
100
max_line_length
int64
1
1k
alphanum_fraction
float64
0.25
1
6a0da9f9b9f44db481cbace747366e8226879c7e
1,718
// Copyright 2019 TiKV Project Authors. Licensed under Apache-2.0. use futures::{Future, Sink, Stream}; use grpcio::*; use kvproto::tikvpb::BatchCommandsRequest; use kvproto::tikvpb_grpc::TikvClient; use std::sync::{mpsc, Arc}; use std::thread; use std::time::Duration; use test_raftstore::new_server_cluster; use tikv_util::HandyRwLock; #[test] fn test_batch_commands() { let mut cluster = new_server_cluster(0, 1); cluster.run(); let leader = cluster.get_region(b"").get_peers()[0].clone(); let addr = cluster.sim.rl().get_addr(leader.get_store_id()).to_owned(); let env = Arc::new(Environment::new(1)); let channel = ChannelBuilder::new(env).connect(&addr); let client = TikvClient::new(channel); let (mut sender, receiver) = client.batch_commands().unwrap(); for _ in 0..1000 { let mut batch_req = BatchCommandsRequest::default(); for i in 0..10 { batch_req.mut_requests().push(Default::default()); batch_req.mut_request_ids().push(i); } match sender.send((batch_req, WriteFlags::default())).wait() { Ok(s) => sender = s, Err(e) => panic!("tikv cilent send fail: {:?}", e), } } let (tx, rx) = mpsc::sync_channel(1); thread::spawn(move || { // We have send 10k requests to the server, so we should get 10k responses. let mut count = 0; for x in receiver .wait() .map(move |b| b.unwrap().get_responses().len()) { count += x; if count == 10000 { tx.send(1).unwrap(); return; } } }); rx.recv_timeout(Duration::from_secs(1)).unwrap(); }
31.236364
83
0.587893
e5f60968123fc0de6b69da83479821c1068d1c2d
9,820
// DO NOT EDIT ! // This file was generated automatically from 'src/mako/api/lib.rs.mako' // DO NOT EDIT ! //! This documentation was generated from *Postmaster Tools* crate version *2.0.4+20210330*, where *20210330* is the exact revision of the *gmailpostmastertools:v1* schema built by the [mako](http://www.makotemplates.org/) code generator *v2.0.4*. //! //! Everything else about the *Postmaster Tools* *v1* API can be found at the //! [official documentation site](https://developers.google.com/gmail/postmaster). //! The original source code is [on github](https://github.com/Byron/google-apis-rs/tree/main/gen/gmailpostmastertools1). //! # Features //! //! Handle the following *Resources* with ease from the central [hub](PostmasterTools) ... //! //! * [domains](api::Domain) //! * [*get*](api::DomainGetCall), [*list*](api::DomainListCall), [*traffic stats get*](api::DomainTrafficStatGetCall) and [*traffic stats list*](api::DomainTrafficStatListCall) //! //! //! //! //! Not what you are looking for ? Find all other Google APIs in their Rust [documentation index](http://byron.github.io/google-apis-rs). //! //! # Structure of this Library //! //! The API is structured into the following primary items: //! //! * **[Hub](PostmasterTools)** //! * a central object to maintain state and allow accessing all *Activities* //! * creates [*Method Builders*](client::MethodsBuilder) which in turn //! allow access to individual [*Call Builders*](client::CallBuilder) //! * **[Resources](client::Resource)** //! * primary types that you can apply *Activities* to //! * a collection of properties and *Parts* //! * **[Parts](client::Part)** //! * a collection of properties //! * never directly used in *Activities* //! * **[Activities](client::CallBuilder)** //! * operations to apply to *Resources* //! //! All *structures* are marked with applicable traits to further categorize them and ease browsing. //! //! Generally speaking, you can invoke *Activities* like this: //! //! 
```Rust,ignore //! let r = hub.resource().activity(...).doit().await //! ``` //! //! Or specifically ... //! //! ```ignore //! let r = hub.domains().traffic_stats_get(...).doit().await //! let r = hub.domains().traffic_stats_list(...).doit().await //! let r = hub.domains().get(...).doit().await //! let r = hub.domains().list(...).doit().await //! ``` //! //! The `resource()` and `activity(...)` calls create [builders][builder-pattern]. The second one dealing with `Activities` //! supports various methods to configure the impending operation (not shown here). It is made such that all required arguments have to be //! specified right away (i.e. `(...)`), whereas all optional ones can be [build up][builder-pattern] as desired. //! The `doit()` method performs the actual communication with the server and returns the respective result. //! //! # Usage //! //! ## Setting up your Project //! //! To use this library, you would put the following lines into your `Cargo.toml` file: //! //! ```toml //! [dependencies] //! google-gmailpostmastertools1 = "*" //! # This project intentionally uses an old version of Hyper. See //! # https://github.com/Byron/google-apis-rs/issues/173 for more //! # information. //! hyper = "^0.14" //! hyper-rustls = "^0.22" //! serde = "^1.0" //! serde_json = "^1.0" //! yup-oauth2 = "^5.0" //! ``` //! //! ## A complete example //! //! ```test_harness,no_run //! extern crate hyper; //! extern crate hyper_rustls; //! extern crate yup_oauth2 as oauth2; //! extern crate google_gmailpostmastertools1 as gmailpostmastertools1; //! use gmailpostmastertools1::{Result, Error}; //! # async fn dox() { //! use std::default::Default; //! use oauth2; //! use gmailpostmastertools1::PostmasterTools; //! //! // Get an ApplicationSecret instance by some means. It contains the `client_id` and //! // `client_secret`, among other things. //! let secret: oauth2::ApplicationSecret = Default::default(); //! // Instantiate the authenticator. 
It will choose a suitable authentication flow for you, //! // unless you replace `None` with the desired Flow. //! // Provide your own `AuthenticatorDelegate` to adjust the way it operates and get feedback about //! // what's going on. You probably want to bring in your own `TokenStorage` to persist tokens and //! // retrieve them from storage. //! let auth = yup_oauth2::InstalledFlowAuthenticator::builder( //! secret, //! yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect, //! ).build().await.unwrap(); //! let mut hub = PostmasterTools::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth); //! // You can configure optional parameters by calling the respective setters at will, and //! // execute the final call using `doit()`. //! // Values shown here are possibly random and not representative ! //! let result = hub.domains().traffic_stats_list("parent") //! .start_date_year(-52) //! .start_date_month(-20) //! .start_date_day(-55) //! .page_token("gubergren") //! .page_size(-51) //! .end_date_year(-12) //! .end_date_month(-75) //! .end_date_day(-4) //! .doit().await; //! //! match result { //! Err(e) => match e { //! // The Error enum provides details about what exactly happened. //! // You can also just use its `Debug`, `Display` or `Error` traits //! Error::HttpError(_) //! |Error::Io(_) //! |Error::MissingAPIKey //! |Error::MissingToken(_) //! |Error::Cancelled //! |Error::UploadSizeLimitExceeded(_, _) //! |Error::Failure(_) //! |Error::BadRequest(_) //! |Error::FieldClash(_) //! |Error::JsonDecodeError(_, _) => println!("{}", e), //! }, //! Ok(res) => println!("Success: {:?}", res), //! } //! # } //! ``` //! ## Handling Errors //! //! All errors produced by the system are provided either as [Result](client::Result) enumeration as return value of //! the doit() methods, or handed as possibly intermediate results to either the //! 
[Hub Delegate](client::Delegate), or the [Authenticator Delegate](https://docs.rs/yup-oauth2/*/yup_oauth2/trait.AuthenticatorDelegate.html). //! //! When delegates handle errors or intermediate values, they may have a chance to instruct the system to retry. This //! makes the system potentially resilient to all kinds of errors. //! //! ## Uploads and Downloads //! If a method supports downloads, the response body, which is part of the [Result](client::Result), should be //! read by you to obtain the media. //! If such a method also supports a [Response Result](client::ResponseResult), it will return that by default. //! You can see it as meta-data for the actual media. To trigger a media download, you will have to set up the builder by making //! this call: `.param("alt", "media")`. //! //! Methods supporting uploads can do so using up to 2 different protocols: //! *simple* and *resumable*. The distinctiveness of each is represented by customized //! `doit(...)` methods, which are then named `upload(...)` and `upload_resumable(...)` respectively. //! //! ## Customization and Callbacks //! //! You may alter the way an `doit()` method is called by providing a [delegate](client::Delegate) to the //! [Method Builder](client::CallBuilder) before making the final `doit()` call. //! Respective methods will be called to provide progress information, as well as determine whether the system should //! retry on failure. //! //! The [delegate trait](client::Delegate) is default-implemented, allowing you to customize it with minimal effort. //! //! ## Optional Parts in Server-Requests //! //! All structures provided by this library are made to be [encodable](client::RequestValue) and //! [decodable](client::ResponseResult) via *json*. Optionals are used to indicate that partial requests are responses //! are valid. //! Most optionals are are considered [Parts](client::Part) which are identifiable by name, which will be sent to //! 
the server to indicate either the set parts of the request or the desired parts in the response. //! //! ## Builder Arguments //! //! Using [method builders](client::CallBuilder), you are able to prepare an action call by repeatedly calling it's methods. //! These will always take a single argument, for which the following statements are true. //! //! * [PODs][wiki-pod] are handed by copy //! * strings are passed as `&str` //! * [request values](client::RequestValue) are moved //! //! Arguments will always be copied or cloned into the builder, to make them independent of their original life times. //! //! [wiki-pod]: http://en.wikipedia.org/wiki/Plain_old_data_structure //! [builder-pattern]: http://en.wikipedia.org/wiki/Builder_pattern //! [google-go-api]: https://github.com/google/google-api-go-client //! //! // Unused attributes happen thanks to defined, but unused structures // We don't warn about this, as depending on the API, some data structures or facilities are never used. // Instead of pre-determining this, we just disable the lint. It's manually tuned to not have any // unused imports in fully featured APIs. Same with unused_mut ... . #![allow(unused_imports, unused_mut, dead_code)] // DO NOT EDIT ! // This file was generated automatically from 'src/mako/api/lib.rs.mako' // DO NOT EDIT ! #[macro_use] extern crate serde_derive; extern crate hyper; extern crate serde; extern crate serde_json; extern crate yup_oauth2 as oauth2; extern crate mime; extern crate url; pub mod api; pub mod client; // Re-export the hub type and some basic client structs pub use api::PostmasterTools; pub use client::{Result, Error, Delegate};
44.840183
247
0.674949
753bbede84dec008a7435ba4896d1a595d508e84
509
//Entrypoint to the program use solana_program::{ pubkey::Pubkey, account_info::AccountInfo, entrypoint, entrypoint::ProgramResult, msg }; entrypoint!(process_instruction); fn process_instruction( program_id: &Pubkey, accounts: &[AccountInfo], instruction_data: &[u8] ) -> ProgramResult { msg!( "Process Instruction: {}: {} accounts, data = {:?}", program_id, accounts.len(), instruction_data ); Ok(()) }
19.576923
61
0.585462
18b22d11f018148aef5d527cc86ae6753a19b9ce
11,618
use mpstthree::binary::struct_trait::{end::End, recv::Recv, send::Send, session::Session}; use mpstthree::role::broadcast::RoleBroadcast; use mpstthree::role::end::RoleEnd; use mpstthree::{ bundle_struct_fork_close_multi, create_fn_choose_mpst_multi_to_all_bundle, create_multiple_normal_role_short, create_recv_http_session_bundle, create_send_mpst_http_bundle, offer_http_mpst, }; use hyper::Request; use rand::{thread_rng, Rng}; use std::error::Error; use std::marker; // See the folder scribble_protocols for the Scribble protocol // Create the new MeshedChannels for three participants and the close and fork functions bundle_struct_fork_close_multi!(close_mpst_multi, fork_mpst, MeshedChannelsThree, 3); // Create new roles // normal create_multiple_normal_role_short!(A, C, S); // Create new send functions // A create_send_mpst_http_bundle!( send_http_a_to_c, RoleC, 1 | send_http_a_to_s, RoleS, 2 | => RoleA, MeshedChannelsThree, 3 ); // C create_send_mpst_http_bundle!( send_http_c_to_a, RoleA, 1 | send_http_c_to_s, RoleS, 2 | => RoleC, MeshedChannelsThree, 3 ); // S create_send_mpst_http_bundle!( send_http_s_to_a, RoleA, 1 | send_http_s_to_c, RoleC, 2 | => RoleS, MeshedChannelsThree, 3 ); // Create new recv functions and related types // A create_recv_http_session_bundle!( recv_http_a_to_c, RoleC, 1 | recv_http_a_to_s, RoleS, 2 | => RoleA, MeshedChannelsThree, 3 ); // C create_recv_http_session_bundle!( recv_http_c_to_a, RoleA, 1 | recv_http_c_to_s, RoleS, 2 | => RoleC, MeshedChannelsThree, 3 ); // S create_recv_http_session_bundle!( recv_http_s_to_a, RoleA, 1 | recv_http_s_to_c, RoleC, 2 | => RoleS, MeshedChannelsThree, 3 ); // Names type NameA = RoleA<RoleEnd>; type NameC = RoleC<RoleEnd>; type NameS = RoleS<RoleEnd>; // Types // S type Choose2fromStoA<N> = Send<Branching2fromStoA<N>, End>; type Choose2fromStoC<N> = Send<Branching2fromStoC<N>, End>; type Choice0fromAtoS<N> = <Choose0fromAtoS<N> as Session>::Dual; type Choice1fromCtoS<N> = <Choose1fromCtoS<N> as 
Session>::Dual; // C type Choose1fromCtoA<N> = Send<Branching1fromCtoA<N>, End>; type Choose1fromCtoS<N> = Send<Branching1fromCtoS<N>, End>; type Choice0fromAtoC<N> = <Choose0fromAtoC<N> as Session>::Dual; type Choice2fromStoC<N> = <Choose2fromStoC<N> as Session>::Dual; // A type Choose0fromAtoC<N> = Send<Branching0fromAtoC<N>, End>; type Choose0fromAtoS<N> = Send<Branching0fromAtoS<N>, End>; type Choice1fromCtoA<N> = <Choose1fromCtoA<N> as Session>::Dual; type Choice2fromStoA<N> = <Choose2fromStoA<N> as Session>::Dual; // A type EndpointAAuth<N> = MeshedChannelsThree<Send<N, Choice1fromCtoA<N>>, End, RoleC<RoleC<RoleEnd>>, NameA>; type EndpointAAuthLoop<N> = MeshedChannelsThree<Choice1fromCtoA<N>, End, RoleC<RoleEnd>, NameA>; type EndpointADone<N> = MeshedChannelsThree<Send<N, End>, End, RoleC<RoleEnd>, NameA>; enum Branching1fromCtoA<N: marker::Send> { Continue( MeshedChannelsThree< End, Recv<N, Send<N, Choice2fromStoA<N>>>, RoleS<RoleS<RoleS<RoleEnd>>>, NameA, >, ), Close(MeshedChannelsThree<End, Recv<N, End>, RoleS<RoleEnd>, NameA>), } type EndpointAContinue<N> = MeshedChannelsThree<End, Choice2fromStoA<N>, RoleS<RoleEnd>, NameA>; enum Branching2fromStoA<N: marker::Send> { Picture(MeshedChannelsThree<Choice1fromCtoA<N>, End, RoleC<RoleEnd>, NameA>), Refusal(MeshedChannelsThree<Choice1fromCtoA<N>, End, RoleC<RoleEnd>, NameA>), } // C enum Branching0fromAtoC<N: marker::Send> { Auth( MeshedChannelsThree< Recv<N, Choose1fromCtoA<N>>, Choose1fromCtoS<N>, RoleA<RoleBroadcast>, NameC, >, ), Done(MeshedChannelsThree<Recv<N, End>, Send<N, End>, RoleA<RoleS<RoleEnd>>, NameC>), } type EndpointCContinue<N> = MeshedChannelsThree<End, Send<N, Choice2fromStoC<N>>, RoleS<RoleS<RoleEnd>>, NameC>; type EndpointCContinueLoop<N> = MeshedChannelsThree<Choose1fromCtoA<N>, Choose1fromCtoS<N>, RoleBroadcast, NameC>; type EndpointCDone<N> = MeshedChannelsThree<End, Send<N, End>, RoleS<RoleEnd>, NameC>; enum Branching2fromStoC<N: marker::Send> { Picture( MeshedChannelsThree< 
Choose1fromCtoA<N>, Recv<N, Choose1fromCtoS<N>>, RoleS<RoleBroadcast>, NameC, >, ), Refusal( MeshedChannelsThree< Choose1fromCtoA<N>, Recv<N, Choose1fromCtoS<N>>, RoleS<RoleBroadcast>, NameC, >, ), } type EndpointCPicture<N> = MeshedChannelsThree<End, Choice2fromStoC<N>, RoleS<RoleEnd>, NameC>; // S enum Branching0fromAtoS<N: marker::Send> { Auth(MeshedChannelsThree<End, Choice1fromCtoS<N>, RoleC<RoleEnd>, NameS>), Done(MeshedChannelsThree<End, Recv<N, End>, RoleC<RoleEnd>, NameS>), } type EndpointSContinue<N> = MeshedChannelsThree<End, Choice1fromCtoS<N>, RoleC<RoleEnd>, NameS>; enum Branching1fromCtoS<N: marker::Send> { Continue( MeshedChannelsThree< Send<N, Recv<N, Choose2fromStoA<N>>>, Recv<N, Choose2fromStoC<N>>, RoleC<RoleA<RoleA<RoleBroadcast>>>, NameS, >, ), Close(MeshedChannelsThree<Send<N, End>, Recv<N, End>, RoleC<RoleA<RoleEnd>>, NameS>), } type EndpointSContinueLoop<N> = MeshedChannelsThree<Choose2fromStoA<N>, Choose2fromStoC<N>, RoleBroadcast, NameS>; type EndpointSPicture<N> = MeshedChannelsThree<End, Send<N, Choice1fromCtoS<N>>, RoleC<RoleC<RoleEnd>>, NameS>; type EndpointSRefusal<N> = MeshedChannelsThree<End, Send<N, Choice1fromCtoS<N>>, RoleC<RoleC<RoleEnd>>, NameS>; // Creating the MP sessions // A type EndpointA<N> = MeshedChannelsThree< Recv<N, Choose0fromAtoC<N>>, Choose0fromAtoS<N>, RoleC<RoleBroadcast>, NameA, >; // C type EndpointC<N> = MeshedChannelsThree<Send<N, Choice0fromAtoC<N>>, End, RoleA<RoleA<RoleEnd>>, NameC>; // S type EndpointS<N> = MeshedChannelsThree<Choice0fromAtoS<N>, End, RoleA<RoleEnd>, NameS>; create_fn_choose_mpst_multi_to_all_bundle!( auth_from_a_to_all, again_from_a_to_all, => Auth, Done, => EndpointAAuth<i32>, EndpointADone<i32>, => Branching0fromAtoC::<i32>, Branching0fromAtoS::<i32>, => RoleC, RoleS, => RoleA, MeshedChannelsThree, 1 ); create_fn_choose_mpst_multi_to_all_bundle!( continue_from_c_to_all, close_from_c_to_all, => Continue, Close, => EndpointCContinue<i32>, EndpointCDone<i32>, => 
Branching1fromCtoA::<i32>, Branching1fromCtoS::<i32>, => RoleA, RoleS, => RoleC, MeshedChannelsThree, 2 ); create_fn_choose_mpst_multi_to_all_bundle!( picture_from_s_to_all, refusal_from_s_to_all, => Picture, Refusal, => EndpointSPicture<i32>, EndpointSRefusal<i32>, => Branching2fromStoA::<i32>, Branching2fromStoC::<i32>, => RoleA, RoleC, => RoleS, MeshedChannelsThree, 3 ); // Functions fn endpoint_a(s: EndpointA<i32>) -> Result<(), Box<dyn Error>> { let (pwd, s, _resp) = recv_http_a_to_c(s, true, Vec::new())?; // Should fail because true but Vec::new() let expected = thread_rng().gen_range(1..=3); if pwd == expected { let s = auth_from_a_to_all(s); let (s, _req) = send_http_a_to_c(0, s, false, Request::default())?; auth_a(s) } else { let s = again_from_a_to_all(s); let (s, _req) = send_http_a_to_c(1, s, false, Request::default())?; close_mpst_multi(s) } } fn auth_a(s: EndpointAAuthLoop<i32>) -> Result<(), Box<dyn Error>> { offer_http_mpst!(s, recv_http_a_to_c, { Branching1fromCtoA::Continue(s) => { let (_, s, _resp) = recv_http_a_to_s(s, false, Vec::new())?; let (s, _req) = send_http_a_to_s(0, s, false, Request::default())?; continue_a(s) }, Branching1fromCtoA::Close(s) => { let (_, s, _resp) = recv_http_a_to_s(s, false, Vec::new())?; close_mpst_multi(s) }, }) } fn continue_a(s: EndpointAContinue<i32>) -> Result<(), Box<dyn Error>> { offer_http_mpst!(s, recv_http_a_to_s, { Branching2fromStoA::Picture(s) => { auth_a(s) }, Branching2fromStoA::Refusal(s) => { auth_a(s) }, }) } fn endpoint_c(s: EndpointC<i32>) -> Result<(), Box<dyn Error>> { let (s, _req) = send_http_c_to_a(0, s, false, Request::default())?; offer_http_mpst!(s, recv_http_c_to_a, { Branching0fromAtoC::<i32>::Done(s) => { let (_quit, s, _resp) = recv_http_c_to_a(s, false, Vec::new())?; let (s, _req) = send_http_c_to_s(0, s, false, Request::default())?; close_mpst_multi(s) }, Branching0fromAtoC::<i32>::Auth(s) => { let (_quit, s, _resp) = recv_http_c_to_a(s, false, Vec::new())?; continue_c(s) }, }) } fn 
continue_c(s: EndpointCContinueLoop<i32>) -> Result<(), Box<dyn Error>> { let choice = thread_rng().gen_range(1..=6); if choice == 1 { let s = close_from_c_to_all(s); let (s, _req) = send_http_c_to_s(0, s, false, Request::default())?; close_mpst_multi(s) } else { let s = continue_from_c_to_all(s); let (s, _req) = send_http_c_to_s(0, s, false, Request::default())?; picture_c(s) } } fn picture_c(s: EndpointCPicture<i32>) -> Result<(), Box<dyn Error>> { offer_http_mpst!(s, recv_http_c_to_s, { Branching2fromStoC::<i32>::Picture(s) => { let (_quit, s, _resp) = recv_http_c_to_s(s, false, Vec::new())?; continue_c(s) }, Branching2fromStoC::<i32>::Refusal(s) => { let (_quit, s, _resp) = recv_http_c_to_s(s, false, Vec::new())?; continue_c(s) }, }) } fn endpoint_s(s: EndpointS<i32>) -> Result<(), Box<dyn Error>> { offer_http_mpst!(s, recv_http_s_to_a, { Branching0fromAtoS::<i32>::Done(s) => { let (_quit, s, _resp) = recv_http_s_to_c(s, false, Vec::new())?; close_mpst_multi(s) }, Branching0fromAtoS::<i32>::Auth(s) => { continue_s(s) }, }) } fn continue_s(s: EndpointSContinue<i32>) -> Result<(), Box<dyn Error>> { offer_http_mpst!(s, recv_http_s_to_c, { Branching1fromCtoS::<i32>::Continue(s) => { let (_quit, s, _resp) = recv_http_s_to_c(s, false, Vec::new())?; let (s, _req) = send_http_s_to_a(0, s, false, Request::default())?; let (_quit, s, _resp) = recv_http_s_to_a(s, false, Vec::new())?; picture_s(s) }, Branching1fromCtoS::<i32>::Close(s) => { let (_quit, s, _resp) = recv_http_s_to_c(s, false, Vec::new())?; let (s, _req) = send_http_s_to_a(0, s, false, Request::default())?; close_mpst_multi(s) }, }) } fn picture_s(s: EndpointSContinueLoop<i32>) -> Result<(), Box<dyn Error>> { let choice = thread_rng().gen_range(1..=6); if choice == 1 { let s = refusal_from_s_to_all(s); let (s, _req) = send_http_s_to_c(0, s, false, Request::default())?; continue_s(s) } else { let s = picture_from_s_to_all(s); let (s, _req) = send_http_s_to_c(0, s, false, Request::default())?; continue_s(s) } } 
///////////////////////// pub fn main() { let (thread_a, thread_c, thread_s) = fork_mpst(endpoint_a, endpoint_c, endpoint_s); assert!(thread_a.join().is_err()); assert!(thread_c.join().is_err()); assert!(thread_s.join().is_err()); }
30.020672
108
0.638922
281621386a70ef230c23be78472134c577452d96
12,028
use bit_array::BitArray; use typenum::U128; fn state2unocc(state: &Vec<isize>, M: isize) -> Vec<isize> { let mut stateunocc = Vec::new(); for p in 1..M + 1 { if !state.iter().any(|&i| i == p) { stateunocc.push(p) } } stateunocc } pub fn bit_slaterdeterminants( excitation: String, n: usize, m: usize, truncation: usize, ) -> Vec<BitArray<u64, U128>> { fn creatinitialstate(excite: String, n: usize) -> Vec<Vec<isize>> { let mut stateout = Vec::new(); if excite == "Singlet" { if n % 2 == 0 { let mut stateup = Vec::new(); let mut statedown = Vec::new(); for i in num_iter::range(0, n / 2) { stateup.push((i + 1) as isize); statedown.push((i + 1) as isize); } stateout.push(stateup); stateout.push(statedown); return stateout; } else { let mut stateup = Vec::new(); let mut statedown = Vec::new(); for i in num_iter::range(0, n / 2) { stateup.push((i + 1) as isize); statedown.push((i + 1) as isize); } stateup.push((n / 2 + 1) as isize); stateout.push(stateup); stateout.push(statedown); return stateout; } } if excite == "Triplet" { //Left for Future! 
if n % 2 == 0 { let mut stateup = Vec::new(); let mut statedown = Vec::new(); for i in num_iter::range(0, n / 2) { stateup.push((i + 1) as isize); statedown.push((i + 1) as isize); } stateout.push(stateup); stateout.push(statedown); return stateout; } else { let mut stateup = Vec::new(); let mut statedown = Vec::new(); for i in num_iter::range(0, n / 2) { stateup.push((i + 1) as isize); statedown.push((i + 1) as isize); } stateup.push((n / 2 + 1) as isize); stateout.push(stateup); stateout.push(statedown); return stateout; } } stateout } fn odometer(state: Vec<isize>, n: isize, m: isize) -> Vec<isize> { let mut newstate = state; for j in num_iter::range_step(n - 1, -1, -1) { if newstate[j as usize] < m + 1 - n + j { let l = newstate[j as usize]; for k in num_iter::range(j, n) { newstate[k as usize] = l + 1 + k - j; } if newstate[j as usize] != l { return newstate; } } } newstate.iter_mut().for_each(|x| *x = 0); newstate } fn compare(state: Vec<isize>, ground: Vec<isize>) -> usize { let mut numberofexited = 0; for i in &state { if !ground.contains(&i) { numberofexited += 1; } } numberofexited } fn createbinarystatearray(state: Vec<isize>) -> BitArray<u64, U128> { let mut binstate = BitArray::<u64, U128>::from_elem(false); for i in state { let k: usize = (i - 1) as usize; binstate.set(k, true); } binstate } fn mix(state1: Vec<isize>, state2: Vec<isize>) -> Vec<isize> { let mut state = Vec::new(); for i in state1 { state.push(2 * i - 1); } for i in state2 { state.push(2 * i); } state } fn createslaterdeterminants_t( n: usize, m: usize, excite: String, t: usize, ) -> Vec<BitArray<u64, U128>> { let mut binstates = Vec::new(); let N: usize; if n % 2 == 0 { N = n / 2; } else { N = n / 2 + 1; } let mut stateup = creatinitialstate(excite.to_string(), n as usize)[0].clone(); let mut statedown = creatinitialstate(excite.to_string(), n as usize)[1].clone(); let mut statesup = Vec::new(); statesup.push(stateup.clone()); let mut statesdown = Vec::new(); 
statesdown.push(statedown.clone()); let mut up = true; let mut down = true; let ground = mix(statesup[0].to_vec(), statesdown[0].to_vec()); match t { 0 => { while up { stateup = odometer(stateup, N as isize, m as isize); let sm: isize = stateup.iter().sum(); if sm == 0 { up = false; } else { statesup.push(stateup.clone()); } } while down { statedown = odometer(statedown, (n / 2) as isize, m as isize); let sm: isize = statedown.iter().sum(); if sm == 0 { down = false; } else { statesdown.push(statedown.clone()); } } for i in statesup { for j in &statesdown { let state = mix(i.to_vec(), j.to_vec()); let binstate = createbinarystatearray(state); binstates.push(binstate); } } binstates } 1 => { for i in &stateup { for j in state2unocc(&stateup, m as isize) { let mut state = stateup.clone(); let stateD = statedown.clone(); state.retain(|&x| x != *i); state.push(j); let statemix = mix(state, stateD); let binstate = createbinarystatearray(statemix); binstates.push(binstate) } } for k in &statedown { for l in state2unocc(&statedown, m as isize) { let mut state = statedown.clone(); let stateU = stateup.clone(); state.retain(|&x| x != *k); state.push(l); let statemix = mix(stateU, state); let binstate = createbinarystatearray(statemix); binstates.push(binstate) } } binstates.push(createbinarystatearray(mix(stateup, statedown))); binstates } 2 => { for i in &stateup { for j in state2unocc(&stateup, m as isize) { let mut state = stateup.clone(); let stateD = statedown.clone(); state.retain(|&x| x != *i); state.push(j); let statemix = mix(state, stateD); let binstate = createbinarystatearray(statemix); binstates.push(binstate) } } for k in &statedown { for l in state2unocc(&statedown, m as isize) { let mut state = statedown.clone(); let stateU = stateup.clone(); state.retain(|&x| x != *k); state.push(l); let statemix = mix(stateU, state); let binstate = createbinarystatearray(statemix); binstates.push(binstate) } } for i in &stateup { for j in state2unocc(&stateup, m as isize) 
{ for k in &statedown { for l in state2unocc(&statedown, m as isize) { let mut stateU = stateup.clone(); let mut stateD = statedown.clone(); stateU.retain(|&x| x != *i); stateU.push(j); stateD.retain(|&x| x != *k); stateD.push(l); let mut state = mix(stateU, stateD).clone(); let binstate = createbinarystatearray(state); binstates.push(binstate) } } } } let combup = combination::combine::from_vec_at(&stateup, 2); let combupunocc = combination::combine::from_vec_at(&state2unocc(&stateup, m as isize), 2); for i in combup { for j in &combupunocc { let mut state = stateup.clone(); let stateD = statedown.clone(); state.retain(|&x| x != i[0]); state.push(j[0]); state.retain(|&x| x != i[1]); state.push(j[1]); let statemix = mix(state, stateD); let binstate = createbinarystatearray(statemix); binstates.push(binstate) } } let combdown = combination::combine::from_vec_at(&statedown, 2); let combdownunocc = combination::combine::from_vec_at(&state2unocc(&statedown, m as isize), 2); for i in combdown { for j in &combdownunocc { let mut state = statedown.clone(); let stateU = stateup.clone(); state.retain(|&x| x != i[0]); state.push(j[0]); state.retain(|&x| x != i[1]); state.push(j[1]); let statemix = mix(stateU, state); let binstate = createbinarystatearray(statemix); binstates.push(binstate) } } binstates.push(createbinarystatearray(mix(stateup, statedown))); binstates } _ => { while up { stateup = odometer(stateup, N as isize, m as isize); let sm: isize = stateup.iter().sum(); if sm == 0 { up = false; } else if compare(stateup.clone(), ground.clone()) < t + 1 { statesup.push(stateup.clone()); } } while down { statedown = odometer(statedown, (n / 2) as isize, m as isize); let sm: isize = statedown.iter().sum(); if sm == 0 { down = false; } else if compare(statedown.clone(), ground.clone()) < t + 1 { statesdown.push(statedown.clone()); } } for i in statesup { for j in &statesdown { let state = mix(i.to_vec(), j.to_vec()); if compare(state.clone(), ground.clone()) < t + 1 { let 
binstate = createbinarystatearray(state); binstates.push(binstate); } } } binstates } } } let excite = excitation; createslaterdeterminants_t(n, m, excite, truncation) }
38.305732
95
0.403808
e6a313ae0d9c3e9b67e25148aed4d5db4950baf9
4,214
use std::{ convert::TryFrom, ffi::OsStr, fmt::Display, path::{Path, PathBuf}, }; use CompressionFormat::*; use crate::utils; /// Represents the extension of a file, but only really caring about /// compression formats (and .tar). /// Ex.: Extension::new("file.tar.gz") == Extension { first_ext: Some(Tar), second_ext: Gzip } #[derive(Clone, Debug, PartialEq, Eq)] pub struct Extension { pub first_ext: Option<CompressionFormat>, pub second_ext: CompressionFormat, } pub fn get_extension_from_filename(file_name: &OsStr) -> Option<(&OsStr, &OsStr)> { let path = Path::new(file_name); let ext = path.extension()?; let previous_extension = path.file_stem().and_then(get_extension_from_filename); if let Some((_, prev)) = previous_extension { Some((prev, ext)) } else { Some((OsStr::new(""), ext)) } } impl From<CompressionFormat> for Extension { fn from(second_ext: CompressionFormat) -> Self { Self { first_ext: None, second_ext } } } impl Extension { pub fn from(file_name: &OsStr) -> crate::Result<Self> { let compression_format_from = |ext: &OsStr| match ext { _ if ext == "zip" => Ok(Zip), _ if ext == "tar" => Ok(Tar), _ if ext == "gz" => Ok(Gzip), _ if ext == "bz" || ext == "bz2" => Ok(Bzip), _ if ext == "xz" || ext == "lz" || ext == "lzma" => Ok(Lzma), other => Err(crate::Error::UnknownExtensionError(utils::to_utf(other))), }; let (first_ext, second_ext) = match get_extension_from_filename(&file_name) { Some(extension_tuple) => match extension_tuple { (os_str, snd) if os_str.is_empty() => (None, snd), (fst, snd) => (Some(fst), snd), }, None => return Err(crate::Error::MissingExtensionError(PathBuf::from(file_name))), }; let (first_ext, second_ext) = match (first_ext, second_ext) { (None, snd) => { let ext = compression_format_from(snd)?; (None, ext) }, (Some(fst), snd) => { let snd = compression_format_from(snd)?; let fst = compression_format_from(fst).ok(); (fst, snd) }, }; Ok(Self { first_ext, second_ext }) } } #[derive(Clone, PartialEq, Eq, Debug)] /// Accepted extensions 
for input and output pub enum CompressionFormat { Gzip, // .gz Bzip, // .bz Lzma, // .lzma Tar, // .tar (technically not a compression extension, but will do for now) Zip, // .zip } fn extension_from_os_str(ext: &OsStr) -> Result<CompressionFormat, crate::Error> { // let ext = Path::new(ext); let ext = match ext.to_str() { Some(str) => str, None => return Err(crate::Error::InvalidUnicode), }; match ext { "zip" => Ok(Zip), "tar" => Ok(Tar), "gz" => Ok(Gzip), "bz" | "bz2" => Ok(Bzip), "xz" | "lzma" | "lz" => Ok(Lzma), other => Err(crate::Error::UnknownExtensionError(other.into())), } } impl TryFrom<&PathBuf> for CompressionFormat { type Error = crate::Error; fn try_from(ext: &PathBuf) -> Result<Self, Self::Error> { let ext = match ext.extension() { Some(ext) => ext, None => { return Err(crate::Error::MissingExtensionError(PathBuf::new())); }, }; extension_from_os_str(ext) } } impl TryFrom<&str> for CompressionFormat { type Error = crate::Error; fn try_from(file_name: &str) -> Result<Self, Self::Error> { let file_name = Path::new(file_name); let ext = match file_name.extension() { Some(ext) => ext, None => return Err(crate::Error::MissingExtensionError(PathBuf::new())), }; extension_from_os_str(ext) } } impl Display for CompressionFormat { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{}", match self { Gzip => ".gz", Bzip => ".bz", Lzma => ".lz", Tar => ".tar", Zip => ".zip", }) } }
29.468531
94
0.546512
5b391bc8d5958cbc71a4cfcc64b21b297101ccce
432
fn main() { let pair = (0, -2); // TODO ^ `pair`に別の値を入れてみましょう。 println!("Tell me about {:?}", pair); // `match`を用いてタプルをデストラクトしてみましょう。 match pair { // 2つ目の値をデストラクト (0, y) => println!("First is `0` and `y` is `{:?}`", y), (x, 0) => println!("`x` is `{:?}` and last is `0`", x), _ => println!("It doesn't matter what they are"), // ここでは`_`は、値を変数に束縛しないことを意味します。 } }
28.8
64
0.483796
56fd035d0ab464ba10633d1a0e92d8d95d7aeac3
61
pub mod config; pub mod instrument; #[cfg(test)] mod tests;
10.166667
19
0.704918
0141d2a20cd4d20a830aff8b2c113b6f40299879
146
pub mod add; pub mod cameras; pub mod debug_settings; pub mod diagnostics; pub mod hierarchy; pub mod inspector; pub mod scenes; pub mod windows;
16.222222
23
0.780822
5b03541a1171cdcc4b385af7e33cda9521975e18
2,731
use crate::types::*; use crate::errors::*; use uuid::Uuid; /// Represents a basic group of 0-200 users (must be upgraded to a supergroup to accommodate more than 200 users) #[derive(Debug, Clone, Default, Serialize, Deserialize)] pub struct BasicGroup { #[doc(hidden)] #[serde(rename(serialize = "@type", deserialize = "@type"))] td_name: String, #[doc(hidden)] #[serde(rename(serialize = "@extra", deserialize = "@extra"))] extra: Option<String>, /// Group identifier id: i64, /// Number of members in the group member_count: i64, /// Status of the current user in the group status: ChatMemberStatus, /// True, if the group is active is_active: bool, /// Identifier of the supergroup to which this group was upgraded; 0 if none upgraded_to_supergroup_id: i64, } impl RObject for BasicGroup { #[doc(hidden)] fn td_name(&self) -> &'static str { "basicGroup" } #[doc(hidden)] fn extra(&self) -> Option<String> { self.extra.clone() } fn to_json(&self) -> RTDResult<String> { Ok(serde_json::to_string(self)?) } } impl BasicGroup { pub fn from_json<S: AsRef<str>>(json: S) -> RTDResult<Self> { Ok(serde_json::from_str(json.as_ref())?) 
} pub fn builder() -> RTDBasicGroupBuilder { let mut inner = BasicGroup::default(); inner.td_name = "basicGroup".to_string(); inner.extra = Some(Uuid::new_v4().to_string()); RTDBasicGroupBuilder { inner } } pub fn id(&self) -> i64 { self.id } pub fn member_count(&self) -> i64 { self.member_count } pub fn status(&self) -> &ChatMemberStatus { &self.status } pub fn is_active(&self) -> bool { self.is_active } pub fn upgraded_to_supergroup_id(&self) -> i64 { self.upgraded_to_supergroup_id } } #[doc(hidden)] pub struct RTDBasicGroupBuilder { inner: BasicGroup } impl RTDBasicGroupBuilder { pub fn build(&self) -> BasicGroup { self.inner.clone() } pub fn id(&mut self, id: i64) -> &mut Self { self.inner.id = id; self } pub fn member_count(&mut self, member_count: i64) -> &mut Self { self.inner.member_count = member_count; self } pub fn status<T: AsRef<ChatMemberStatus>>(&mut self, status: T) -> &mut Self { self.inner.status = status.as_ref().clone(); self } pub fn is_active(&mut self, is_active: bool) -> &mut Self { self.inner.is_active = is_active; self } pub fn upgraded_to_supergroup_id(&mut self, upgraded_to_supergroup_id: i64) -> &mut Self { self.inner.upgraded_to_supergroup_id = upgraded_to_supergroup_id; self } } impl AsRef<BasicGroup> for BasicGroup { fn as_ref(&self) -> &BasicGroup { self } } impl AsRef<BasicGroup> for RTDBasicGroupBuilder { fn as_ref(&self) -> &BasicGroup { &self.inner } }
24.827273
113
0.666789
14e003788469860bee538052bca30a52c53cc016
906
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![feature(slice_patterns)] fn main() { let mut x: &[_] = &[1, 2, 3, 4]; let mut result = vec![]; loop { x = match *x { [1, n, 3, ref rest..] => { result.push(n); rest } [n, ref rest..] => { result.push(n); rest } [] => break } } assert_eq!(result, [2, 4]); }
26.647059
68
0.533113
216f5a87b7b33781240fd27cdc9fc759020cb6e7
2,871
use crate::common::*; use datamodel::{common::ScalarType, DefaultValue}; use prisma_value::PrismaValue; #[test] fn skipping_of_env_vars() { let dml = r#" datasource db { provider = "postgresql" url = env("POSTGRES_URL") } model User { id Int @id tags String[] } "#; // must fail without env var parse_error(dml); // must not fail when ignore flag is set if let Err(err) = datamodel::parse_datamodel_and_ignore_datasource_urls(dml) { panic!("Skipping env var errors did not work. Error was {:?}", err) } // must not fail with invalid env var set and ignore flag is set std::env::set_var("POSTGRES_URL", "mysql://"); // wrong protocol if let Err(err) = datamodel::parse_datamodel_and_ignore_datasource_urls(dml) { panic!("Skipping env var errors did not work. Error was {:?}", err) } // must not fail with correct env var set std::env::set_var("POSTGRES_URL", "postgresql://localhost:5432"); parse(dml); } #[ignore] #[test] fn interpolate_environment_variables() { let dml = r#" model User { id Int @id firstName String @default(env("TEST_USER")) lastName String } "#; std::env::set_var("TEST_USER", "prisma-user"); let schema = parse(dml); let user_model = schema.assert_has_model("User"); user_model.assert_is_embedded(false); user_model .assert_has_field("firstName") .assert_base_type(&ScalarType::String) .assert_default_value(DefaultValue::Single(PrismaValue::String(String::from("prisma-user")))); } // This is very useless, except being a good test case. 
#[ignore] #[test] fn interpolate_nested_environment_variables() { let dml = r#" model User { id Int @id firstName String @default(env(env("TEST_USER_VAR"))) lastName String } "#; std::env::set_var("TEST_USER_VAR", "TEST_USER"); std::env::set_var("TEST_USER", "prisma-user"); let schema = parse(dml); let user_model = schema.assert_has_model("User"); user_model.assert_is_embedded(false); user_model .assert_has_field("firstName") .assert_base_type(&ScalarType::String) .assert_default_value(DefaultValue::Single(PrismaValue::String(String::from("prisma-user")))); } #[ignore] #[test] fn ducktype_environment_variables() { let dml = r#" model User { id Int @id age Int @default(env("USER_AGE")) name String } "#; std::env::set_var("USER_AGE", "18"); let schema = parse(dml); let user_model = schema.assert_has_model("User"); user_model.assert_is_embedded(false); user_model .assert_has_field("age") .assert_base_type(&ScalarType::Int) .assert_default_value(DefaultValue::Single(PrismaValue::Int(18))); }
27.342857
102
0.635319
87a25e92ead1da7a55f15c198473ab56f1bb1c21
4,139
extern crate wasm_bindgen; extern crate web_sys; use wasm_bindgen::prelude::*; use wasm_bindgen::JsCast; use web_sys::Node; #[wasm_bindgen(raw_module = "../globals.js")] extern "wasm-bindgen" { #[wasm_bindgen(js_name = jsthunk)] fn js_thunk(); #[wasm_bindgen(js_name = add)] fn js_add(a: i32, b: i32) -> i32; pub type Foo; #[wasm_bindgen(method, final, js_name = bar)] fn bar_final(this: &Foo); #[wasm_bindgen(method, structural, js_name = bar)] fn bar_structural(this: &Foo); #[wasm_bindgen(js_name = jsthunk)] fn doesnt_throw(); #[wasm_bindgen(catch, js_name = jsthunk)] fn doesnt_throw_catch() -> Result<(), JsValue>; } #[wasm_bindgen] pub fn call_js_thunk_n_times(n: usize) { for _ in 0..n { js_thunk(); } } #[wasm_bindgen] pub fn call_js_add_n_times(n: usize, a: i32, b: i32) { for _ in 0..n { js_add(a, b); } } #[wasm_bindgen] pub fn thunk() {} #[wasm_bindgen] pub fn add(a: i32, b: i32) -> i32 { a + b } static mut FIB_HIGH: i32 = 0; #[wasm_bindgen] pub fn fibonacci(n: i32) -> i32 { let mut a = 1u64; let mut b = 1; for _ in 0..n { let tmp = b; b += a; a = tmp; } unsafe { FIB_HIGH = (a >> 32) as i32; } return a as i32; } #[wasm_bindgen] pub fn fibonacci_high() -> i32 { unsafe { FIB_HIGH } } #[wasm_bindgen] pub fn call_foo_bar_final_n_times(n: usize, foo: &Foo) { for _ in 0..n { foo.bar_final(); } } #[wasm_bindgen] pub fn call_foo_bar_structural_n_times(n: usize, foo: &Foo) { for _ in 0..n { foo.bar_structural(); } } #[wasm_bindgen] pub fn call_doesnt_throw_n_times(n: usize) { for _ in 0..n { doesnt_throw(); } } #[wasm_bindgen] pub fn call_doesnt_throw_with_catch_n_times(n: usize) { for _ in 0..n { if let Err(e) = doesnt_throw_catch() { wasm_bindgen::throw_val(e); } } } #[wasm_bindgen] extern "wasm-bindgen" { pub type Element; #[wasm_bindgen(method, js_name = firstChild, final, getter)] fn first_child_final(this: &Element) -> Element; #[wasm_bindgen(method, js_name = firstChild, structural, getter)] fn first_child_structural(this: &Element) -> Element; } #[wasm_bindgen] pub 
fn call_first_child_final_n_times(n: usize, element: &Element) { for _ in 0..n { drop(element.first_child_final()); } } #[wasm_bindgen] pub fn call_first_child_structural_n_times(n: usize, element: &Element) { for _ in 0..n { drop(element.first_child_structural()); } } #[wasm_bindgen] pub fn call_node_first_child_n_times(n: usize, elements: Vec<JsValue>) { for _ in 0..n { for element in elements.iter() { let element = element.unchecked_ref::<Node>(); assert!(element.first_child().is_some()); } } } #[wasm_bindgen] pub fn call_node_node_type_n_times(n: usize, elements: Vec<JsValue>) { for _ in 0..n { for element in elements.iter() { let element = element.unchecked_ref::<Node>(); assert!(element.node_type() != 100); } } } #[wasm_bindgen] pub fn call_node_has_child_nodes_n_times(n: usize, elements: Vec<JsValue>) { for _ in 0..n { for element in elements.iter() { let element = element.unchecked_ref::<Node>(); assert!(element.has_child_nodes()); } } } #[wasm_bindgen] pub fn count_node_types(element: Node) { let mut count = Vec::new(); count_node_types(element, &mut count); fn count_node_types(mut element: Node, count: &mut Vec<u32>) { loop { let t = element.node_type(); if t as usize >= count.len() { count.resize(t as usize + 1, 0); } count[t as usize] += 1; if let Some(s) = element.first_child() { count_node_types(s, count); } match element.next_sibling() { Some(s) => element = s, None => break, } } } } #[wasm_bindgen] pub fn str_roundtrip(s: String) -> String { s }
22.741758
76
0.588065
d9fe61f4cb0641662970b512426a805220146957
24,890
#![allow(dead_code)] #![allow(non_snake_case)] #![cfg_attr(test, allow(dead_code))] #![deny(deprecated)] #[cfg(not(test))] pub use base::{Node, ParseState, Data, Children, NodeContents, PreOrderNodes}; #[macro_use] mod base { pub use self::not::NotEx; pub use self::and::And; pub use self::fuse::Fuse; pub use self::char_class::CharClass; pub use self::literal::Literal; pub use self::dot::Dot; pub use self::option::OptionEx; pub use self::star::Star; pub use self::plus::Plus; pub use self::or::Or; pub use self::sequence::Sequence; pub use self::wrap::WrapEx; pub use self::node::{Node, NodeContents, Data, Children, PreOrderNodes}; mod node { use std::fmt; use std::str; pub use self::NodeContents::{Data, Children}; static NO_NAME : &'static str = "<none>"; pub struct PreOrderNodes<'a, 'b:'a> { queue: Vec<&'a Node<'b>> } impl<'a, 'b:'a> Iterator for PreOrderNodes<'a, 'b> { type Item = &'a Node<'b>; fn next( &mut self ) -> Option<&'a Node<'b>> { match self.queue.pop() { Some( node ) => { match node.contents { Children( ref x ) => { for child in x.iter().rev() { self.queue.push( child ) } } _ => () }; Some( node ) } _ => None } } } #[derive(Debug, PartialEq)] pub enum NodeContents<'a> { /// A `&[u8]` byte slice this node matched in the parse input. Only leaf nodes /// have `Data` contents. Data( &'a [u8] ), /// Children of the node, if any. Only non-leaf nodes have `Children` /// contents. Children( Vec<Node<'a>> ) } #[derive(PartialEq)] pub struct Node<'a> { /// The name of the node. pub name: &'static str, /// The (inclusive) start index of the range this node matches. It's the byte /// (NOT char) offset of the parse input. pub start: usize, /// The (exclusive) end index of the range this node matches. It's the byte /// (NOT char) offset of the parse input. pub end: usize, /// The contents of the node; this can be either children nodes or a matched /// `&[u8]` slice. 
pub contents: NodeContents<'a> } fn indent( formatter: &mut fmt::Formatter, indent_spaces: u32 ) -> fmt::Result { for _ in 0 .. indent_spaces { try!( write!( formatter, " " ) ) } Ok(()) } impl<'a> Node<'a> { fn format( &self, formatter: &mut fmt::Formatter, indent_spaces: u32 ) -> fmt::Result { try!( indent( formatter, indent_spaces ) ); try!( write!( formatter, "{0:?} [{1:?}, {2:?}>", self.displayName(), self.start, self.end ) ); match self.contents { Data( data ) => { match str::from_utf8( data ) { Ok( string ) => { try!( writeln!( formatter, ": \"{0:?}\"", string ) ); } _ => { try!( writeln!( formatter, ": \"{0:?}\"", data ) ); } } } Children( ref children ) => { try!( writeln!( formatter, "" ) ); for child in children.iter() { try!( child.format( formatter, indent_spaces + 1) ) } } }; Ok(()) } /// The node name if set, or "<none>" if unset. pub fn displayName( &self ) -> &'static str { if !self.name.is_empty() { self.name } else { NO_NAME } } /// Creates a `Node` with an empty name. pub fn withoutName( start: usize, end: usize, contents: NodeContents<'a> ) -> Node<'a> { Node { name: "", start: start, end: end, contents: contents } } /// Creates a `Node` with the provided `name` and makes it a parent of the /// provided `children`. pub fn withChildren( name: &'static str, mut children: Vec<Node<'a>> ) -> Node<'a> { if children.len() == 1 && children[ 0 ].name.is_empty() { match children.pop() { Some( mut child ) => { child.name = name; return child; } _ => () } } let start = if children.len() != 0 { children[ 0 ].start } else { 0 }; let end = children.last().map_or( 0, |node| node.end ); Node { name: name, start: start, end: end, contents: Children( children ) } } /// Traverses the tree rooted at the node with pre-order traversal. Includes /// the `self` node as the first node. 
#[allow(dead_code)] pub fn preOrder<'b>( &'b self ) -> PreOrderNodes<'b, 'a> { PreOrderNodes { queue: vec!( self ) } } /// Concatenates and returns all `&[u8]` data in the leaf nodes beneath /// the current node. #[allow(dead_code)] pub fn matchedData( &self ) -> Vec<u8> { match self.contents { Data( x ) => x.to_vec(), Children( ref children ) => { let mut out : Vec<u8> = vec!(); for child in children.iter() { out.extend( child.matchedData() ); } out } } } } impl<'a> fmt::Debug for Node<'a> { fn fmt( &self, formatter: &mut fmt::Formatter ) -> fmt::Result { self.format( formatter, 0 ) } } } #[cfg(test)] #[macro_use] pub mod test_utils { use base::ParseState; pub fn ToParseState<'a>( bytes: &'a [u8] ) -> ParseState<'a> { ParseState { input: bytes, offset: 0 } } macro_rules! input_state( ( $ex:expr ) => ( { use base::ParseState; ParseState { input: $ex.as_bytes(), offset: 0 } } ) ); } #[macro_use] mod literal { use super::{Expression, ParseState, ParseResult}; macro_rules! lit( ( $ex:expr ) => ( &base::Literal::new( $ex.as_bytes() ) ) ); pub struct Literal { text: &'static [u8] } impl Literal { pub fn new( text: &'static [u8] ) -> Literal { Literal { text: text } } } impl Expression for Literal { fn apply<'a>( &self, parse_state: &ParseState<'a> ) -> Option< ParseResult<'a> > { if parse_state.input.len() < self.text.len() || &parse_state.input[ .. self.text.len() ] != self.text { return None; } parse_state.offsetToResult( parse_state.offset + self.text.len() ) } } } #[macro_use] mod char_class { use base::unicode::{bytesFollowing, readCodepoint}; use super::{Expression, ParseState, ParseResult}; macro_rules! class( ( $ex:expr ) => ( &base::CharClass::new( $ex.as_bytes() ) ) ); fn toU32Vector( input: &[u8] ) -> Vec<u32> { let mut i = 0; let mut out_vec : Vec<u32> = vec!(); loop { match input.get( i ) { Some( byte ) => match bytesFollowing( *byte ) { Some( num_following ) => { if num_following > 0 { match readCodepoint( &input[ i.. 
] ) { Some( ch ) => { out_vec.push( ch as u32 ); i += num_following + 1 } _ => { out_vec.push( *byte as u32 ); i += 1 } }; } else { out_vec.push( *byte as u32 ); i += 1 } } _ => { out_vec.push( *byte as u32 ); i += 1 } }, _ => return out_vec } } } pub struct CharClass { single_chars: Vec<u32>, ranges: Vec<( u32, u32 )> } impl CharClass { pub fn new( contents: &[u8] ) -> CharClass { fn rangeAtIndex( index: usize, chars: &[u32] ) -> Option<( u32, u32 )> { match ( chars.get( index ), chars.get( index + 1 ), chars.get( index + 2 ) ) { ( Some( char1 ), Some( char2 ), Some( char3 ) ) if *char2 == '-' as u32 => Some( ( *char1, *char3 ) ), _ => None } } let chars = toU32Vector( &contents ); let mut char_class = CharClass { single_chars: Vec::new(), ranges: Vec::new() }; let mut index = 0; loop { match rangeAtIndex( index, &chars ) { Some( range ) => { char_class.ranges.push( range ); index += 3; } _ => { if index >= chars.len() { break } char_class.single_chars.push( chars[ index ] ); index += 1; } }; } char_class } fn matches( &self, character: u32 ) -> bool { return self.single_chars.contains( &character ) || self.ranges.iter().any( | &(from, to) | character >= from && character <= to ); } fn applyToUtf8<'a>( &self, parse_state: &ParseState<'a> ) -> Option< ParseResult<'a> > { match readCodepoint( parse_state.input ) { Some( ch ) if self.matches( ch as u32 ) => { let num_following = bytesFollowing( parse_state.input[ 0 ] ).unwrap(); parse_state.offsetToResult( parse_state.offset + num_following + 1 ) } _ => None } } fn applyToBytes<'a>( &self, parse_state: &ParseState<'a> ) -> Option< ParseResult<'a> > { match parse_state.input.get( 0 ) { Some( byte ) if self.matches( *byte as u32 ) => { parse_state.offsetToResult( parse_state.offset + 1 ) } _ => None } } } impl Expression for CharClass { fn apply<'a>( &self, parse_state: &ParseState<'a> ) -> Option< ParseResult<'a> > { self.applyToUtf8( parse_state ).or( self.applyToBytes( parse_state ) ) } } } #[macro_use] mod not { 
use super::{Expression, ParseState, ParseResult}; macro_rules! not( ( $ex:expr ) => ( &base::NotEx::new($ex) ); ); pub struct NotEx<'a> { expr: &'a ( Expression + 'a ) } impl<'a> NotEx<'a> { pub fn new( expr: &Expression ) -> NotEx { NotEx { expr: expr } } } impl<'b> Expression for NotEx<'b> { fn apply<'a>( &self, parse_state: &ParseState<'a> ) -> Option< ParseResult<'a> > { match self.expr.apply( parse_state ) { Some( _ ) => None, _ => Some( ParseResult::fromParseState( *parse_state ) ) } } } } #[macro_use] mod and { use super::{Expression, ParseState, ParseResult}; macro_rules! and( ( $ex:expr ) => ( &base::And::new( $ex ) ); ); pub struct And<'a> { expr: &'a ( Expression + 'a ) } impl<'a> And<'a> { pub fn new( expr: &Expression ) -> And { And { expr: expr } } } impl<'b> Expression for And<'b> { fn apply<'a>( &self, parse_state: &ParseState<'a> ) -> Option< ParseResult<'a> > { match self.expr.apply( parse_state ) { Some( _ ) => Some( ParseResult::fromParseState( *parse_state ) ), _ => None } } } } mod dot { use super::{Expression, ParseState, ParseResult}; use base::unicode::{bytesFollowing, readCodepoint}; pub struct Dot; impl Expression for Dot { fn apply<'a>( &self, parse_state: &ParseState<'a> ) -> Option< ParseResult<'a> > { match readCodepoint( parse_state.input ) { Some( _ ) => { let num_following = bytesFollowing( parse_state.input[ 0 ] ).unwrap(); return parse_state.offsetToResult( parse_state.offset + num_following + 1 ) } _ => () } match parse_state.input.get( 0 ) { Some( _ ) => parse_state.offsetToResult( parse_state.offset + 1 ), _ => None } } } } #[macro_use] mod option { use super::{Expression, ParseState, ParseResult}; macro_rules! 
opt( ( $ex:expr ) => ( &base::OptionEx::new( $ex ) ); ); pub struct OptionEx<'a> { expr: &'a ( Expression + 'a ) } impl<'a> OptionEx<'a> { pub fn new( expr: &Expression ) -> OptionEx { OptionEx { expr: expr } } } impl<'b> Expression for OptionEx<'b> { fn apply<'a>( &self, parse_state: &ParseState<'a> ) -> Option< ParseResult<'a> > { self.expr.apply( parse_state ).or( Some( ParseResult::fromParseState( *parse_state ) ) ) } } } #[macro_use] mod star { use super::{Expression, ParseState, ParseResult}; macro_rules! star( ( $ex:expr ) => ( &base::Star::new( $ex ) ); ); pub struct Star<'a> { expr: &'a ( Expression + 'a ) } impl<'b> Star<'b> { pub fn new( expr: &Expression ) -> Star { Star { expr: expr } } } impl<'b> Expression for Star<'b> { fn apply<'a>( &self, parse_state: &ParseState<'a> ) -> Option< ParseResult<'a> > { let mut final_result = ParseResult::fromParseState( *parse_state ); loop { match self.expr.apply( &final_result.parse_state ) { Some( result ) => { final_result.parse_state = result.parse_state; final_result.nodes.extend( result.nodes.into_iter() ); } _ => break } } Some( final_result ) } } } #[macro_use] mod plus { use super::{Expression, ParseState, ParseResult}; macro_rules! plus( ( $ex:expr ) => ( &base::Plus::new( $ex ) ); ); pub struct Plus<'a> { expr: &'a ( Expression + 'a ) } impl<'b> Plus<'b> { pub fn new( expr: &Expression ) -> Plus { Plus { expr: expr } } } impl<'b> Expression for Plus<'b> { fn apply<'a>( &self, parse_state: &ParseState<'a> ) -> Option< ParseResult<'a> > { let mut final_result = ParseResult::fromParseState( *parse_state ); let mut num_matches = 0; loop { match self.expr.apply( &final_result.parse_state ) { Some( result ) => { final_result.parse_state = result.parse_state; final_result.nodes.extend( result.nodes.into_iter() ); num_matches += 1; } _ => break } } if num_matches > 0 { Some( final_result ) } else { None } } } } #[macro_use] mod or { use super::{Expression, ParseState, ParseResult}; macro_rules! 
or( ( $( $ex:expr ),* ) => ( &base::Or::new( &[ $( $ex ),* ] ) ); ); pub struct Or<'a> { exprs: &'a [&'a (Expression + 'a)] } impl<'b> Or<'b> { pub fn new<'a>( exprs: &'a [&Expression] ) -> Or<'a> { Or { exprs: exprs } } } impl<'b> Expression for Or<'b> { fn apply<'a>( &self, parse_state: &ParseState<'a> ) -> Option< ParseResult<'a> > { for expr in self.exprs.iter() { match expr.apply( parse_state ) { result @ Some( _ ) => return result, _ => () } } None } } } #[macro_use] mod fuse { use super::{Expression, ParseState, ParseResult}; macro_rules! fuse( ( $ex:expr ) => ( &base::Fuse::new( $ex ) ); ); pub struct Fuse<'a> { expr: &'a ( Expression + 'a ) } impl<'a> Fuse<'a> { pub fn new( expr: & Expression ) -> Fuse { Fuse { expr: expr } } } impl<'b> Expression for Fuse<'b> { fn apply<'a>( &self, parse_state: &ParseState<'a> ) -> Option< ParseResult<'a> > { self.expr.apply( parse_state ).and_then( |result| parse_state.offsetToResult( result.parse_state.offset ) ) } } } #[macro_use] mod sequence { use super::{Expression, ParseState, ParseResult}; macro_rules! seq( ( $( $ex:expr ),* ) => ( &base::Sequence::new( &[ $( $ex ),* ] ) ); ); pub struct Sequence<'a> { exprs: &'a [&'a (Expression + 'a)] } impl<'b> Sequence<'b> { pub fn new<'a>( exprs: &'a [&Expression] ) -> Sequence<'a> { Sequence { exprs: exprs } } } impl<'b> Expression for Sequence<'b> { fn apply<'a>( &self, parse_state: &ParseState<'a> ) -> Option< ParseResult<'a> > { let mut final_result = ParseResult::fromParseState( *parse_state ); for expr in self.exprs.iter() { match expr.apply( &final_result.parse_state ) { Some( result ) => { final_result.parse_state = result.parse_state; final_result.nodes.extend( result.nodes.into_iter() ); } _ => return None } } Some( final_result ) } } } #[macro_use] mod wrap { use super::{Expression, ParseState, ParseResult, Rule}; macro_rules! 
ex( ( $ex:expr ) => ( &base::WrapEx{ rule: $ex } ); ); pub struct WrapEx { pub rule: Rule } impl Expression for WrapEx { fn apply<'a>( &self, parse_state: &ParseState<'a> ) -> Option< ParseResult<'a> > { (self.rule)( parse_state ) } } } mod unicode { use std::char; pub static UTF8_1BYTE_FOLLOWING: u8 = 0b11000000; pub static UTF8_2BYTE_FOLLOWING: u8 = 0b11100000; pub static UTF8_3BYTE_FOLLOWING: u8 = 0b11110000; pub fn readCodepoint( input: &[u8] ) -> Option< char > { fn isContinuationByte( byte: u8 ) -> bool { byte & 0b11000000 == 0b10000000 } fn codepointBitsFromLeadingByte( byte: u8 ) -> u32 { let good_bits = if isAscii( byte ) { byte } else if byte & 0b11100000 == UTF8_1BYTE_FOLLOWING { byte & 0b00011111 } else if byte & 0b11110000 == UTF8_2BYTE_FOLLOWING { byte & 0b00001111 } else { byte & 0b00000111 }; good_bits as u32 } fn codepointBitsFromContinuationByte( byte: u8 ) -> u32 { ( byte & 0b00111111 ) as u32 } input.get( 0 ) .and_then( |first_byte| { bytesFollowing( *first_byte ).and_then( |num_following| { let mut codepoint: u32 = codepointBitsFromLeadingByte( *first_byte ) << 6 * num_following; for i in 1 .. 
num_following + 1 { match input.get( i ) { Some( byte ) if isContinuationByte( *byte ) => { codepoint |= codepointBitsFromContinuationByte( *byte ) << 6 * ( num_following - i ); } _ => return None } } char::from_u32( codepoint ) }) }) } pub fn bytesFollowing( byte: u8 ) -> Option< usize > { if isAscii( byte ) { Some( 0 ) } else if byte & 0b11100000 == UTF8_1BYTE_FOLLOWING { Some( 1 ) } else if byte & 0b11110000 == UTF8_2BYTE_FOLLOWING { Some( 2 ) } else if byte & 0b11111000 == UTF8_3BYTE_FOLLOWING { Some( 3 ) } else { None } } pub fn isAscii( byte: u8 ) -> bool { return byte & 0b10000000 == 0; } } #[doc(hidden)] #[derive(Debug, Clone, PartialEq, Copy)] pub struct ParseState<'a> { pub input: &'a [u8], pub offset: usize } impl<'a> ParseState<'a> { fn advanceTo( &self, new_offset: usize ) -> ParseState<'a> { let mut clone = self.clone(); clone.input = &clone.input[ new_offset - clone.offset .. ]; clone.offset = new_offset; clone } fn sliceTo( &self, new_offset: usize ) -> &'a [u8] { &self.input[ .. new_offset - self.offset ] } fn offsetToResult( &self, new_offset: usize ) -> Option< ParseResult<'a> > { Some( ParseResult::oneNode( Node::withoutName( self.offset, new_offset, Data( self.sliceTo( new_offset ) ) ), self.advanceTo( new_offset ) ) ) } } #[doc(hidden)] pub struct ParseResult<'a> { pub nodes: Vec< Node<'a> >, pub parse_state: ParseState<'a> } impl<'a> ParseResult<'a> { pub fn oneNode( node: Node<'a>, parse_state: ParseState<'a> ) -> ParseResult<'a> { ParseResult { nodes: vec!( node ), parse_state: parse_state } } pub fn fromParseState( parse_state: ParseState<'a> ) -> ParseResult<'a> { ParseResult { nodes: vec!(), parse_state: parse_state } } } pub trait Expression { fn apply<'a>( &self, parse_state: &ParseState<'a> ) -> Option< ParseResult<'a> >; } pub type Rule = for<'a> fn( &ParseState<'a> ) -> Option< ParseResult<'a> >; } macro_rules! 
rule( ( $name:ident <- $body:expr ) => ( pub fn $name<'a>( parse_state: &base::ParseState<'a> ) -> std::option::Option< base::ParseResult<'a> > { use base::Expression; use base::Node; use base::ParseResult; use std::clone::Clone; use std::option::Option::{Some, None}; match $body.apply( parse_state ) { Some( result ) => { let state = result.parse_state.clone(); Some( ParseResult::oneNode( Node::withChildren( stringify!( $name ), result.nodes ), state ) ) } _ => None } } ); ); #[cfg(not(test))] pub fn parse<'a>( input: &'a [u8] ) -> Option< Node<'a> > { let parse_state = ParseState { input: input, offset: 0 }; match rules::Grammar( &parse_state ) { Some( result ) => Some( result.nodes.into_iter().next().unwrap() ), _ => None } } mod rules { #![no_implicit_prelude] use base; use std; rule!( Grammar <- seq!( ex!( Spacing ), plus!( ex!( Definition ) ), ex!( EndOfFile ) ) ); rule!( Definition <- seq!( ex!( Identifier ), ex!( ARROW ), ex!( Expression ) ) ); rule!( Expression <- seq!( ex!( Sequence ), star!( seq!( ex!( SLASH ), ex!( Sequence ) ) ) ) ); rule!( Sequence <- star!( ex!( Prefix ) ) ); rule!( Prefix <- seq!( opt!( or!( ex!( AND ), ex!( NOT ), ex!( FUSE ) ) ), ex!( Suffix ) ) ); rule!( Suffix <- seq!( ex!( Primary ), opt!( or!( ex!( QUESTION ), ex!( STAR ), ex!( PLUS ) ) ) ) ); rule!( Primary <- or!( seq!( ex!( Identifier ), not!( ex!( ARROW ) ) ), seq!( ex!( OPEN ), ex!( Expression ), ex!( CLOSE ) ), ex!( Literal ), ex!( Class ), ex!( DOT ) ) ); rule!( Identifier <- seq!( fuse!( seq!( ex!( IdentStart ), star!( ex!( IdentCont ) ) ) ), ex!( Spacing ) ) ); rule!( IdentStart <- class!( "a-zA-Z_" ) ); rule!( IdentCont <- or!( ex!( IdentStart ), class!( "0-9" ) ) ); rule!( Literal <- seq!( fuse!( or!( seq!( class!( "'" ), star!( seq!( not!( class!( "'" ) ), ex!( Char ) ) ), class!( "'" ) ), seq!( class!( "\"" ), star!( seq!( not!( class!( "\"" ) ), ex!( Char ) ) ), class!( "\"" ) ) ) ), ex!( Spacing ) ) ); rule!( Class <- seq!( lit!( "[" ), star!( seq!( not!( lit!( 
"]" ) ), ex!( Range ) ) ), lit!( "]" ), ex!( Spacing ) ) ); rule!( Range <- or!( seq!( ex!( Char ), lit!( "-" ), ex!( Char ) ), ex!( Char ) ) ); rule!( Char <- or!( seq!( lit!( "\\" ), class!( "nrt'\"[]\\" ) ), seq!( lit!( "\\" ), class!( "0-2" ), class!( "0-7" ), class!( "0-7" ) ), seq!( lit!( "\\" ), class!( "0-7" ), opt!( class!( "0-7" ) ) ), seq!( not!( lit!( "\\" ) ), &base::Dot ) ) ); rule!( ARROW <- or!( ex!( FUSEARROW ), ex!( LEFTARROW ) ) ); rule!( LEFTARROW <- seq!( lit!( "<-" ), ex!( Spacing ) ) ); rule!( FUSEARROW <- seq!( lit!( "<~" ), ex!( Spacing ) ) ); rule!( SLASH <- seq!( lit!( "/" ), ex!( Spacing ) ) ); rule!( AND <- seq!( lit!( "&" ), ex!( Spacing ) ) ); rule!( NOT <- seq!( lit!( "!" ), ex!( Spacing ) ) ); rule!( QUESTION <- seq!( lit!( "?" ), ex!( Spacing ) ) ); rule!( STAR <- seq!( lit!( "*" ), ex!( Spacing ) ) ); rule!( PLUS <- seq!( lit!( "+" ), ex!( Spacing ) ) ); rule!( OPEN <- seq!( lit!( "(" ), ex!( Spacing ) ) ); rule!( CLOSE <- seq!( lit!( ")" ), ex!( Spacing ) ) ); rule!( DOT <- seq!( lit!( "." ), ex!( Spacing ) ) ); rule!( FUSE <- seq!( lit!( "~" ), ex!( Spacing ) ) ); rule!( Spacing <- fuse!( star!( or!( ex!( Space ), ex!( Comment ) ) ) ) ); rule!( Comment <- fuse!( seq!( lit!( "#" ), star!( seq!( not!( ex!( EndOfLine ) ), &base::Dot ) ), ex!( EndOfLine ) ) ) ); rule!( Space <- or!( lit!( " " ), lit!( "\t" ), ex!( EndOfLine ) ) ); rule!( EndOfLine <- or!( lit!( "\r\n" ), lit!( "\n" ), lit!( "\r" ) ) ); rule!( EndOfFile <- not!( &base::Dot ) ); }
28.841251
250
0.492045
5d2eb8ac1f1b0a533a8d3d7ba2a4c6ae1d7ebdb3
5,105
use std::{ io::{BufRead, BufReader, Write}, sync::{Arc, Mutex}, }; use crate::{console, errors}; use super::Error; pub struct Prompter { writer: Arc<Mutex<dyn Write + Send>>, reader: Arc<Mutex<dyn BufRead + Send>>, } impl Prompter { pub fn new() -> Self { Self { writer: Arc::new(Mutex::new(console::output::output())), reader: Arc::new(Mutex::new(BufReader::new(console::input::input()))), } } pub fn prompt<V>(&mut self, message: &str, validate: V) -> Result<Option<String>, Error> where V: Fn(&str) -> bool, { let mut line = String::default(); for _i in 0..3 { let mut writer = self .writer .lock() .map_err(|_| errors::system( "We could not acquire a synchronization lock on the terminal stdout stream when attempting to prompt you for input.", "Please try again and if this does not resolve the problem, create a GitHub issue explaining how to reproduce the issue so that we can investigate further."))?; write!(writer, "{}", message)?; writer.flush()?; let mut reader = self.reader.lock().map_err(|_| errors::system( "We could not acquire a synchronization lock on the terminal's stdin stream when attempting to prompt you for input.", "Please try again and if this does not resolve the problem, create a GitHub issue explaining how to reproduce the issue so that we can investigate further."))?; let n = reader.read_line(&mut line)?; if n == 0 { return Ok(None); } if !validate(&line.trim()) { line.clear() } else { return Ok(Some(line.trim().into())); } } Ok(None) } pub fn prompt_bool( &mut self, message: &str, default: Option<bool>, ) -> Result<Option<bool>, Error> { if let Some(answer) = self.prompt(message, |l| { l.to_lowercase() == "y" || l.to_lowercase() == "n" })? 
{ Ok(Some(answer.to_lowercase() == "y")) } else { Ok(default) } } } #[cfg(test)] mod tests { use super::*; #[test] fn prompt_for_any() { console::input::mock("123\n"); let output = console::output::mock(); let mut prompter = Prompter::new(); assert_eq!( prompter .prompt("Enter a number: ", |l| { let n: Option<u32> = l.parse().ok(); n.is_some() }) .unwrap(), Some("123".into()), ); assert_eq!(output.to_string(), "Enter a number: "); } #[test] fn prompt_eof() { console::input::mock(""); let output = console::output::mock(); let mut prompter = Prompter::new(); assert_eq!( prompter .prompt("Enter a number: ", |l| { let n: Option<u32> = l.parse().ok(); n.is_some() }) .unwrap(), None, ); assert_eq!(output.to_string(), "Enter a number: "); } #[test] fn prompt_retry() { console::input::mock("\nnan\n123\n"); let output = console::output::mock(); let mut prompter = Prompter::new(); assert_eq!( prompter .prompt("Enter a number: ", |l| { let n: Option<u32> = l.parse().ok(); n.is_some() }) .unwrap(), Some("123".into()), ); assert_eq!( output.to_string(), "Enter a number: Enter a number: Enter a number: " ); } #[test] fn prompt_multiple() { console::input::mock("a\nb\n"); let output = console::output::mock(); let mut prompter = Prompter::new(); assert_eq!( prompter.prompt("First prompt: ", |_| true).unwrap(), Some("a".into()), ); assert_eq!( prompter.prompt("Second prompt: ", |_| true).unwrap(), Some("b".into()), ); assert_eq!(output.to_string(), "First prompt: Second prompt: "); } #[test] fn prompt_boolean() { console::input::mock("y\nn\n\n\n"); let output = console::output::mock(); let mut prompter = Prompter::new(); assert_eq!( prompter.prompt_bool("Works? [y/N]: ", Some(false)).unwrap(), Some(true), ); assert_eq!(output.to_string(), "Works? [y/N]: "); assert_eq!( prompter.prompt_bool("Works? [Y/n]: ", Some(true)).unwrap(), Some(false), ); assert_eq!( prompter.prompt_bool("Works? [Y/n]: ", Some(true)).unwrap(), Some(true), ); assert_eq!(prompter.prompt_bool("Works? 
[Y/n]: ", None).unwrap(), None,); } }
27.594595
180
0.482272
501d36886a4851d9ea2925e5fe05763373ccb92b
1,103
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // ignore-fast doesn't like extern crate extern crate libc; mod mlibc { use libc::{c_char, c_long, c_longlong}; extern { pub fn atol(x: *c_char) -> c_long; pub fn atoll(x: *c_char) -> c_longlong; } } fn atol(s: StrBuf) -> int { s.as_slice().with_c_str(|x| unsafe { mlibc::atol(x) as int }) } fn atoll(s: StrBuf) -> i64 { s.as_slice().with_c_str(|x| unsafe { mlibc::atoll(x) as i64 }) } pub fn main() { assert_eq!(atol("1024".to_strbuf()) * 10, atol("10240".to_strbuf())); assert!((atoll("11111111111111111".to_strbuf()) * 10) == atoll("111111111111111110".to_strbuf())); }
29.810811
73
0.660018
9ccdb846e111e9d2beb411c1a47c70210fb84993
2,196
//! Parsers for blocks. //! //! The [`block0`] parser is the top-level parser of the whole [`parser`] module. //! //! [`parser`]: crate::parser use nom::{ character::complete::{line_ending, multispace0}, combinator::{map, opt}, multi::{separated_list, separated_nonempty_list}, sequence::{preceded, tuple}, }; use nom_locate::position; use pijama_ast::{Block, Located, Location, Span}; use crate::parser::{ node::{comment, node}, IResult, }; /// Parser for [`Block`]s which may or may not be empty. /// /// Nodes in the block can be separated by at least one line break and optional spaces. /// /// The location of this element matches either the start of a comment or the first space /// or line break before the first `Node` of the `Block`. If there are no spaces or line /// breaks before the first `Node`, the start matches the start of the `Node`. The end /// of the location is handled in an analogous manner. pub fn block0(input: Span) -> IResult<Located<Block>> { map( tuple(( preceded(opt(comment::comment), position), separated_list(line_ending, preceded(multispace0, node)), position, )), |(sp1, block, sp2)| { (Location::from(sp1) + Location::from(sp2)).with_content(block.into_iter().collect()) }, )(input) } /// Parser for non-empty [`Block`]s. /// /// Nodes in the block can be separated by at least one line break and optional spaces. /// /// The location of this element matches either the start of a comment or the first space /// or line break before the first `Node` of the `Block`. If there are no spaces or line /// breaks before the first `Node`, the start matches the start of the `Node`. The end /// of the location is handled in an analogous manner. pub fn block1(input: Span) -> IResult<Located<Block>> { map( tuple(( preceded(opt(comment::comment), position), separated_nonempty_list(line_ending, preceded(multispace0, node)), position, )), |(sp1, block, sp2)| { (Location::from(sp1) + Location::from(sp2)).with_content(block.into_iter().collect()) }, )(input) }
34.857143
97
0.643443
1d0b106adff008e9f7b0015a7b2449c539e40fe4
15,856
use std::{
    future::Future,
    mem,
    ops::Add,
    pin::Pin,
    sync::{Arc, Mutex, Weak},
    task::{Context, Poll},
    time::{Duration, Instant},
};

use futures_core::ready;
use tokio::{
    sync::{mpsc, Semaphore},
    time::{sleep_until, Sleep},
};
use tower::Service;
use tracing::{debug, trace};

use super::{
    error::{Closed, ServiceError},
    message::{Message, Tx},
    BatchControl,
};

/// Get the error out
///
/// Shared slot through which the worker publishes its terminal
/// `ServiceError`, so `Batch` handles can report why the buffer closed.
#[derive(Debug)]
pub(crate) struct Handle {
    inner: Arc<Mutex<Option<ServiceError>>>,
}

/// Wrap `Service` channel for easier use through projections.
#[derive(Debug)]
struct Bridge<Fut, Request> {
    // Incoming requests from `Batch` handles.
    rx: mpsc::UnboundedReceiver<Message<Request, Fut>>,
    handle: Handle,
    // A message whose dispatch was deferred because the service was not ready.
    current_message: Option<Message<Request, Fut>>,
    // Weak reference to the buffer's capacity semaphore; closed on failure/drop.
    close: Option<Weak<Semaphore>>,
    // Set once the underlying service has failed; echoed to later callers.
    failed: Option<ServiceError>,
}

// Accumulates the response channels of the batch currently being built,
// together with the size and time limits that trigger a flush.
#[derive(Debug)]
struct Lot<Fut> {
    max_size: usize,
    max_time: Duration,
    responses: Vec<(Tx<Fut>, Result<Fut, ServiceError>)>,
    // Timer armed when the first item of a batch arrives.
    time_elapses: Option<Pin<Box<Sleep>>>,
    // Latch so the elapsed timer triggers exactly one flush (see `poll_max_time`).
    time_elapsed: bool,
}

pin_project_lite::pin_project! {
    // Worker state machine: collect items, flush the batch, or shut down.
    #[project = StateProj]
    #[derive(Debug)]
    enum State<Fut> {
        Collecting,
        Flushing {
            reason: Option<String>,
            #[pin]
            flush_fut: Option<Fut>,
        },
        Finished
    }
}

pin_project_lite::pin_project! {
    /// Task that handles processing the buffer. This type should not be used
    /// directly, instead `Batch` requires an `Executor` that can accept this task.
    ///
    /// The struct is `pub` in the private module and the type is *not* re-exported
    /// as part of the public API. This is the "sealed" pattern to include "private"
    /// types in public traits that are not meant for consumers of the library to
    /// implement (only call).
    #[derive(Debug)]
    pub struct Worker<T, Request>
    where
        T: Service<BatchControl<Request>>,
        T::Error: Into<crate::BoxError>,
    {
        service: T,
        bridge: Bridge<T::Future, Request>,
        lot: Lot<T::Future>,
        #[pin]
        state: State<T::Future>,
    }
}

// ===== impl Worker =====

impl<T, Request> Worker<T, Request>
where
    T: Service<BatchControl<Request>>,
    T::Error: Into<crate::BoxError>,
{
    /// Builds a worker plus the `Handle` used to retrieve its terminal error.
    pub(crate) fn new(
        rx: mpsc::UnboundedReceiver<Message<Request, T::Future>>,
        service: T,
        max_size: usize,
        max_time: Duration,
        semaphore: &Arc<Semaphore>,
    ) -> (Handle, Worker<T, Request>) {
        trace!("creating Batch worker");
        let handle = Handle {
            inner: Arc::new(Mutex::new(None)),
        };

        // The service and worker have a parent - child relationship, so we must
        // downgrade the Arc to Weak, to ensure a cycle between Arc pointers will
        // never be deallocated.
        let semaphore = Arc::downgrade(semaphore);
        let worker = Self {
            service,
            bridge: Bridge {
                rx,
                current_message: None,
                handle: handle.clone(),
                close: Some(semaphore),
                failed: None,
            },
            lot: Lot::new(max_size, max_time),
            state: State::Collecting,
        };

        (handle, worker)
    }
}

impl<T, Request> Future for Worker<T, Request>
where
    T: Service<BatchControl<Request>>,
    T::Error: Into<crate::BoxError>,
{
    type Output = ();

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        trace!("polling worker");
        let mut this = self.project();

        // Flush if the max wait time is reached.
        if let Poll::Ready(Some(())) = this.lot.poll_max_time(cx) {
            this.state.set(State::flushing("time".to_owned(), None))
        }

        loop {
            match this.state.as_mut().project() {
                StateProj::Collecting => {
                    match ready!(this.bridge.poll_next_msg(cx)) {
                        Some((msg, first)) => {
                            let _guard = msg.span.enter();
                            trace!(resumed = !first, message = "worker received request");

                            // Wait for the service to be ready
                            trace!(message = "waiting for service readiness");
                            match this.service.poll_ready(cx) {
                                Poll::Ready(Ok(())) => {
                                    debug!(service.ready = true, message = "adding item");
                                    let response = this.service.call(msg.request.into());
                                    this.lot.add((msg.tx, Ok(response)));

                                    // Flush if the batch is full.
                                    if this.lot.is_full() {
                                        this.state.set(State::flushing("size".to_owned(), None));
                                    }

                                    // Or flush if the max time has elapsed.
                                    if this.lot.poll_max_time(cx).is_ready() {
                                        this.state.set(State::flushing("time".to_owned(), None));
                                    }
                                }
                                Poll::Pending => {
                                    drop(_guard);
                                    debug!(service.ready = false, message = "delay item addition");
                                    // Park the message so it is retried on the next poll.
                                    this.bridge.return_msg(msg);
                                    return Poll::Pending;
                                }
                                Poll::Ready(Err(e)) => {
                                    drop(_guard);
                                    this.bridge.failed("item addition", e.into());
                                    if let Some(ref e) = this.bridge.failed {
                                        // Ensure the current caller is notified too.
                                        this.lot.add((msg.tx, Err(e.clone())));
                                        this.lot.notify(Some(e.clone()));
                                    }
                                }
                            }
                        }
                        None => {
                            trace!("shutting down, no more requests _ever_");
                            this.state.set(State::Finished);
                            return Poll::Ready(());
                        }
                    }
                }
                StateProj::Flushing { reason, flush_fut } => match flush_fut.as_pin_mut() {
                    None => {
                        // Flush requested but not yet started: wait for the
                        // service to accept the `Flush` control message.
                        trace!(
                            reason = reason.as_mut().unwrap().as_str(),
                            message = "waiting for service readiness"
                        );
                        match this.service.poll_ready(cx) {
                            Poll::Ready(Ok(())) => {
                                debug!(
                                    service.ready = true,
                                    reason = reason.as_mut().unwrap().as_str(),
                                    message = "flushing batch"
                                );
                                let response = this.service.call(BatchControl::Flush);
                                let reason = reason.take().expect("missing reason");
                                this.state.set(State::flushing(reason, Some(response)));
                            }
                            Poll::Pending => {
                                debug!(
                                    service.ready = false,
                                    reason = reason.as_mut().unwrap().as_str(),
                                    message = "delay flush"
                                );
                                return Poll::Pending;
                            }
                            Poll::Ready(Err(e)) => {
                                // NOTE(review): unlike the in-flight-flush error arm
                                // below, this arm does not transition to `Finished`
                                // before looping again — confirm this is intended.
                                this.bridge.failed("flush", e.into());
                                if let Some(ref e) = this.bridge.failed {
                                    this.lot.notify(Some(e.clone()));
                                }
                            }
                        }
                    }
                    Some(future) => {
                        // Flush in flight: wait for it to complete, then
                        // fan the outcome out to every caller in the lot.
                        match ready!(future.poll(cx)) {
                            Ok(_) => {
                                debug!(reason = reason.as_mut().unwrap().as_str(), "batch flushed");
                                this.lot.notify(None);
                                this.state.set(State::Collecting)
                            },
                            Err(e) => {
                                this.bridge.failed("flush", e.into());
                                if let Some(ref e) = this.bridge.failed {
                                    this.lot.notify(Some(e.clone()));
                                }
                                this.state.set(State::Finished);
                                return Poll::Ready(());
                            }
                        }
                    }
                },
                StateProj::Finished => {
                    // We've already received None and are shutting down
                    return Poll::Ready(());
                }
            }
        }
    }
}

// ===== impl State =====

impl<Fut> State<Fut> {
    // Convenience constructor for the `Flushing` variant.
    fn flushing(reason: String, f: Option<Fut>) -> Self {
        Self::Flushing {
            reason: Some(reason),
            flush_fut: f,
        }
    }
}

// ===== impl Bridge =====

impl<Fut, Request> Drop for Bridge<Fut, Request> {
    fn drop(&mut self) {
        self.close_semaphore()
    }
}

impl<Fut, Request> Bridge<Fut, Request> {
    /// Closes the buffer's semaphore if it is still open, waking any pending tasks.
    fn close_semaphore(&mut self) {
        if let Some(close) = self
            .close
            .take()
            .as_ref()
            .and_then(Weak::<Semaphore>::upgrade)
        {
            debug!("buffer closing; waking pending tasks");
            close.close();
        } else {
            trace!("buffer already closed");
        }
    }

    fn failed(&mut self, action: &str, error: crate::BoxError) {
        debug!(action, %error , "service failed");

        // The underlying service failed when we called `poll_ready` on it with the given `error`.
        // We need to communicate this to all the `Buffer` handles. To do so, we wrap up the error
        // in an `Arc`, send that `Arc<E>` to all pending requests, and store it so that subsequent
        // requests will also fail with the same error.

        // Note that we need to handle the case where some handle is concurrently trying to send us
        // a request. We need to make sure that *either* the send of the request fails *or* it
        // receives an error on the `oneshot` it constructed. Specifically, we want to avoid the
        // case where we send errors to all outstanding requests, and *then* the caller sends its
        // request. We do this by *first* exposing the error, *then* closing the channel used to
        // send more requests (so the client will see the error when the send fails), and *then*
        // sending the error to all outstanding requests.
        let error = ServiceError::new(error);

        let mut inner = self.handle.inner.lock().unwrap();

        if inner.is_some() {
            // Future::poll was called after we've already errored out!
            return;
        }

        *inner = Some(error.clone());
        drop(inner);

        self.rx.close();

        // Wake any tasks waiting on channel capacity.
        self.close_semaphore();

        // By closing the mpsc::Receiver, we know that that the run() loop will drain all pending
        // requests. We just need to make sure that any requests that we receive before we've
        // exhausted the receiver receive the error:
        self.failed = Some(error);
    }

    /// Return the next queued Message that hasn't been canceled.
    ///
    /// If a `Message` is returned, the `bool` is true if this is the first time we received this
    /// message, and false otherwise (i.e., we tried to forward it to the backing service before).
    fn poll_next_msg(
        &mut self,
        cx: &mut Context<'_>,
    ) -> Poll<Option<(Message<Request, Fut>, bool)>> {
        trace!("worker polling for next message");

        // Pick any delayed request first
        if let Some(msg) = self.current_message.take() {
            // If the oneshot sender is closed, then the receiver is dropped, and nobody cares about
            // the response. If this is the case, we should continue to the next request.
            if !msg.tx.is_closed() {
                trace!("resuming buffered request");
                return Poll::Ready(Some((msg, false)));
            }

            trace!("dropping cancelled buffered request");
        }

        // Get the next request
        while let Some(msg) = ready!(Pin::new(&mut self.rx).poll_recv(cx)) {
            if !msg.tx.is_closed() {
                trace!("processing new request");
                return Poll::Ready(Some((msg, true)));
            }
            // Otherwise, request is canceled, so pop the next one.
            trace!("dropping cancelled request");
        }

        Poll::Ready(None)
    }

    // Stash a message so `poll_next_msg` retries it before reading the channel.
    fn return_msg(&mut self, msg: Message<Request, Fut>) {
        self.current_message = Some(msg)
    }
}

// ===== impl Lot =====

impl<Fut> Lot<Fut> {
    fn new(max_size: usize, max_time: Duration) -> Self {
        Self {
            max_size,
            max_time,
            responses: Vec::with_capacity(max_size),
            time_elapses: None,
            time_elapsed: false,
        }
    }

    fn poll_max_time(&mut self, cx: &mut Context<'_>) -> Poll<Option<()>> {
        // When the Worker is polled and the time has elapsed, we return `Some` to let the Worker
        // know it's time to enter the Flushing state. Subsequent polls (e.g. by the Flush future)
        // will return None to prevent the Worker from getting stuck in an endless loop of entering
        // the Flushing state.
        if self.time_elapsed {
            return Poll::Ready(None);
        }

        if let Some(ref mut sleep) = self.time_elapses {
            if Pin::new(sleep).poll(cx).is_ready() {
                self.time_elapsed = true;
                return Poll::Ready(Some(()));
            }
        }

        Poll::Pending
    }

    fn is_full(&self) -> bool {
        self.responses.len() == self.max_size
    }

    // Adds one pending response; arms the flush timer on the first item.
    fn add(&mut self, item: (Tx<Fut>, Result<Fut, ServiceError>)) {
        if self.responses.is_empty() {
            self.time_elapses = Some(Box::pin(sleep_until(
                Instant::now().add(self.max_time).into(),
            )));
        }

        self.responses.push(item);
    }

    // Sends every buffered response (or `err`, if given) and resets the
    // lot for the next batch.
    fn notify(&mut self, err: Option<ServiceError>) {
        for (tx, response) in mem::replace(&mut self.responses, Vec::with_capacity(self.max_size)) {
            if let Some(ref response) = err {
                let _ = tx.send(Err(response.clone()));
            } else {
                let _ = tx.send(response);
            }
        }

        self.time_elapses = None;
        self.time_elapsed = false;
    }
}

// ===== impl Handle =====

impl Handle {
    // Returns the stored worker error, or a generic `Closed` error if the
    // worker shut down without recording one.
    pub(crate) fn get_error_on_closed(&self) -> crate::BoxError {
        self.inner
            .lock()
            .unwrap()
            .as_ref()
            .map(|svc_err| svc_err.clone().into())
            .unwrap_or_else(|| Closed::new().into())
    }
}

impl Clone for Handle {
    fn clone(&self) -> Self {
        Handle {
            inner: self.inner.clone(),
        }
    }
}
35.392857
100
0.47862
e836081acf2627a920a996ce3e49f8ec8c35f537
6,578
// svd2rust-generated read/modify/write accessors for the `tosdac_ctrl_hw2`
// register: four 6-bit TOS DAC gain-code fields (I/Q for gc2 and gc3) at bit
// offsets 0, 8, 16 and 24.
#[doc = "Register `tosdac_ctrl_hw2` reader"]
pub struct R(crate::R<TOSDAC_CTRL_HW2_SPEC>);
impl core::ops::Deref for R {
    type Target = crate::R<TOSDAC_CTRL_HW2_SPEC>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl core::convert::From<crate::R<TOSDAC_CTRL_HW2_SPEC>> for R {
    fn from(reader: crate::R<TOSDAC_CTRL_HW2_SPEC>) -> Self {
        R(reader)
    }
}
#[doc = "Register `tosdac_ctrl_hw2` writer"]
pub struct W(crate::W<TOSDAC_CTRL_HW2_SPEC>);
impl core::ops::Deref for W {
    type Target = crate::W<TOSDAC_CTRL_HW2_SPEC>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl core::ops::DerefMut for W {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
impl core::convert::From<crate::W<TOSDAC_CTRL_HW2_SPEC>> for W {
    fn from(writer: crate::W<TOSDAC_CTRL_HW2_SPEC>) -> Self {
        W(writer)
    }
}
#[doc = "Field `tbb_tosdac_q_gc3` reader - "]
pub struct TBB_TOSDAC_Q_GC3_R(crate::FieldReader<u8, u8>);
impl TBB_TOSDAC_Q_GC3_R {
    pub(crate) fn new(bits: u8) -> Self {
        TBB_TOSDAC_Q_GC3_R(crate::FieldReader::new(bits))
    }
}
impl core::ops::Deref for TBB_TOSDAC_Q_GC3_R {
    type Target = crate::FieldReader<u8, u8>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
#[doc = "Field `tbb_tosdac_q_gc3` writer - "]
pub struct TBB_TOSDAC_Q_GC3_W<'a> {
    w: &'a mut W,
}
impl<'a> TBB_TOSDAC_Q_GC3_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        // 6-bit field at bit offset 24: clear then set.
        self.w.bits = (self.w.bits & !(0x3f << 24)) | (((value as u32) & 0x3f) << 24);
        self.w
    }
}
#[doc = "Field `tbb_tosdac_i_gc3` reader - "]
pub struct TBB_TOSDAC_I_GC3_R(crate::FieldReader<u8, u8>);
impl TBB_TOSDAC_I_GC3_R {
    pub(crate) fn new(bits: u8) -> Self {
        TBB_TOSDAC_I_GC3_R(crate::FieldReader::new(bits))
    }
}
impl core::ops::Deref for TBB_TOSDAC_I_GC3_R {
    type Target = crate::FieldReader<u8, u8>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
#[doc = "Field `tbb_tosdac_i_gc3` writer - "]
pub struct TBB_TOSDAC_I_GC3_W<'a> {
    w: &'a mut W,
}
impl<'a> TBB_TOSDAC_I_GC3_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        // 6-bit field at bit offset 16.
        self.w.bits = (self.w.bits & !(0x3f << 16)) | (((value as u32) & 0x3f) << 16);
        self.w
    }
}
#[doc = "Field `tbb_tosdac_q_gc2` reader - "]
pub struct TBB_TOSDAC_Q_GC2_R(crate::FieldReader<u8, u8>);
impl TBB_TOSDAC_Q_GC2_R {
    pub(crate) fn new(bits: u8) -> Self {
        TBB_TOSDAC_Q_GC2_R(crate::FieldReader::new(bits))
    }
}
impl core::ops::Deref for TBB_TOSDAC_Q_GC2_R {
    type Target = crate::FieldReader<u8, u8>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
#[doc = "Field `tbb_tosdac_q_gc2` writer - "]
pub struct TBB_TOSDAC_Q_GC2_W<'a> {
    w: &'a mut W,
}
impl<'a> TBB_TOSDAC_Q_GC2_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        // 6-bit field at bit offset 8.
        self.w.bits = (self.w.bits & !(0x3f << 8)) | (((value as u32) & 0x3f) << 8);
        self.w
    }
}
#[doc = "Field `tbb_tosdac_i_gc2` reader - "]
pub struct TBB_TOSDAC_I_GC2_R(crate::FieldReader<u8, u8>);
impl TBB_TOSDAC_I_GC2_R {
    pub(crate) fn new(bits: u8) -> Self {
        TBB_TOSDAC_I_GC2_R(crate::FieldReader::new(bits))
    }
}
impl core::ops::Deref for TBB_TOSDAC_I_GC2_R {
    type Target = crate::FieldReader<u8, u8>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
#[doc = "Field `tbb_tosdac_i_gc2` writer - "]
pub struct TBB_TOSDAC_I_GC2_W<'a> {
    w: &'a mut W,
}
impl<'a> TBB_TOSDAC_I_GC2_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        // 6-bit field at bit offset 0.
        self.w.bits = (self.w.bits & !0x3f) | ((value as u32) & 0x3f);
        self.w
    }
}
impl R {
    #[doc = "Bits 24:29"]
    #[inline(always)]
    pub fn tbb_tosdac_q_gc3(&self) -> TBB_TOSDAC_Q_GC3_R {
        TBB_TOSDAC_Q_GC3_R::new(((self.bits >> 24) & 0x3f) as u8)
    }
    #[doc = "Bits 16:21"]
    #[inline(always)]
    pub fn tbb_tosdac_i_gc3(&self) -> TBB_TOSDAC_I_GC3_R {
        TBB_TOSDAC_I_GC3_R::new(((self.bits >> 16) & 0x3f) as u8)
    }
    #[doc = "Bits 8:13"]
    #[inline(always)]
    pub fn tbb_tosdac_q_gc2(&self) -> TBB_TOSDAC_Q_GC2_R {
        TBB_TOSDAC_Q_GC2_R::new(((self.bits >> 8) & 0x3f) as u8)
    }
    #[doc = "Bits 0:5"]
    #[inline(always)]
    pub fn tbb_tosdac_i_gc2(&self) -> TBB_TOSDAC_I_GC2_R {
        TBB_TOSDAC_I_GC2_R::new((self.bits & 0x3f) as u8)
    }
}
impl W {
    #[doc = "Bits 24:29"]
    #[inline(always)]
    pub fn tbb_tosdac_q_gc3(&mut self) -> TBB_TOSDAC_Q_GC3_W {
        TBB_TOSDAC_Q_GC3_W { w: self }
    }
    #[doc = "Bits 16:21"]
    #[inline(always)]
    pub fn tbb_tosdac_i_gc3(&mut self) -> TBB_TOSDAC_I_GC3_W {
        TBB_TOSDAC_I_GC3_W { w: self }
    }
    #[doc = "Bits 8:13"]
    #[inline(always)]
    pub fn tbb_tosdac_q_gc2(&mut self) -> TBB_TOSDAC_Q_GC2_W {
        TBB_TOSDAC_Q_GC2_W { w: self }
    }
    #[doc = "Bits 0:5"]
    #[inline(always)]
    pub fn tbb_tosdac_i_gc2(&mut self) -> TBB_TOSDAC_I_GC2_W {
        TBB_TOSDAC_I_GC2_W { w: self }
    }
    #[doc = "Writes raw bits to the register."]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.0.bits(bits);
        self
    }
}
#[doc = "tosdac_ctrl_hw2.\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [tosdac_ctrl_hw2](index.html) module"]
pub struct TOSDAC_CTRL_HW2_SPEC;
impl crate::RegisterSpec for TOSDAC_CTRL_HW2_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [tosdac_ctrl_hw2::R](R) reader structure"]
impl crate::Readable for TOSDAC_CTRL_HW2_SPEC {
    type Reader = R;
}
#[doc = "`write(|w| ..)` method takes [tosdac_ctrl_hw2::W](W) writer structure"]
impl crate::Writable for TOSDAC_CTRL_HW2_SPEC {
    type Writer = W;
}
#[doc = "`reset()` method sets tosdac_ctrl_hw2 to value 0"]
impl crate::Resettable for TOSDAC_CTRL_HW2_SPEC {
    #[inline(always)]
    fn reset_value() -> Self::Ux {
        0
    }
}
31.625
412
0.61432
64d07603e071bae892dc8403cd652652dca60ba8
2,789
use crate::prelude::{Ident, Span, TokenTree}; use crate::{Error, Result}; use std::iter::Peekable; #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub enum DataType { Enum, Struct, } impl DataType { pub fn take(input: &mut Peekable<impl Iterator<Item = TokenTree>>) -> Result<(Self, Ident)> { if let Some(TokenTree::Ident(ident)) = input.peek() { let result = match ident.to_string().as_str() { "struct" => DataType::Struct, "enum" => DataType::Enum, _ => return Err(Error::UnknownDataType(ident.span())), }; let ident = super::assume_ident(input.next()); return match input.next() { Some(TokenTree::Ident(ident)) => Ok((result, ident)), Some(t) => Err(Error::InvalidRustSyntax(t.span())), None => Err(Error::InvalidRustSyntax(ident.span())), }; } let span = input .peek() .map(|t| t.span()) .unwrap_or_else(Span::call_site); Err(Error::InvalidRustSyntax(span)) } } #[test] fn test_datatype_take() { use crate::token_stream; fn validate_output_eq(input: &str, expected_dt: DataType, expected_ident: &str) { let (dt, ident) = DataType::take(&mut token_stream(input)).unwrap_or_else(|e| { panic!("Could not parse tokenstream {:?}: {:?}", input, e); }); if dt != expected_dt || ident != expected_ident { println!("While parsing {:?}", input); panic!( "Expected {:?} {:?}, received {:?} {:?}", dt, ident, expected_dt, expected_ident ); } } assert!(DataType::take(&mut token_stream("enum")) .unwrap_err() .is_invalid_rust_syntax()); validate_output_eq("enum Foo", DataType::Enum, "Foo"); validate_output_eq("enum Foo { }", DataType::Enum, "Foo"); validate_output_eq("enum Foo { bar, baz }", DataType::Enum, "Foo"); validate_output_eq("enum Foo<'a, T> { bar, baz }", DataType::Enum, "Foo"); assert!(DataType::take(&mut token_stream("struct")) .unwrap_err() .is_invalid_rust_syntax()); validate_output_eq("struct Foo { }", DataType::Struct, "Foo"); validate_output_eq("struct Foo { bar: u32, baz: u32 }", DataType::Struct, "Foo"); validate_output_eq("struct Foo<'a, T> { bar: &'a T }", DataType::Struct, 
"Foo"); assert!(DataType::take(&mut token_stream("fn foo() {}")) .unwrap_err() .is_unknown_data_type()); assert!(DataType::take(&mut token_stream("() {}")) .unwrap_err() .is_invalid_rust_syntax()); assert!(DataType::take(&mut token_stream("")) .unwrap_err() .is_invalid_rust_syntax()); }
35.75641
97
0.563284
e53234bf4eae87e2084aa18ec6878d2fdef5c1bb
2,550
use vcell::VolatileCell;

// svd2rust-style memory map of a basic timer peripheral: each field is a
// volatile register at the byte offset given in its doc attribute, with
// `_reservedN` padding filling the gaps between registers.
#[doc = r" Register block"]
#[repr(C)]
pub struct RegisterBlock {
    #[doc = "0x00 - control register 1"]
    pub cr1: Cr1,
    _reserved0: [u8; 8usize],
    #[doc = "0x0c - DMA/Interrupt enable register"]
    pub dier: Dier,
    #[doc = "0x10 - status register"]
    pub sr: Sr,
    #[doc = "0x14 - event generation register"]
    pub egr: Egr,
    #[doc = "0x18 - capture/compare mode register 1 (output mode)"]
    pub ccmr1_output: Ccmr1Output,
    _reserved1: [u8; 4usize],
    #[doc = "0x20 - capture/compare enable register"]
    pub ccer: Ccer,
    #[doc = "0x24 - counter"]
    pub cnt: Cnt,
    #[doc = "0x28 - prescaler"]
    pub psc: Psc,
    #[doc = "0x2c - auto-reload register"]
    pub arr: Arr,
    _reserved2: [u8; 4usize],
    #[doc = "0x34 - capture/compare register 1"]
    pub ccr1: Ccr1,
    _reserved3: [u8; 24usize],
    #[doc = "0x50 - option register"]
    pub or: Or,
}
// Each register is a thin wrapper around a `VolatileCell<u32>`; its field
// accessors live in the companion module of the same name.
#[doc = "control register 1"]
pub struct Cr1 {
    register: VolatileCell<u32>,
}
#[doc = "control register 1"]
pub mod cr1;
#[doc = "DMA/Interrupt enable register"]
pub struct Dier {
    register: VolatileCell<u32>,
}
#[doc = "DMA/Interrupt enable register"]
pub mod dier;
#[doc = "status register"]
pub struct Sr {
    register: VolatileCell<u32>,
}
#[doc = "status register"]
pub mod sr;
#[doc = "event generation register"]
pub struct Egr {
    register: VolatileCell<u32>,
}
#[doc = "event generation register"]
pub mod egr;
#[doc = "capture/compare mode register 1 (output mode)"]
pub struct Ccmr1Output {
    register: VolatileCell<u32>,
}
#[doc = "capture/compare mode register 1 (output mode)"]
pub mod ccmr1_output;
// Note: `Ccmr1Input` shares the same hardware address as `Ccmr1Output`;
// only the output view appears in `RegisterBlock`.
#[doc = "capture/compare mode register 1 (input mode)"]
pub struct Ccmr1Input {
    register: VolatileCell<u32>,
}
#[doc = "capture/compare mode register 1 (input mode)"]
pub mod ccmr1_input;
#[doc = "capture/compare enable register"]
pub struct Ccer {
    register: VolatileCell<u32>,
}
#[doc = "capture/compare enable register"]
pub mod ccer;
#[doc = "counter"]
pub struct Cnt {
    register: VolatileCell<u32>,
}
#[doc = "counter"]
pub mod cnt;
#[doc = "prescaler"]
pub struct Psc {
    register: VolatileCell<u32>,
}
#[doc = "prescaler"]
pub mod psc;
#[doc = "auto-reload register"]
pub struct Arr {
    register: VolatileCell<u32>,
}
#[doc = "auto-reload register"]
pub mod arr;
#[doc = "capture/compare register 1"]
pub struct Ccr1 {
    register: VolatileCell<u32>,
}
#[doc = "capture/compare register 1"]
pub mod ccr1;
#[doc = "option register"]
pub struct Or {
    register: VolatileCell<u32>,
}
#[doc = "option register"]
pub mod or;
27.419355
98
0.66
90e0a0cca99b6275eac9b5b5f6ec503896e1bdcd
2,621
#![allow(clippy::redundant_slicing)]
use codec::{Decode, Encode};
use sp_io::{crypto::secp256k1_ecdsa_recover, hashing::keccak_256};
use sp_std::vec::Vec;

#[cfg(feature = "std")]
use serde::{Deserialize, Deserializer, Serialize, Serializer};

/// A 20-byte Ethereum account address.
#[derive(Clone, Copy, PartialEq, Eq, Encode, Decode, Default, Debug)]
pub struct EthereumAddress(pub [u8; 20]);

#[cfg(feature = "std")]
impl Serialize for EthereumAddress {
    // Serializes as a "0x"-prefixed hex string.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let hex: String = rustc_hex::ToHex::to_hex(&self.0[..]);
        serializer.serialize_str(&format!("0x{}", hex))
    }
}

#[cfg(feature = "std")]
impl<'de> Deserialize<'de> for EthereumAddress {
    // Accepts a hex string with or without a leading "0x"; the hex part must
    // be exactly 40 characters (20 bytes).
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let base_string = String::deserialize(deserializer)?;
        let offset = if base_string.starts_with("0x") { 2 } else { 0 };
        let s = &base_string[offset..];
        if s.len() != 40 {
            return Err(serde::de::Error::custom(
                "Bad length of Ethereum address (should be 42 including '0x')",
            ));
        }
        let raw: Vec<u8> =
            rustc_hex::FromHex::from_hex(s).map_err(|e| serde::de::Error::custom(format!("{:?}", e)))?;
        let mut r = Self::default();
        r.0.copy_from_slice(&raw);
        Ok(r)
    }
}

/// A 65-byte recoverable ECDSA (secp256k1) signature.
// PartialEq/Debug are written by hand because `[u8; 65]` exceeds the array
// sizes covered by the standard derives on older compilers.
#[derive(Encode, Decode, Clone)]
pub struct EcdsaSignature(pub [u8; 65]);

impl PartialEq for EcdsaSignature {
    fn eq(&self, other: &Self) -> bool {
        self.0 == other.0
    }
}

impl sp_std::fmt::Debug for EcdsaSignature {
    fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result {
        write!(f, "EcdsaSignature({:?})", &self.0[..])
    }
}

impl EcdsaSignature {
    // Constructs the message that Ethereum RPC's `personal_sign` and `eth_sign` would sign.
    //
    // Layout: "\x19Ethereum Signed Message:\n" + decimal(len(prefix + what))
    // + prefix + what.
    fn ethereum_signable_message(what: &[u8], prefix: &'static [u8]) -> Vec<u8> {
        //let prefix = T::Prefix::get();
        let mut l = prefix.len() + what.len();
        // Build the decimal digits of the length least-significant-first,
        // then reverse them when appending.
        // NOTE(review): if `prefix` and `what` are both empty, no length
        // digits are emitted at all — confirm callers never pass that.
        let mut rev = Vec::new();
        while l > 0 {
            rev.push(b'0' + (l % 10) as u8);
            l /= 10;
        }
        let mut v = b"\x19Ethereum Signed Message:\n".to_vec();
        v.extend(rev.into_iter().rev());
        v.extend_from_slice(&prefix[..]);
        v.extend_from_slice(what);
        v
    }

    // Attempts to recover the Ethereum address from a message signature signed by using
    // the Ethereum RPC's `personal_sign` and `eth_sign`.
    pub fn recover(&self, what: &[u8], prefix: &'static [u8]) -> Option<EthereumAddress> {
        let msg = keccak_256(&Self::ethereum_signable_message(what, prefix));
        let mut res = EthereumAddress::default();
        // Address = last 20 bytes of keccak256(recovered public key).
        res.0
            .copy_from_slice(&keccak_256(&secp256k1_ecdsa_recover(&self.0, &msg).ok()?[..])[12..]);
        Some(res)
    }
}
29.784091
112
0.655475
23026a2c1f3d755a34201de1e12db55d7e0134c2
56,387
use crate::gl;

use std::cmp;
use std::collections::HashMap;
use std::hash::BuildHasherDefault;
use std::ffi;
use std::ptr;
use std::os::raw;

use fnv::FnvHasher;

use crate::context::CommandContext;
use crate::version::Version;
use crate::version::Api;

use crate::uniforms::UniformType;
use crate::vertex::AttributeType;
use crate::program;

use crate::Handle;

/// Information about a uniform (except its name).
#[derive(Debug, Copy, Clone)]
pub struct Uniform {
    /// The location of the uniform.
    ///
    /// This is internal information, you probably don't need to use it.
    pub location: i32,

    /// Type of the uniform.
    pub ty: UniformType,

    /// If it is an array, the number of elements.
    pub size: Option<usize>,
}

/// Information about a uniform block (except its name).
#[derive(Debug, Clone)]
pub struct UniformBlock {
    /// Identifier of the block.
    ///
    /// This is internal information, you probably don't need to use it.
    pub id: i32,

    /// Initial bind point of the block.
    ///
    /// This is internal information, you probably don't need to use it.
    pub initial_binding: i32,

    /// Size in bytes of the data in the block.
    pub size: usize,

    /// Layout of the block.
    pub layout: BlockLayout,
}

/// Layout of a shader storage buffer or a uniform buffer.
// Recursive description: structs contain members, members can themselves be
// structs, arrays or basic typed values with a byte offset.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum BlockLayout {
    /// Multiple elements, each having a name.
    Struct {
        /// The list of elements, with `name`/`layout` pairs.
        members: Vec<(String, BlockLayout)>,
    },

    /// A basic element.
    BasicType {
        /// Type of data.
        ty: UniformType,

        /// Offset of this element in bytes from the start of the buffer.
        offset_in_buffer: usize,
    },

    /// A fixed-size array.
    ///
    /// For example:
    ///
    /// ```notrust
    /// uint data[12];
    /// ```
    Array {
        /// Type of data of each element.
        content: Box<BlockLayout>,

        /// Number of elements in the array.
        length: usize,
    },

    /// An array whose size isn't known at compile-time. Can only be used as the last element of
    /// a buffer.
    ///
    /// Its actual size depends on the size of the buffer.
    ///
    /// For example:
    ///
    /// ```notrust
    /// buffer MyBuffer {
    ///     uint data[];
    /// }
    /// ```
    DynamicSizedArray {
        /// Type of data of each element.
        content: Box<BlockLayout>,
    },
}

/// Information about an attribute of a program (except its name).
///
/// Internal struct. Not public.
// NOTE(review): the doc says "Not public" but the struct is declared `pub` —
// confirm the intended visibility.
#[derive(Debug, Copy, Clone)]
pub struct Attribute {
    /// The index of the uniform.
    ///
    /// This is internal information, you probably don't need to use it.
    pub location: i32,

    /// Type of the attribute.
    pub ty: AttributeType,

    /// Number of elements of the attribute.
    pub size: usize,
}

/// Describes the layout of a buffer that can receive transform feedback output.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct TransformFeedbackBuffer {
    /// Slot of this buffer.
    ///
    /// This is internal information, you probably don't need to use it.
    pub id: i32,

    /// List of elements inside the buffer.
    pub elements: Vec<TransformFeedbackVarying>,

    /// Size in bytes between two consecutive elements.
    pub stride: usize,
}

/// Describes a varying that is being output with transform feedback.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct TransformFeedbackVarying {
    /// Name of the variable.
    pub name: String,

    /// Number of bytes between the start of the first element and the start of this one.
    pub offset: usize,

    /// Size in bytes of this value.
    pub size: usize,

    /// Type of the value.
    pub ty: AttributeType,
}

/// Type of transform feedback. Only used with the legacy interface.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum TransformFeedbackMode {
    /// Each value is interleaved in the same buffer.
    Interleaved,

    /// Each value will go in a separate buffer.
    Separate,
}

/// Type of primitives that is being output by transform feedback.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum OutputPrimitives {
    /// Points.
    Points,

    /// Lines.
    Lines,

    /// Triangles.
    Triangles,

    /// Quads.
    Quads,
}

/// Returns a list of uniforms and a list of atomic counters of a program.
pub unsafe fn reflect_uniforms(ctxt: &mut CommandContext<'_>, program: Handle) -> (HashMap<String, Uniform, BuildHasherDefault<FnvHasher>>, HashMap<String, UniformBlock, BuildHasherDefault<FnvHasher>>) { // number of active uniforms let active_uniforms = { let mut active_uniforms: gl::types::GLint = 0; match program { Handle::Id(program) => { assert!(ctxt.version >= &Version(Api::Gl, 2, 0) || ctxt.version >= &Version(Api::GlEs, 2, 0)); ctxt.gl.GetProgramiv(program, gl::ACTIVE_UNIFORMS, &mut active_uniforms); }, Handle::Handle(program) => { assert!(ctxt.extensions.gl_arb_shader_objects); ctxt.gl.GetObjectParameterivARB(program, gl::OBJECT_ACTIVE_UNIFORMS_ARB, &mut active_uniforms); } }; active_uniforms }; let query_atomic_counters = ctxt.version >= &Version(Api::Gl, 4, 2) || ctxt.version >= &Version(Api::GlEs, 3, 1) || (ctxt.extensions.gl_arb_program_interface_query && ctxt.extensions.gl_arb_shader_atomic_counters); let mut active_atomic_counters: gl::types::GLint = 0; if query_atomic_counters { let program = if let Handle::Id(program) = program { ctxt.gl.GetProgramiv(program, gl::ACTIVE_ATOMIC_COUNTER_BUFFERS, &mut active_atomic_counters); }; } // the result of this function let mut uniforms = HashMap::with_hasher(BuildHasherDefault::<FnvHasher>::default()); uniforms.reserve((active_uniforms - active_atomic_counters) as usize); let mut atomic_counters = HashMap::with_hasher(Default::default()); atomic_counters.reserve(active_atomic_counters as usize); for uniform_id in 0 .. 
active_uniforms { let mut uniform_name_tmp: Vec<u8> = Vec::with_capacity(64); let mut uniform_name_tmp_len = 63; let mut data_type: gl::types::GLenum = 0; let mut data_size: gl::types::GLint = 0; match program { Handle::Id(program) => { assert!(ctxt.version >= &Version(Api::Gl, 2, 0) || ctxt.version >= &Version(Api::GlEs, 2, 0)); ctxt.gl.GetActiveUniform(program, uniform_id as gl::types::GLuint, uniform_name_tmp_len, &mut uniform_name_tmp_len, &mut data_size, &mut data_type, uniform_name_tmp.as_mut_ptr() as *mut gl::types::GLchar); }, Handle::Handle(program) => { assert!(ctxt.extensions.gl_arb_shader_objects); ctxt.gl.GetActiveUniformARB(program, uniform_id as gl::types::GLuint, uniform_name_tmp_len, &mut uniform_name_tmp_len, &mut data_size, &mut data_type, uniform_name_tmp.as_mut_ptr() as *mut gl::types::GLchar); } }; uniform_name_tmp.set_len(uniform_name_tmp_len as usize); let uniform_name = String::from_utf8(uniform_name_tmp).unwrap(); let location = match program { Handle::Id(program) => { assert!(ctxt.version >= &Version(Api::Gl, 2, 0) || ctxt.version >= &Version(Api::GlEs, 2, 0)); ctxt.gl.GetUniformLocation(program, ffi::CString::new(uniform_name.as_bytes()).unwrap() .as_bytes_with_nul().as_ptr() as *const raw::c_char) }, Handle::Handle(program) => { assert!(ctxt.extensions.gl_arb_shader_objects); ctxt.gl.GetUniformLocationARB(program, ffi::CString::new(uniform_name.as_bytes()).unwrap() .as_bytes_with_nul().as_ptr() as *const raw::c_char) } }; if data_type == gl::UNSIGNED_INT_ATOMIC_COUNTER { assert!(query_atomic_counters); let mut atomic_counter_id: gl::types::GLint = 0; let mut atomic_counter_buffer_bind_point: gl::types::GLint = 0; match program { Handle::Id(program) => { ctxt.gl.GetActiveUniformsiv(program, 1, &(uniform_id as gl::types::GLuint), gl::UNIFORM_ATOMIC_COUNTER_BUFFER_INDEX, &mut atomic_counter_id); ctxt.gl.GetActiveAtomicCounterBufferiv(program, atomic_counter_id as gl::types::GLuint, gl::ATOMIC_COUNTER_BUFFER_BINDING, &mut 
atomic_counter_buffer_bind_point); }, Handle::Handle(_) => unreachable!(), } atomic_counters.insert(uniform_name, UniformBlock { id: atomic_counter_id, initial_binding: atomic_counter_buffer_bind_point, size: 4, layout: BlockLayout::BasicType { ty: UniformType::UnsignedInt, offset_in_buffer: 0, }, }); } else { uniforms.insert(uniform_name, Uniform { location: location as i32, ty: glenum_to_uniform_type(data_type), size: if data_size == 1 { None } else { Some(data_size as usize) }, }); } } // Flatten arrays let mut uniforms_flattened = HashMap::with_hasher(Default::default()); for uniform in uniforms { // If this is a normal non-array element, just move it over if !uniform.0.ends_with("[0]") { assert!(uniform.1.size.is_none()); uniforms_flattened.insert(uniform.0, uniform.1); continue; } // We've got an array, first get the base of the name let name_base = uniform.0.split('[').next().unwrap(); let uniform_base = uniform.1; // Go over all the elements in the array for i in 0..uniform_base.size.unwrap() { let uniform = Uniform { size: None, location: uniform_base.location + (i as i32), .. 
uniform_base }; uniforms_flattened.insert(format!("{}[{}]", name_base, i), uniform); } } (uniforms_flattened, atomic_counters) } pub unsafe fn reflect_attributes(ctxt: &mut CommandContext<'_>, program: Handle) -> HashMap<String, Attribute, BuildHasherDefault<FnvHasher>> { // number of active attributes, and the max length of the attribute names let (active_attributes, attr_name_len_max) = { let mut active_attributes: gl::types::GLint = 0; let mut attr_name_len_max: gl::types::GLint = 0; match program { Handle::Id(program) => { assert!(ctxt.version >= &Version(Api::Gl, 2, 0) || ctxt.version >= &Version(Api::GlEs, 2, 0)); ctxt.gl.GetProgramiv(program, gl::ACTIVE_ATTRIBUTES, &mut active_attributes); ctxt.gl.GetProgramiv(program, gl::ACTIVE_ATTRIBUTE_MAX_LENGTH, &mut attr_name_len_max); }, Handle::Handle(program) => { assert!(ctxt.extensions.gl_arb_vertex_shader); ctxt.gl.GetObjectParameterivARB(program, gl::OBJECT_ACTIVE_ATTRIBUTES_ARB, &mut active_attributes); ctxt.gl.GetObjectParameterivARB(program, gl::OBJECT_ACTIVE_ATTRIBUTE_MAX_LENGTH_ARB, &mut attr_name_len_max); } }; // let's not trust the driver too much, and clamp the max_len to [63, 2047] attr_name_len_max = cmp::min(cmp::max(attr_name_len_max, 63), 2047); (active_attributes, attr_name_len_max) }; // the result of this function let mut attributes = HashMap::with_hasher(Default::default()); attributes.reserve(active_attributes as usize); for attribute_id in 0 .. 
active_attributes { let mut attr_name_tmp: Vec<u8> = Vec::with_capacity((attr_name_len_max + 1) as usize); //+1 for the nul-byte let mut attr_name_len = 0; let mut data_type: gl::types::GLenum = 0; let mut data_size: gl::types::GLint = 0; match program { Handle::Id(program) => { assert!(ctxt.version >= &Version(Api::Gl, 2, 0) || ctxt.version >= &Version(Api::GlEs, 2, 0)); ctxt.gl.GetActiveAttrib(program, attribute_id as gl::types::GLuint, attr_name_len_max, &mut attr_name_len, &mut data_size, &mut data_type, attr_name_tmp.as_mut_ptr() as *mut gl::types::GLchar); }, Handle::Handle(program) => { assert!(ctxt.extensions.gl_arb_vertex_shader); ctxt.gl.GetActiveAttribARB(program, attribute_id as gl::types::GLuint, attr_name_len_max, &mut attr_name_len, &mut data_size, &mut data_type, attr_name_tmp.as_mut_ptr() as *mut gl::types::GLchar); } }; attr_name_tmp.set_len(attr_name_len as usize); let attr_name = String::from_utf8(attr_name_tmp).unwrap(); if attr_name.starts_with("gl_") { // ignoring everything built-in continue; } if attr_name.is_empty() { // Some spirv compilers add an empty attribute to shaders. Most drivers // don't expose this attribute, but some do. // Since we can't do anything with empty attribute names, we simply skip // them in this reflection code. 
continue; } let location = match program { Handle::Id(program) => { assert!(ctxt.version >= &Version(Api::Gl, 2, 0) || ctxt.version >= &Version(Api::GlEs, 2, 0)); ctxt.gl.GetAttribLocation(program, ffi::CString::new(attr_name.as_bytes()).unwrap() .as_bytes_with_nul().as_ptr() as *const raw::c_char) }, Handle::Handle(program) => { assert!(ctxt.extensions.gl_arb_vertex_shader); ctxt.gl.GetAttribLocationARB(program, ffi::CString::new(attr_name.as_bytes()).unwrap() .as_bytes_with_nul().as_ptr() as *const raw::c_char) } }; attributes.insert(attr_name, Attribute { location, ty: glenum_to_attribute_type(data_type), size: data_size as usize, }); } attributes } pub unsafe fn reflect_uniform_blocks(ctxt: &mut CommandContext<'_>, program: Handle) -> HashMap<String, UniformBlock, BuildHasherDefault<FnvHasher>> { // uniform blocks are not supported, so there's none if !(ctxt.version >= &Version(Api::Gl, 3, 1) || ctxt.version >= &Version(Api::GlEs, 3, 0)) { return HashMap::with_hasher(Default::default()); } let program = match program { Handle::Id(id) => id, _ => unreachable!() }; let mut active_blocks: gl::types::GLint = 0; ctxt.gl.GetProgramiv(program, gl::ACTIVE_UNIFORM_BLOCKS, &mut active_blocks); // WORK-AROUND: AMD OpenGL ES drivers don't accept `GL_ACTIVE_UNIFORM_BLOCK_MAX_NAME_LENGTH` // even though they report OpenGL ES 3.1. So we return early on if possible. // TODO: find a better work-around ^ if active_blocks == 0 { return HashMap::with_hasher(Default::default()); } let mut active_blocks_max_name_len: gl::types::GLint = 0; ctxt.gl.GetProgramiv(program, gl::ACTIVE_UNIFORM_BLOCK_MAX_NAME_LENGTH, &mut active_blocks_max_name_len); let mut blocks = HashMap::with_hasher(Default::default()); blocks.reserve(active_blocks as usize); for block_id in 0 .. 
active_blocks { // getting the name of the block let name = { let mut name_tmp: Vec<u8> = Vec::with_capacity(1 + active_blocks_max_name_len as usize); let mut name_tmp_len = active_blocks_max_name_len; ctxt.gl.GetActiveUniformBlockName(program, block_id as gl::types::GLuint, name_tmp_len, &mut name_tmp_len, name_tmp.as_mut_ptr() as *mut gl::types::GLchar); name_tmp.set_len(name_tmp_len as usize); String::from_utf8(name_tmp).unwrap() }; // binding point for this block let mut binding: gl::types::GLint = 0; ctxt.gl.GetActiveUniformBlockiv(program, block_id as gl::types::GLuint, gl::UNIFORM_BLOCK_BINDING, &mut binding); // number of bytes let mut block_size: gl::types::GLint = 0; ctxt.gl.GetActiveUniformBlockiv(program, block_id as gl::types::GLuint, gl::UNIFORM_BLOCK_DATA_SIZE, &mut block_size); // number of members let mut num_members: gl::types::GLint = 0; ctxt.gl.GetActiveUniformBlockiv(program, block_id as gl::types::GLuint, gl::UNIFORM_BLOCK_ACTIVE_UNIFORMS, &mut num_members); // indices of the members let mut members_indices = ::std::iter::repeat(0).take(num_members as usize) .collect::<Vec<gl::types::GLuint>>(); ctxt.gl.GetActiveUniformBlockiv(program, block_id as gl::types::GLuint, gl::UNIFORM_BLOCK_ACTIVE_UNIFORM_INDICES, members_indices.as_mut_ptr() as *mut gl::types::GLint); // getting the offsets of the members let mut member_offsets = ::std::iter::repeat(0).take(num_members as usize) .collect::<Vec<gl::types::GLint>>(); ctxt.gl.GetActiveUniformsiv(program, num_members, members_indices.as_ptr(), gl::UNIFORM_OFFSET, member_offsets.as_mut_ptr()); // getting the types of the members let mut member_types = ::std::iter::repeat(0).take(num_members as usize) .collect::<Vec<gl::types::GLint>>(); ctxt.gl.GetActiveUniformsiv(program, num_members, members_indices.as_ptr(), gl::UNIFORM_TYPE, member_types.as_mut_ptr()); // getting the array sizes of the members let mut member_size = ::std::iter::repeat(0).take(num_members as usize) .collect::<Vec<gl::types::GLint>>(); 
ctxt.gl.GetActiveUniformsiv(program, num_members, members_indices.as_ptr(), gl::UNIFORM_SIZE, member_size.as_mut_ptr()); // getting the length of the names of the members let mut member_name_len = ::std::iter::repeat(0).take(num_members as usize) .collect::<Vec<gl::types::GLint>>(); ctxt.gl.GetActiveUniformsiv(program, num_members, members_indices.as_ptr(), gl::UNIFORM_NAME_LENGTH, member_name_len.as_mut_ptr()); // getting the names of the members let member_names = member_name_len.iter().zip(members_indices.iter()) .map(|(&name_len, &index)| { let mut name_tmp: Vec<u8> = Vec::with_capacity(1 + name_len as usize); let mut name_len_tmp = name_len; ctxt.gl.GetActiveUniformName(program, index, name_len, &mut name_len_tmp, name_tmp.as_mut_ptr() as *mut gl::types::GLchar); name_tmp.set_len(name_len_tmp as usize); String::from_utf8(name_tmp).unwrap() }); // now computing the list of members let members = member_names.enumerate().map(|(index, name)| { (name, member_offsets[index] as usize, glenum_to_uniform_type(member_types[index] as gl::types::GLenum), member_size[index] as usize, None) }); // finally inserting into the blocks list blocks.insert(name, UniformBlock { id: block_id as i32, initial_binding: binding as i32, size: block_size as usize, layout: introspection_output_to_layout(members), }); } blocks } pub unsafe fn reflect_transform_feedback(ctxt: &mut CommandContext<'_>, program: Handle) -> Vec<TransformFeedbackBuffer> { let program = match program { // transform feedback not supported Handle::Handle(_) => return Vec::with_capacity(0), Handle::Id(id) => id }; // transform feedback not supported if !(ctxt.version >= &Version(Api::Gl, 3, 0)) && !ctxt.extensions.gl_ext_transform_feedback { return Vec::with_capacity(0); } // querying the number of varying let num_varyings = { let mut num_varyings: gl::types::GLint = 0; if ctxt.version >= &Version(Api::Gl, 3, 0) { ctxt.gl.GetProgramiv(program, gl::TRANSFORM_FEEDBACK_VARYINGS, &mut num_varyings); } else if 
ctxt.extensions.gl_ext_transform_feedback { ctxt.gl.GetProgramiv(program, gl::TRANSFORM_FEEDBACK_VARYINGS_EXT, &mut num_varyings); } else { unreachable!(); } num_varyings }; // no need to request other things if there are no varying if num_varyings == 0 { return Vec::with_capacity(0); } // querying "interleaved" or "separate" let buffer_mode = { let mut buffer_mode: gl::types::GLint = 0; if ctxt.version >= &Version(Api::Gl, 3, 0) { ctxt.gl.GetProgramiv(program, gl::TRANSFORM_FEEDBACK_BUFFER_MODE, &mut buffer_mode); } else if ctxt.extensions.gl_ext_transform_feedback { ctxt.gl.GetProgramiv(program, gl::TRANSFORM_FEEDBACK_BUFFER_MODE_EXT, &mut buffer_mode); } else { unreachable!(); } glenum_to_transform_feedback_mode(buffer_mode as gl::types::GLenum) }; // the max length includes the null terminator let mut max_buffer_len: gl::types::GLint = 0; if ctxt.version >= &Version(Api::Gl, 3, 0) { ctxt.gl.GetProgramiv(program, gl::TRANSFORM_FEEDBACK_VARYING_MAX_LENGTH, &mut max_buffer_len); } else if ctxt.extensions.gl_ext_transform_feedback { ctxt.gl.GetProgramiv(program, gl::TRANSFORM_FEEDBACK_VARYING_MAX_LENGTH_EXT, &mut max_buffer_len); } else { unreachable!(); } let mut result = Vec::with_capacity(num_varyings as usize); for index in 0 .. 
num_varyings as gl::types::GLuint { let mut name_tmp: Vec<u8> = Vec::with_capacity(max_buffer_len as usize); let mut name_tmp_len = max_buffer_len; let mut size = 0; let mut ty = 0; if ctxt.version >= &Version(Api::Gl, 3, 0) { ctxt.gl.GetTransformFeedbackVarying(program, index, name_tmp_len, &mut name_tmp_len, &mut size, &mut ty, name_tmp.as_mut_ptr() as *mut gl::types::GLchar); } else if ctxt.extensions.gl_ext_transform_feedback { ctxt.gl.GetTransformFeedbackVaryingEXT(program, index, name_tmp_len, &mut name_tmp_len, &mut size, &mut ty, name_tmp.as_mut_ptr() as *mut gl::types::GLchar); } else { unreachable!(); } name_tmp.set_len(name_tmp_len as usize); let name = String::from_utf8(name_tmp).unwrap(); if buffer_mode == TransformFeedbackMode::Interleaved { if result.len() == 0 { result.push(TransformFeedbackBuffer { id: 0, elements: vec![], stride: 0, }); } let ty = glenum_to_attribute_type(ty as gl::types::GLenum); let prev_size = result[0].stride; result[0].stride += size as usize * ty.get_size_bytes(); result[0].elements.push(TransformFeedbackVarying { // TODO: handle arrays name, size: size as usize * ty.get_size_bytes(), offset: prev_size, ty, }); } else if buffer_mode == TransformFeedbackMode::Separate { let id = result.len(); let ty = glenum_to_attribute_type(ty as gl::types::GLenum); result.push(TransformFeedbackBuffer { id: id as i32, elements: vec![ TransformFeedbackVarying { name, size: size as usize * ty.get_size_bytes(), offset: 0, ty, } ], stride: size as usize * ty.get_size_bytes(), }); } else { unreachable!(); } } result } /// Obtains the type of data that the geometry shader stage outputs. /// /// # Unsafety /// /// - `program` must be a valid handle to a program. /// - The program **must** contain a geometry shader. 
pub unsafe fn reflect_geometry_output_type(ctxt: &mut CommandContext<'_>, program: Handle) -> OutputPrimitives { let mut value = 0; match program { Handle::Id(program) => { assert!(ctxt.version >= &Version(Api::Gl, 2, 0) || ctxt.version >= &Version(Api::GlEs, 2, 0)); ctxt.gl.GetProgramiv(program, gl::GEOMETRY_OUTPUT_TYPE, &mut value); }, Handle::Handle(program) => { assert!(ctxt.extensions.gl_arb_vertex_shader); ctxt.gl.GetObjectParameterivARB(program, gl::GEOMETRY_OUTPUT_TYPE, &mut value); } }; match value as gl::types::GLenum { gl::POINTS => OutputPrimitives::Points, gl::LINE_STRIP => OutputPrimitives::Lines, gl::TRIANGLE_STRIP => OutputPrimitives::Triangles, _ => unreachable!() } } /// Obtains the type of data that the tessellation evaluation shader stage outputs. /// /// # Unsafety /// /// - `program` must be a valid handle to a program. /// - The program **must** contain a tessellation evaluation shader. pub unsafe fn reflect_tess_eval_output_type(ctxt: &mut CommandContext<'_>, program: Handle) -> OutputPrimitives { let mut value = 0; match program { Handle::Id(program) => { assert!(ctxt.version >= &Version(Api::Gl, 2, 0) || ctxt.version >= &Version(Api::GlEs, 2, 0)); ctxt.gl.GetProgramiv(program, gl::TESS_GEN_MODE, &mut value); }, Handle::Handle(program) => { assert!(ctxt.extensions.gl_arb_vertex_shader); ctxt.gl.GetObjectParameterivARB(program, gl::TESS_GEN_MODE, &mut value); } }; match value as gl::types::GLenum { gl::TRIANGLES => OutputPrimitives::Triangles, gl::ISOLINES => OutputPrimitives::Lines, gl::QUADS => OutputPrimitives::Quads, _ => unreachable!() } } /// Returns the list of shader storage blocks of a program. 
pub unsafe fn reflect_shader_storage_blocks(ctxt: &mut CommandContext<'_>, program: Handle) -> HashMap<String, UniformBlock, BuildHasherDefault<FnvHasher>> { if !(ctxt.version >= &Version(Api::Gl, 4, 3) || ctxt.version >= &Version(Api::GlEs, 3, 1) || (ctxt.extensions.gl_arb_program_interface_query && ctxt.extensions.gl_arb_shader_storage_buffer_object)) { // not supported return HashMap::with_hasher(Default::default()); } let program = match program { Handle::Id(program) => program, Handle::Handle(program) => return HashMap::with_hasher(Default::default()) }; // number of active SSBOs let active_blocks = { let mut active_blocks: gl::types::GLint = 0; ctxt.gl.GetProgramInterfaceiv(program, gl::SHADER_STORAGE_BLOCK, gl::ACTIVE_RESOURCES, &mut active_blocks); active_blocks as gl::types::GLuint }; // the result of this function let mut blocks = HashMap::with_hasher(Default::default()); blocks.reserve(active_blocks as usize); for block_id in 0 .. active_blocks { // getting basic infos let (name_len, num_variables, binding, total_size) = { let mut output: [gl::types::GLint; 4] = [0; 4]; ctxt.gl.GetProgramResourceiv(program, gl::SHADER_STORAGE_BLOCK, block_id, 4, [gl::NAME_LENGTH, gl::NUM_ACTIVE_VARIABLES, gl::BUFFER_BINDING, gl::BUFFER_DATA_SIZE].as_ptr(), 4, ptr::null_mut(), output.as_mut_ptr() as *mut _); (output[0] as usize, output[1] as usize, output[2], output[3] as usize) }; // getting the name of the block let name = { let mut name_tmp: Vec<u8> = Vec::with_capacity(1 + name_len); let mut name_tmp_len = name_len as gl::types::GLsizei; ctxt.gl.GetProgramResourceName(program, gl::SHADER_STORAGE_BLOCK, block_id, name_tmp_len, &mut name_tmp_len, name_tmp.as_mut_ptr() as *mut _); name_tmp.set_len(name_tmp_len as usize); String::from_utf8(name_tmp).unwrap() }; // indices of the active variables let active_variables: Vec<gl::types::GLint> = { let mut variables = Vec::with_capacity(num_variables); ctxt.gl.GetProgramResourceiv(program, gl::SHADER_STORAGE_BLOCK, block_id, 1, 
[gl::ACTIVE_VARIABLES].as_ptr(), num_variables as gl::types::GLsizei, ptr::null_mut(), variables.as_mut_ptr() as *mut _); variables.set_len(num_variables); variables }; // iterator over variables let members = active_variables.into_iter().map(|variable| { let (ty, array_size, offset, _array_stride, name_len, top_level_array_size) = { let mut output: [gl::types::GLint; 6] = [0; 6]; ctxt.gl.GetProgramResourceiv(program, gl::BUFFER_VARIABLE, variable as gl::types::GLuint, 6, [gl::TYPE, gl::ARRAY_SIZE, gl::OFFSET, gl::ARRAY_STRIDE, gl::NAME_LENGTH, gl::TOP_LEVEL_ARRAY_SIZE].as_ptr(), 6, ptr::null_mut(), output.as_mut_ptr() as *mut _); (glenum_to_uniform_type(output[0] as gl::types::GLenum), output[1] as usize, output[2] as usize, output[3] as usize, output[4] as usize, output[5] as usize) }; let name = { let mut name_tmp: Vec<u8> = Vec::with_capacity(1 + name_len); let mut name_tmp_len = name_len as gl::types::GLsizei; ctxt.gl.GetProgramResourceName(program, gl::BUFFER_VARIABLE, variable as gl::types::GLuint, name_tmp_len, &mut name_tmp_len, name_tmp.as_mut_ptr() as *mut _); name_tmp.set_len(name_tmp_len as usize); String::from_utf8(name_tmp).unwrap() }; (name, offset, ty, array_size, Some(top_level_array_size)) }); // finally inserting into the blocks list blocks.insert(name, UniformBlock { id: block_id as i32, initial_binding: binding as i32, size: total_size, layout: introspection_output_to_layout(members), }); } blocks } /// Takes a list of elements produced by OpenGL's introspection API and turns them into /// a `BlockLayout` object. /// /// The iterator must produce a list of `(name, offset, ty, array_size, top_level_array_size)`. /// The `top_level_array_size` can be `None` if unknown. /// /// # Panic /// /// Panic if the input doesn't conform to the OpenGL specs. 
/// fn introspection_output_to_layout<I>(elements: I) -> BlockLayout where I: Iterator<Item = (String, usize, UniformType, usize, Option<usize>)> { // `output` must be a BlockLayout::Struct, otherwise this function will panic fn process(output: &mut BlockLayout, name: &str, offset: usize, ty: UniformType, array_size: usize, top_level_array_size: Option<usize>) { let mut components = name.splitn(2, '.'); let current_component = components.next().unwrap(); let name_rest = components.next(); // finding the appropriate place in `output` to write the element let member = if let BlockLayout::Struct { ref mut members } = output { // splitting the name and array size let (current_component, array) = if current_component.ends_with(']') { let open_bracket_pos = current_component.rfind('[').unwrap(); let array = current_component[open_bracket_pos + 1 .. current_component.len() - 1] .parse().unwrap(); (&current_component[.. open_bracket_pos], Some(array)) } else { (current_component, None) }; // because of a bug in Rust's borrow checker, we have to loop twice instead of just // call `if let Some() { } else { }` let existing = members.iter_mut().any(|m| m.0 == current_component); if existing { let member = &mut members.iter_mut().find(|m| m.0 == current_component) .unwrap().1; if let Some(array) = array { match member { BlockLayout::Array { ref mut content, ref mut length } => { if *length <= array { *length = array + 1; } &mut **content }, BlockLayout::DynamicSizedArray { ref mut content } => { &mut **content }, _ => unreachable!() } } else { member } } else { // member doesn't exist yet in the output, adding it if let Some(array) = array { if top_level_array_size == Some(0) { members.push((current_component.to_owned(), BlockLayout::DynamicSizedArray { content: Box::new(BlockLayout::Struct { members: Vec::new() }), })); } else { members.push((current_component.to_owned(), BlockLayout::Array { content: Box::new(BlockLayout::Struct { members: Vec::new() }), length: if 
name_rest.is_some() { array } else { array_size }, })); } match &mut members.last_mut().unwrap().1 { BlockLayout::Array { ref mut content, .. } => &mut **content, BlockLayout::DynamicSizedArray { ref mut content } => &mut **content, _ => unreachable!() } } else { members.push((current_component.to_owned(), BlockLayout::Struct { members: Vec::new() })); &mut members.last_mut().unwrap().1 } } } else { unreachable!(); }; // now adding either the other elements or the final element itself if let Some(name_rest) = name_rest { process(member, name_rest, offset, ty, array_size, None); } else { // don't write over the offset in buffer match *member { BlockLayout::BasicType { ty: ty_ex, .. } if ty_ex == ty => (), _ => { *member = BlockLayout::BasicType { offset_in_buffer: offset, ty, }; } } } } // ↓ actual body of `introspection_output_to_layout` starts here ↓ let mut layout = BlockLayout::Struct { members: Vec::new() }; for (name, offset, ty, array_size, top_level_array_size) in elements { process(&mut layout, &name, offset, ty, array_size, top_level_array_size); } layout } #[inline] fn glenum_to_uniform_type(ty: gl::types::GLenum) -> UniformType { match ty { gl::FLOAT => UniformType::Float, gl::FLOAT_VEC2 => UniformType::FloatVec2, gl::FLOAT_VEC3 => UniformType::FloatVec3, gl::FLOAT_VEC4 => UniformType::FloatVec4, gl::DOUBLE => UniformType::Double, gl::DOUBLE_VEC2 => UniformType::DoubleVec2, gl::DOUBLE_VEC3 => UniformType::DoubleVec3, gl::DOUBLE_VEC4 => UniformType::DoubleVec4, gl::INT => UniformType::Int, gl::INT_VEC2 => UniformType::IntVec2, gl::INT_VEC3 => UniformType::IntVec3, gl::INT_VEC4 => UniformType::IntVec4, gl::UNSIGNED_INT => UniformType::UnsignedInt, gl::UNSIGNED_INT_VEC2 => UniformType::UnsignedIntVec2, gl::UNSIGNED_INT_VEC3 => UniformType::UnsignedIntVec3, gl::UNSIGNED_INT_VEC4 => UniformType::UnsignedIntVec4, gl::BOOL => UniformType::Bool, gl::BOOL_VEC2 => UniformType::BoolVec2, gl::BOOL_VEC3 => UniformType::BoolVec3, gl::BOOL_VEC4 => 
UniformType::BoolVec4, gl::FLOAT_MAT2 => UniformType::FloatMat2, gl::FLOAT_MAT3 => UniformType::FloatMat3, gl::FLOAT_MAT4 => UniformType::FloatMat4, gl::FLOAT_MAT2x3 => UniformType::FloatMat2x3, gl::FLOAT_MAT2x4 => UniformType::FloatMat2x4, gl::FLOAT_MAT3x2 => UniformType::FloatMat3x2, gl::FLOAT_MAT3x4 => UniformType::FloatMat3x4, gl::FLOAT_MAT4x2 => UniformType::FloatMat4x2, gl::FLOAT_MAT4x3 => UniformType::FloatMat4x3, gl::DOUBLE_MAT2 => UniformType::DoubleMat2, gl::DOUBLE_MAT3 => UniformType::DoubleMat3, gl::DOUBLE_MAT4 => UniformType::DoubleMat4, gl::DOUBLE_MAT2x3 => UniformType::DoubleMat2x3, gl::DOUBLE_MAT2x4 => UniformType::DoubleMat2x4, gl::DOUBLE_MAT3x2 => UniformType::DoubleMat3x2, gl::DOUBLE_MAT3x4 => UniformType::DoubleMat3x4, gl::DOUBLE_MAT4x2 => UniformType::DoubleMat4x2, gl::DOUBLE_MAT4x3 => UniformType::DoubleMat4x3, gl::SAMPLER_1D => UniformType::Sampler1d, gl::SAMPLER_2D => UniformType::Sampler2d, gl::SAMPLER_3D => UniformType::Sampler3d, gl::SAMPLER_CUBE => UniformType::SamplerCube, gl::SAMPLER_1D_SHADOW => UniformType::Sampler1dShadow, gl::SAMPLER_2D_SHADOW => UniformType::Sampler2dShadow, gl::SAMPLER_1D_ARRAY => UniformType::Sampler1dArray, gl::SAMPLER_2D_ARRAY => UniformType::Sampler2dArray, gl::SAMPLER_CUBE_MAP_ARRAY => UniformType::SamplerCubeArray, gl::SAMPLER_1D_ARRAY_SHADOW => UniformType::Sampler1dArrayShadow, gl::SAMPLER_2D_ARRAY_SHADOW => UniformType::Sampler2dArrayShadow, gl::SAMPLER_2D_MULTISAMPLE => UniformType::Sampler2dMultisample, gl::SAMPLER_2D_MULTISAMPLE_ARRAY => UniformType::Sampler2dMultisampleArray, gl::SAMPLER_CUBE_SHADOW => UniformType::SamplerCubeShadow, gl::SAMPLER_BUFFER => UniformType::SamplerBuffer, gl::SAMPLER_2D_RECT => UniformType::Sampler2dRect, gl::SAMPLER_2D_RECT_SHADOW => UniformType::Sampler2dRectShadow, gl::INT_SAMPLER_1D => UniformType::ISampler1d, gl::INT_SAMPLER_2D => UniformType::ISampler2d, gl::INT_SAMPLER_3D => UniformType::ISampler3d, gl::INT_SAMPLER_CUBE => UniformType::ISamplerCube, 
gl::INT_SAMPLER_1D_ARRAY => UniformType::ISampler1dArray, gl::INT_SAMPLER_2D_ARRAY => UniformType::ISampler2dArray, gl::INT_SAMPLER_CUBE_MAP_ARRAY => UniformType::ISamplerCubeArray, gl::INT_SAMPLER_2D_MULTISAMPLE => UniformType::ISampler2dMultisample, gl::INT_SAMPLER_2D_MULTISAMPLE_ARRAY => UniformType::ISampler2dMultisampleArray, gl::INT_SAMPLER_BUFFER => UniformType::ISamplerBuffer, gl::INT_SAMPLER_2D_RECT => UniformType::ISampler2dRect, gl::UNSIGNED_INT_SAMPLER_1D => UniformType::USampler1d, gl::UNSIGNED_INT_SAMPLER_2D => UniformType::USampler2d, gl::UNSIGNED_INT_SAMPLER_3D => UniformType::USampler3d, gl::UNSIGNED_INT_SAMPLER_CUBE => UniformType::USamplerCube, gl::UNSIGNED_INT_SAMPLER_1D_ARRAY => UniformType::USampler2dArray, gl::UNSIGNED_INT_SAMPLER_2D_ARRAY => UniformType::USampler2dArray, gl::UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY => UniformType::USamplerCubeArray, gl::UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE => UniformType::USampler2dMultisample, gl::UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY => UniformType::USampler2dMultisampleArray, gl::UNSIGNED_INT_SAMPLER_BUFFER => UniformType::USamplerBuffer, gl::UNSIGNED_INT_SAMPLER_2D_RECT => UniformType::USampler2dRect, gl::IMAGE_1D => UniformType::Image1d, gl::IMAGE_2D => UniformType::Image2d, gl::IMAGE_3D => UniformType::Image3d, gl::IMAGE_2D_RECT => UniformType::Image2dRect, gl::IMAGE_CUBE => UniformType::ImageCube, gl::IMAGE_BUFFER => UniformType::ImageBuffer, gl::IMAGE_1D_ARRAY => UniformType::Image1dArray, gl::IMAGE_2D_ARRAY => UniformType::Image2dArray, gl::IMAGE_2D_MULTISAMPLE => UniformType::Image2dMultisample, gl::IMAGE_2D_MULTISAMPLE_ARRAY => UniformType::Image2dMultisampleArray, gl::INT_IMAGE_1D => UniformType::IImage1d, gl::INT_IMAGE_2D => UniformType::IImage2d, gl::INT_IMAGE_3D => UniformType::IImage3d, gl::INT_IMAGE_2D_RECT => UniformType::IImage2dRect, gl::INT_IMAGE_CUBE => UniformType::IImageCube, gl::INT_IMAGE_BUFFER => UniformType::IImageBuffer, gl::INT_IMAGE_1D_ARRAY => UniformType::IImage1dArray, 
gl::INT_IMAGE_2D_ARRAY => UniformType::IImage2dArray, gl::INT_IMAGE_2D_MULTISAMPLE => UniformType::IImage2dMultisample, gl::INT_IMAGE_2D_MULTISAMPLE_ARRAY => UniformType::IImage2dMultisampleArray, gl::UNSIGNED_INT_IMAGE_1D => UniformType::UImage1d, gl::UNSIGNED_INT_IMAGE_2D => UniformType::UImage2d, gl::UNSIGNED_INT_IMAGE_3D => UniformType::UImage3d, gl::UNSIGNED_INT_IMAGE_2D_RECT => UniformType::UImage2dRect, gl::UNSIGNED_INT_IMAGE_CUBE => UniformType::UImageCube, gl::UNSIGNED_INT_IMAGE_BUFFER => UniformType::UImageBuffer, gl::UNSIGNED_INT_IMAGE_1D_ARRAY => UniformType::UImage1dArray, gl::UNSIGNED_INT_IMAGE_2D_ARRAY => UniformType::UImage2dArray, gl::UNSIGNED_INT_IMAGE_2D_MULTISAMPLE => UniformType::UImage2dMultisample, gl::UNSIGNED_INT_IMAGE_2D_MULTISAMPLE_ARRAY => UniformType::UImage2dMultisampleArray, gl::UNSIGNED_INT_ATOMIC_COUNTER => UniformType::AtomicCounterUint, v => panic!("Unknown value returned by OpenGL uniform type: {}", v) } } #[inline] fn glenum_to_attribute_type(value: gl::types::GLenum) -> AttributeType { match value { gl::FLOAT => AttributeType::F32, gl::FLOAT_VEC2 => AttributeType::F32F32, gl::FLOAT_VEC3 => AttributeType::F32F32F32, gl::FLOAT_VEC4 => AttributeType::F32F32F32F32, gl::INT => AttributeType::I32, gl::INT_VEC2 => AttributeType::I32I32, gl::INT_VEC3 => AttributeType::I32I32I32, gl::INT_VEC4 => AttributeType::I32I32I32I32, gl::UNSIGNED_INT => AttributeType::U32, gl::UNSIGNED_INT_VEC2 => AttributeType::U32U32, //gl::UNSIGNED_INT_VEC2_EXT => AttributeType::U32U32, gl::UNSIGNED_INT_VEC3 => AttributeType::U32U32U32, //gl::UNSIGNED_INT_VEC3_EXT => AttributeType::U32U32U32, gl::UNSIGNED_INT_VEC4 => AttributeType::U32U32U32U32, //gl::UNSIGNED_INT_VEC4_EXT => AttributeType::U32U32U32U32, gl::FLOAT_MAT2 => AttributeType::F32x2x2, gl::FLOAT_MAT3 => AttributeType::F32x3x3, gl::FLOAT_MAT4 => AttributeType::F32x4x4, gl::FLOAT_MAT2x3 => AttributeType::F32x2x3, gl::FLOAT_MAT2x4 => AttributeType::F32x2x4, gl::FLOAT_MAT3x2 => AttributeType::F32x3x2, 
gl::FLOAT_MAT3x4 => AttributeType::F32x3x4, gl::FLOAT_MAT4x2 => AttributeType::F32x4x2, gl::FLOAT_MAT4x3 => AttributeType::F32x4x3, gl::DOUBLE => AttributeType::F64, gl::DOUBLE_VEC2 => AttributeType::F64F64, gl::DOUBLE_VEC3 => AttributeType::F64F64F64, gl::DOUBLE_VEC4 => AttributeType::F64F64F64F64, v => panic!("Unknown value returned by OpenGL attribute type: {}", v) } } #[inline] fn glenum_to_transform_feedback_mode(value: gl::types::GLenum) -> TransformFeedbackMode { match value { gl::INTERLEAVED_ATTRIBS/* | gl::INTERLEAVED_ATTRIBS_EXT*/ => { TransformFeedbackMode::Interleaved }, gl::SEPARATE_ATTRIBS/* | gl::SEPARATE_ATTRIBS_EXT*/ => { TransformFeedbackMode::Separate }, v => panic!("Unknown value returned by OpenGL varying mode: {}", v) } } /// Contains all subroutine data of a program. #[derive(Debug, Clone)] pub struct SubroutineData { /// Number of subroutine uniform locations per shader stage. /// This is *not* equal to the number of subroutine uniforms per stage, /// because users can use `#layout(location=...)`. pub location_counts: HashMap<ShaderStage, usize, BuildHasherDefault<FnvHasher>>, /// The list of all subroutine uniforms of the program stored in a structured way to enable fast lookups. /// A subroutine uniform is uniquely defined by a name and a shader stage. pub subroutine_uniforms: HashMap<(String, ShaderStage), SubroutineUniform, BuildHasherDefault<FnvHasher>>, } /// Information about a Subroutine Uniform (except name) #[derive(Debug, Clone)] pub struct SubroutineUniform { /// The index of the subroutine uniform. /// Needed to query information from the OpenGL backend. pub index: u32, /// The location of the uniform. /// This is used to bind subroutines to this subroutine uniform. pub location: i32, /// If the uniform is an array, the size of the array. pub size: Option<usize>, /// A list of subroutines that can potentially be used with this uniform. pub compatible_subroutines: Vec<Subroutine>, } /// Information about a subroutine. 
#[derive(Debug, Clone)] pub struct Subroutine { /// The index of the subroutine, needed to bind this to a subroutine uniform. pub index: u32, /// The name of the subroutine. pub name: String, } /// The different stages of the program pipeline. #[allow(missing_docs)] #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] pub enum ShaderStage { Vertex, Fragment, TessellationControl, TessellationEvaluation, Geometry, // FIXME? According to https://www.opengl.org/sdk/docs/man/html/glUniformSubroutines.xhtml , // compute shaders are not supported. // Compute, } impl ShaderStage { /// Converts the `ShaderStage` to its GLenum equivalent pub fn to_gl_enum(&self) -> gl::types::GLenum { match *self { ShaderStage::Vertex => gl::VERTEX_SHADER, ShaderStage::Fragment => gl::FRAGMENT_SHADER, ShaderStage::TessellationControl => gl::TESS_CONTROL_SHADER, ShaderStage::TessellationEvaluation => gl::TESS_EVALUATION_SHADER, ShaderStage::Geometry => gl::GEOMETRY_SHADER, // Compute => gl::COMPUTE_SHADER, } } } fn get_shader_stages(has_geometry_shader: bool, has_tessellation_control_shader: bool, has_tessellation_evaluation_shader: bool) -> Vec<ShaderStage> { let mut stages = vec![ShaderStage::Vertex, ShaderStage::Fragment]; if has_tessellation_evaluation_shader { stages.push(ShaderStage::TessellationEvaluation); } if has_tessellation_control_shader { stages.push(ShaderStage::TessellationControl); } if has_geometry_shader { stages.push(ShaderStage::Geometry); } stages } /// Returns the data associated with a programs subroutines. pub unsafe fn reflect_subroutine_data(ctxt: &mut CommandContext<'_>, program: Handle, has_geometry_shader: bool, has_tessellation_control_shader: bool, has_tessellation_evaluation_shader: bool) -> SubroutineData { if !program::is_subroutine_supported(ctxt) { return SubroutineData { location_counts: HashMap::with_hasher(Default::default()), subroutine_uniforms: HashMap::with_hasher(Default::default()), } } let program = match program { // subroutines not supported. 
Handle::Handle(_) => return SubroutineData { location_counts: HashMap::with_hasher(Default::default()), subroutine_uniforms: HashMap::with_hasher(Default::default()), }, Handle::Id(id) => id }; let shader_stages = get_shader_stages(has_geometry_shader, has_tessellation_control_shader, has_tessellation_evaluation_shader); let mut subroutine_uniforms = HashMap::with_hasher(Default::default()); let mut location_counts = HashMap::with_hasher(Default::default()); for stage in shader_stages.iter() { let mut location_count: gl::types::GLint = 0; ctxt.gl.GetProgramStageiv(program, stage.to_gl_enum(), gl::ACTIVE_SUBROUTINE_UNIFORM_LOCATIONS, &mut location_count); location_counts.insert(*stage, location_count as usize); let mut subroutine_count: gl::types::GLint = 0; ctxt.gl.GetProgramStageiv(program, stage.to_gl_enum(), gl::ACTIVE_SUBROUTINE_UNIFORMS, &mut subroutine_count); for i in 0..subroutine_count { // Get the name of the uniform let mut uniform_name_tmp: Vec<u8> = vec![0; 64]; let mut name_len: gl::types::GLsizei = 0; ctxt.gl.GetActiveSubroutineUniformName(program, stage.to_gl_enum(), i as gl::types::GLuint, (uniform_name_tmp.len() - 1) as gl::types::GLint, &mut name_len, uniform_name_tmp.as_mut_ptr() as *mut gl::types::GLchar); let location = ctxt.gl.GetSubroutineUniformLocation(program, stage.to_gl_enum(), uniform_name_tmp.as_ptr() as *const gl::types::GLchar); uniform_name_tmp.set_len(name_len as usize); let uniform_name = String::from_utf8(uniform_name_tmp).unwrap(); let mut size: gl::types::GLint = 0; ctxt.gl.GetActiveSubroutineUniformiv(program, stage.to_gl_enum(), i as u32, gl::UNIFORM_SIZE, &mut size); let size = if size == 1 { None } else { Some(size as usize) }; // Get the number of compatible subroutines. let mut compatible_count: gl::types::GLint = 0; ctxt.gl.GetActiveSubroutineUniformiv(program, stage.to_gl_enum(), i as u32, gl::NUM_COMPATIBLE_SUBROUTINES, &mut compatible_count); // Get the indices of compatible subroutines. 
let mut compatible_sr_indices: Vec<gl::types::GLuint> = Vec::with_capacity(compatible_count as usize); ctxt.gl.GetActiveSubroutineUniformiv(program, stage.to_gl_enum(), i as gl::types::GLuint, gl::COMPATIBLE_SUBROUTINES, compatible_sr_indices.as_mut_ptr() as *mut gl::types::GLint); compatible_sr_indices.set_len(compatible_count as usize); let mut compatible_subroutines: Vec<Subroutine> = Vec::new(); for j in 0..compatible_count { // Get the names of compatible subroutines. let mut subroutine_name_tmp: Vec<u8> = vec![0; 64]; let mut name_len: gl::types::GLsizei = 0; ctxt.gl.GetActiveSubroutineName(program, stage.to_gl_enum(), compatible_sr_indices[j as usize], subroutine_name_tmp.len() as gl::types::GLint, &mut name_len, subroutine_name_tmp.as_mut_ptr() as *mut gl::types::GLchar); subroutine_name_tmp.set_len(name_len as usize); let subroutine_name = String::from_utf8(subroutine_name_tmp).unwrap(); compatible_subroutines.push( Subroutine { index: compatible_sr_indices[j as usize], name: subroutine_name, } ); } let subroutine_uniform = SubroutineUniform { index: i as u32, location, size, compatible_subroutines, }; subroutine_uniforms.insert((uniform_name, *stage), subroutine_uniform); } } SubroutineData { location_counts, subroutine_uniforms } }
42.782246
153
0.571479
c1f471788957d54c178e16fd1ecfc9e84168c630
11,640
use crate::common::{BinarySerializable, CountingWriter, FixedSize, VInt}; use crate::directory::error::Incompatibility; use crate::directory::read_only_source::ReadOnlySource; use crate::directory::{AntiCallToken, TerminatingWrite}; use crate::Version; use byteorder::{ByteOrder, LittleEndian, WriteBytesExt}; use crc32fast::Hasher; use std::io; use std::io::Write; type CrcHashU32 = u32; #[derive(Debug, Clone, PartialEq)] pub struct Footer { pub version: Version, pub meta: String, pub versioned_footer: VersionedFooter, } /// Serialises the footer to a byte-array /// - versioned_footer_len : 4 bytes ///- versioned_footer: variable bytes /// - meta_len: 4 bytes /// - meta: variable bytes /// - version_len: 4 bytes /// - version json: variable bytes impl BinarySerializable for Footer { fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> { BinarySerializable::serialize(&self.versioned_footer, writer)?; BinarySerializable::serialize(&self.meta, writer)?; let version_string = serde_json::to_string(&self.version).map_err(|_err| io::ErrorKind::InvalidInput)?; BinarySerializable::serialize(&version_string, writer)?; Ok(()) } fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> { let versioned_footer = VersionedFooter::deserialize(reader)?; let meta = String::deserialize(reader)?; let version_json = String::deserialize(reader)?; let version = serde_json::from_str(&version_json)?; Ok(Footer { version, meta, versioned_footer, }) } } impl Footer { pub fn new(versioned_footer: VersionedFooter) -> Self { let version = crate::VERSION.clone(); let meta = version.to_string(); Footer { version, meta, versioned_footer, } } pub fn append_footer<W: io::Write>(&self, mut write: &mut W) -> io::Result<()> { let mut counting_write = CountingWriter::wrap(&mut write); self.serialize(&mut counting_write)?; let written_len = counting_write.written_bytes(); write.write_u32::<LittleEndian>(written_len as u32)?; Ok(()) } pub fn extract_footer(source: ReadOnlySource) -> 
Result<(Footer, ReadOnlySource), io::Error> { if source.len() < 4 { return Err(io::Error::new( io::ErrorKind::UnexpectedEof, format!( "File corrupted. The file is smaller than 4 bytes (len={}).", source.len() ), )); } let (body_footer, footer_len_bytes) = source.split_from_end(u32::SIZE_IN_BYTES); let footer_len = LittleEndian::read_u32(footer_len_bytes.as_slice()) as usize; let body_len = body_footer.len() - footer_len; let (body, footer_data) = body_footer.split(body_len); let mut cursor = footer_data.as_slice(); let footer = Footer::deserialize(&mut cursor)?; Ok((footer, body)) } /// Confirms that the index will be read correctly by this version of tantivy /// Has to be called after `extract_footer` to make sure it's not accessing uninitialised memory pub fn is_compatible(&self) -> Result<(), Incompatibility> { let library_version = crate::version(); match &self.versioned_footer { VersionedFooter::V1 { crc32: _crc, store_compression: compression, } => { if &library_version.store_compression != compression { return Err(Incompatibility::CompressionMismatch { library_compression_format: library_version.store_compression.to_string(), index_compression_format: compression.to_string(), }); } Ok(()) } VersionedFooter::UnknownVersion => Err(Incompatibility::IndexMismatch { library_version: library_version.clone(), index_version: self.version.clone(), }), } } } /// Footer that includes a crc32 hash that enables us to checksum files in the index #[derive(Debug, Clone, PartialEq)] pub enum VersionedFooter { UnknownVersion, V1 { crc32: CrcHashU32, store_compression: String, }, } impl BinarySerializable for VersionedFooter { fn serialize<W: io::Write>(&self, writer: &mut W) -> io::Result<()> { let mut buf = Vec::new(); match self { VersionedFooter::V1 { crc32, store_compression: compression, } => { // Serializes a valid `VersionedFooter` or panics if the version is unknown // [ version | crc_hash | compression_mode ] // [ 0..4 | 4..8 | variable ] 
BinarySerializable::serialize(&1u32, &mut buf)?; BinarySerializable::serialize(crc32, &mut buf)?; BinarySerializable::serialize(compression, &mut buf)?; } VersionedFooter::UnknownVersion => { return Err(io::Error::new( io::ErrorKind::InvalidInput, "Cannot serialize an unknown versioned footer ", )); } } BinarySerializable::serialize(&VInt(buf.len() as u64), writer)?; writer.write_all(&buf[..])?; Ok(()) } fn deserialize<R: io::Read>(reader: &mut R) -> io::Result<Self> { let len = VInt::deserialize(reader)?.0 as usize; let mut buf = vec![0u8; len]; reader.read_exact(&mut buf[..])?; let mut cursor = &buf[..]; let version = u32::deserialize(&mut cursor)?; if version == 1 { let crc32 = u32::deserialize(&mut cursor)?; let compression = String::deserialize(&mut cursor)?; Ok(VersionedFooter::V1 { crc32, store_compression: compression, }) } else { Ok(VersionedFooter::UnknownVersion) } } } impl VersionedFooter { pub fn crc(&self) -> Option<CrcHashU32> { match self { VersionedFooter::V1 { crc32, .. } => Some(*crc32), VersionedFooter::UnknownVersion { .. 
} => None, } } } pub(crate) struct FooterProxy<W: TerminatingWrite> { /// always Some except after terminate call hasher: Option<Hasher>, /// always Some except after terminate call writer: Option<W>, } impl<W: TerminatingWrite> FooterProxy<W> { pub fn new(writer: W) -> Self { FooterProxy { hasher: Some(Hasher::new()), writer: Some(writer), } } } impl<W: TerminatingWrite> Write for FooterProxy<W> { fn write(&mut self, buf: &[u8]) -> io::Result<usize> { let count = self.writer.as_mut().unwrap().write(buf)?; self.hasher.as_mut().unwrap().update(&buf[..count]); Ok(count) } fn flush(&mut self) -> io::Result<()> { self.writer.as_mut().unwrap().flush() } } impl<W: TerminatingWrite> TerminatingWrite for FooterProxy<W> { fn terminate_ref(&mut self, _: AntiCallToken) -> io::Result<()> { let crc32 = self.hasher.take().unwrap().finalize(); let footer = Footer::new(VersionedFooter::V1 { crc32, store_compression: crate::store::COMPRESSION.to_string(), }); let mut writer = self.writer.take().unwrap(); footer.append_footer(&mut writer)?; writer.terminate() } } #[cfg(test)] mod tests { use super::CrcHashU32; use super::FooterProxy; use crate::common::BinarySerializable; use crate::directory::footer::{Footer, VersionedFooter}; use crate::directory::TerminatingWrite; use byteorder::{ByteOrder, LittleEndian}; use regex::Regex; #[test] fn test_versioned_footer() { let mut vec = Vec::new(); let footer_proxy = FooterProxy::new(&mut vec); assert!(footer_proxy.terminate().is_ok()); assert_eq!(vec.len(), 167); let footer = Footer::deserialize(&mut &vec[..]).unwrap(); if let VersionedFooter::V1 { crc32: _, store_compression, } = footer.versioned_footer { assert_eq!(store_compression, crate::store::COMPRESSION); } else { panic!("Versioned footer should be V1."); } assert_eq!(&footer.version, crate::version()); } #[test] fn test_serialize_deserialize_footer() { let mut buffer = Vec::new(); let crc32 = 123456u32; let footer: Footer = Footer::new(VersionedFooter::V1 { crc32, store_compression: 
"lz4".to_string(), }); footer.serialize(&mut buffer).unwrap(); let footer_deser = Footer::deserialize(&mut &buffer[..]).unwrap(); assert_eq!(footer_deser, footer); } #[test] fn footer_length() { let crc32 = 1111111u32; let versioned_footer = VersionedFooter::V1 { crc32, store_compression: "lz4".to_string(), }; let mut buf = Vec::new(); versioned_footer.serialize(&mut buf).unwrap(); assert_eq!(buf.len(), 13); let footer = Footer::new(versioned_footer); let regex_ptn = Regex::new( "tantivy v[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.{0,10}, index_format v[0-9]{1,5}", ) .unwrap(); assert!(regex_ptn.is_match(&footer.meta)); } #[test] fn versioned_footer_from_bytes() { let v_footer_bytes = vec![ // versionned footer length 12 | 128, // index format version 1, 0, 0, 0, // crc 32 12, 35, 89, 18, // compression format 3 | 128, b'l', b'z', b'4', ]; let mut cursor = &v_footer_bytes[..]; let versioned_footer = VersionedFooter::deserialize(&mut cursor).unwrap(); assert!(cursor.is_empty()); let expected_crc: u32 = LittleEndian::read_u32(&v_footer_bytes[5..9]) as CrcHashU32; let expected_versioned_footer: VersionedFooter = VersionedFooter::V1 { crc32: expected_crc, store_compression: "lz4".to_string(), }; assert_eq!(versioned_footer, expected_versioned_footer); let mut buffer = Vec::new(); assert!(versioned_footer.serialize(&mut buffer).is_ok()); assert_eq!(&v_footer_bytes[..], &buffer[..]); } #[test] fn versioned_footer_panic() { let v_footer_bytes = vec![6u8 | 128u8, 3u8, 0u8, 0u8, 1u8, 0u8, 0u8]; let mut b = &v_footer_bytes[..]; let versioned_footer = VersionedFooter::deserialize(&mut b).unwrap(); assert!(b.is_empty()); let expected_versioned_footer = VersionedFooter::UnknownVersion; assert_eq!(versioned_footer, expected_versioned_footer); let mut buf = Vec::new(); assert!(versioned_footer.serialize(&mut buf).is_err()); } #[test] #[cfg(not(feature = "lz4"))] fn compression_mismatch() { let crc32 = 1111111u32; let versioned_footer = VersionedFooter::V1 { crc32, store_compression: 
"lz4".to_string(), }; let footer = Footer::new(versioned_footer); let res = footer.is_compatible(); assert!(res.is_err()); } }
34.235294
100
0.571564
d66f068f357d1bbe8f1cb2bd6798bc1c2ee3d6b6
14,552
//! Conversion methods for dates and times. use chrono::{ format::{parse, Parsed, StrftimeItems}, Datelike, FixedOffset, NaiveDate, NaiveDateTime, NaiveTime, }; use crate::error::Result; use crate::{ array::{Offset, PrimitiveArray, Utf8Array}, error::ArrowError, }; use crate::{ datatypes::{DataType, TimeUnit}, types::months_days_ns, }; /// Number of seconds in a day pub const SECONDS_IN_DAY: i64 = 86_400; /// Number of milliseconds in a second pub const MILLISECONDS: i64 = 1_000; /// Number of microseconds in a second pub const MICROSECONDS: i64 = 1_000_000; /// Number of nanoseconds in a second pub const NANOSECONDS: i64 = 1_000_000_000; /// Number of milliseconds in a day pub const MILLISECONDS_IN_DAY: i64 = SECONDS_IN_DAY * MILLISECONDS; /// Number of days between 0001-01-01 and 1970-01-01 pub const EPOCH_DAYS_FROM_CE: i32 = 719_163; /// converts a `i32` representing a `date32` to [`NaiveDateTime`] #[inline] pub fn date32_to_datetime(v: i32) -> NaiveDateTime { NaiveDateTime::from_timestamp(v as i64 * SECONDS_IN_DAY, 0) } /// converts a `i32` representing a `date32` to [`NaiveDate`] #[inline] pub fn date32_to_date(days: i32) -> NaiveDate { NaiveDate::from_num_days_from_ce(EPOCH_DAYS_FROM_CE + days) } /// converts a `i64` representing a `date64` to [`NaiveDateTime`] #[inline] pub fn date64_to_datetime(v: i64) -> NaiveDateTime { NaiveDateTime::from_timestamp( // extract seconds from milliseconds v / MILLISECONDS, // discard extracted seconds and convert milliseconds to nanoseconds (v % MILLISECONDS * MICROSECONDS) as u32, ) } /// converts a `i64` representing a `date64` to [`NaiveDate`] #[inline] pub fn date64_to_date(milliseconds: i64) -> NaiveDate { date64_to_datetime(milliseconds).date() } /// converts a `i32` representing a `time32(s)` to [`NaiveDateTime`] #[inline] pub fn time32s_to_time(v: i32) -> NaiveTime { NaiveTime::from_num_seconds_from_midnight(v as u32, 0) } /// converts a `i32` representing a `time32(ms)` to [`NaiveDateTime`] #[inline] pub fn 
time32ms_to_time(v: i32) -> NaiveTime { let v = v as i64; NaiveTime::from_num_seconds_from_midnight( // extract seconds from milliseconds (v / MILLISECONDS) as u32, // discard extracted seconds and convert milliseconds to // nanoseconds (v % MILLISECONDS * MICROSECONDS) as u32, ) } /// converts a `i64` representing a `time64(us)` to [`NaiveDateTime`] #[inline] pub fn time64us_to_time(v: i64) -> NaiveTime { NaiveTime::from_num_seconds_from_midnight( // extract seconds from microseconds (v / MICROSECONDS) as u32, // discard extracted seconds and convert microseconds to // nanoseconds (v % MICROSECONDS * MILLISECONDS) as u32, ) } /// converts a `i64` representing a `time64(ns)` to [`NaiveDateTime`] #[inline] pub fn time64ns_to_time(v: i64) -> NaiveTime { NaiveTime::from_num_seconds_from_midnight( // extract seconds from nanoseconds (v / NANOSECONDS) as u32, // discard extracted seconds (v % NANOSECONDS) as u32, ) } /// converts a `i64` representing a `timestamp(s)` to [`NaiveDateTime`] #[inline] pub fn timestamp_s_to_datetime(seconds: i64) -> NaiveDateTime { NaiveDateTime::from_timestamp(seconds, 0) } /// converts a `i64` representing a `timestamp(ms)` to [`NaiveDateTime`] #[inline] pub fn timestamp_ms_to_datetime(v: i64) -> NaiveDateTime { NaiveDateTime::from_timestamp( // extract seconds from milliseconds v / MILLISECONDS, // discard extracted seconds and convert milliseconds to nanoseconds (v % MILLISECONDS * MICROSECONDS) as u32, ) } /// converts a `i64` representing a `timestamp(us)` to [`NaiveDateTime`] #[inline] pub fn timestamp_us_to_datetime(v: i64) -> NaiveDateTime { NaiveDateTime::from_timestamp( // extract seconds from microseconds v / MICROSECONDS, // discard extracted seconds and convert microseconds to nanoseconds (v % MICROSECONDS * MILLISECONDS) as u32, ) } /// converts a `i64` representing a `timestamp(ns)` to [`NaiveDateTime`] #[inline] pub fn timestamp_ns_to_datetime(v: i64) -> NaiveDateTime { NaiveDateTime::from_timestamp( // extract seconds from 
nanoseconds v / NANOSECONDS, // discard extracted seconds (v % NANOSECONDS) as u32, ) } /// Converts a timestamp in `time_unit` and `timezone` into [`chrono::DateTime`]. #[inline] pub fn timestamp_to_naive_datetime(timestamp: i64, time_unit: TimeUnit) -> chrono::NaiveDateTime { match time_unit { TimeUnit::Second => timestamp_s_to_datetime(timestamp), TimeUnit::Millisecond => timestamp_ms_to_datetime(timestamp), TimeUnit::Microsecond => timestamp_us_to_datetime(timestamp), TimeUnit::Nanosecond => timestamp_ns_to_datetime(timestamp), } } /// Converts a timestamp in `time_unit` and `timezone` into [`chrono::DateTime`]. #[inline] pub fn timestamp_to_datetime<T: chrono::TimeZone>( timestamp: i64, time_unit: TimeUnit, timezone: &T, ) -> chrono::DateTime<T> { timezone.from_utc_datetime(&timestamp_to_naive_datetime(timestamp, time_unit)) } /// Calculates the scale factor between two TimeUnits. The function returns the /// scale that should multiply the TimeUnit "b" to have the same time scale as /// the TimeUnit "a". 
pub fn timeunit_scale(a: TimeUnit, b: TimeUnit) -> f64 { match (a, b) { (TimeUnit::Second, TimeUnit::Second) => 1.0, (TimeUnit::Second, TimeUnit::Millisecond) => 0.001, (TimeUnit::Second, TimeUnit::Microsecond) => 0.000_001, (TimeUnit::Second, TimeUnit::Nanosecond) => 0.000_000_001, (TimeUnit::Millisecond, TimeUnit::Second) => 1_000.0, (TimeUnit::Millisecond, TimeUnit::Millisecond) => 1.0, (TimeUnit::Millisecond, TimeUnit::Microsecond) => 0.001, (TimeUnit::Millisecond, TimeUnit::Nanosecond) => 0.000_001, (TimeUnit::Microsecond, TimeUnit::Second) => 1_000_000.0, (TimeUnit::Microsecond, TimeUnit::Millisecond) => 1_000.0, (TimeUnit::Microsecond, TimeUnit::Microsecond) => 1.0, (TimeUnit::Microsecond, TimeUnit::Nanosecond) => 0.001, (TimeUnit::Nanosecond, TimeUnit::Second) => 1_000_000_000.0, (TimeUnit::Nanosecond, TimeUnit::Millisecond) => 1_000_000.0, (TimeUnit::Nanosecond, TimeUnit::Microsecond) => 1_000.0, (TimeUnit::Nanosecond, TimeUnit::Nanosecond) => 1.0, } } /// Parses an offset of the form `"+WX:YZ"` or `"UTC"` into [`FixedOffset`]. /// # Errors /// If the offset is not in any of the allowed forms. pub fn parse_offset(offset: &str) -> Result<FixedOffset> { if offset == "UTC" { return Ok(FixedOffset::east(0)); } let error = "timezone offset must be of the form [-]00:00"; let mut a = offset.split(':'); let first = a .next() .map(Ok) .unwrap_or_else(|| Err(ArrowError::InvalidArgumentError(error.to_string())))?; let last = a .next() .map(Ok) .unwrap_or_else(|| Err(ArrowError::InvalidArgumentError(error.to_string())))?; let hours: i32 = first .parse() .map_err(|_| ArrowError::InvalidArgumentError(error.to_string()))?; let minutes: i32 = last .parse() .map_err(|_| ArrowError::InvalidArgumentError(error.to_string()))?; Ok(FixedOffset::east(hours * 60 * 60 + minutes * 60)) } /// Parses `value` to `Option<i64>` consistent with the Arrow's definition of timestamp with timezone. /// `tz` must be built from `timezone` (either via [`parse_offset`] or `chrono-tz`). 
#[inline] pub fn utf8_to_timestamp_ns_scalar<T: chrono::TimeZone>( value: &str, fmt: &str, tz: &T, ) -> Option<i64> { let mut parsed = Parsed::new(); let fmt = StrftimeItems::new(fmt); let r = parse(&mut parsed, value, fmt).ok(); if r.is_some() { parsed .to_datetime() .map(|x| x.naive_utc()) .map(|x| tz.from_utc_datetime(&x)) .map(|x| x.timestamp_nanos()) .ok() } else { None } } /// Parses `value` to `Option<i64>` consistent with the Arrow's definition of timestamp without timezone. #[inline] pub fn utf8_to_naive_timestamp_ns_scalar(value: &str, fmt: &str) -> Option<i64> { let fmt = StrftimeItems::new(fmt); let mut parsed = Parsed::new(); parse(&mut parsed, value, fmt.clone()).ok(); parsed .to_naive_datetime_with_offset(0) .map(|x| x.timestamp_nanos()) .ok() } fn utf8_to_timestamp_ns_impl<O: Offset, T: chrono::TimeZone>( array: &Utf8Array<O>, fmt: &str, timezone: String, tz: T, ) -> PrimitiveArray<i64> { let iter = array .iter() .map(|x| x.and_then(|x| utf8_to_timestamp_ns_scalar(x, fmt, &tz))); PrimitiveArray::from_trusted_len_iter(iter) .to(DataType::Timestamp(TimeUnit::Nanosecond, Some(timezone))) } /// Parses `value` to a [`chrono_tz::Tz`] with the Arrow's definition of timestamp with a timezone. 
#[cfg(feature = "chrono-tz")] #[cfg_attr(docsrs, doc(cfg(feature = "chrono-tz")))] pub fn parse_offset_tz(timezone: &str) -> Result<chrono_tz::Tz> { timezone.parse::<chrono_tz::Tz>().map_err(|_| { ArrowError::InvalidArgumentError(format!("timezone \"{}\" cannot be parsed", timezone)) }) } #[cfg(feature = "chrono-tz")] #[cfg_attr(docsrs, doc(cfg(feature = "chrono-tz")))] fn chrono_tz_utf_to_timestamp_ns<O: Offset>( array: &Utf8Array<O>, fmt: &str, timezone: String, ) -> Result<PrimitiveArray<i64>> { let tz = parse_offset_tz(&timezone)?; Ok(utf8_to_timestamp_ns_impl(array, fmt, timezone, tz)) } #[cfg(not(feature = "chrono-tz"))] fn chrono_tz_utf_to_timestamp_ns<O: Offset>( _: &Utf8Array<O>, _: &str, timezone: String, ) -> Result<PrimitiveArray<i64>> { Err(ArrowError::InvalidArgumentError(format!( "timezone \"{}\" cannot be parsed (feature chrono-tz is not active)", timezone ))) } /// Parses a [`Utf8Array`] to a timeozone-aware timestamp, i.e. [`PrimitiveArray<i64>`] with type `Timestamp(Nanosecond, Some(timezone))`. /// # Implementation /// * parsed values with timezone other than `timezone` are converted to `timezone`. /// * parsed values without timezone are null. Use [`utf8_to_naive_timestamp_ns`] to parse naive timezones. /// * Null elements remain null; non-parsable elements are null. /// The feature `"chrono-tz"` enables IANA and zoneinfo formats for `timezone`. /// # Error /// This function errors iff `timezone` is not parsable to an offset. pub fn utf8_to_timestamp_ns<O: Offset>( array: &Utf8Array<O>, fmt: &str, timezone: String, ) -> Result<PrimitiveArray<i64>> { let tz = parse_offset(timezone.as_str()); if let Ok(tz) = tz { Ok(utf8_to_timestamp_ns_impl(array, fmt, timezone, tz)) } else { chrono_tz_utf_to_timestamp_ns(array, fmt, timezone) } } /// Parses a [`Utf8Array`] to naive timestamp, i.e. /// [`PrimitiveArray<i64>`] with type `Timestamp(Nanosecond, None)`. /// Timezones are ignored. /// Null elements remain null; non-parsable elements are set to null. 
pub fn utf8_to_naive_timestamp_ns<O: Offset>( array: &Utf8Array<O>, fmt: &str, ) -> PrimitiveArray<i64> { let iter = array .iter() .map(|x| x.and_then(|x| utf8_to_naive_timestamp_ns_scalar(x, fmt))); PrimitiveArray::from_trusted_len_iter(iter).to(DataType::Timestamp(TimeUnit::Nanosecond, None)) } fn add_month(year: i32, month: u32, months: i32) -> chrono::NaiveDate { let new_year = (year * 12 + (month - 1) as i32 + months) / 12; let new_month = (year * 12 + (month - 1) as i32 + months) % 12 + 1; chrono::NaiveDate::from_ymd(new_year, new_month as u32, 1) } fn get_days_between_months(year: i32, month: u32, months: i32) -> i64 { add_month(year, month, months) .signed_duration_since(chrono::NaiveDate::from_ymd(year, month, 1)) .num_days() } /// Adds an `interval` to a `timestamp` in `time_unit` units without timezone. #[inline] pub fn add_naive_interval(timestamp: i64, time_unit: TimeUnit, interval: months_days_ns) -> i64 { // convert seconds to a DateTime of a given offset. let datetime = match time_unit { TimeUnit::Second => timestamp_s_to_datetime(timestamp), TimeUnit::Millisecond => timestamp_ms_to_datetime(timestamp), TimeUnit::Microsecond => timestamp_us_to_datetime(timestamp), TimeUnit::Nanosecond => timestamp_ns_to_datetime(timestamp), }; // compute the number of days in the interval, which depends on the particular year and month (leap days) let delta_days = get_days_between_months(datetime.year(), datetime.month(), interval.months()) + interval.days() as i64; // add; no leap hours are considered let new_datetime_tz = datetime + chrono::Duration::nanoseconds(delta_days * 24 * 60 * 60 * 1_000_000_000 + interval.ns()); // convert back to the target unit match time_unit { TimeUnit::Second => new_datetime_tz.timestamp_millis() / 1000, TimeUnit::Millisecond => new_datetime_tz.timestamp_millis(), TimeUnit::Microsecond => new_datetime_tz.timestamp_nanos() / 1000, TimeUnit::Nanosecond => new_datetime_tz.timestamp_nanos(), } } /// Adds an `interval` to a `timestamp` in 
`time_unit` units and timezone `timezone`. #[inline] pub fn add_interval<T: chrono::TimeZone>( timestamp: i64, time_unit: TimeUnit, interval: months_days_ns, timezone: &T, ) -> i64 { // convert seconds to a DateTime of a given offset. let datetime_tz = timestamp_to_datetime(timestamp, time_unit, timezone); // compute the number of days in the interval, which depends on the particular year and month (leap days) let delta_days = get_days_between_months(datetime_tz.year(), datetime_tz.month(), interval.months()) + interval.days() as i64; // add; tz will take care of leap hours let new_datetime_tz = datetime_tz + chrono::Duration::nanoseconds(delta_days * 24 * 60 * 60 * 1_000_000_000 + interval.ns()); // convert back to the target unit match time_unit { TimeUnit::Second => new_datetime_tz.timestamp_millis() / 1000, TimeUnit::Millisecond => new_datetime_tz.timestamp_millis(), TimeUnit::Microsecond => new_datetime_tz.timestamp_nanos() / 1000, TimeUnit::Nanosecond => new_datetime_tz.timestamp_nanos(), } }
36.38
138
0.668774
bbed177f385bd582cbb15721ed556ffd76bab97a
2,278
// Copyright (c) The Starcoin Core Contributors // SPDX-License-Identifier: Apache-2.0 use crypto::{ ed25519::{Ed25519PrivateKey, Ed25519PublicKey}, test_utils::KeyPair, traits::Uniform, }; use futures::{future::Future, stream::Stream}; use libra_types::account_address::AccountAddress; use rand::prelude::*; use sgnetwork::NetworkMessage; pub use sgnetwork::{build_network_service, get_unix_ts, NetworkComponent, NetworkService}; use std::time::{Duration, Instant}; use tokio::{runtime::Runtime, timer::Interval}; fn main() { env_logger::init(); ::logger::init_for_e2e_testing(); let mut peer_id = "".to_string(); let (seeds, port) = match std::env::args().nth(1) { Some(seed) => { peer_id = seed.clone(); (vec![format!("/ip4/127.0.0.1/tcp/7000/p2p/{}", seed)], 7001) } None => (vec![], 7000), }; let config = sg_config::config::NetworkConfig { listen: format!("/ip4/127.0.0.1/tcp/{}", port), seeds, }; let key_pair = { let mut rng: StdRng = SeedableRng::seed_from_u64(get_unix_ts() as u64); KeyPair::<Ed25519PrivateKey, Ed25519PublicKey>::generate_for_testing(&mut rng) }; let (net_srv, tx, rx, _close_tx) = build_network_service(&config, key_pair); println!( "the network identify is {:?}", hex::encode(net_srv.identify()) ); let rt = Runtime::new().unwrap(); let executor = rt.executor(); if peer_id.len() == 0 { let receive_fut = rx.for_each(|_| Ok(())); executor.spawn(receive_fut); } else { let sender_fut = Interval::new(Instant::now(), Duration::from_millis(10)) .take(1000) .map_err(|_e| ()) .for_each(move |_| { let random_bytes: Vec<u8> = (0..10240).map(|_| rand::random::<u8>()).collect(); let peer_id_hex = format!("0x{}", &peer_id); let peer_id = AccountAddress::from_hex_literal(&peer_id_hex).unwrap(); let _ = tx.unbounded_send(NetworkMessage { peer_id, data: random_bytes, }); Ok(()) }); executor.spawn(sender_fut); } rt.shutdown_on_idle().wait().unwrap(); }
34
95
0.579456
5012503f4bab2ce4244e38a0516828c6c942a25f
1,317
// This file is part of file-descriptors. It is subject to the license terms in the COPYRIGHT file found in the top-level directory of this distribution and at https://raw.githubusercontent.com/lemonrock/file-descriptors/master/COPYRIGHT. No part of file-descriptors, including this file, may be copied, modified, propagated, or distributed except according to the terms contained in the COPYRIGHT file. // Copyright © 2019 The developers of file-descriptors. See the COPYRIGHT file in the top-level directory of this distribution and at https://raw.githubusercontent.com/lemonrock/file-descriptors/master/COPYRIGHT. /// An error that can in a terminal. #[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] pub enum TerminalSettingsError { /// Not a terminal. NotATerminal(Errno), /// Could not set terminal attributes. CouldNotSetTerminalAttributes(Errno), } impl Display for TerminalSettingsError { #[inline(always)] fn fmt(&self, f: &mut Formatter) -> fmt::Result { <TerminalSettingsError as Debug>::fmt(self, f) } } impl error::Error for TerminalSettingsError { #[inline(always)] fn source(&self) -> Option<&(dyn error::Error + 'static)> { use self::TerminalSettingsError::*; match self { NotATerminal(_) => None, CouldNotSetTerminalAttributes(_) => None, } } }
32.925
403
0.747153
48395dc4ea4643808e3b4e398c0a3e51aeddb299
274,573
//! Lowering rules for X64. use crate::data_value::DataValue; use crate::ir::{ condcodes::FloatCC, condcodes::IntCC, types, AbiParam, ArgumentPurpose, ExternalName, Inst as IRInst, InstructionData, LibCall, Opcode, Signature, Type, }; use crate::isa::x64::abi::*; use crate::isa::x64::inst::args::*; use crate::isa::x64::inst::*; use crate::isa::{x64::settings as x64_settings, x64::X64Backend, CallConv}; use crate::machinst::lower::*; use crate::machinst::*; use crate::result::CodegenResult; use crate::settings::{Flags, TlsModel}; use alloc::boxed::Box; use alloc::vec::Vec; use cranelift_codegen_shared::condcodes::CondCode; use log::trace; use regalloc::{Reg, RegClass, Writable}; use smallvec::{smallvec, SmallVec}; use std::convert::TryFrom; use target_lexicon::Triple; //============================================================================= // Helpers for instruction lowering. fn is_int_or_ref_ty(ty: Type) -> bool { match ty { types::I8 | types::I16 | types::I32 | types::I64 | types::R64 => true, types::B1 | types::B8 | types::B16 | types::B32 | types::B64 => true, types::R32 => panic!("shouldn't have 32-bits refs on x64"), _ => false, } } fn is_bool_ty(ty: Type) -> bool { match ty { types::B1 | types::B8 | types::B16 | types::B32 | types::B64 => true, types::R32 => panic!("shouldn't have 32-bits refs on x64"), _ => false, } } /// This is target-word-size dependent. And it excludes booleans and reftypes. fn is_valid_atomic_transaction_ty(ty: Type) -> bool { match ty { types::I8 | types::I16 | types::I32 | types::I64 => true, _ => false, } } /// Returns whether the given specified `input` is a result produced by an instruction with Opcode /// `op`. // TODO investigate failures with checking against the result index. 
fn matches_input<C: LowerCtx<I = Inst>>( ctx: &mut C, input: InsnInput, op: Opcode, ) -> Option<IRInst> { let inputs = ctx.get_input_as_source_or_const(input.insn, input.input); inputs.inst.and_then(|(src_inst, _)| { let data = ctx.data(src_inst); if data.opcode() == op { return Some(src_inst); } None }) } /// Returns whether the given specified `input` is a result produced by an instruction with any of /// the opcodes specified in `ops`. fn matches_input_any<C: LowerCtx<I = Inst>>( ctx: &mut C, input: InsnInput, ops: &[Opcode], ) -> Option<IRInst> { let inputs = ctx.get_input_as_source_or_const(input.insn, input.input); inputs.inst.and_then(|(src_inst, _)| { let data = ctx.data(src_inst); for &op in ops { if data.opcode() == op { return Some(src_inst); } } None }) } /// Emits instruction(s) to generate the given 64-bit constant value into a newly-allocated /// temporary register, returning that register. fn generate_constant<C: LowerCtx<I = Inst>>(ctx: &mut C, ty: Type, c: u64) -> ValueRegs<Reg> { let from_bits = ty_bits(ty); let masked = if from_bits < 64 { c & ((1u64 << from_bits) - 1) } else { c }; let cst_copy = ctx.alloc_tmp(ty); for inst in Inst::gen_constant(cst_copy, masked as u128, ty, |ty| { ctx.alloc_tmp(ty).only_reg().unwrap() }) .into_iter() { ctx.emit(inst); } non_writable_value_regs(cst_copy) } /// Put the given input into possibly multiple registers, and mark it as used (side-effect). fn put_input_in_regs<C: LowerCtx<I = Inst>>(ctx: &mut C, spec: InsnInput) -> ValueRegs<Reg> { let ty = ctx.input_ty(spec.insn, spec.input); let input = ctx.get_input_as_source_or_const(spec.insn, spec.input); if let Some(c) = input.constant { // Generate constants fresh at each use to minimize long-range register pressure. generate_constant(ctx, ty, c) } else { ctx.put_input_in_regs(spec.insn, spec.input) } } /// Put the given input into a register, and mark it as used (side-effect). 
fn put_input_in_reg<C: LowerCtx<I = Inst>>(ctx: &mut C, spec: InsnInput) -> Reg { put_input_in_regs(ctx, spec) .only_reg() .expect("Multi-register value not expected") } /// Determines whether a load operation (indicated by `src_insn`) can be merged /// into the current lowering point. If so, returns the address-base source (as /// an `InsnInput`) and an offset from that address from which to perform the /// load. fn is_mergeable_load<C: LowerCtx<I = Inst>>( ctx: &mut C, src_insn: IRInst, ) -> Option<(InsnInput, i32)> { let insn_data = ctx.data(src_insn); let inputs = ctx.num_inputs(src_insn); if inputs != 1 { return None; } let load_ty = ctx.output_ty(src_insn, 0); if ty_bits(load_ty) < 32 { // Narrower values are handled by ALU insts that are at least 32 bits // wide, which is normally OK as we ignore upper buts; but, if we // generate, e.g., a direct-from-memory 32-bit add for a byte value and // the byte is the last byte in a page, the extra data that we load is // incorrectly accessed. So we only allow loads to merge for // 32-bit-and-above widths. return None; } // Just testing the opcode is enough, because the width will always match if // the type does (and the type should match if the CLIF is properly // constructed). if insn_data.opcode() == Opcode::Load { let offset = insn_data .load_store_offset() .expect("load should have offset"); Some(( InsnInput { insn: src_insn, input: 0, }, offset, )) } else { None } } /// Put the given input into a register or a memory operand. /// Effectful: may mark the given input as used, when returning the register form. fn input_to_reg_mem<C: LowerCtx<I = Inst>>(ctx: &mut C, spec: InsnInput) -> RegMem { let inputs = ctx.get_input_as_source_or_const(spec.insn, spec.input); if let Some(c) = inputs.constant { // Generate constants fresh at each use to minimize long-range register pressure. 
let ty = ctx.input_ty(spec.insn, spec.input); return RegMem::reg(generate_constant(ctx, ty, c).only_reg().unwrap()); } if let Some((src_insn, 0)) = inputs.inst { if let Some((addr_input, offset)) = is_mergeable_load(ctx, src_insn) { ctx.sink_inst(src_insn); let amode = lower_to_amode(ctx, addr_input, offset); return RegMem::mem(amode); } } RegMem::reg( ctx.put_input_in_regs(spec.insn, spec.input) .only_reg() .unwrap(), ) } /// An extension specification for `extend_input_to_reg`. #[derive(Clone, Copy)] enum ExtSpec { ZeroExtendTo32, ZeroExtendTo64, SignExtendTo32, #[allow(dead_code)] // not used just yet but may be used in the future! SignExtendTo64, } /// Put the given input into a register, marking it as used, and do a zero- or signed- extension if /// required. (This obviously causes side-effects.) fn extend_input_to_reg<C: LowerCtx<I = Inst>>( ctx: &mut C, spec: InsnInput, ext_spec: ExtSpec, ) -> Reg { let requested_size = match ext_spec { ExtSpec::ZeroExtendTo32 | ExtSpec::SignExtendTo32 => 32, ExtSpec::ZeroExtendTo64 | ExtSpec::SignExtendTo64 => 64, }; let input_size = ctx.input_ty(spec.insn, spec.input).bits(); let requested_ty = if requested_size == 32 { types::I32 } else { types::I64 }; let ext_mode = match (input_size, requested_size) { (a, b) if a == b => return put_input_in_reg(ctx, spec), (1, 8) => return put_input_in_reg(ctx, spec), (a, b) => ExtMode::new(a, b).expect(&format!("invalid extension: {} -> {}", a, b)), }; let src = input_to_reg_mem(ctx, spec); let dst = ctx.alloc_tmp(requested_ty).only_reg().unwrap(); match ext_spec { ExtSpec::ZeroExtendTo32 | ExtSpec::ZeroExtendTo64 => { ctx.emit(Inst::movzx_rm_r(ext_mode, src, dst)) } ExtSpec::SignExtendTo32 | ExtSpec::SignExtendTo64 => { ctx.emit(Inst::movsx_rm_r(ext_mode, src, dst)) } } dst.to_reg() } /// Returns whether the given input is an immediate that can be properly sign-extended, without any /// possible side-effect. 
fn non_reg_input_to_sext_imm(input: NonRegInput, input_ty: Type) -> Option<u32> { input.constant.and_then(|x| { // For i64 instructions (prefixed with REX.W), require that the immediate will sign-extend // to 64 bits. For other sizes, it doesn't matter and we can just use the plain // constant. if input_ty.bytes() != 8 || low32_will_sign_extend_to_64(x) { Some(x as u32) } else { None } }) } fn input_to_imm<C: LowerCtx<I = Inst>>(ctx: &mut C, spec: InsnInput) -> Option<u64> { ctx.get_input_as_source_or_const(spec.insn, spec.input) .constant } /// Put the given input into an immediate, a register or a memory operand. /// Effectful: may mark the given input as used, when returning the register form. fn input_to_reg_mem_imm<C: LowerCtx<I = Inst>>(ctx: &mut C, spec: InsnInput) -> RegMemImm { let input = ctx.get_input_as_source_or_const(spec.insn, spec.input); let input_ty = ctx.input_ty(spec.insn, spec.input); match non_reg_input_to_sext_imm(input, input_ty) { Some(x) => RegMemImm::imm(x), None => match input_to_reg_mem(ctx, spec) { RegMem::Reg { reg } => RegMemImm::reg(reg), RegMem::Mem { addr } => RegMemImm::mem(addr), }, } } /// Emit an instruction to insert a value `src` into a lane of `dst`. fn emit_insert_lane<C: LowerCtx<I = Inst>>( ctx: &mut C, src: RegMem, dst: Writable<Reg>, lane: u8, ty: Type, ) { if !ty.is_float() { let (sse_op, size) = match ty.lane_bits() { 8 => (SseOpcode::Pinsrb, OperandSize::Size32), 16 => (SseOpcode::Pinsrw, OperandSize::Size32), 32 => (SseOpcode::Pinsrd, OperandSize::Size32), 64 => (SseOpcode::Pinsrd, OperandSize::Size64), _ => panic!("Unable to insertlane for lane size: {}", ty.lane_bits()), }; ctx.emit(Inst::xmm_rm_r_imm(sse_op, src, dst, lane, size)); } else if ty == types::F32 { let sse_op = SseOpcode::Insertps; // Insert 32-bits from replacement (at index 00, bits 7:8) to vector (lane // shifted into bits 5:6). 
let lane = 0b00_00_00_00 | lane << 4; ctx.emit(Inst::xmm_rm_r_imm( sse_op, src, dst, lane, OperandSize::Size32, )); } else if ty == types::F64 { let sse_op = match lane { // Move the lowest quadword in replacement to vector without changing // the upper bits. 0 => SseOpcode::Movsd, // Move the low 64 bits of replacement vector to the high 64 bits of the // vector. 1 => SseOpcode::Movlhps, _ => unreachable!(), }; // Here we use the `xmm_rm_r` encoding because it correctly tells the register // allocator how we are using `dst`: we are using `dst` as a `mod` whereas other // encoding formats like `xmm_unary_rm_r` treat it as a `def`. ctx.emit(Inst::xmm_rm_r(sse_op, src, dst)); } else { panic!("unable to emit insertlane for type: {}", ty) } } /// Emit an instruction to extract a lane of `src` into `dst`. fn emit_extract_lane<C: LowerCtx<I = Inst>>( ctx: &mut C, src: Reg, dst: Writable<Reg>, lane: u8, ty: Type, ) { if !ty.is_float() { let (sse_op, size) = match ty.lane_bits() { 8 => (SseOpcode::Pextrb, OperandSize::Size32), 16 => (SseOpcode::Pextrw, OperandSize::Size32), 32 => (SseOpcode::Pextrd, OperandSize::Size32), 64 => (SseOpcode::Pextrd, OperandSize::Size64), _ => panic!("Unable to extractlane for lane size: {}", ty.lane_bits()), }; let src = RegMem::reg(src); ctx.emit(Inst::xmm_rm_r_imm(sse_op, src, dst, lane, size)); } else if ty == types::F32 || ty == types::F64 { if lane == 0 { // Remove the extractlane instruction, leaving the float where it is. The upper // bits will remain unchanged; for correctness, this relies on Cranelift type // checking to avoid using those bits. ctx.emit(Inst::gen_move(dst, src, ty)); } else { // Otherwise, shuffle the bits in `lane` to the lowest lane. let sse_op = SseOpcode::Pshufd; let mask = match ty { // Move the value at `lane` to lane 0, copying existing value at lane 0 to // other lanes. Again, this relies on Cranelift type checking to avoid // using those bits. 
types::F32 => { assert!(lane > 0 && lane < 4); 0b00_00_00_00 | lane } // Move the value at `lane` 1 (we know it must be 1 because of the `if` // statement above) to lane 0 and leave lane 1 unchanged. The Cranelift type // checking assumption also applies here. types::F64 => { assert!(lane == 1); 0b11_10_11_10 } _ => unreachable!(), }; let src = RegMem::reg(src); ctx.emit(Inst::xmm_rm_r_imm( sse_op, src, dst, mask, OperandSize::Size32, )); } } else { panic!("unable to emit extractlane for type: {}", ty) } } /// Emits an int comparison instruction. /// /// Note: make sure that there are no instructions modifying the flags between a call to this /// function and the use of the flags! /// /// Takes the condition code that will be tested, and returns /// the condition code that should be used. This allows us to /// synthesize comparisons out of multiple instructions for /// special cases (e.g., 128-bit integers). fn emit_cmp<C: LowerCtx<I = Inst>>(ctx: &mut C, insn: IRInst, cc: IntCC) -> IntCC { let ty = ctx.input_ty(insn, 0); let inputs = [InsnInput { insn, input: 0 }, InsnInput { insn, input: 1 }]; if ty == types::I128 { // We need to compare both halves and combine the results appropriately. 
let cmp1 = ctx.alloc_tmp(types::I64).only_reg().unwrap(); let cmp2 = ctx.alloc_tmp(types::I64).only_reg().unwrap(); let lhs = put_input_in_regs(ctx, inputs[0]); let lhs_lo = lhs.regs()[0]; let lhs_hi = lhs.regs()[1]; let rhs = put_input_in_regs(ctx, inputs[1]); let rhs_lo = RegMemImm::reg(rhs.regs()[0]); let rhs_hi = RegMemImm::reg(rhs.regs()[1]); match cc { IntCC::Equal => { ctx.emit(Inst::cmp_rmi_r(OperandSize::Size64, rhs_hi, lhs_hi)); ctx.emit(Inst::setcc(CC::Z, cmp1)); ctx.emit(Inst::cmp_rmi_r(OperandSize::Size64, rhs_lo, lhs_lo)); ctx.emit(Inst::setcc(CC::Z, cmp2)); ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::And, RegMemImm::reg(cmp1.to_reg()), cmp2, )); ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::And, RegMemImm::imm(1), cmp2, )); IntCC::NotEqual } IntCC::NotEqual => { ctx.emit(Inst::cmp_rmi_r(OperandSize::Size64, rhs_hi, lhs_hi)); ctx.emit(Inst::setcc(CC::NZ, cmp1)); ctx.emit(Inst::cmp_rmi_r(OperandSize::Size64, rhs_lo, lhs_lo)); ctx.emit(Inst::setcc(CC::NZ, cmp2)); ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::Or, RegMemImm::reg(cmp1.to_reg()), cmp2, )); ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::And, RegMemImm::imm(1), cmp2, )); IntCC::NotEqual } IntCC::SignedLessThan | IntCC::SignedLessThanOrEqual | IntCC::SignedGreaterThan | IntCC::SignedGreaterThanOrEqual | IntCC::UnsignedLessThan | IntCC::UnsignedLessThanOrEqual | IntCC::UnsignedGreaterThan | IntCC::UnsignedGreaterThanOrEqual => { // Result = (lhs_hi <> rhs_hi) || // (lhs_hi == rhs_hi && lhs_lo <> rhs_lo) let cmp3 = ctx.alloc_tmp(types::I64).only_reg().unwrap(); ctx.emit(Inst::cmp_rmi_r(OperandSize::Size64, rhs_hi, lhs_hi)); ctx.emit(Inst::setcc(CC::from_intcc(cc.without_equal()), cmp1)); ctx.emit(Inst::setcc(CC::Z, cmp2)); ctx.emit(Inst::cmp_rmi_r(OperandSize::Size64, rhs_lo, lhs_lo)); ctx.emit(Inst::setcc(CC::from_intcc(cc.unsigned()), cmp3)); ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::And, 
RegMemImm::reg(cmp2.to_reg()), cmp3, )); ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::Or, RegMemImm::reg(cmp1.to_reg()), cmp3, )); ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::And, RegMemImm::imm(1), cmp3, )); IntCC::NotEqual } _ => panic!("Unhandled IntCC in I128 comparison: {:?}", cc), } } else { // TODO Try to commute the operands (and invert the condition) if one is an immediate. let lhs = put_input_in_reg(ctx, inputs[0]); // We force the RHS into a register, and disallow load-op fusion, because we // do not have a transitive guarantee that this cmp-site will be the sole // user of the value. Consider: the icmp might be the only user of a load, // but there may be multiple users of the icmp (e.g. select or bint // instructions) that each invoke `emit_cmp()`. If we were to allow a load // to sink to the *latest* one, but other sites did not permit sinking, then // we would be missing the load for other cmp-sites. let rhs = put_input_in_reg(ctx, inputs[1]); // Cranelift's icmp semantics want to compare lhs - rhs, while Intel gives // us dst - src at the machine instruction level, so invert operands. ctx.emit(Inst::cmp_rmi_r( OperandSize::from_ty(ty), RegMemImm::reg(rhs), lhs, )); cc } } /// A specification for a fcmp emission. enum FcmpSpec { /// Normal flow. Normal, /// Avoid emitting Equal at all costs by inverting it to NotEqual, and indicate when that /// happens with `InvertedEqualOrConditions`. /// /// This is useful in contexts where it is hard/inefficient to produce a single instruction (or /// sequence of instructions) that check for an "AND" combination of condition codes; see for /// instance lowering of Select. InvertEqual, } /// This explains how to interpret the results of an fcmp instruction. enum FcmpCondResult { /// The given condition code must be set. Condition(CC), /// Both condition codes must be set. AndConditions(CC, CC), /// Either of the conditions codes must be set. 
OrConditions(CC, CC), /// The associated spec was set to `FcmpSpec::InvertEqual` and Equal has been inverted. Either /// of the condition codes must be set, and the user must invert meaning of analyzing the /// condition code results. When the spec is set to `FcmpSpec::Normal`, then this case can't be /// reached. InvertedEqualOrConditions(CC, CC), } /// Emits a float comparison instruction. /// /// Note: make sure that there are no instructions modifying the flags between a call to this /// function and the use of the flags! fn emit_fcmp<C: LowerCtx<I = Inst>>( ctx: &mut C, insn: IRInst, mut cond_code: FloatCC, spec: FcmpSpec, ) -> FcmpCondResult { let (flip_operands, inverted_equal) = match cond_code { FloatCC::LessThan | FloatCC::LessThanOrEqual | FloatCC::UnorderedOrGreaterThan | FloatCC::UnorderedOrGreaterThanOrEqual => { cond_code = cond_code.reverse(); (true, false) } FloatCC::Equal => { let inverted_equal = match spec { FcmpSpec::Normal => false, FcmpSpec::InvertEqual => { cond_code = FloatCC::NotEqual; // same as .inverse() true } }; (false, inverted_equal) } _ => (false, false), }; // The only valid CC constructed with `from_floatcc` can be put in the flag // register with a direct float comparison; do this here. let op = match ctx.input_ty(insn, 0) { types::F32 => SseOpcode::Ucomiss, types::F64 => SseOpcode::Ucomisd, _ => panic!("Bad input type to Fcmp"), }; let inputs = &[InsnInput { insn, input: 0 }, InsnInput { insn, input: 1 }]; let (lhs_input, rhs_input) = if flip_operands { (inputs[1], inputs[0]) } else { (inputs[0], inputs[1]) }; let lhs = put_input_in_reg(ctx, lhs_input); // See above in `emit_cmp()`. We must only use the reg/reg form of the // comparison in order to avoid issues with merged loads. 
let rhs = put_input_in_reg(ctx, rhs_input); ctx.emit(Inst::xmm_cmp_rm_r(op, RegMem::reg(rhs), lhs)); let cond_result = match cond_code { FloatCC::Equal => FcmpCondResult::AndConditions(CC::NP, CC::Z), FloatCC::NotEqual if inverted_equal => { FcmpCondResult::InvertedEqualOrConditions(CC::P, CC::NZ) } FloatCC::NotEqual if !inverted_equal => FcmpCondResult::OrConditions(CC::P, CC::NZ), _ => FcmpCondResult::Condition(CC::from_floatcc(cond_code)), }; cond_result } fn emit_bitrev<C: LowerCtx<I = Inst>>(ctx: &mut C, src: Reg, dst: Writable<Reg>, ty: Type) { let bits = ty.bits(); let const_mask = if bits == 64 { 0xffff_ffff_ffff_ffff } else { (1u64 << bits) - 1 }; let tmp0 = ctx.alloc_tmp(types::I64).only_reg().unwrap(); let tmp1 = ctx.alloc_tmp(types::I64).only_reg().unwrap(); let tmp2 = ctx.alloc_tmp(types::I64).only_reg().unwrap(); ctx.emit(Inst::gen_move(tmp0, src, types::I64)); // Swap 1-bit units. // tmp1 = src ctx.emit(Inst::gen_move(tmp1, tmp0.to_reg(), types::I64)); // tmp2 = 0b0101.. ctx.emit(Inst::imm( OperandSize::Size64, 0x5555_5555_5555_5555 & const_mask, tmp2, )); // tmp1 = src >> 1 ctx.emit(Inst::shift_r( OperandSize::Size64, ShiftKind::ShiftRightLogical, Some(1), tmp1, )); // tmp1 = (src >> 1) & 0b0101.. ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::And, RegMemImm::reg(tmp2.to_reg()), tmp1, )); // tmp2 = src & 0b0101.. ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::And, RegMemImm::reg(tmp0.to_reg()), tmp2, )); // tmp2 = (src & 0b0101..) << 1 ctx.emit(Inst::shift_r( OperandSize::Size64, ShiftKind::ShiftLeft, Some(1), tmp2, )); // tmp0 = (src >> 1) & 0b0101.. | (src & 0b0101..) << 1 ctx.emit(Inst::gen_move(tmp0, tmp2.to_reg(), types::I64)); ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::Or, RegMemImm::reg(tmp1.to_reg()), tmp0, )); // Swap 2-bit units. 
ctx.emit(Inst::gen_move(tmp1, tmp0.to_reg(), types::I64)); ctx.emit(Inst::imm( OperandSize::Size64, 0x3333_3333_3333_3333 & const_mask, tmp2, )); ctx.emit(Inst::shift_r( OperandSize::Size64, ShiftKind::ShiftRightLogical, Some(2), tmp1, )); ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::And, RegMemImm::reg(tmp2.to_reg()), tmp1, )); ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::And, RegMemImm::reg(tmp0.to_reg()), tmp2, )); ctx.emit(Inst::shift_r( OperandSize::Size64, ShiftKind::ShiftLeft, Some(2), tmp2, )); ctx.emit(Inst::gen_move(tmp0, tmp2.to_reg(), types::I64)); ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::Or, RegMemImm::reg(tmp1.to_reg()), tmp0, )); // Swap 4-bit units. ctx.emit(Inst::gen_move(tmp1, tmp0.to_reg(), types::I64)); ctx.emit(Inst::imm( OperandSize::Size64, 0x0f0f_0f0f_0f0f_0f0f & const_mask, tmp2, )); ctx.emit(Inst::shift_r( OperandSize::Size64, ShiftKind::ShiftRightLogical, Some(4), tmp1, )); ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::And, RegMemImm::reg(tmp2.to_reg()), tmp1, )); ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::And, RegMemImm::reg(tmp0.to_reg()), tmp2, )); ctx.emit(Inst::shift_r( OperandSize::Size64, ShiftKind::ShiftLeft, Some(4), tmp2, )); ctx.emit(Inst::gen_move(tmp0, tmp2.to_reg(), types::I64)); ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::Or, RegMemImm::reg(tmp1.to_reg()), tmp0, )); if bits > 8 { // Swap 8-bit units. 
ctx.emit(Inst::gen_move(tmp1, tmp0.to_reg(), types::I64)); ctx.emit(Inst::imm( OperandSize::Size64, 0x00ff_00ff_00ff_00ff & const_mask, tmp2, )); ctx.emit(Inst::shift_r( OperandSize::Size64, ShiftKind::ShiftRightLogical, Some(8), tmp1, )); ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::And, RegMemImm::reg(tmp2.to_reg()), tmp1, )); ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::And, RegMemImm::reg(tmp0.to_reg()), tmp2, )); ctx.emit(Inst::shift_r( OperandSize::Size64, ShiftKind::ShiftLeft, Some(8), tmp2, )); ctx.emit(Inst::gen_move(tmp0, tmp2.to_reg(), types::I64)); ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::Or, RegMemImm::reg(tmp1.to_reg()), tmp0, )); } if bits > 16 { // Swap 16-bit units. ctx.emit(Inst::gen_move(tmp1, tmp0.to_reg(), types::I64)); ctx.emit(Inst::imm( OperandSize::Size64, 0x0000_ffff_0000_ffff & const_mask, tmp2, )); ctx.emit(Inst::shift_r( OperandSize::Size64, ShiftKind::ShiftRightLogical, Some(16), tmp1, )); ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::And, RegMemImm::reg(tmp2.to_reg()), tmp1, )); ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::And, RegMemImm::reg(tmp0.to_reg()), tmp2, )); ctx.emit(Inst::shift_r( OperandSize::Size64, ShiftKind::ShiftLeft, Some(16), tmp2, )); ctx.emit(Inst::gen_move(tmp0, tmp2.to_reg(), types::I64)); ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::Or, RegMemImm::reg(tmp1.to_reg()), tmp0, )); } if bits > 32 { // Swap 32-bit units. 
ctx.emit(Inst::gen_move(tmp1, tmp0.to_reg(), types::I64)); ctx.emit(Inst::imm( OperandSize::Size64, 0x0000_0000_ffff_ffff & const_mask, tmp2, )); ctx.emit(Inst::shift_r( OperandSize::Size64, ShiftKind::ShiftRightLogical, Some(32), tmp1, )); ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::And, RegMemImm::reg(tmp2.to_reg()), tmp1, )); ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::And, RegMemImm::reg(tmp0.to_reg()), tmp2, )); ctx.emit(Inst::shift_r( OperandSize::Size64, ShiftKind::ShiftLeft, Some(32), tmp2, )); ctx.emit(Inst::gen_move(tmp0, tmp2.to_reg(), types::I64)); ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::Or, RegMemImm::reg(tmp1.to_reg()), tmp0, )); } ctx.emit(Inst::gen_move(dst, tmp0.to_reg(), types::I64)); } fn emit_shl_i128<C: LowerCtx<I = Inst>>( ctx: &mut C, src: ValueRegs<Reg>, dst: ValueRegs<Writable<Reg>>, amt_src: Reg, ) { let src_lo = src.regs()[0]; let src_hi = src.regs()[1]; let dst_lo = dst.regs()[0]; let dst_hi = dst.regs()[1]; // mov tmp1, src_lo // shl tmp1, amt_src // mov tmp2, src_hi // shl tmp2, amt_src // mov amt, 64 // sub amt, amt_src // mov tmp3, src_lo // shr tmp3, amt // xor dst_lo, dst_lo // test amt_src, 127 // cmovz tmp3, dst_lo // or tmp3, tmp2 // mov amt, amt_src // and amt, 64 // cmovz dst_hi, tmp3 // cmovz dst_lo, tmp1 // cmovnz dst_hi, tmp1 let tmp1 = ctx.alloc_tmp(types::I64).only_reg().unwrap(); let tmp2 = ctx.alloc_tmp(types::I64).only_reg().unwrap(); let tmp3 = ctx.alloc_tmp(types::I64).only_reg().unwrap(); let amt = ctx.alloc_tmp(types::I64).only_reg().unwrap(); ctx.emit(Inst::gen_move(tmp1, src_lo, types::I64)); ctx.emit(Inst::gen_move( Writable::from_reg(regs::rcx()), amt_src, types::I64, )); ctx.emit(Inst::shift_r( OperandSize::Size64, ShiftKind::ShiftLeft, None, tmp1, )); ctx.emit(Inst::gen_move(tmp2, src_hi, types::I64)); ctx.emit(Inst::gen_move( Writable::from_reg(regs::rcx()), amt_src, types::I64, )); ctx.emit(Inst::shift_r( OperandSize::Size64, ShiftKind::ShiftLeft, 
None, tmp2, )); ctx.emit(Inst::imm(OperandSize::Size64, 64, amt)); ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::Sub, RegMemImm::reg(amt_src), amt, )); ctx.emit(Inst::gen_move(tmp3, src_lo, types::I64)); ctx.emit(Inst::gen_move( Writable::from_reg(regs::rcx()), amt.to_reg(), types::I64, )); ctx.emit(Inst::shift_r( OperandSize::Size64, ShiftKind::ShiftRightLogical, None, tmp3, )); ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::Xor, RegMemImm::reg(dst_lo.to_reg()), dst_lo, )); ctx.emit(Inst::test_rmi_r( OperandSize::Size64, RegMemImm::imm(127), amt_src, )); ctx.emit(Inst::cmove( OperandSize::Size64, CC::Z, RegMem::reg(dst_lo.to_reg()), tmp3, )); ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::Or, RegMemImm::reg(tmp2.to_reg()), tmp3, )); // This isn't semantically necessary, but it keeps the // register allocator happy, because it cannot otherwise // infer that cmovz + cmovnz always defines dst_hi. ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::Xor, RegMemImm::reg(dst_hi.to_reg()), dst_hi, )); ctx.emit(Inst::gen_move(amt, amt_src, types::I64)); ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::And, RegMemImm::imm(64), amt, )); ctx.emit(Inst::cmove( OperandSize::Size64, CC::Z, RegMem::reg(tmp3.to_reg()), dst_hi, )); ctx.emit(Inst::cmove( OperandSize::Size64, CC::Z, RegMem::reg(tmp1.to_reg()), dst_lo, )); ctx.emit(Inst::cmove( OperandSize::Size64, CC::NZ, RegMem::reg(tmp1.to_reg()), dst_hi, )); } fn emit_shr_i128<C: LowerCtx<I = Inst>>( ctx: &mut C, src: ValueRegs<Reg>, dst: ValueRegs<Writable<Reg>>, amt_src: Reg, is_signed: bool, ) { let src_lo = src.regs()[0]; let src_hi = src.regs()[1]; let dst_lo = dst.regs()[0]; let dst_hi = dst.regs()[1]; // mov tmp1, src_hi // {u,s}shr tmp1, amt_src // mov tmp2, src_lo // ushr tmp2, amt_src // mov amt, 64 // sub amt, amt_src // mov tmp3, src_hi // shl tmp3, amt // xor dst_lo, dst_lo // test amt_src, 127 // cmovz tmp3, dst_lo // or tmp3, tmp2 // if is_signed: 
// mov dst_hi, src_hi // sshr dst_hi, 63 // get the sign bit // else: // xor dst_hi, dst_hi // mov amt, amt_src // and amt, 64 // cmovz dst_hi, tmp1 // cmovz dst_lo, tmp3 // cmovnz dst_lo, tmp1 let tmp1 = ctx.alloc_tmp(types::I64).only_reg().unwrap(); let tmp2 = ctx.alloc_tmp(types::I64).only_reg().unwrap(); let tmp3 = ctx.alloc_tmp(types::I64).only_reg().unwrap(); let amt = ctx.alloc_tmp(types::I64).only_reg().unwrap(); let shift_kind = if is_signed { ShiftKind::ShiftRightArithmetic } else { ShiftKind::ShiftRightLogical }; ctx.emit(Inst::gen_move(tmp1, src_hi, types::I64)); ctx.emit(Inst::gen_move( Writable::from_reg(regs::rcx()), amt_src, types::I64, )); ctx.emit(Inst::shift_r(OperandSize::Size64, shift_kind, None, tmp1)); ctx.emit(Inst::gen_move(tmp2, src_lo, types::I64)); ctx.emit(Inst::gen_move( Writable::from_reg(regs::rcx()), amt_src, types::I64, )); // N.B.: right-shift of *lower* half is *always* unsigned (its MSB is not a sign bit). ctx.emit(Inst::shift_r( OperandSize::Size64, ShiftKind::ShiftRightLogical, None, tmp2, )); ctx.emit(Inst::imm(OperandSize::Size64, 64, amt)); ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::Sub, RegMemImm::reg(amt_src), amt, )); ctx.emit(Inst::gen_move(tmp3, src_hi, types::I64)); ctx.emit(Inst::gen_move( Writable::from_reg(regs::rcx()), amt.to_reg(), types::I64, )); ctx.emit(Inst::shift_r( OperandSize::Size64, ShiftKind::ShiftLeft, None, tmp3, )); ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::Xor, RegMemImm::reg(dst_lo.to_reg()), dst_lo, )); ctx.emit(Inst::test_rmi_r( OperandSize::Size64, RegMemImm::imm(127), amt_src, )); ctx.emit(Inst::cmove( OperandSize::Size64, CC::Z, RegMem::reg(dst_lo.to_reg()), tmp3, )); ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::Or, RegMemImm::reg(tmp2.to_reg()), tmp3, )); if is_signed { ctx.emit(Inst::gen_move(dst_hi, src_hi, types::I64)); ctx.emit(Inst::shift_r( OperandSize::Size64, ShiftKind::ShiftRightArithmetic, Some(63), dst_hi, )); } else { 
ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::Xor, RegMemImm::reg(dst_hi.to_reg()), dst_hi, )); } // This isn't semantically necessary, but it keeps the // register allocator happy, because it cannot otherwise // infer that cmovz + cmovnz always defines dst_lo. ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::Xor, RegMemImm::reg(dst_lo.to_reg()), dst_lo, )); ctx.emit(Inst::gen_move(amt, amt_src, types::I64)); ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::And, RegMemImm::imm(64), amt, )); ctx.emit(Inst::cmove( OperandSize::Size64, CC::Z, RegMem::reg(tmp1.to_reg()), dst_hi, )); ctx.emit(Inst::cmove( OperandSize::Size64, CC::Z, RegMem::reg(tmp3.to_reg()), dst_lo, )); ctx.emit(Inst::cmove( OperandSize::Size64, CC::NZ, RegMem::reg(tmp1.to_reg()), dst_lo, )); } fn make_libcall_sig<C: LowerCtx<I = Inst>>( ctx: &mut C, insn: IRInst, call_conv: CallConv, ptr_ty: Type, ) -> Signature { let mut sig = Signature::new(call_conv); for i in 0..ctx.num_inputs(insn) { sig.params.push(AbiParam::new(ctx.input_ty(insn, i))); } for i in 0..ctx.num_outputs(insn) { sig.returns.push(AbiParam::new(ctx.output_ty(insn, i))); } if call_conv.extends_baldrdash() { // Adds the special VMContext parameter to the signature. sig.params .push(AbiParam::special(ptr_ty, ArgumentPurpose::VMContext)); } sig } fn emit_vm_call<C: LowerCtx<I = Inst>>( ctx: &mut C, flags: &Flags, triple: &Triple, libcall: LibCall, insn: IRInst, inputs: SmallVec<[InsnInput; 4]>, outputs: SmallVec<[InsnOutput; 2]>, ) -> CodegenResult<()> { let extname = ExternalName::LibCall(libcall); let dist = if flags.use_colocated_libcalls() { RelocDistance::Near } else { RelocDistance::Far }; // TODO avoid recreating signatures for every single Libcall function. 
    let call_conv = CallConv::for_libcall(flags, CallConv::triple_default(triple));
    // NOTE(review): `make_libcall_sig` takes a pointer type, but `types::I64`
    // is hard-coded here; this is only correct for 64-bit targets. Confirm
    // whether 32-bit pointers are ever a concern for this backend.
    let sig = make_libcall_sig(ctx, insn, call_conv, types::I64);
    let caller_conv = ctx.abi().call_conv();

    let mut abi = X64ABICaller::from_func(&sig, &extname, dist, caller_conv, flags)?;

    abi.emit_stack_pre_adjust(ctx);

    // Baldrdash-style calling conventions carry one extra implicit argument
    // (the VMContext pointer) beyond the CLIF-level inputs.
    let vm_context = if call_conv.extends_baldrdash() { 1 } else { 0 };
    assert_eq!(inputs.len() + vm_context, abi.num_args());

    // Copy each CLIF input into its ABI argument location.
    for (i, input) in inputs.iter().enumerate() {
        let arg_reg = put_input_in_reg(ctx, *input);
        abi.emit_copy_regs_to_arg(ctx, i, ValueRegs::one(arg_reg));
    }
    if call_conv.extends_baldrdash() {
        let vm_context_vreg = ctx
            .get_vm_context()
            .expect("should have a VMContext to pass to libcall funcs");
        abi.emit_copy_regs_to_arg(ctx, inputs.len(), ValueRegs::one(vm_context_vreg));
    }

    abi.emit_call(ctx);

    // Copy results back out of the ABI return locations.
    for (i, output) in outputs.iter().enumerate() {
        let retval_reg = get_output_reg(ctx, *output).only_reg().unwrap();
        abi.emit_copy_retval_to_regs(ctx, i, ValueRegs::one(retval_reg));
    }
    abi.emit_stack_post_adjust(ctx);

    Ok(())
}

/// Returns whether the given input is a shift by a constant value less than or equal to 3.
/// The goal is to embed it within an address mode.
///
/// On success, yields the shifted operand (the `ishl`'s first input) together
/// with the constant shift amount; amounts 0..=3 correspond to the x86
/// scaled-index address-mode scales (1/2/4/8).
fn matches_small_constant_shift<C: LowerCtx<I = Inst>>(
    ctx: &mut C,
    spec: InsnInput,
) -> Option<(InsnInput, u8)> {
    matches_input(ctx, spec, Opcode::Ishl).and_then(|shift| {
        match input_to_imm(
            ctx,
            InsnInput {
                insn: shift,
                input: 1,
            },
        ) {
            // Only shift amounts representable in the SIB scale field qualify.
            Some(shift_amt) if shift_amt <= 3 => Some((
                InsnInput {
                    insn: shift,
                    input: 0,
                },
                shift_amt as u8,
            )),
            _ => None,
        }
    })
}

/// Lowers an instruction to one of the x86 addressing modes.
///
/// Note: the 32-bit offset in Cranelift has to be sign-extended, which maps x86's behavior.
fn lower_to_amode<C: LowerCtx<I = Inst>>(ctx: &mut C, spec: InsnInput, offset: i32) -> Amode {
    let flags = ctx
        .memflags(spec.insn)
        .expect("Instruction with amode should have memflags");

    // We now either have an add that we must materialize, or some other input; as well as the
    // final offset.
    // An `iadd` feeding the address can be folded into the
    // base + index*scale + disp form instead of being materialized.
    if let Some(add) = matches_input(ctx, spec, Opcode::Iadd) {
        debug_assert_eq!(ctx.output_ty(add, 0), types::I64);
        let add_inputs = &[
            InsnInput {
                insn: add,
                input: 0,
            },
            InsnInput {
                insn: add,
                input: 1,
            },
        ];

        // TODO heap_addr legalization generates a uext64 *after* the shift, so these optimizations
        // aren't happening in the wasm case. We could do better, given some range analysis.
        let (base, index, shift) = if let Some((shift_input, shift_amt)) =
            matches_small_constant_shift(ctx, add_inputs[0])
        {
            // addend 0 is a small constant shift: it becomes the scaled index.
            (
                put_input_in_reg(ctx, add_inputs[1]),
                put_input_in_reg(ctx, shift_input),
                shift_amt,
            )
        } else if let Some((shift_input, shift_amt)) =
            matches_small_constant_shift(ctx, add_inputs[1])
        {
            // Same, with the addends swapped.
            (
                put_input_in_reg(ctx, add_inputs[0]),
                put_input_in_reg(ctx, shift_input),
                shift_amt,
            )
        } else {
            // No scaled index: try to fold a constant addend (possibly behind
            // a uextend) into the displacement instead.
            for i in 0..=1 {
                // Try to pierce through uextend.
                if let Some(uextend) = matches_input(
                    ctx,
                    InsnInput {
                        insn: add,
                        input: i,
                    },
                    Opcode::Uextend,
                ) {
                    if let Some(cst) = ctx.get_input_as_source_or_const(uextend, 0).constant {
                        // Zero the upper bits.
                        let input_size = ctx.input_ty(uextend, 0).bits() as u64;
                        let shift: u64 = 64 - input_size;
                        let uext_cst: u64 = (cst << shift) >> shift;

                        // Fold the zero-extended constant into the displacement
                        // if the sum still fits a sign-extended 32-bit field.
                        let final_offset = (offset as i64).wrapping_add(uext_cst as i64);
                        if low32_will_sign_extend_to_64(final_offset as u64) {
                            let base = put_input_in_reg(ctx, add_inputs[1 - i]);
                            return Amode::imm_reg(final_offset as u32, base).with_flags(flags);
                        }
                    }
                }

                // If it's a constant, add it directly!
                if let Some(cst) = ctx.get_input_as_source_or_const(add, i).constant {
                    let final_offset = (offset as i64).wrapping_add(cst as i64);
                    if low32_will_sign_extend_to_64(final_offset as u64) {
                        let base = put_input_in_reg(ctx, add_inputs[1 - i]);
                        return Amode::imm_reg(final_offset as u32, base).with_flags(flags);
                    }
                }
            }

            // Nothing foldable: use both addends as base + (unscaled) index.
            (
                put_input_in_reg(ctx, add_inputs[0]),
                put_input_in_reg(ctx, add_inputs[1]),
                0,
            )
        };

        return Amode::imm_reg_reg_shift(offset as u32, base, index, shift).with_flags(flags);
    }

    // Not an add: the input value itself is the base register.
    let input = put_input_in_reg(ctx, spec);
    Amode::imm_reg(offset as u32, input).with_flags(flags)
}

/// Emits a register-to-register move for each constituent register of a
/// (possibly multi-register) value, e.g. both halves of an `i128`.
fn emit_moves<C: LowerCtx<I = Inst>>(
    ctx: &mut C,
    dst: ValueRegs<Writable<Reg>>,
    src: ValueRegs<Reg>,
    ty: Type,
) {
    // `rc_for_type` also yields the per-register types for a multi-reg value.
    let (_, tys) = Inst::rc_for_type(ty).unwrap();
    for ((dst, src), ty) in dst.regs().iter().zip(src.regs().iter()).zip(tys.iter()) {
        ctx.emit(Inst::gen_move(*dst, *src, *ty));
    }
}

/// Emits a conditional move on condition `cc` for each constituent register
/// of a (possibly multi-register) value.
///
/// `size` is the total value size in bytes; it is divided evenly across the
/// registers and clamped to a minimum of 4 bytes per cmove.
fn emit_cmoves<C: LowerCtx<I = Inst>>(
    ctx: &mut C,
    size: u8,
    cc: CC,
    src: ValueRegs<Reg>,
    dst: ValueRegs<Writable<Reg>>,
) {
    let size = size / src.len() as u8;
    let size = u8::max(size, 4); // at least 32 bits
    for (dst, src) in dst.regs().iter().zip(src.regs().iter()) {
        ctx.emit(Inst::cmove(
            OperandSize::from_bytes(size.into()),
            cc,
            RegMem::reg(*src),
            *dst,
        ));
    }
}

/// Emits a count-leading-zeros sequence built on `bsr`.
///
/// `bsr` leaves the destination undefined (and sets ZF) for a zero source, so
/// a `cmovz` substitutes an all-ones "index" in that case; the final
/// subtraction `(bits - 1) - index` turns the highest-set-bit index into a
/// leading-zero count, yielding `orig_ty.bits()` for a zero input.
fn emit_clz<C: LowerCtx<I = Inst>>(
    ctx: &mut C,
    orig_ty: Type,
    ty: Type,
    src: Reg,
    dst: Writable<Reg>,
) {
    let src = RegMem::reg(src);
    let tmp = ctx.alloc_tmp(ty).only_reg().unwrap();
    // dst = all ones: the fallback "index" (-1) used when the source is zero.
    ctx.emit(Inst::imm(OperandSize::from_ty(ty), u64::max_value(), dst));

    // tmp = index of the highest set bit (undefined if src == 0).
    ctx.emit(Inst::unary_rm_r(
        OperandSize::from_ty(ty),
        UnaryRmROpcode::Bsr,
        src,
        tmp,
    ));

    // If the source was zero (ZF set by bsr), use the -1 fallback instead.
    ctx.emit(Inst::cmove(
        OperandSize::from_ty(ty),
        CC::Z,
        RegMem::reg(dst.to_reg()),
        tmp,
    ));

    // dst = (bits - 1) - bit_index == number of leading zeros.
    ctx.emit(Inst::imm(
        OperandSize::from_ty(ty),
        orig_ty.bits() as u64 - 1,
        dst,
    ));

    ctx.emit(Inst::alu_rmi_r(
        if ty == types::I64 {
            OperandSize::Size64
        } else {
            OperandSize::Size32
        },
        AluRmiROpcode::Sub,
        RegMemImm::reg(tmp.to_reg()),
        dst,
    ));
}

/// Emits a count-trailing-zeros sequence built on `bsf`.
///
/// `bsf` leaves the destination undefined (and sets ZF) for a zero source, so
/// a `cmovz` substitutes `orig_ty.bits()`, the defined ctz result for zero.
fn emit_ctz<C: LowerCtx<I = Inst>>(
    ctx: &mut C,
    orig_ty: Type,
    ty: Type,
    src: Reg,
    dst: Writable<Reg>,
) {
    let src = RegMem::reg(src);
    let tmp = ctx.alloc_tmp(ty).only_reg().unwrap();
    // tmp = bit width of the original type: the ctz result for a zero input.
    ctx.emit(Inst::imm(OperandSize::Size32, orig_ty.bits() as u64, tmp));

    // dst = index of the lowest set bit (undefined if src == 0).
    ctx.emit(Inst::unary_rm_r(
        OperandSize::from_ty(ty),
        UnaryRmROpcode::Bsf,
        src,
        dst,
    ));

    // If the source was zero (ZF set by bsf), substitute the bit-width fallback.
    ctx.emit(Inst::cmove(
        OperandSize::from_ty(ty),
        CC::Z,
        RegMem::reg(tmp.to_reg()),
        dst,
    ));
}

//=============================================================================
// Top-level instruction lowering entry point, for one instruction.

/// Actually codegen an instruction's results into registers.
fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
    ctx: &mut C,
    insn: IRInst,
    flags: &Flags,
    isa_flags: &x64_settings::Flags,
    triple: &Triple,
) -> CodegenResult<()> {
    let op = ctx.data(insn).opcode();

    // Gather the instruction's input and output slots up front; the match
    // arms below index into these.
    let inputs: SmallVec<[InsnInput; 4]> = (0..ctx.num_inputs(insn))
        .map(|i| InsnInput { insn, input: i })
        .collect();
    let outputs: SmallVec<[InsnOutput; 2]> = (0..ctx.num_outputs(insn))
        .map(|i| InsnOutput { insn, output: i })
        .collect();

    // Type of the first result, if the instruction has any results.
    let ty = if outputs.len() > 0 {
        Some(ctx.output_ty(insn, 0))
    } else {
        None
    };

    match op {
        Opcode::Iconst | Opcode::Bconst | Opcode::Null => {
            let value = ctx
                .get_constant(insn)
                .expect("constant value for iconst et al");
            let dst = get_output_reg(ctx, outputs[0]);
            for inst in Inst::gen_constant(dst, value as u128, ty.unwrap(), |ty| {
                ctx.alloc_tmp(ty).only_reg().unwrap()
            }) {
                ctx.emit(inst);
            }
        }

        Opcode::Iadd
        | Opcode::IaddIfcout
        | Opcode::SaddSat
        | Opcode::UaddSat
        | Opcode::Isub
        | Opcode::SsubSat
        | Opcode::UsubSat
        | Opcode::AvgRound
        | Opcode::Band
        | Opcode::Bor
        | Opcode::Bxor => {
            let ty = ty.unwrap();
            if ty.lane_count() > 1 {
                // Vector case: pick the per-lane SSE opcode for this op/type.
                let sse_op = match op {
                    Opcode::Iadd => match ty {
                        types::I8X16 => SseOpcode::Paddb,
                        types::I16X8 => SseOpcode::Paddw,
                        types::I32X4 => SseOpcode::Paddd,
                        types::I64X2 => SseOpcode::Paddq,
                        _ => panic!("Unsupported type for packed iadd instruction: {}", ty),
                    },
                    Opcode::SaddSat => match ty {
                        types::I8X16 => SseOpcode::Paddsb,
                        types::I16X8 => SseOpcode::Paddsw,
_ => panic!("Unsupported type for packed sadd_sat instruction: {}", ty), }, Opcode::UaddSat => match ty { types::I8X16 => SseOpcode::Paddusb, types::I16X8 => SseOpcode::Paddusw, _ => panic!("Unsupported type for packed uadd_sat instruction: {}", ty), }, Opcode::Isub => match ty { types::I8X16 => SseOpcode::Psubb, types::I16X8 => SseOpcode::Psubw, types::I32X4 => SseOpcode::Psubd, types::I64X2 => SseOpcode::Psubq, _ => panic!("Unsupported type for packed isub instruction: {}", ty), }, Opcode::SsubSat => match ty { types::I8X16 => SseOpcode::Psubsb, types::I16X8 => SseOpcode::Psubsw, _ => panic!("Unsupported type for packed ssub_sat instruction: {}", ty), }, Opcode::UsubSat => match ty { types::I8X16 => SseOpcode::Psubusb, types::I16X8 => SseOpcode::Psubusw, _ => panic!("Unsupported type for packed usub_sat instruction: {}", ty), }, Opcode::AvgRound => match ty { types::I8X16 => SseOpcode::Pavgb, types::I16X8 => SseOpcode::Pavgw, _ => panic!("Unsupported type for packed avg_round instruction: {}", ty), }, Opcode::Band => match ty { types::F32X4 => SseOpcode::Andps, types::F64X2 => SseOpcode::Andpd, _ => SseOpcode::Pand, }, Opcode::Bor => match ty { types::F32X4 => SseOpcode::Orps, types::F64X2 => SseOpcode::Orpd, _ => SseOpcode::Por, }, Opcode::Bxor => match ty { types::F32X4 => SseOpcode::Xorps, types::F64X2 => SseOpcode::Xorpd, _ => SseOpcode::Pxor, }, _ => panic!("Unsupported packed instruction: {}", op), }; let lhs = put_input_in_reg(ctx, inputs[0]); let rhs = input_to_reg_mem(ctx, inputs[1]); let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); // Move the `lhs` to the same register as `dst`. 
ctx.emit(Inst::gen_move(dst, lhs, ty)); ctx.emit(Inst::xmm_rm_r(sse_op, rhs, dst)); } else if ty == types::I128 || ty == types::B128 { let alu_ops = match op { Opcode::Iadd => (AluRmiROpcode::Add, AluRmiROpcode::Adc), Opcode::Isub => (AluRmiROpcode::Sub, AluRmiROpcode::Sbb), Opcode::Band => (AluRmiROpcode::And, AluRmiROpcode::And), Opcode::Bor => (AluRmiROpcode::Or, AluRmiROpcode::Or), Opcode::Bxor => (AluRmiROpcode::Xor, AluRmiROpcode::Xor), _ => panic!("Unsupported opcode with 128-bit integers: {:?}", op), }; let lhs = put_input_in_regs(ctx, inputs[0]); let rhs = put_input_in_regs(ctx, inputs[1]); let dst = get_output_reg(ctx, outputs[0]); assert_eq!(lhs.len(), 2); assert_eq!(rhs.len(), 2); assert_eq!(dst.len(), 2); // For add, sub, and, or, xor: just do ops on lower then upper // half. Carry-flag propagation is implicit (add/adc, sub/sbb). ctx.emit(Inst::gen_move(dst.regs()[0], lhs.regs()[0], types::I64)); ctx.emit(Inst::gen_move(dst.regs()[1], lhs.regs()[1], types::I64)); ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, alu_ops.0, RegMemImm::reg(rhs.regs()[0]), dst.regs()[0], )); ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, alu_ops.1, RegMemImm::reg(rhs.regs()[1]), dst.regs()[1], )); } else { let size = if ty == types::I64 { OperandSize::Size64 } else { OperandSize::Size32 }; let alu_op = match op { Opcode::Iadd | Opcode::IaddIfcout => AluRmiROpcode::Add, Opcode::Isub => AluRmiROpcode::Sub, Opcode::Band => AluRmiROpcode::And, Opcode::Bor => AluRmiROpcode::Or, Opcode::Bxor => AluRmiROpcode::Xor, _ => unreachable!(), }; let (lhs, rhs) = match op { Opcode::Iadd | Opcode::IaddIfcout | Opcode::Band | Opcode::Bor | Opcode::Bxor => { // For commutative operations, try to commute operands if one is an // immediate or direct memory reference. Do so by converting LHS to RMI; if // reg, then always convert RHS to RMI; else, use LHS as RMI and convert // RHS to reg. 
let lhs = input_to_reg_mem_imm(ctx, inputs[0]); if let RegMemImm::Reg { reg: lhs_reg } = lhs { let rhs = input_to_reg_mem_imm(ctx, inputs[1]); (lhs_reg, rhs) } else { let rhs_reg = put_input_in_reg(ctx, inputs[1]); (rhs_reg, lhs) } } Opcode::Isub => ( put_input_in_reg(ctx, inputs[0]), input_to_reg_mem_imm(ctx, inputs[1]), ), _ => unreachable!(), }; let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); ctx.emit(Inst::mov_r_r(OperandSize::Size64, lhs, dst)); ctx.emit(Inst::alu_rmi_r(size, alu_op, rhs, dst)); } } Opcode::Imul => { let ty = ty.unwrap(); if ty == types::I64X2 { // Eventually one of these should be `input_to_reg_mem` (TODO). let lhs = put_input_in_reg(ctx, inputs[0]); let rhs = put_input_in_reg(ctx, inputs[1]); let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); if isa_flags.use_avx512f_simd() || isa_flags.use_avx512vl_simd() { // With the right AVX512 features (VL, DQ) this operation // can lower to a single operation. ctx.emit(Inst::xmm_rm_r_evex( Avx512Opcode::Vpmullq, RegMem::reg(rhs), lhs, dst, )); } else { // Otherwise, for I64X2 multiplication we describe a lane A as being // composed of a 32-bit upper half "Ah" and a 32-bit lower half // "Al". The 32-bit long hand multiplication can then be written // as: // Ah Al // * Bh Bl // ----- // Al * Bl // + (Ah * Bl) << 32 // + (Al * Bh) << 32 // // So for each lane we will compute: // A * B = (Al * Bl) + ((Ah * Bl) + (Al * Bh)) << 32 // // Note, the algorithm will use pmuldq which operates directly // on the lower 32-bit (Al or Bl) of a lane and writes the // result to the full 64-bits of the lane of the destination. // For this reason we don't need shifts to isolate the lower // 32-bits, however, we will need to use shifts to isolate the // high 32-bits when doing calculations, i.e., Ah == A >> 32. 
// // The full sequence then is as follows: // A' = A // A' = A' >> 32 // A' = Ah' * Bl // B' = B // B' = B' >> 32 // B' = Bh' * Al // B' = B' + A' // B' = B' << 32 // A' = A // A' = Al' * Bl // A' = A' + B' // dst = A' // A' = A let rhs_1 = ctx.alloc_tmp(types::I64X2).only_reg().unwrap(); ctx.emit(Inst::gen_move(rhs_1, rhs, ty)); // A' = A' >> 32 // A' = Ah' * Bl ctx.emit(Inst::xmm_rmi_reg( SseOpcode::Psrlq, RegMemImm::imm(32), rhs_1, )); ctx.emit(Inst::xmm_rm_r( SseOpcode::Pmuludq, RegMem::reg(lhs.clone()), rhs_1, )); // B' = B let lhs_1 = ctx.alloc_tmp(types::I64X2).only_reg().unwrap(); ctx.emit(Inst::gen_move(lhs_1, lhs, ty)); // B' = B' >> 32 // B' = Bh' * Al ctx.emit(Inst::xmm_rmi_reg( SseOpcode::Psrlq, RegMemImm::imm(32), lhs_1, )); ctx.emit(Inst::xmm_rm_r(SseOpcode::Pmuludq, RegMem::reg(rhs), lhs_1)); // B' = B' + A' // B' = B' << 32 ctx.emit(Inst::xmm_rm_r( SseOpcode::Paddq, RegMem::reg(rhs_1.to_reg()), lhs_1, )); ctx.emit(Inst::xmm_rmi_reg( SseOpcode::Psllq, RegMemImm::imm(32), lhs_1, )); // A' = A // A' = Al' * Bl // A' = A' + B' // dst = A' ctx.emit(Inst::gen_move(rhs_1, rhs, ty)); ctx.emit(Inst::xmm_rm_r( SseOpcode::Pmuludq, RegMem::reg(lhs.clone()), rhs_1, )); ctx.emit(Inst::xmm_rm_r( SseOpcode::Paddq, RegMem::reg(lhs_1.to_reg()), rhs_1, )); ctx.emit(Inst::gen_move(dst, rhs_1.to_reg(), ty)); } } else if ty.lane_count() > 1 { // Emit single instruction lowerings for the remaining vector // multiplications. let sse_op = match ty { types::I16X8 => SseOpcode::Pmullw, types::I32X4 => SseOpcode::Pmulld, _ => panic!("Unsupported type for packed imul instruction: {}", ty), }; let lhs = put_input_in_reg(ctx, inputs[0]); let rhs = input_to_reg_mem(ctx, inputs[1]); let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); // Move the `lhs` to the same register as `dst`. ctx.emit(Inst::gen_move(dst, lhs, ty)); ctx.emit(Inst::xmm_rm_r(sse_op, rhs, dst)); } else if ty == types::I128 || ty == types::B128 { // Handle 128-bit multiplications. 
let lhs = put_input_in_regs(ctx, inputs[0]); let rhs = put_input_in_regs(ctx, inputs[1]); let dst = get_output_reg(ctx, outputs[0]); assert_eq!(lhs.len(), 2); assert_eq!(rhs.len(), 2); assert_eq!(dst.len(), 2); // mul: // dst_lo = lhs_lo * rhs_lo // dst_hi = umulhi(lhs_lo, rhs_lo) + lhs_lo * rhs_hi + lhs_hi * rhs_lo // // so we emit: // mov dst_lo, lhs_lo // mul dst_lo, rhs_lo // mov dst_hi, lhs_lo // mul dst_hi, rhs_hi // mov tmp, lhs_hi // mul tmp, rhs_lo // add dst_hi, tmp // mov rax, lhs_lo // umulhi rhs_lo // implicit rax arg/dst // add dst_hi, rax let tmp = ctx.alloc_tmp(types::I64).only_reg().unwrap(); ctx.emit(Inst::gen_move(dst.regs()[0], lhs.regs()[0], types::I64)); ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::Mul, RegMemImm::reg(rhs.regs()[0]), dst.regs()[0], )); ctx.emit(Inst::gen_move(dst.regs()[1], lhs.regs()[0], types::I64)); ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::Mul, RegMemImm::reg(rhs.regs()[1]), dst.regs()[1], )); ctx.emit(Inst::gen_move(tmp, lhs.regs()[1], types::I64)); ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::Mul, RegMemImm::reg(rhs.regs()[0]), tmp, )); ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::Add, RegMemImm::reg(tmp.to_reg()), dst.regs()[1], )); ctx.emit(Inst::gen_move( Writable::from_reg(regs::rax()), lhs.regs()[0], types::I64, )); ctx.emit(Inst::mul_hi( OperandSize::Size64, /* signed = */ false, RegMem::reg(rhs.regs()[0]), )); ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::Add, RegMemImm::reg(regs::rdx()), dst.regs()[1], )); } else { let size = if ty == types::I64 { OperandSize::Size64 } else { OperandSize::Size32 }; let alu_op = AluRmiROpcode::Mul; // For commutative operations, try to commute operands if one is // an immediate or direct memory reference. Do so by converting // LHS to RMI; if reg, then always convert RHS to RMI; else, use // LHS as RMI and convert RHS to reg. 
let lhs = input_to_reg_mem_imm(ctx, inputs[0]); let (lhs, rhs) = if let RegMemImm::Reg { reg: lhs_reg } = lhs { let rhs = input_to_reg_mem_imm(ctx, inputs[1]); (lhs_reg, rhs) } else { let rhs_reg = put_input_in_reg(ctx, inputs[1]); (rhs_reg, lhs) }; let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); ctx.emit(Inst::mov_r_r(OperandSize::Size64, lhs, dst)); ctx.emit(Inst::alu_rmi_r(size, alu_op, rhs, dst)); } } Opcode::BandNot => { let ty = ty.unwrap(); debug_assert!(ty.is_vector() && ty.bytes() == 16); let lhs = input_to_reg_mem(ctx, inputs[0]); let rhs = put_input_in_reg(ctx, inputs[1]); let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); let sse_op = match ty { types::F32X4 => SseOpcode::Andnps, types::F64X2 => SseOpcode::Andnpd, _ => SseOpcode::Pandn, }; // Note the flipping of operands: the `rhs` operand is used as the destination instead // of the `lhs` as in the other bit operations above (e.g. `band`). ctx.emit(Inst::gen_move(dst, rhs, ty)); ctx.emit(Inst::xmm_rm_r(sse_op, lhs, dst)); } Opcode::Iabs => { let src = input_to_reg_mem(ctx, inputs[0]); let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); let ty = ty.unwrap(); if ty == types::I64X2 { if isa_flags.use_avx512f_simd() || isa_flags.use_avx512vl_simd() { ctx.emit(Inst::xmm_unary_rm_r_evex(Avx512Opcode::Vpabsq, src, dst)); } else { // If `VPABSQ` from AVX512 is unavailable, we use a separate register, `tmp`, to // contain the results of `0 - src` and then blend in those results with // `BLENDVPD` if the MSB of `tmp` was set to 1 (i.e. if `tmp` was negative or, // conversely, if `src` was originally positive). // Emit all 0s into the `tmp` register. let tmp = ctx.alloc_tmp(ty).only_reg().unwrap(); ctx.emit(Inst::xmm_rm_r(SseOpcode::Pxor, RegMem::from(tmp), tmp)); // Subtract the lanes from 0 and set up `dst`. 
ctx.emit(Inst::xmm_rm_r(SseOpcode::Psubq, src.clone(), tmp)); ctx.emit(Inst::gen_move(dst, tmp.to_reg(), ty)); // Choose the subtracted lanes when `tmp` has an MSB of 1. BLENDVPD's semantics // require the "choice" mask to be in XMM0. ctx.emit(Inst::gen_move( Writable::from_reg(regs::xmm0()), tmp.to_reg(), ty, )); ctx.emit(Inst::xmm_rm_r(SseOpcode::Blendvpd, src, dst)); } } else if ty.is_vector() { let opcode = match ty { types::I8X16 => SseOpcode::Pabsb, types::I16X8 => SseOpcode::Pabsw, types::I32X4 => SseOpcode::Pabsd, _ => panic!("Unsupported type for packed iabs instruction: {}", ty), }; ctx.emit(Inst::xmm_unary_rm_r(opcode, src, dst)); } else { unimplemented!("iabs is unimplemented for non-vector type: {}", ty); } } Opcode::Imax | Opcode::Umax | Opcode::Imin | Opcode::Umin => { let lhs = put_input_in_reg(ctx, inputs[0]); let rhs = input_to_reg_mem(ctx, inputs[1]); let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); let ty = ty.unwrap(); if ty.is_vector() { let sse_op = match op { Opcode::Imax => match ty { types::I8X16 => SseOpcode::Pmaxsb, types::I16X8 => SseOpcode::Pmaxsw, types::I32X4 => SseOpcode::Pmaxsd, _ => panic!("Unsupported type for packed {} instruction: {}", op, ty), }, Opcode::Umax => match ty { types::I8X16 => SseOpcode::Pmaxub, types::I16X8 => SseOpcode::Pmaxuw, types::I32X4 => SseOpcode::Pmaxud, _ => panic!("Unsupported type for packed {} instruction: {}", op, ty), }, Opcode::Imin => match ty { types::I8X16 => SseOpcode::Pminsb, types::I16X8 => SseOpcode::Pminsw, types::I32X4 => SseOpcode::Pminsd, _ => panic!("Unsupported type for packed {} instruction: {}", op, ty), }, Opcode::Umin => match ty { types::I8X16 => SseOpcode::Pminub, types::I16X8 => SseOpcode::Pminuw, types::I32X4 => SseOpcode::Pminud, _ => panic!("Unsupported type for packed {} instruction: {}", op, ty), }, _ => unreachable!("This is a bug: the external and internal `match op` should be over the same opcodes."), }; // Move the `lhs` to the same register as `dst`. 
ctx.emit(Inst::gen_move(dst, lhs, ty)); ctx.emit(Inst::xmm_rm_r(sse_op, rhs, dst)); } else { panic!("Unsupported type for {} instruction: {}", op, ty); } } Opcode::Bnot => { let ty = ty.unwrap(); if ty.is_vector() { let src = put_input_in_reg(ctx, inputs[0]); let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); ctx.emit(Inst::gen_move(dst, src, ty)); let tmp = ctx.alloc_tmp(ty).only_reg().unwrap(); ctx.emit(Inst::equals(ty, RegMem::from(tmp), tmp)); ctx.emit(Inst::xor(ty, RegMem::from(tmp), dst)); } else if ty == types::I128 || ty == types::B128 { let src = put_input_in_regs(ctx, inputs[0]); let dst = get_output_reg(ctx, outputs[0]); ctx.emit(Inst::gen_move(dst.regs()[0], src.regs()[0], types::I64)); ctx.emit(Inst::not(OperandSize::Size64, dst.regs()[0])); ctx.emit(Inst::gen_move(dst.regs()[1], src.regs()[1], types::I64)); ctx.emit(Inst::not(OperandSize::Size64, dst.regs()[1])); } else if ty.is_bool() { unimplemented!("bool bnot") } else { let src = put_input_in_reg(ctx, inputs[0]); let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); ctx.emit(Inst::gen_move(dst, src, ty)); ctx.emit(Inst::not(OperandSize::from_ty(ty), dst)); } } Opcode::Bitselect => { let ty = ty.unwrap(); let condition = put_input_in_reg(ctx, inputs[0]); let if_true = put_input_in_reg(ctx, inputs[1]); let if_false = input_to_reg_mem(ctx, inputs[2]); let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); if ty.is_vector() { let tmp1 = ctx.alloc_tmp(ty).only_reg().unwrap(); ctx.emit(Inst::gen_move(tmp1, if_true, ty)); ctx.emit(Inst::and(ty, RegMem::reg(condition.clone()), tmp1)); let tmp2 = ctx.alloc_tmp(ty).only_reg().unwrap(); ctx.emit(Inst::gen_move(tmp2, condition, ty)); ctx.emit(Inst::and_not(ty, if_false, tmp2)); ctx.emit(Inst::gen_move(dst, tmp2.to_reg(), ty)); ctx.emit(Inst::or(ty, RegMem::from(tmp1), dst)); } else { unimplemented!("no lowering for scalar bitselect instruction") } } Opcode::Vselect => { let ty = ty.unwrap(); let condition = put_input_in_reg(ctx, 
inputs[0]); let condition_ty = ctx.input_ty(insn, 0); let if_true = input_to_reg_mem(ctx, inputs[1]); let if_false = put_input_in_reg(ctx, inputs[2]); let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); if ty.is_vector() { // `vselect` relies on the bit representation of the condition: // vector boolean types are defined in Cranelift to be all 1s or // all 0s. This lowering relies on that fact to use x86's // variable blend instructions, which look at the _high_bit_ of // the condition mask. All the bits of vector booleans will // match (all 1s or all 0s), so we can just use the high bit. assert!(condition_ty.lane_type().is_bool()); // Variable blend instructions expect the condition mask to be // in XMM0. let xmm0 = Writable::from_reg(regs::xmm0()); ctx.emit(Inst::gen_move(xmm0, condition, ty)); // Match up the source and destination registers for regalloc. ctx.emit(Inst::gen_move(dst, if_false, ty)); // Technically PBLENDVB would work in all cases (since the bytes // inside the mask will be all 1s or 0s we can blend // byte-by-byte instead of word-by-word, e.g.) but // type-specialized versions are included here for clarity when // troubleshooting and due to slight improvements in // latency/throughput on certain processor families. let opcode = match condition_ty { types::B64X2 => SseOpcode::Blendvpd, types::B32X4 => SseOpcode::Blendvps, types::B16X8 | types::B8X16 => SseOpcode::Pblendvb, _ => unimplemented!("unable lower vselect for type: {}", condition_ty), }; ctx.emit(Inst::xmm_rm_r(opcode, if_true, dst)); } else { unimplemented!("no lowering for scalar vselect instruction") } } Opcode::Ishl | Opcode::Ushr | Opcode::Sshr | Opcode::Rotl | Opcode::Rotr => { let dst_ty = ctx.output_ty(insn, 0); debug_assert_eq!(ctx.input_ty(insn, 0), dst_ty); if !dst_ty.is_vector() && dst_ty.bits() <= 64 { // Scalar shifts on x86 have various encodings: // - shift by one bit, e.g. `SAL r/m8, 1` (not used here) // - shift by an immediate amount, e.g. 
`SAL r/m8, imm8` // - shift by a dynamic amount but only from the CL register, e.g. `SAL r/m8, CL`. // This implementation uses the last two encoding methods. let (size, lhs) = match dst_ty { types::I8 | types::I16 => match op { Opcode::Ishl => (OperandSize::Size32, put_input_in_reg(ctx, inputs[0])), Opcode::Ushr => ( OperandSize::Size32, extend_input_to_reg(ctx, inputs[0], ExtSpec::ZeroExtendTo32), ), Opcode::Sshr => ( OperandSize::Size32, extend_input_to_reg(ctx, inputs[0], ExtSpec::SignExtendTo32), ), Opcode::Rotl | Opcode::Rotr => ( OperandSize::from_ty(dst_ty), put_input_in_reg(ctx, inputs[0]), ), _ => unreachable!(), }, types::I32 | types::I64 => ( OperandSize::from_ty(dst_ty), put_input_in_reg(ctx, inputs[0]), ), _ => unreachable!("unhandled output type for shift/rotates: {}", dst_ty), }; let (count, rhs) = if let Some(cst) = ctx.get_input_as_source_or_const(insn, 1).constant { // Mask count, according to Cranelift's semantics. let cst = (cst as u8) & (dst_ty.bits() as u8 - 1); (Some(cst), None) } else { // We can ignore upper registers if shift amount is multi-reg, because we // are taking the shift amount mod 2^(lhs_width) anyway. 
(None, Some(put_input_in_regs(ctx, inputs[1]).regs()[0])) }; let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); let shift_kind = match op { Opcode::Ishl => ShiftKind::ShiftLeft, Opcode::Ushr => ShiftKind::ShiftRightLogical, Opcode::Sshr => ShiftKind::ShiftRightArithmetic, Opcode::Rotl => ShiftKind::RotateLeft, Opcode::Rotr => ShiftKind::RotateRight, _ => unreachable!(), }; let w_rcx = Writable::from_reg(regs::rcx()); ctx.emit(Inst::mov_r_r(OperandSize::Size64, lhs, dst)); if count.is_none() { ctx.emit(Inst::mov_r_r(OperandSize::Size64, rhs.unwrap(), w_rcx)); } ctx.emit(Inst::shift_r(size, shift_kind, count, dst)); } else if dst_ty == types::I128 { let amt_src = put_input_in_regs(ctx, inputs[1]).regs()[0]; let src = put_input_in_regs(ctx, inputs[0]); let dst = get_output_reg(ctx, outputs[0]); match op { Opcode::Ishl => { emit_shl_i128(ctx, src, dst, amt_src); } Opcode::Ushr => { emit_shr_i128(ctx, src, dst, amt_src, /* is_signed = */ false); } Opcode::Sshr => { emit_shr_i128(ctx, src, dst, amt_src, /* is_signed = */ true); } Opcode::Rotl => { // (mov tmp, src) // (shl.i128 tmp, amt) // (mov dst, src) // (ushr.i128 dst, 128-amt) // (or dst, tmp) let tmp = ctx.alloc_tmp(types::I128); emit_shl_i128(ctx, src, tmp, amt_src); let inv_amt = ctx.alloc_tmp(types::I64).only_reg().unwrap(); ctx.emit(Inst::imm(OperandSize::Size64, 128, inv_amt)); ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::Sub, RegMemImm::reg(amt_src), inv_amt, )); emit_shr_i128( ctx, src, dst, inv_amt.to_reg(), /* is_signed = */ false, ); ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::Or, RegMemImm::reg(tmp.regs()[0].to_reg()), dst.regs()[0], )); ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::Or, RegMemImm::reg(tmp.regs()[1].to_reg()), dst.regs()[1], )); } Opcode::Rotr => { // (mov tmp, src) // (ushr.i128 tmp, amt) // (mov dst, src) // (shl.i128 dst, 128-amt) // (or dst, tmp) let tmp = ctx.alloc_tmp(types::I128); emit_shr_i128(ctx, src, tmp, amt_src, 
/* is_signed = */ false); let inv_amt = ctx.alloc_tmp(types::I64).only_reg().unwrap(); ctx.emit(Inst::imm(OperandSize::Size64, 128, inv_amt)); ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::Sub, RegMemImm::reg(amt_src), inv_amt, )); emit_shl_i128(ctx, src, dst, inv_amt.to_reg()); ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::Or, RegMemImm::reg(tmp.regs()[0].to_reg()), dst.regs()[0], )); ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::Or, RegMemImm::reg(tmp.regs()[1].to_reg()), dst.regs()[1], )); } _ => unreachable!(), } } else if dst_ty == types::I8X16 && (op == Opcode::Ishl || op == Opcode::Ushr) { // Since the x86 instruction set does not have any 8x16 shift instructions (even in higher feature sets // like AVX), we lower the `ishl.i8x16` and `ushr.i8x16` to a sequence of instructions. The basic idea, // whether the `shift_by` amount is an immediate or not, is to use a 16x8 shift and then mask off the // incorrect bits to 0s (see below for handling signs in `sshr.i8x16`). let src = put_input_in_reg(ctx, inputs[0]); let shift_by = input_to_reg_mem_imm(ctx, inputs[1]); let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); // If necessary, move the shift index into the lowest bits of a vector register. let shift_by_moved = match &shift_by { RegMemImm::Imm { .. } => shift_by.clone(), RegMemImm::Reg { reg } => { let tmp_shift_by = ctx.alloc_tmp(dst_ty).only_reg().unwrap(); ctx.emit(Inst::gpr_to_xmm( SseOpcode::Movd, RegMem::reg(*reg), OperandSize::Size32, tmp_shift_by, )); RegMemImm::reg(tmp_shift_by.to_reg()) } RegMemImm::Mem { .. } => unimplemented!("load shift amount to XMM register"), }; // Shift `src` using 16x8. Unfortunately, a 16x8 shift will only be correct for half of the lanes; // the others must be fixed up with the mask below. 
let shift_opcode = match op { Opcode::Ishl => SseOpcode::Psllw, Opcode::Ushr => SseOpcode::Psrlw, _ => unimplemented!("{} is not implemented for type {}", op, dst_ty), }; ctx.emit(Inst::gen_move(dst, src, dst_ty)); ctx.emit(Inst::xmm_rmi_reg(shift_opcode, shift_by_moved, dst)); // Choose which mask to use to fixup the shifted lanes. Since we must use a 16x8 shift, we need to fix // up the bits that migrate from one half of the lane to the other. Each 16-byte mask (which rustfmt // forces to multiple lines) is indexed by the shift amount: e.g. if we shift right by 0 (no movement), // we want to retain all the bits so we mask with `0xff`; if we shift right by 1, we want to retain all // bits except the MSB so we mask with `0x7f`; etc. const USHR_MASKS: [u8; 128] = [ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, ]; const SHL_MASKS: [u8; 128] = [ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 
0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, ]; let mask = match op { Opcode::Ishl => &SHL_MASKS, Opcode::Ushr => &USHR_MASKS, _ => unimplemented!("{} is not implemented for type {}", op, dst_ty), }; // Figure out the address of the shift mask. let mask_address = match shift_by { RegMemImm::Imm { simm32 } => { // When the shift amount is known, we can statically (i.e. at compile time) determine the mask to // use and only emit that. debug_assert!(simm32 < 8); let mask_offset = simm32 as usize * 16; let mask_constant = ctx.use_constant(VCodeConstantData::WellKnown( &mask[mask_offset..mask_offset + 16], )); SyntheticAmode::ConstantOffset(mask_constant) } RegMemImm::Reg { reg } => { // Otherwise, we must emit the entire mask table and dynamically (i.e. at run time) find the correct // mask offset in the table. We do this use LEA to find the base address of the mask table and then // complex addressing to offset to the right mask: `base_address + shift_by * 4` let base_mask_address = ctx.alloc_tmp(types::I64).only_reg().unwrap(); let mask_offset = ctx.alloc_tmp(types::I64).only_reg().unwrap(); let mask_constant = ctx.use_constant(VCodeConstantData::WellKnown(mask)); ctx.emit(Inst::lea( SyntheticAmode::ConstantOffset(mask_constant), base_mask_address, )); ctx.emit(Inst::gen_move(mask_offset, reg, types::I64)); ctx.emit(Inst::shift_r( OperandSize::Size64, ShiftKind::ShiftLeft, Some(4), mask_offset, )); Amode::imm_reg_reg_shift( 0, base_mask_address.to_reg(), mask_offset.to_reg(), 0, ) .into() } RegMemImm::Mem { addr: _ } => unimplemented!("load mask address"), }; // Load the mask into a temporary register, `mask_value`. 
let mask_value = ctx.alloc_tmp(dst_ty).only_reg().unwrap(); ctx.emit(Inst::load(dst_ty, mask_address, mask_value, ExtKind::None)); // Remove the bits that would have disappeared in a true 8x16 shift. TODO in the future, // this AND instruction could be coalesced with the load above. let sse_op = match dst_ty { types::F32X4 => SseOpcode::Andps, types::F64X2 => SseOpcode::Andpd, _ => SseOpcode::Pand, }; ctx.emit(Inst::xmm_rm_r(sse_op, RegMem::from(mask_value), dst)); } else if dst_ty == types::I8X16 && op == Opcode::Sshr { // Since the x86 instruction set does not have an 8x16 shift instruction and the approach used for // `ishl` and `ushr` cannot be easily used (the masks do not preserve the sign), we use a different // approach here: separate the low and high lanes, shift them separately, and merge them into the final // result. Visually, this looks like the following, where `src.i8x16 = [s0, s1, ..., s15]: // low.i16x8 = [(s0, s0), (s1, s1), ..., (s7, s7)] // shifted_low.i16x8 = shift each lane of `low` // high.i16x8 = [(s8, s8), (s9, s9), ..., (s15, s15)] // shifted_high.i16x8 = shift each lane of `high` // dst.i8x16 = [s0'', s1'', ..., s15''] let src = put_input_in_reg(ctx, inputs[0]); let shift_by = input_to_reg_mem_imm(ctx, inputs[1]); let shift_by_ty = ctx.input_ty(insn, 1); let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); // In order for PACKSSWB later to only use the high byte of each 16x8 lane, we shift right an extra 8 // bits, relying on PSRAW to fill in the upper bits appropriately. let bigger_shift_by = match shift_by { // When we know the shift amount at compile time, we add the extra shift amount statically. RegMemImm::Imm { simm32 } => RegMemImm::imm(simm32 + 8), // Otherwise we add instructions to add the extra shift amount and move the value into an XMM // register. 
RegMemImm::Reg { reg } => { let bigger_shift_by_gpr = ctx.alloc_tmp(shift_by_ty).only_reg().unwrap(); ctx.emit(Inst::mov_r_r(OperandSize::Size64, reg, bigger_shift_by_gpr)); let size = if shift_by_ty == types::I64 { OperandSize::Size64 } else { OperandSize::Size32 }; let imm = RegMemImm::imm(8); ctx.emit(Inst::alu_rmi_r( size, AluRmiROpcode::Add, imm, bigger_shift_by_gpr, )); let bigger_shift_by_xmm = ctx.alloc_tmp(dst_ty).only_reg().unwrap(); ctx.emit(Inst::gpr_to_xmm( SseOpcode::Movd, RegMem::from(bigger_shift_by_gpr), OperandSize::Size32, bigger_shift_by_xmm, )); RegMemImm::reg(bigger_shift_by_xmm.to_reg()) } RegMemImm::Mem { .. } => unimplemented!("load shift amount to XMM register"), }; // Unpack and shift the lower lanes of `src` into the `dst` register. ctx.emit(Inst::gen_move(dst, src, dst_ty)); ctx.emit(Inst::xmm_rm_r(SseOpcode::Punpcklbw, RegMem::from(dst), dst)); ctx.emit(Inst::xmm_rmi_reg( SseOpcode::Psraw, bigger_shift_by.clone(), dst, )); // Unpack and shift the upper lanes of `src` into a temporary register, `upper_lanes`. let upper_lanes = ctx.alloc_tmp(dst_ty).only_reg().unwrap(); ctx.emit(Inst::gen_move(upper_lanes, src, dst_ty)); ctx.emit(Inst::xmm_rm_r( SseOpcode::Punpckhbw, RegMem::from(upper_lanes), upper_lanes, )); ctx.emit(Inst::xmm_rmi_reg( SseOpcode::Psraw, bigger_shift_by, upper_lanes, )); // Merge the upper and lower shifted lanes into `dst`. ctx.emit(Inst::xmm_rm_r( SseOpcode::Packsswb, RegMem::from(upper_lanes), dst, )); } else if dst_ty == types::I64X2 && op == Opcode::Sshr { // The `sshr.i8x16` CLIF instruction has no single x86 instruction in the older feature sets; newer ones // like AVX512VL and AVX512F include VPSRAQ, a 128-bit instruction that would fit here, but this backend // does not currently have support for EVEX encodings (TODO when EVEX support is available, add an // alternate lowering here). 
To remedy this, we extract each 64-bit lane to a GPR, shift each using a // scalar instruction, and insert the shifted values back in the `dst` XMM register. let src = put_input_in_reg(ctx, inputs[0]); let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); ctx.emit(Inst::gen_move(dst, src, dst_ty)); // Extract the upper and lower lanes into temporary GPRs. let lower_lane = ctx.alloc_tmp(types::I64).only_reg().unwrap(); emit_extract_lane(ctx, src, lower_lane, 0, types::I64); let upper_lane = ctx.alloc_tmp(types::I64).only_reg().unwrap(); emit_extract_lane(ctx, src, upper_lane, 1, types::I64); // Shift each value. let mut shift = |reg: Writable<Reg>| { let kind = ShiftKind::ShiftRightArithmetic; if let Some(shift_by) = ctx.get_input_as_source_or_const(insn, 1).constant { // Mask the shift amount according to Cranelift's semantics. let shift_by = (shift_by as u8) & (types::I64.bits() as u8 - 1); ctx.emit(Inst::shift_r( OperandSize::Size64, kind, Some(shift_by), reg, )); } else { let dynamic_shift_by = put_input_in_reg(ctx, inputs[1]); let w_rcx = Writable::from_reg(regs::rcx()); ctx.emit(Inst::mov_r_r(OperandSize::Size64, dynamic_shift_by, w_rcx)); ctx.emit(Inst::shift_r(OperandSize::Size64, kind, None, reg)); }; }; shift(lower_lane); shift(upper_lane); // Insert the scalar values back into the `dst` vector. emit_insert_lane(ctx, RegMem::from(lower_lane), dst, 0, types::I64); emit_insert_lane(ctx, RegMem::from(upper_lane), dst, 1, types::I64); } else { // For the remaining packed shifts not covered above, x86 has implementations that can either: // - shift using an immediate // - shift using a dynamic value given in the lower bits of another XMM register. 
let src = put_input_in_reg(ctx, inputs[0]); let shift_by = input_to_reg_mem_imm(ctx, inputs[1]); let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); let sse_op = match dst_ty { types::I16X8 => match op { Opcode::Ishl => SseOpcode::Psllw, Opcode::Ushr => SseOpcode::Psrlw, Opcode::Sshr => SseOpcode::Psraw, _ => unimplemented!("{} is not implemented for type {}", op, dst_ty), }, types::I32X4 => match op { Opcode::Ishl => SseOpcode::Pslld, Opcode::Ushr => SseOpcode::Psrld, Opcode::Sshr => SseOpcode::Psrad, _ => unimplemented!("{} is not implemented for type {}", op, dst_ty), }, types::I64X2 => match op { Opcode::Ishl => SseOpcode::Psllq, Opcode::Ushr => SseOpcode::Psrlq, _ => unimplemented!("{} is not implemented for type {}", op, dst_ty), }, _ => unreachable!(), }; // If necessary, move the shift index into the lowest bits of a vector register. let shift_by = match shift_by { RegMemImm::Imm { .. } => shift_by, RegMemImm::Reg { reg } => { let tmp_shift_by = ctx.alloc_tmp(dst_ty).only_reg().unwrap(); ctx.emit(Inst::gpr_to_xmm( SseOpcode::Movd, RegMem::reg(reg), OperandSize::Size32, tmp_shift_by, )); RegMemImm::reg(tmp_shift_by.to_reg()) } RegMemImm::Mem { .. } => unimplemented!("load shift amount to XMM register"), }; // Move the `src` to the same register as `dst`. ctx.emit(Inst::gen_move(dst, src, dst_ty)); ctx.emit(Inst::xmm_rmi_reg(sse_op, shift_by, dst)); } } Opcode::Ineg => { let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); let ty = ty.unwrap(); if ty.is_vector() { // Zero's out a register and then does a packed subtraction // of the input from the register. 
let src = input_to_reg_mem(ctx, inputs[0]); let tmp = ctx.alloc_tmp(types::I32X4).only_reg().unwrap(); let subtract_opcode = match ty { types::I8X16 => SseOpcode::Psubb, types::I16X8 => SseOpcode::Psubw, types::I32X4 => SseOpcode::Psubd, types::I64X2 => SseOpcode::Psubq, _ => panic!("Unsupported type for Ineg instruction, found {}", ty), }; // Note we must zero out a tmp instead of using the destination register since // the desitnation could be an alias for the source input register ctx.emit(Inst::xmm_rm_r( SseOpcode::Pxor, RegMem::reg(tmp.to_reg()), tmp, )); ctx.emit(Inst::xmm_rm_r(subtract_opcode, src, tmp)); ctx.emit(Inst::xmm_unary_rm_r( SseOpcode::Movapd, RegMem::reg(tmp.to_reg()), dst, )); } else { let src = put_input_in_reg(ctx, inputs[0]); ctx.emit(Inst::gen_move(dst, src, ty)); ctx.emit(Inst::neg(OperandSize::from_ty(ty), dst)); } } Opcode::Clz => { let orig_ty = ty.unwrap(); if isa_flags.use_lzcnt() && (orig_ty == types::I32 || orig_ty == types::I64) { // We can use a plain lzcnt instruction here. Note no special handling is required // for zero inputs, because the machine instruction does what the CLIF expects for // zero, i.e. it returns zero. 
let src = input_to_reg_mem(ctx, inputs[0]); let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); ctx.emit(Inst::unary_rm_r( OperandSize::from_ty(orig_ty), UnaryRmROpcode::Lzcnt, src, dst, )); return Ok(()); } // General formula using bit-scan reverse (BSR): // mov -1, %dst // bsr %src, %tmp // cmovz %dst, %tmp // mov $(size_bits - 1), %dst // sub %tmp, %dst if orig_ty == types::I128 { // clz upper, tmp1 // clz lower, dst // add dst, 64 // cmp tmp1, 64 // cmovnz tmp1, dst let dsts = get_output_reg(ctx, outputs[0]); let dst = dsts.regs()[0]; let tmp1 = ctx.alloc_tmp(types::I64).only_reg().unwrap(); let srcs = put_input_in_regs(ctx, inputs[0]); let src_lo = srcs.regs()[0]; let src_hi = srcs.regs()[1]; emit_clz(ctx, types::I64, types::I64, src_hi, tmp1); emit_clz(ctx, types::I64, types::I64, src_lo, dst); ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::Add, RegMemImm::imm(64), dst, )); ctx.emit(Inst::cmp_rmi_r( OperandSize::Size64, RegMemImm::imm(64), tmp1.to_reg(), )); ctx.emit(Inst::cmove( OperandSize::Size64, CC::NZ, RegMem::reg(tmp1.to_reg()), dst, )); ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::Xor, RegMemImm::reg(dsts.regs()[1].to_reg()), dsts.regs()[1], )); } else { let (ext_spec, ty) = match orig_ty { types::I8 | types::I16 => (Some(ExtSpec::ZeroExtendTo32), types::I32), a if a == types::I32 || a == types::I64 => (None, a), _ => unreachable!(), }; let src = if let Some(ext_spec) = ext_spec { extend_input_to_reg(ctx, inputs[0], ext_spec) } else { put_input_in_reg(ctx, inputs[0]) }; let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); emit_clz(ctx, orig_ty, ty, src, dst); } } Opcode::Ctz => { let orig_ty = ctx.input_ty(insn, 0); if isa_flags.use_bmi1() && (orig_ty == types::I32 || orig_ty == types::I64) { // We can use a plain tzcnt instruction here. Note no special handling is required // for zero inputs, because the machine instruction does what the CLIF expects for // zero, i.e. it returns zero. 
let src = input_to_reg_mem(ctx, inputs[0]); let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); ctx.emit(Inst::unary_rm_r( OperandSize::from_ty(orig_ty), UnaryRmROpcode::Tzcnt, src, dst, )); return Ok(()); } // General formula using bit-scan forward (BSF): // bsf %src, %dst // mov $(size_bits), %tmp // cmovz %tmp, %dst if orig_ty == types::I128 { // ctz src_lo, dst // ctz src_hi, tmp1 // add tmp1, 64 // cmp dst, 64 // cmovz tmp1, dst let dsts = get_output_reg(ctx, outputs[0]); let dst = dsts.regs()[0]; let tmp1 = ctx.alloc_tmp(types::I64).only_reg().unwrap(); let srcs = put_input_in_regs(ctx, inputs[0]); let src_lo = srcs.regs()[0]; let src_hi = srcs.regs()[1]; emit_ctz(ctx, types::I64, types::I64, src_lo, dst); emit_ctz(ctx, types::I64, types::I64, src_hi, tmp1); ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::Add, RegMemImm::imm(64), tmp1, )); ctx.emit(Inst::cmp_rmi_r( OperandSize::Size64, RegMemImm::imm(64), dst.to_reg(), )); ctx.emit(Inst::cmove( OperandSize::Size64, CC::Z, RegMem::reg(tmp1.to_reg()), dst, )); ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::Xor, RegMemImm::reg(dsts.regs()[1].to_reg()), dsts.regs()[1], )); } else { let ty = if orig_ty.bits() < 32 { types::I32 } else { orig_ty }; debug_assert!(ty == types::I32 || ty == types::I64); let src = put_input_in_reg(ctx, inputs[0]); let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); emit_ctz(ctx, orig_ty, ty, src, dst); } } Opcode::Popcnt => { let ty_tmp = ty.unwrap(); if !ty_tmp.is_vector() { let (ext_spec, ty) = match ctx.input_ty(insn, 0) { types::I8 | types::I16 => (Some(ExtSpec::ZeroExtendTo32), types::I32), a if a == types::I32 || a == types::I64 || a == types::I128 => (None, a), _ => unreachable!(), }; if isa_flags.use_popcnt() { match ty { types::I32 | types::I64 => { let src = input_to_reg_mem(ctx, inputs[0]); let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); ctx.emit(Inst::unary_rm_r( OperandSize::from_ty(ty), 
UnaryRmROpcode::Popcnt, src, dst, )); return Ok(()); } types::I128 => { // The number of ones in a 128-bits value is the plain sum of the number of // ones in its low and high parts. No risk of overflow here. let dsts = get_output_reg(ctx, outputs[0]); let dst = dsts.regs()[0]; let tmp = ctx.alloc_tmp(types::I64).only_reg().unwrap(); let srcs = put_input_in_regs(ctx, inputs[0]); let src_lo = srcs.regs()[0]; let src_hi = srcs.regs()[1]; ctx.emit(Inst::unary_rm_r( OperandSize::Size64, UnaryRmROpcode::Popcnt, RegMem::reg(src_lo), dst, )); ctx.emit(Inst::unary_rm_r( OperandSize::Size64, UnaryRmROpcode::Popcnt, RegMem::reg(src_hi), tmp, )); ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::Add, RegMemImm::reg(tmp.to_reg()), dst, )); // Zero the result's high component. ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::Xor, RegMemImm::reg(dsts.regs()[1].to_reg()), dsts.regs()[1], )); return Ok(()); } _ => {} } } let (srcs, ty): (SmallVec<[RegMem; 2]>, Type) = if let Some(ext_spec) = ext_spec { ( smallvec![RegMem::reg(extend_input_to_reg(ctx, inputs[0], ext_spec))], ty, ) } else if ty == types::I128 { let regs = put_input_in_regs(ctx, inputs[0]); ( smallvec![RegMem::reg(regs.regs()[0]), RegMem::reg(regs.regs()[1])], types::I64, ) } else { // N.B.: explicitly put input in a reg here because the width of the instruction // into which this RM op goes may not match the width of the input type (in fact, // it won't for i32.popcnt), and we don't want a larger than necessary load. 
(smallvec![RegMem::reg(put_input_in_reg(ctx, inputs[0]))], ty) }; let mut dsts: SmallVec<[Reg; 2]> = smallvec![]; for src in srcs { let dst = ctx.alloc_tmp(types::I64).only_reg().unwrap(); dsts.push(dst.to_reg()); if ty == types::I64 { let tmp1 = ctx.alloc_tmp(types::I64).only_reg().unwrap(); let tmp2 = ctx.alloc_tmp(types::I64).only_reg().unwrap(); let cst = ctx.alloc_tmp(types::I64).only_reg().unwrap(); // mov src, tmp1 ctx.emit(Inst::mov64_rm_r(src.clone(), tmp1)); // shr $1, tmp1 ctx.emit(Inst::shift_r( OperandSize::Size64, ShiftKind::ShiftRightLogical, Some(1), tmp1, )); // mov 0x7777_7777_7777_7777, cst ctx.emit(Inst::imm(OperandSize::Size64, 0x7777777777777777, cst)); // andq cst, tmp1 ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::And, RegMemImm::reg(cst.to_reg()), tmp1, )); // mov src, tmp2 ctx.emit(Inst::mov64_rm_r(src, tmp2)); // sub tmp1, tmp2 ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::Sub, RegMemImm::reg(tmp1.to_reg()), tmp2, )); // shr $1, tmp1 ctx.emit(Inst::shift_r( OperandSize::Size64, ShiftKind::ShiftRightLogical, Some(1), tmp1, )); // and cst, tmp1 ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::And, RegMemImm::reg(cst.to_reg()), tmp1, )); // sub tmp1, tmp2 ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::Sub, RegMemImm::reg(tmp1.to_reg()), tmp2, )); // shr $1, tmp1 ctx.emit(Inst::shift_r( OperandSize::Size64, ShiftKind::ShiftRightLogical, Some(1), tmp1, )); // and cst, tmp1 ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::And, RegMemImm::reg(cst.to_reg()), tmp1, )); // sub tmp1, tmp2 ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::Sub, RegMemImm::reg(tmp1.to_reg()), tmp2, )); // mov tmp2, dst ctx.emit(Inst::mov64_rm_r(RegMem::reg(tmp2.to_reg()), dst)); // shr $4, dst ctx.emit(Inst::shift_r( OperandSize::Size64, ShiftKind::ShiftRightLogical, Some(4), dst, )); // add tmp2, dst ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::Add, 
RegMemImm::reg(tmp2.to_reg()), dst, )); // mov $0x0F0F_0F0F_0F0F_0F0F, cst ctx.emit(Inst::imm(OperandSize::Size64, 0x0F0F0F0F0F0F0F0F, cst)); // and cst, dst ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::And, RegMemImm::reg(cst.to_reg()), dst, )); // mov $0x0101_0101_0101_0101, cst ctx.emit(Inst::imm(OperandSize::Size64, 0x0101010101010101, cst)); // mul cst, dst ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::Mul, RegMemImm::reg(cst.to_reg()), dst, )); // shr $56, dst ctx.emit(Inst::shift_r( OperandSize::Size64, ShiftKind::ShiftRightLogical, Some(56), dst, )); } else { assert_eq!(ty, types::I32); let tmp1 = ctx.alloc_tmp(types::I64).only_reg().unwrap(); let tmp2 = ctx.alloc_tmp(types::I64).only_reg().unwrap(); // mov src, tmp1 ctx.emit(Inst::mov64_rm_r(src.clone(), tmp1)); // shr $1, tmp1 ctx.emit(Inst::shift_r( OperandSize::Size32, ShiftKind::ShiftRightLogical, Some(1), tmp1, )); // andq $0x7777_7777, tmp1 ctx.emit(Inst::alu_rmi_r( OperandSize::Size32, AluRmiROpcode::And, RegMemImm::imm(0x77777777), tmp1, )); // mov src, tmp2 ctx.emit(Inst::mov64_rm_r(src, tmp2)); // sub tmp1, tmp2 ctx.emit(Inst::alu_rmi_r( OperandSize::Size32, AluRmiROpcode::Sub, RegMemImm::reg(tmp1.to_reg()), tmp2, )); // shr $1, tmp1 ctx.emit(Inst::shift_r( OperandSize::Size32, ShiftKind::ShiftRightLogical, Some(1), tmp1, )); // and 0x7777_7777, tmp1 ctx.emit(Inst::alu_rmi_r( OperandSize::Size32, AluRmiROpcode::And, RegMemImm::imm(0x77777777), tmp1, )); // sub tmp1, tmp2 ctx.emit(Inst::alu_rmi_r( OperandSize::Size32, AluRmiROpcode::Sub, RegMemImm::reg(tmp1.to_reg()), tmp2, )); // shr $1, tmp1 ctx.emit(Inst::shift_r( OperandSize::Size32, ShiftKind::ShiftRightLogical, Some(1), tmp1, )); // and $0x7777_7777, tmp1 ctx.emit(Inst::alu_rmi_r( OperandSize::Size32, AluRmiROpcode::And, RegMemImm::imm(0x77777777), tmp1, )); // sub tmp1, tmp2 ctx.emit(Inst::alu_rmi_r( OperandSize::Size32, AluRmiROpcode::Sub, RegMemImm::reg(tmp1.to_reg()), tmp2, )); // mov tmp2, dst 
ctx.emit(Inst::mov64_rm_r(RegMem::reg(tmp2.to_reg()), dst)); // shr $4, dst ctx.emit(Inst::shift_r( OperandSize::Size32, ShiftKind::ShiftRightLogical, Some(4), dst, )); // add tmp2, dst ctx.emit(Inst::alu_rmi_r( OperandSize::Size32, AluRmiROpcode::Add, RegMemImm::reg(tmp2.to_reg()), dst, )); // and $0x0F0F_0F0F, dst ctx.emit(Inst::alu_rmi_r( OperandSize::Size32, AluRmiROpcode::And, RegMemImm::imm(0x0F0F0F0F), dst, )); // mul $0x0101_0101, dst ctx.emit(Inst::alu_rmi_r( OperandSize::Size32, AluRmiROpcode::Mul, RegMemImm::imm(0x01010101), dst, )); // shr $24, dst ctx.emit(Inst::shift_r( OperandSize::Size32, ShiftKind::ShiftRightLogical, Some(24), dst, )); } } if dsts.len() == 1 { let final_dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); ctx.emit(Inst::gen_move(final_dst, dsts[0], types::I64)); } else { assert!(dsts.len() == 2); let final_dst = get_output_reg(ctx, outputs[0]); ctx.emit(Inst::gen_move(final_dst.regs()[0], dsts[0], types::I64)); ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::Add, RegMemImm::reg(dsts[1]), final_dst.regs()[0], )); ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::Xor, RegMemImm::reg(final_dst.regs()[1].to_reg()), final_dst.regs()[1], )); } } else { // For SIMD 4.4 we use Mula's algroithm (https://arxiv.org/pdf/1611.07612.pdf) // //__m128i count_bytes ( __m128i v) { // __m128i lookup = _mm_setr_epi8(0 ,1 ,1 ,2 ,1 ,2 ,2 ,3 ,1 ,2 ,2 ,3 ,2 ,3 ,3 ,4) ; // __m128i low_mask = _mm_set1_epi8 (0 x0f ) ; // __m128i lo = _mm_and_si128 (v, low_mask ) ; // __m128i hi = _mm_and_si128 (_mm_srli_epi16 (v, 4) , low_mask ) ; // __m128i cnt1 = _mm_shuffle_epi8 (lookup , lo) ; // __m128i cnt2 = _mm_shuffle_epi8 (lookup , hi) ; // return _mm_add_epi8 (cnt1 , cnt2 ) ; //} // // Details of the above algorithm can be found in the reference noted above, but the basics // are to create a lookup table that pre populates the popcnt values for each number [0,15]. 
// The algorithm uses shifts to isolate 4 bit sections of the vector, pshufb as part of the // lookup process, and adds together the results. // Get input vector and destination let ty = ty.unwrap(); let lhs = put_input_in_reg(ctx, inputs[0]); let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); // __m128i lookup = _mm_setr_epi8(0 ,1 ,1 ,2 ,1 ,2 ,2 ,3 ,1 ,2 ,2 ,3 ,2 ,3 ,3 ,4); static POPCOUNT_4BIT: [u8; 16] = [ 0x00, 0x01, 0x01, 0x02, 0x01, 0x02, 0x02, 0x03, 0x01, 0x02, 0x02, 0x03, 0x02, 0x03, 0x03, 0x04, ]; let lookup = ctx.use_constant(VCodeConstantData::WellKnown(&POPCOUNT_4BIT)); // Create a mask for lower 4bits of each subword. static LOW_MASK: [u8; 16] = [0x0F; 16]; let low_mask_const = ctx.use_constant(VCodeConstantData::WellKnown(&LOW_MASK)); let low_mask = ctx.alloc_tmp(types::I8X16).only_reg().unwrap(); ctx.emit(Inst::xmm_load_const(low_mask_const, low_mask, ty)); // __m128i lo = _mm_and_si128 (v, low_mask ); let lo = ctx.alloc_tmp(types::I8X16).only_reg().unwrap(); ctx.emit(Inst::gen_move(lo, low_mask.to_reg(), types::I8X16)); ctx.emit(Inst::xmm_rm_r(SseOpcode::Pand, RegMem::reg(lhs), lo)); // __m128i hi = _mm_and_si128 (_mm_srli_epi16 (v, 4) , low_mask ) ; ctx.emit(Inst::gen_move(dst, lhs, ty)); ctx.emit(Inst::xmm_rmi_reg(SseOpcode::Psrlw, RegMemImm::imm(4), dst)); let tmp = ctx.alloc_tmp(types::I8X16).only_reg().unwrap(); ctx.emit(Inst::gen_move(tmp, low_mask.to_reg(), types::I8X16)); ctx.emit(Inst::xmm_rm_r( SseOpcode::Pand, RegMem::reg(dst.to_reg()), tmp, )); // __m128i cnt1 = _mm_shuffle_epi8 (lookup , lo) ; let tmp2 = ctx.alloc_tmp(types::I8X16).only_reg().unwrap(); ctx.emit(Inst::xmm_load_const(lookup, tmp2, ty)); ctx.emit(Inst::gen_move(dst, tmp2.to_reg(), types::I8X16)); ctx.emit(Inst::xmm_rm_r( SseOpcode::Pshufb, RegMem::reg(lo.to_reg()), dst, )); // __m128i cnt2 = _mm_shuffle_epi8 (lookup , hi) ; ctx.emit(Inst::xmm_rm_r( SseOpcode::Pshufb, RegMem::reg(tmp.to_reg()), tmp2, )); // return _mm_add_epi8 (cnt1 , cnt2 ) ; 
ctx.emit(Inst::xmm_rm_r( SseOpcode::Paddb, RegMem::reg(tmp2.to_reg()), dst, )); } } Opcode::Bitrev => { let ty = ctx.input_ty(insn, 0); assert!( ty == types::I8 || ty == types::I16 || ty == types::I32 || ty == types::I64 || ty == types::I128 ); if ty == types::I128 { let src = put_input_in_regs(ctx, inputs[0]); let dst = get_output_reg(ctx, outputs[0]); emit_bitrev(ctx, src.regs()[0], dst.regs()[1], types::I64); emit_bitrev(ctx, src.regs()[1], dst.regs()[0], types::I64); } else { let src = put_input_in_reg(ctx, inputs[0]); let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); emit_bitrev(ctx, src, dst, ty); } } Opcode::IsNull | Opcode::IsInvalid => { // Null references are represented by the constant value 0; invalid references are // represented by the constant value -1. See `define_reftypes()` in // `meta/src/isa/x86/encodings.rs` to confirm. let src = put_input_in_reg(ctx, inputs[0]); let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); let ty = ctx.input_ty(insn, 0); let imm = match op { Opcode::IsNull => { // TODO could use tst src, src for IsNull 0 } Opcode::IsInvalid => { // We can do a 32-bit comparison even in 64-bits mode, as the constant is then // sign-extended. 
0xffffffff } _ => unreachable!(), }; ctx.emit(Inst::cmp_rmi_r( OperandSize::from_ty(ty), RegMemImm::imm(imm), src, )); ctx.emit(Inst::setcc(CC::Z, dst)); } Opcode::Uextend | Opcode::Sextend | Opcode::Bint | Opcode::Breduce | Opcode::Bextend | Opcode::Ireduce => { let src_ty = ctx.input_ty(insn, 0); let dst_ty = ctx.output_ty(insn, 0); if src_ty == types::I128 { assert!(dst_ty.bits() <= 64); assert!(op == Opcode::Ireduce); let src = put_input_in_regs(ctx, inputs[0]); let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); ctx.emit(Inst::gen_move(dst, src.regs()[0], types::I64)); } else if dst_ty == types::I128 { assert!(src_ty.bits() <= 64); let src = put_input_in_reg(ctx, inputs[0]); let dst = get_output_reg(ctx, outputs[0]); assert!(op == Opcode::Uextend || op == Opcode::Sextend || op == Opcode::Bint); // Extend to 64 bits first. let ext_mode = ExtMode::new(src_ty.bits(), /* dst bits = */ 64); if let Some(ext_mode) = ext_mode { if op == Opcode::Sextend { ctx.emit(Inst::movsx_rm_r(ext_mode, RegMem::reg(src), dst.regs()[0])); } else { ctx.emit(Inst::movzx_rm_r(ext_mode, RegMem::reg(src), dst.regs()[0])); } } else { ctx.emit(Inst::mov64_rm_r(RegMem::reg(src), dst.regs()[0])); } // Now generate the top 64 bits. if op == Opcode::Sextend { // Sign-extend: move dst[0] into dst[1] and arithmetic-shift right by 63 bits // to spread the sign bit across all bits. ctx.emit(Inst::gen_move( dst.regs()[1], dst.regs()[0].to_reg(), types::I64, )); ctx.emit(Inst::shift_r( OperandSize::Size64, ShiftKind::ShiftRightArithmetic, Some(63), dst.regs()[1], )); } else { // Zero-extend: just zero the top word. ctx.emit(Inst::alu_rmi_r( OperandSize::Size64, AluRmiROpcode::Xor, RegMemImm::reg(dst.regs()[1].to_reg()), dst.regs()[1], )); } } else { // Sextend requires a sign-extended move, but all the other opcodes are simply a move // from a zero-extended source. Here is why this works, in each case: // // - Bint: Bool-to-int. 
We always represent a bool as a 0 or 1, so we merely need to // zero-extend here. // // - Breduce, Bextend: changing width of a boolean. We represent a bool as a 0 or 1, so // again, this is a zero-extend / no-op. // // - Ireduce: changing width of an integer. Smaller ints are stored with undefined // high-order bits, so we can simply do a copy. if src_ty == types::I32 && dst_ty == types::I64 && op != Opcode::Sextend { // As a particular x64 extra-pattern matching opportunity, all the ALU opcodes on // 32-bits will zero-extend the upper 32-bits, so we can even not generate a // zero-extended move in this case. // TODO add loads and shifts here. if let Some(_) = matches_input_any( ctx, inputs[0], &[ Opcode::Iadd, Opcode::IaddIfcout, Opcode::Isub, Opcode::Imul, Opcode::Band, Opcode::Bor, Opcode::Bxor, ], ) { let src = put_input_in_reg(ctx, inputs[0]); let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); ctx.emit(Inst::gen_move(dst, src, types::I64)); return Ok(()); } } let src = input_to_reg_mem(ctx, inputs[0]); let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); let ext_mode = ExtMode::new(src_ty.bits(), dst_ty.bits()); assert_eq!( src_ty.bits() < dst_ty.bits(), ext_mode.is_some(), "unexpected extension: {} -> {}", src_ty, dst_ty ); if let Some(ext_mode) = ext_mode { if op == Opcode::Sextend { ctx.emit(Inst::movsx_rm_r(ext_mode, src, dst)); } else { ctx.emit(Inst::movzx_rm_r(ext_mode, src, dst)); } } else { ctx.emit(Inst::mov64_rm_r(src, dst)); } } } Opcode::Icmp => { let condcode = ctx.data(insn).cond_code().unwrap(); let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); let ty = ctx.input_ty(insn, 0); if !ty.is_vector() { let condcode = emit_cmp(ctx, insn, condcode); let cc = CC::from_intcc(condcode); ctx.emit(Inst::setcc(cc, dst)); } else { assert_eq!(ty.bits(), 128); let eq = |ty| match ty { types::I8X16 => SseOpcode::Pcmpeqb, types::I16X8 => SseOpcode::Pcmpeqw, types::I32X4 => SseOpcode::Pcmpeqd, types::I64X2 => SseOpcode::Pcmpeqq, 
_ => panic!( "Unable to find an instruction for {} for type: {}", condcode, ty ), }; let gt = |ty| match ty { types::I8X16 => SseOpcode::Pcmpgtb, types::I16X8 => SseOpcode::Pcmpgtw, types::I32X4 => SseOpcode::Pcmpgtd, types::I64X2 => SseOpcode::Pcmpgtq, _ => panic!( "Unable to find an instruction for {} for type: {}", condcode, ty ), }; let maxu = |ty| match ty { types::I8X16 => SseOpcode::Pmaxub, types::I16X8 => SseOpcode::Pmaxuw, types::I32X4 => SseOpcode::Pmaxud, _ => panic!( "Unable to find an instruction for {} for type: {}", condcode, ty ), }; let mins = |ty| match ty { types::I8X16 => SseOpcode::Pminsb, types::I16X8 => SseOpcode::Pminsw, types::I32X4 => SseOpcode::Pminsd, _ => panic!( "Unable to find an instruction for {} for type: {}", condcode, ty ), }; let minu = |ty| match ty { types::I8X16 => SseOpcode::Pminub, types::I16X8 => SseOpcode::Pminuw, types::I32X4 => SseOpcode::Pminud, _ => panic!( "Unable to find an instruction for {} for type: {}", condcode, ty ), }; // Here we decide which operand to use as the read/write `dst` (ModRM reg field) and // which to use as the read `input` (ModRM r/m field). In the normal case we use // Cranelift's first operand, the `lhs`, as `dst` but we flip the operands for the // less-than cases so that we can reuse the greater-than implementation. // // In a surprising twist, the operands for i64x2 `gte`/`sle` must also be flipped // from the normal order because of the special-case lowering for these instructions // (i.e. we use PCMPGTQ with flipped operands and negate the result). 
let input = match condcode { IntCC::SignedLessThanOrEqual if ty == types::I64X2 => { let lhs = put_input_in_reg(ctx, inputs[0]); let rhs = input_to_reg_mem(ctx, inputs[1]); ctx.emit(Inst::gen_move(dst, lhs, ty)); rhs } IntCC::SignedGreaterThanOrEqual if ty == types::I64X2 => { let lhs = input_to_reg_mem(ctx, inputs[0]); let rhs = put_input_in_reg(ctx, inputs[1]); ctx.emit(Inst::gen_move(dst, rhs, ty)); lhs } IntCC::SignedLessThan | IntCC::SignedLessThanOrEqual | IntCC::UnsignedLessThan | IntCC::UnsignedLessThanOrEqual => { let lhs = input_to_reg_mem(ctx, inputs[0]); let rhs = put_input_in_reg(ctx, inputs[1]); ctx.emit(Inst::gen_move(dst, rhs, ty)); lhs } _ => { let lhs = put_input_in_reg(ctx, inputs[0]); let rhs = input_to_reg_mem(ctx, inputs[1]); ctx.emit(Inst::gen_move(dst, lhs, ty)); rhs } }; match condcode { IntCC::Equal => ctx.emit(Inst::xmm_rm_r(eq(ty), input, dst)), IntCC::NotEqual => { ctx.emit(Inst::xmm_rm_r(eq(ty), input, dst)); // Emit all 1s into the `tmp` register. let tmp = ctx.alloc_tmp(ty).only_reg().unwrap(); ctx.emit(Inst::xmm_rm_r(eq(ty), RegMem::from(tmp), tmp)); // Invert the result of the `PCMPEQ*`. ctx.emit(Inst::xmm_rm_r(SseOpcode::Pxor, RegMem::from(tmp), dst)); } IntCC::SignedGreaterThan | IntCC::SignedLessThan => { ctx.emit(Inst::xmm_rm_r(gt(ty), input, dst)) } IntCC::SignedGreaterThanOrEqual | IntCC::SignedLessThanOrEqual if ty != types::I64X2 => { ctx.emit(Inst::xmm_rm_r(mins(ty), input.clone(), dst)); ctx.emit(Inst::xmm_rm_r(eq(ty), input, dst)) } IntCC::SignedGreaterThanOrEqual | IntCC::SignedLessThanOrEqual if ty == types::I64X2 => { // The PMINS* instruction is only available in AVX512VL/F so we must instead // compare with flipped operands and negate the result (emitting one more // instruction). ctx.emit(Inst::xmm_rm_r(gt(ty), input, dst)); // Emit all 1s into the `tmp` register. let tmp = ctx.alloc_tmp(ty).only_reg().unwrap(); ctx.emit(Inst::xmm_rm_r(eq(ty), RegMem::from(tmp), tmp)); // Invert the result of the `PCMPGT*`. 
ctx.emit(Inst::xmm_rm_r(SseOpcode::Pxor, RegMem::from(tmp), dst)); } IntCC::UnsignedGreaterThan | IntCC::UnsignedLessThan => { ctx.emit(Inst::xmm_rm_r(maxu(ty), input.clone(), dst)); ctx.emit(Inst::xmm_rm_r(eq(ty), input, dst)); // Emit all 1s into the `tmp` register. let tmp = ctx.alloc_tmp(ty).only_reg().unwrap(); ctx.emit(Inst::xmm_rm_r(eq(ty), RegMem::from(tmp), tmp)); // Invert the result of the `PCMPEQ*`. ctx.emit(Inst::xmm_rm_r(SseOpcode::Pxor, RegMem::from(tmp), dst)); } IntCC::UnsignedGreaterThanOrEqual | IntCC::UnsignedLessThanOrEqual => { ctx.emit(Inst::xmm_rm_r(minu(ty), input.clone(), dst)); ctx.emit(Inst::xmm_rm_r(eq(ty), input, dst)) } _ => unimplemented!("Unimplemented comparison code for icmp: {}", condcode), } } } Opcode::Fcmp => { let cond_code = ctx.data(insn).fp_cond_code().unwrap(); let input_ty = ctx.input_ty(insn, 0); if !input_ty.is_vector() { // Unordered is returned by setting ZF, PF, CF <- 111 // Greater than by ZF, PF, CF <- 000 // Less than by ZF, PF, CF <- 001 // Equal by ZF, PF, CF <- 100 // // Checking the result of comiss is somewhat annoying because you don't have setcc // instructions that explicitly check simultaneously for the condition (i.e. eq, le, // gt, etc) *and* orderedness. // // So that might mean we need more than one setcc check and then a logical "and" or // "or" to determine both, in some cases. However knowing that if the parity bit is // set, then the result was considered unordered and knowing that if the parity bit is // set, then both the ZF and CF flag bits must also be set we can get away with using // one setcc for most condition codes. 
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); match emit_fcmp(ctx, insn, cond_code, FcmpSpec::Normal) { FcmpCondResult::Condition(cc) => { ctx.emit(Inst::setcc(cc, dst)); } FcmpCondResult::AndConditions(cc1, cc2) => { let tmp = ctx.alloc_tmp(types::I32).only_reg().unwrap(); ctx.emit(Inst::setcc(cc1, tmp)); ctx.emit(Inst::setcc(cc2, dst)); ctx.emit(Inst::alu_rmi_r( OperandSize::Size32, AluRmiROpcode::And, RegMemImm::reg(tmp.to_reg()), dst, )); } FcmpCondResult::OrConditions(cc1, cc2) => { let tmp = ctx.alloc_tmp(types::I32).only_reg().unwrap(); ctx.emit(Inst::setcc(cc1, tmp)); ctx.emit(Inst::setcc(cc2, dst)); ctx.emit(Inst::alu_rmi_r( OperandSize::Size32, AluRmiROpcode::Or, RegMemImm::reg(tmp.to_reg()), dst, )); } FcmpCondResult::InvertedEqualOrConditions(_, _) => unreachable!(), } } else { let op = match input_ty { types::F32X4 => SseOpcode::Cmpps, types::F64X2 => SseOpcode::Cmppd, _ => panic!("Bad input type to fcmp: {}", input_ty), }; // Since some packed comparisons are not available, some of the condition codes // must be inverted, with a corresponding `flip` of the operands. let (imm, flip) = match cond_code { FloatCC::GreaterThan => (FcmpImm::LessThan, true), FloatCC::GreaterThanOrEqual => (FcmpImm::LessThanOrEqual, true), FloatCC::UnorderedOrLessThan => (FcmpImm::UnorderedOrGreaterThan, true), FloatCC::UnorderedOrLessThanOrEqual => { (FcmpImm::UnorderedOrGreaterThanOrEqual, true) } FloatCC::OrderedNotEqual | FloatCC::UnorderedOrEqual => { panic!("unsupported float condition code: {}", cond_code) } _ => (FcmpImm::from(cond_code), false), }; // Determine the operands of the comparison, possibly by flipping them. 
let (lhs, rhs) = if flip { ( put_input_in_reg(ctx, inputs[1]), input_to_reg_mem(ctx, inputs[0]), ) } else { ( put_input_in_reg(ctx, inputs[0]), input_to_reg_mem(ctx, inputs[1]), ) }; // Move the `lhs` to the same register as `dst`; this may not emit an actual move // but ensures that the registers are the same to match x86's read-write operand // encoding. let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); ctx.emit(Inst::gen_move(dst, lhs, input_ty)); // Emit the comparison. ctx.emit(Inst::xmm_rm_r_imm( op, rhs, dst, imm.encode(), OperandSize::Size32, )); } } Opcode::FallthroughReturn | Opcode::Return => { for i in 0..ctx.num_inputs(insn) { let src_reg = put_input_in_regs(ctx, inputs[i]); let retval_reg = ctx.retval(i); let ty = ctx.input_ty(insn, i); assert!(src_reg.len() == retval_reg.len()); let (_, tys) = Inst::rc_for_type(ty)?; for ((&src, &dst), &ty) in src_reg .regs() .iter() .zip(retval_reg.regs().iter()) .zip(tys.iter()) { ctx.emit(Inst::gen_move(dst, src, ty)); } } // N.B.: the Ret itself is generated by the ABI. 
} Opcode::Call | Opcode::CallIndirect => { let caller_conv = ctx.abi().call_conv(); let (mut abi, inputs) = match op { Opcode::Call => { let (extname, dist) = ctx.call_target(insn).unwrap(); let sig = ctx.call_sig(insn).unwrap(); assert_eq!(inputs.len(), sig.params.len()); assert_eq!(outputs.len(), sig.returns.len()); ( X64ABICaller::from_func(sig, &extname, dist, caller_conv, flags)?, &inputs[..], ) } Opcode::CallIndirect => { let ptr = put_input_in_reg(ctx, inputs[0]); let sig = ctx.call_sig(insn).unwrap(); assert_eq!(inputs.len() - 1, sig.params.len()); assert_eq!(outputs.len(), sig.returns.len()); ( X64ABICaller::from_ptr(sig, ptr, op, caller_conv, flags)?, &inputs[1..], ) } _ => unreachable!(), }; abi.emit_stack_pre_adjust(ctx); assert_eq!(inputs.len(), abi.num_args()); for i in abi.get_copy_to_arg_order() { let input = inputs[i]; let arg_regs = put_input_in_regs(ctx, input); abi.emit_copy_regs_to_arg(ctx, i, arg_regs); } abi.emit_call(ctx); for (i, output) in outputs.iter().enumerate() { let retval_regs = get_output_reg(ctx, *output); abi.emit_copy_retval_to_regs(ctx, i, retval_regs); } abi.emit_stack_post_adjust(ctx); } Opcode::Debugtrap => { ctx.emit(Inst::Hlt); } Opcode::Trap | Opcode::ResumableTrap => { let trap_code = ctx.data(insn).trap_code().unwrap(); ctx.emit_safepoint(Inst::Ud2 { trap_code }); } Opcode::Trapif | Opcode::Trapff => { let trap_code = ctx.data(insn).trap_code().unwrap(); if matches_input(ctx, inputs[0], Opcode::IaddIfcout).is_some() { let cond_code = ctx.data(insn).cond_code().unwrap(); // The flags must not have been clobbered by any other instruction between the // iadd_ifcout and this instruction, as verified by the CLIF validator; so we can // simply use the flags here. let cc = CC::from_intcc(cond_code); ctx.emit_safepoint(Inst::TrapIf { trap_code, cc }); } else if op == Opcode::Trapif { let cond_code = ctx.data(insn).cond_code().unwrap(); // Verification ensures that the input is always a single-def ifcmp. 
let ifcmp = matches_input(ctx, inputs[0], Opcode::Ifcmp).unwrap(); let cond_code = emit_cmp(ctx, ifcmp, cond_code); let cc = CC::from_intcc(cond_code); ctx.emit_safepoint(Inst::TrapIf { trap_code, cc }); } else { let cond_code = ctx.data(insn).fp_cond_code().unwrap(); // Verification ensures that the input is always a single-def ffcmp. let ffcmp = matches_input(ctx, inputs[0], Opcode::Ffcmp).unwrap(); match emit_fcmp(ctx, ffcmp, cond_code, FcmpSpec::Normal) { FcmpCondResult::Condition(cc) => { ctx.emit_safepoint(Inst::TrapIf { trap_code, cc }) } FcmpCondResult::AndConditions(cc1, cc2) => { // A bit unfortunate, but materialize the flags in their own register, and // check against this. let tmp = ctx.alloc_tmp(types::I32).only_reg().unwrap(); let tmp2 = ctx.alloc_tmp(types::I32).only_reg().unwrap(); ctx.emit(Inst::setcc(cc1, tmp)); ctx.emit(Inst::setcc(cc2, tmp2)); ctx.emit(Inst::alu_rmi_r( OperandSize::Size32, AluRmiROpcode::And, RegMemImm::reg(tmp.to_reg()), tmp2, )); ctx.emit_safepoint(Inst::TrapIf { trap_code, cc: CC::NZ, }); } FcmpCondResult::OrConditions(cc1, cc2) => { ctx.emit_safepoint(Inst::TrapIf { trap_code, cc: cc1 }); ctx.emit_safepoint(Inst::TrapIf { trap_code, cc: cc2 }); } FcmpCondResult::InvertedEqualOrConditions(_, _) => unreachable!(), }; }; } Opcode::F64const => { // TODO use cmpeqpd for all 1s. let value = ctx.get_constant(insn).unwrap(); let dst = get_output_reg(ctx, outputs[0]); for inst in Inst::gen_constant(dst, value as u128, types::F64, |ty| { ctx.alloc_tmp(ty).only_reg().unwrap() }) { ctx.emit(inst); } } Opcode::F32const => { // TODO use cmpeqps for all 1s. 
let value = ctx.get_constant(insn).unwrap(); let dst = get_output_reg(ctx, outputs[0]); for inst in Inst::gen_constant(dst, value as u128, types::F32, |ty| { ctx.alloc_tmp(ty).only_reg().unwrap() }) { ctx.emit(inst); } } Opcode::WideningPairwiseDotProductS => { let lhs = put_input_in_reg(ctx, inputs[0]); let rhs = input_to_reg_mem(ctx, inputs[1]); let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); let ty = ty.unwrap(); ctx.emit(Inst::gen_move(dst, lhs, ty)); if ty == types::I32X4 { ctx.emit(Inst::xmm_rm_r(SseOpcode::Pmaddwd, rhs, dst)); } else { panic!( "Opcode::WideningPairwiseDotProductS: unsupported laneage: {:?}", ty ); } } Opcode::Fadd | Opcode::Fsub | Opcode::Fmul | Opcode::Fdiv => { let lhs = put_input_in_reg(ctx, inputs[0]); // We can't guarantee the RHS (if a load) is 128-bit aligned, so we // must avoid merging a load here. let rhs = RegMem::reg(put_input_in_reg(ctx, inputs[1])); let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); let ty = ty.unwrap(); // Move the `lhs` to the same register as `dst`; this may not emit an actual move // but ensures that the registers are the same to match x86's read-write operand // encoding. ctx.emit(Inst::gen_move(dst, lhs, ty)); // Note: min and max can't be handled here, because of the way Cranelift defines them: // if any operand is a NaN, they must return the NaN operand, while the x86 machine // instruction will return the second operand if either operand is a NaN. 
let sse_op = match ty { types::F32 => match op { Opcode::Fadd => SseOpcode::Addss, Opcode::Fsub => SseOpcode::Subss, Opcode::Fmul => SseOpcode::Mulss, Opcode::Fdiv => SseOpcode::Divss, _ => unreachable!(), }, types::F64 => match op { Opcode::Fadd => SseOpcode::Addsd, Opcode::Fsub => SseOpcode::Subsd, Opcode::Fmul => SseOpcode::Mulsd, Opcode::Fdiv => SseOpcode::Divsd, _ => unreachable!(), }, types::F32X4 => match op { Opcode::Fadd => SseOpcode::Addps, Opcode::Fsub => SseOpcode::Subps, Opcode::Fmul => SseOpcode::Mulps, Opcode::Fdiv => SseOpcode::Divps, _ => unreachable!(), }, types::F64X2 => match op { Opcode::Fadd => SseOpcode::Addpd, Opcode::Fsub => SseOpcode::Subpd, Opcode::Fmul => SseOpcode::Mulpd, Opcode::Fdiv => SseOpcode::Divpd, _ => unreachable!(), }, _ => panic!( "invalid type: expected one of [F32, F64, F32X4, F64X2], found {}", ty ), }; ctx.emit(Inst::xmm_rm_r(sse_op, rhs, dst)); } Opcode::Fmin | Opcode::Fmax => { let lhs = put_input_in_reg(ctx, inputs[0]); let rhs = put_input_in_reg(ctx, inputs[1]); let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); let is_min = op == Opcode::Fmin; let output_ty = ty.unwrap(); ctx.emit(Inst::gen_move(dst, rhs, output_ty)); if !output_ty.is_vector() { let op_size = match output_ty { types::F32 => OperandSize::Size32, types::F64 => OperandSize::Size64, _ => panic!("unexpected type {:?} for fmin/fmax", output_ty), }; ctx.emit(Inst::xmm_min_max_seq(op_size, is_min, lhs, dst)); } else { // X64's implementation of floating point min and floating point max does not // propagate NaNs and +0's in a way that is friendly to the SIMD spec. For the // scalar approach we use jumps to handle cases where NaN and +0 propagation is // not consistent with what is needed. However for packed floating point min and // floating point max we implement a different approach to avoid the sequence // of jumps that would be required on a per lane basis. 
Because we do not need to // lower labels and jumps but do need ctx for creating temporaries we implement // the lowering here in lower.rs instead of emit.rs as is done in the case for scalars. // The outline of approach is as follows: // // First we preform the Min/Max in both directions. This is because in the // case of an operand's lane containing a NaN or in the case of the lanes of the // two operands containing 0 but with mismatched signs, x64 will return the second // operand regardless of its contents. So in order to make sure we capture NaNs and // normalize NaNs and 0 values we capture the operation in both directions and merge the // results. Then we normalize the results through operations that create a mask for the // lanes containing NaNs, we use that mask to adjust NaNs to quite NaNs and normalize // 0s. // // The following sequence is generated for min: // // movap{s,d} %lhs, %tmp // minp{s,d} %dst, %tmp // minp,{s,d} %lhs, %dst // orp{s,d} %dst, %tmp // cmpp{s,d} %tmp, %dst, $3 // orps{s,d} %dst, %tmp // psrl{s,d} {$10, $13}, %dst // andnp{s,d} %tmp, %dst // // and for max the sequence is: // // movap{s,d} %lhs, %tmp // minp{s,d} %dst, %tmp // minp,{s,d} %lhs, %dst // xorp{s,d} %tmp, %dst // orp{s,d} %dst, %tmp // subp{s,d} %dst, %tmp // cmpp{s,d} %tmp, %dst, $3 // psrl{s,d} {$10, $13}, %dst // andnp{s,d} %tmp, %dst if is_min { let (mov_op, min_op, or_op, cmp_op, shift_op, shift_by, andn_op) = match output_ty { types::F32X4 => ( SseOpcode::Movaps, SseOpcode::Minps, SseOpcode::Orps, SseOpcode::Cmpps, SseOpcode::Psrld, 10, SseOpcode::Andnps, ), types::F64X2 => ( SseOpcode::Movapd, SseOpcode::Minpd, SseOpcode::Orpd, SseOpcode::Cmppd, SseOpcode::Psrlq, 13, SseOpcode::Andnpd, ), _ => unimplemented!("unsupported op type {:?}", output_ty), }; // Copy lhs into tmp let tmp_xmm1 = ctx.alloc_tmp(output_ty).only_reg().unwrap(); ctx.emit(Inst::xmm_mov(mov_op, RegMem::reg(lhs), tmp_xmm1)); // Perform min in reverse direction ctx.emit(Inst::xmm_rm_r(min_op, 
RegMem::from(dst), tmp_xmm1)); // Perform min in original direction ctx.emit(Inst::xmm_rm_r(min_op, RegMem::reg(lhs), dst)); // X64 handles propagation of -0's and Nans differently between left and right // operands. After doing the min in both directions, this OR will // guarrentee capture of -0's and Nan in our tmp register ctx.emit(Inst::xmm_rm_r(or_op, RegMem::from(dst), tmp_xmm1)); // Compare unordered to create mask for lanes containing NaNs and then use // that mask to saturate the NaN containing lanes in the tmp register with 1s. // TODO: Would a check for NaN and then a jump be better here in the // common case than continuing on to normalize NaNs that might not exist? let cond = FcmpImm::from(FloatCC::Unordered); ctx.emit(Inst::xmm_rm_r_imm( cmp_op, RegMem::reg(tmp_xmm1.to_reg()), dst, cond.encode(), OperandSize::Size32, )); ctx.emit(Inst::xmm_rm_r(or_op, RegMem::reg(dst.to_reg()), tmp_xmm1)); // The dst register holds a mask for lanes containing NaNs. // We take that mask and shift in preparation for creating a different mask // to normalize NaNs (create a quite NaN) by zeroing out the appropriate // number of least signficant bits. We shift right each lane by 10 bits // (1 sign + 8 exp. + 1 MSB sig.) for F32X4 and by 13 bits (1 sign + // 11 exp. + 1 MSB sig.) for F64X2. ctx.emit(Inst::xmm_rmi_reg(shift_op, RegMemImm::imm(shift_by), dst)); // Finally we do a nand with the tmp register to produce the final results // in the dst. 
ctx.emit(Inst::xmm_rm_r(andn_op, RegMem::reg(tmp_xmm1.to_reg()), dst)); } else { let ( mov_op, max_op, xor_op, or_op, sub_op, cmp_op, shift_op, shift_by, andn_op, ) = match output_ty { types::F32X4 => ( SseOpcode::Movaps, SseOpcode::Maxps, SseOpcode::Xorps, SseOpcode::Orps, SseOpcode::Subps, SseOpcode::Cmpps, SseOpcode::Psrld, 10, SseOpcode::Andnps, ), types::F64X2 => ( SseOpcode::Movapd, SseOpcode::Maxpd, SseOpcode::Xorpd, SseOpcode::Orpd, SseOpcode::Subpd, SseOpcode::Cmppd, SseOpcode::Psrlq, 13, SseOpcode::Andnpd, ), _ => unimplemented!("unsupported op type {:?}", output_ty), }; // Copy lhs into tmp. let tmp_xmm1 = ctx.alloc_tmp(types::F32).only_reg().unwrap(); ctx.emit(Inst::xmm_mov(mov_op, RegMem::reg(lhs), tmp_xmm1)); // Perform max in reverse direction. ctx.emit(Inst::xmm_rm_r(max_op, RegMem::reg(dst.to_reg()), tmp_xmm1)); // Perform max in original direction. ctx.emit(Inst::xmm_rm_r(max_op, RegMem::reg(lhs), dst)); // Get the difference between the two results and store in tmp. // Max uses a different approach than min to account for potential // discrepancies with plus/minus 0. ctx.emit(Inst::xmm_rm_r(xor_op, RegMem::reg(tmp_xmm1.to_reg()), dst)); // X64 handles propagation of -0's and Nans differently between left and right // operands. After doing the max in both directions, this OR will // guarentee capture of 0's and Nan in our tmp register. ctx.emit(Inst::xmm_rm_r(or_op, RegMem::reg(dst.to_reg()), tmp_xmm1)); // Capture NaNs and sign discrepancies. ctx.emit(Inst::xmm_rm_r(sub_op, RegMem::reg(dst.to_reg()), tmp_xmm1)); // Compare unordered to create mask for lanes containing NaNs and then use // that mask to saturate the NaN containing lanes in the tmp register with 1s. let cond = FcmpImm::from(FloatCC::Unordered); ctx.emit(Inst::xmm_rm_r_imm( cmp_op, RegMem::reg(tmp_xmm1.to_reg()), dst, cond.encode(), OperandSize::Size32, )); // The dst register holds a mask for lanes containing NaNs. 
// We take that mask and shift in preparation for creating a different mask // to normalize NaNs (create a quite NaN) by zeroing out the appropriate // number of least signficant bits. We shift right each lane by 10 bits // (1 sign + 8 exp. + 1 MSB sig.) for F32X4 and by 13 bits (1 sign + // 11 exp. + 1 MSB sig.) for F64X2. ctx.emit(Inst::xmm_rmi_reg(shift_op, RegMemImm::imm(shift_by), dst)); // Finally we do a nand with the tmp register to produce the final results // in the dst. ctx.emit(Inst::xmm_rm_r(andn_op, RegMem::reg(tmp_xmm1.to_reg()), dst)); } } } Opcode::FminPseudo | Opcode::FmaxPseudo => { // We can't guarantee the RHS (if a load) is 128-bit aligned, so we // must avoid merging a load here. let lhs = RegMem::reg(put_input_in_reg(ctx, inputs[0])); let rhs = put_input_in_reg(ctx, inputs[1]); let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); let ty = ty.unwrap(); ctx.emit(Inst::gen_move(dst, rhs, ty)); let sse_opcode = match (ty, op) { (types::F32X4, Opcode::FminPseudo) => SseOpcode::Minps, (types::F32X4, Opcode::FmaxPseudo) => SseOpcode::Maxps, (types::F64X2, Opcode::FminPseudo) => SseOpcode::Minpd, (types::F64X2, Opcode::FmaxPseudo) => SseOpcode::Maxpd, _ => unimplemented!("unsupported type {} for {}", ty, op), }; ctx.emit(Inst::xmm_rm_r(sse_opcode, lhs, dst)); } Opcode::Sqrt => { // We can't guarantee the RHS (if a load) is 128-bit aligned, so we // must avoid merging a load here. let src = RegMem::reg(put_input_in_reg(ctx, inputs[0])); let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); let ty = ty.unwrap(); let sse_op = match ty { types::F32 => SseOpcode::Sqrtss, types::F64 => SseOpcode::Sqrtsd, types::F32X4 => SseOpcode::Sqrtps, types::F64X2 => SseOpcode::Sqrtpd, _ => panic!( "invalid type: expected one of [F32, F64, F32X4, F64X2], found {}", ty ), }; ctx.emit(Inst::xmm_unary_rm_r(sse_op, src, dst)); } Opcode::Fpromote => { // We can't guarantee the RHS (if a load) is 128-bit aligned, so we // must avoid merging a load here. 
let src = RegMem::reg(put_input_in_reg(ctx, inputs[0])); let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); ctx.emit(Inst::xmm_unary_rm_r(SseOpcode::Cvtss2sd, src, dst)); } Opcode::Fdemote => { // We can't guarantee the RHS (if a load) is 128-bit aligned, so we // must avoid merging a load here. let src = RegMem::reg(put_input_in_reg(ctx, inputs[0])); let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); ctx.emit(Inst::xmm_unary_rm_r(SseOpcode::Cvtsd2ss, src, dst)); } Opcode::FcvtFromSint => { let output_ty = ty.unwrap(); if !output_ty.is_vector() { let (ext_spec, src_size) = match ctx.input_ty(insn, 0) { types::I8 | types::I16 => (Some(ExtSpec::SignExtendTo32), OperandSize::Size32), types::I32 => (None, OperandSize::Size32), types::I64 => (None, OperandSize::Size64), _ => unreachable!(), }; let src = match ext_spec { Some(ext_spec) => RegMem::reg(extend_input_to_reg(ctx, inputs[0], ext_spec)), None => RegMem::reg(put_input_in_reg(ctx, inputs[0])), }; let opcode = if output_ty == types::F32 { SseOpcode::Cvtsi2ss } else { assert_eq!(output_ty, types::F64); SseOpcode::Cvtsi2sd }; let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); ctx.emit(Inst::gpr_to_xmm(opcode, src, src_size, dst)); } else { let ty = ty.unwrap(); let src = put_input_in_reg(ctx, inputs[0]); let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); let opcode = match ctx.input_ty(insn, 0) { types::I32X4 => SseOpcode::Cvtdq2ps, _ => { unimplemented!("unable to use type {} for op {}", ctx.input_ty(insn, 0), op) } }; ctx.emit(Inst::gen_move(dst, src, ty)); ctx.emit(Inst::xmm_rm_r(opcode, RegMem::from(dst), dst)); } } Opcode::FcvtLowFromSint => { let src = RegMem::reg(put_input_in_reg(ctx, inputs[0])); let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); ctx.emit(Inst::xmm_unary_rm_r( SseOpcode::Cvtdq2pd, RegMem::from(src), dst, )); } Opcode::FcvtFromUint => { let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); let ty = ty.unwrap(); let input_ty 
= ctx.input_ty(insn, 0); if !ty.is_vector() { match input_ty { types::I8 | types::I16 | types::I32 => { // Conversion from an unsigned int smaller than 64-bit is easy: zero-extend + // do a signed conversion (which won't overflow). let opcode = if ty == types::F32 { SseOpcode::Cvtsi2ss } else { assert_eq!(ty, types::F64); SseOpcode::Cvtsi2sd }; let src = RegMem::reg(extend_input_to_reg( ctx, inputs[0], ExtSpec::ZeroExtendTo64, )); ctx.emit(Inst::gpr_to_xmm(opcode, src, OperandSize::Size64, dst)); } types::I64 => { let src = put_input_in_reg(ctx, inputs[0]); let src_copy = ctx.alloc_tmp(types::I64).only_reg().unwrap(); ctx.emit(Inst::gen_move(src_copy, src, types::I64)); let tmp_gpr1 = ctx.alloc_tmp(types::I64).only_reg().unwrap(); let tmp_gpr2 = ctx.alloc_tmp(types::I64).only_reg().unwrap(); ctx.emit(Inst::cvt_u64_to_float_seq( if ty == types::F64 { OperandSize::Size64 } else { OperandSize::Size32 }, src_copy, tmp_gpr1, tmp_gpr2, dst, )); } _ => panic!("unexpected input type for FcvtFromUint: {:?}", input_ty), }; } else { assert_eq!(ctx.input_ty(insn, 0), types::I32X4); let src = put_input_in_reg(ctx, inputs[0]); let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); if isa_flags.use_avx512f_simd() || isa_flags.use_avx512vl_simd() { // When either AVX512VL or AVX512F are available, // `fcvt_from_uint` can be lowered to a single instruction. ctx.emit(Inst::xmm_unary_rm_r_evex( Avx512Opcode::Vcvtudq2ps, RegMem::reg(src), dst, )); } else { // Converting packed unsigned integers to packed floats // requires a few steps. There is no single instruction // lowering for converting unsigned floats but there is for // converting packed signed integers to float (cvtdq2ps). In // the steps below we isolate the upper half (16 bits) and // lower half (16 bits) of each lane and then we convert // each half separately using cvtdq2ps meant for signed // integers. 
In order for this to work for the upper half // bits we must shift right by 1 (divide by 2) these bits in // order to ensure the most significant bit is 0 not signed, // and then after the conversion we double the value. // Finally we add the converted values where addition will // correctly round. // // Sequence: // -> A = 0xffffffff // -> Ah = 0xffff0000 // -> Al = 0x0000ffff // -> Convert(Al) // Convert int to float // -> Ah = Ah >> 1 // Shift right 1 to assure Ah conversion isn't treated as signed // -> Convert(Ah) // Convert .. with no loss of significant digits from previous shift // -> Ah = Ah + Ah // Double Ah to account for shift right before the conversion. // -> dst = Ah + Al // Add the two floats together // Create a temporary register let tmp = ctx.alloc_tmp(types::I32X4).only_reg().unwrap(); ctx.emit(Inst::xmm_unary_rm_r( SseOpcode::Movapd, RegMem::reg(src), tmp, )); ctx.emit(Inst::gen_move(dst, src, ty)); // Get the low 16 bits ctx.emit(Inst::xmm_rmi_reg(SseOpcode::Pslld, RegMemImm::imm(16), tmp)); ctx.emit(Inst::xmm_rmi_reg(SseOpcode::Psrld, RegMemImm::imm(16), tmp)); // Get the high 16 bits ctx.emit(Inst::xmm_rm_r(SseOpcode::Psubd, RegMem::from(tmp), dst)); // Convert the low 16 bits ctx.emit(Inst::xmm_rm_r(SseOpcode::Cvtdq2ps, RegMem::from(tmp), tmp)); // Shift the high bits by 1, convert, and double to get the correct value. ctx.emit(Inst::xmm_rmi_reg(SseOpcode::Psrld, RegMemImm::imm(1), dst)); ctx.emit(Inst::xmm_rm_r(SseOpcode::Cvtdq2ps, RegMem::from(dst), dst)); ctx.emit(Inst::xmm_rm_r( SseOpcode::Addps, RegMem::reg(dst.to_reg()), dst, )); // Add together the two converted values. 
ctx.emit(Inst::xmm_rm_r( SseOpcode::Addps, RegMem::reg(tmp.to_reg()), dst, )); } } } Opcode::FcvtToUint | Opcode::FcvtToUintSat | Opcode::FcvtToSint | Opcode::FcvtToSintSat => { let src = put_input_in_reg(ctx, inputs[0]); let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); let input_ty = ctx.input_ty(insn, 0); if !input_ty.is_vector() { let src_size = if input_ty == types::F32 { OperandSize::Size32 } else { assert_eq!(input_ty, types::F64); OperandSize::Size64 }; let output_ty = ty.unwrap(); let dst_size = if output_ty == types::I32 { OperandSize::Size32 } else { assert_eq!(output_ty, types::I64); OperandSize::Size64 }; let to_signed = op == Opcode::FcvtToSint || op == Opcode::FcvtToSintSat; let is_sat = op == Opcode::FcvtToUintSat || op == Opcode::FcvtToSintSat; let src_copy = ctx.alloc_tmp(input_ty).only_reg().unwrap(); ctx.emit(Inst::gen_move(src_copy, src, input_ty)); let tmp_xmm = ctx.alloc_tmp(input_ty).only_reg().unwrap(); let tmp_gpr = ctx.alloc_tmp(output_ty).only_reg().unwrap(); if to_signed { ctx.emit(Inst::cvt_float_to_sint_seq( src_size, dst_size, is_sat, src_copy, dst, tmp_gpr, tmp_xmm, )); } else { ctx.emit(Inst::cvt_float_to_uint_seq( src_size, dst_size, is_sat, src_copy, dst, tmp_gpr, tmp_xmm, )); } } else { if op == Opcode::FcvtToSintSat { // Sets destination to zero if float is NaN assert_eq!(types::F32X4, ctx.input_ty(insn, 0)); let tmp = ctx.alloc_tmp(types::I32X4).only_reg().unwrap(); ctx.emit(Inst::xmm_unary_rm_r( SseOpcode::Movapd, RegMem::reg(src), tmp, )); ctx.emit(Inst::gen_move(dst, src, input_ty)); let cond = FcmpImm::from(FloatCC::Equal); ctx.emit(Inst::xmm_rm_r_imm( SseOpcode::Cmpps, RegMem::reg(tmp.to_reg()), tmp, cond.encode(), OperandSize::Size32, )); ctx.emit(Inst::xmm_rm_r( SseOpcode::Andps, RegMem::reg(tmp.to_reg()), dst, )); // Sets top bit of tmp if float is positive // Setting up to set top bit on negative float values ctx.emit(Inst::xmm_rm_r( SseOpcode::Pxor, RegMem::reg(dst.to_reg()), tmp, )); // Convert the 
packed float to packed doubleword. ctx.emit(Inst::xmm_rm_r( SseOpcode::Cvttps2dq, RegMem::reg(dst.to_reg()), dst, )); // Set top bit only if < 0 // Saturate lane with sign (top) bit. ctx.emit(Inst::xmm_rm_r( SseOpcode::Pand, RegMem::reg(dst.to_reg()), tmp, )); ctx.emit(Inst::xmm_rmi_reg(SseOpcode::Psrad, RegMemImm::imm(31), tmp)); // On overflow 0x80000000 is returned to a lane. // Below sets positive overflow lanes to 0x7FFFFFFF // Keeps negative overflow lanes as is. ctx.emit(Inst::xmm_rm_r( SseOpcode::Pxor, RegMem::reg(tmp.to_reg()), dst, )); } else if op == Opcode::FcvtToUintSat { // The algorithm for converting floats to unsigned ints is a little tricky. The // complication arises because we are converting from a signed 64-bit int with a positive // integer range from 1..INT_MAX (0x1..0x7FFFFFFF) to an unsigned integer with an extended // range from (INT_MAX+1)..UINT_MAX. It's this range from (INT_MAX+1)..UINT_MAX // (0x80000000..0xFFFFFFFF) that needs to be accounted for as a special case since our // conversion instruction (cvttps2dq) only converts as high as INT_MAX (0x7FFFFFFF), but // which conveniently setting underflows and overflows (smaller than MIN_INT or larger than // MAX_INT) to be INT_MAX+1 (0x80000000). Nothing that the range (INT_MAX+1)..UINT_MAX includes // precisely INT_MAX values we can correctly account for and convert every value in this range // if we simply subtract INT_MAX+1 before doing the cvttps2dq conversion. After the subtraction // every value originally (INT_MAX+1)..UINT_MAX is now the range (0..INT_MAX). // After the conversion we add INT_MAX+1 back to this converted value, noting again that // values we are trying to account for were already set to INT_MAX+1 during the original conversion. // We simply have to create a mask and make sure we are adding together only the lanes that need // to be accounted for. 
Digesting it all, the steps then are:
//
// Step 1 - Account for NaN and negative floats by setting these src values to zero.
// Step 2 - Make a copy (tmp1) of the src value since we need to convert twice for
//          reasons described above.
// Step 3 - Convert the original src values. This will convert properly all floats up to INT_MAX.
// Step 4 - Subtract INT_MAX+1 from the copy set (tmp1). Note, all zero and negative values are those
//          values that were originally in the range (0..INT_MAX). This will come in handy during
//          step 7 when we zero negative lanes.
// Step 5 - Create a bit mask for tmp1 that will correspond to all lanes originally less than
//          UINT_MAX that are now less than INT_MAX thanks to the subtraction.
// Step 6 - Convert the second set of values (tmp1).
// Step 7 - Prep the converted second set by zeroing out negative lanes (these have already been
//          converted correctly with the first set) and by setting overflow lanes to 0x7FFFFFFF
//          as this will allow us to properly saturate overflow lanes when adding to 0x80000000.
// Step 8 - Add the original converted src and the converted tmp1 where float values originally less
//          than or equal to INT_MAX will be unchanged, float values originally between INT_MAX+1 and
//          UINT_MAX will add together as (INT_MAX+1) + (SRC - (INT_MAX+1)), and float values originally
//          greater than UINT_MAX will be saturated to UINT_MAX (0xFFFFFFFF) after adding (0x80000000 + 0x7FFFFFFF).
//
//
// The table below illustrates the result after each step where it matters for the converted set.
// Note the original value range (original src set) is the final dst in Step 8: // // Original src set: // | Original Value Range | Step 1 | Step 3 | Step 8 | // | -FLT_MIN..FLT_MAX | 0.0..FLT_MAX | 0..INT_MAX(w/overflow) | 0..UINT_MAX(w/saturation) | // // Copied src set (tmp1): // | Step 2 | Step 4 | // | 0.0..FLT_MAX | (0.0-(INT_MAX+1))..(FLT_MAX-(INT_MAX+1)) | // // | Step 6 | Step 7 | // | (0-(INT_MAX+1))..(UINT_MAX-(INT_MAX+1))(w/overflow) | ((INT_MAX+1)-(INT_MAX+1))..(INT_MAX+1) | // Create temporaries assert_eq!(types::F32X4, ctx.input_ty(insn, 0)); let tmp1 = ctx.alloc_tmp(types::I32X4).only_reg().unwrap(); let tmp2 = ctx.alloc_tmp(types::I32X4).only_reg().unwrap(); // Converting to unsigned int so if float src is negative or NaN // will first set to zero. ctx.emit(Inst::xmm_rm_r(SseOpcode::Pxor, RegMem::from(tmp2), tmp2)); ctx.emit(Inst::gen_move(dst, src, input_ty)); ctx.emit(Inst::xmm_rm_r(SseOpcode::Maxps, RegMem::from(tmp2), dst)); // Set tmp2 to INT_MAX+1. It is important to note here that after it looks // like we are only converting INT_MAX (0x7FFFFFFF) but in fact because // single precision IEEE-754 floats can only accurately represent contingous // integers up to 2^23 and outside of this range it rounds to the closest // integer that it can represent. In the case of INT_MAX, this value gets // represented as 0x4f000000 which is the integer value (INT_MAX+1). ctx.emit(Inst::xmm_rm_r(SseOpcode::Pcmpeqd, RegMem::from(tmp2), tmp2)); ctx.emit(Inst::xmm_rmi_reg(SseOpcode::Psrld, RegMemImm::imm(1), tmp2)); ctx.emit(Inst::xmm_rm_r( SseOpcode::Cvtdq2ps, RegMem::from(tmp2), tmp2, )); // Make a copy of these lanes and then do the first conversion. // Overflow lanes greater than the maximum allowed signed value will // set to 0x80000000. 
Negative and NaN lanes will be 0x0 ctx.emit(Inst::xmm_mov(SseOpcode::Movaps, RegMem::from(dst), tmp1)); ctx.emit(Inst::xmm_rm_r(SseOpcode::Cvttps2dq, RegMem::from(dst), dst)); // Set lanes to src - max_signed_int ctx.emit(Inst::xmm_rm_r(SseOpcode::Subps, RegMem::from(tmp2), tmp1)); // Create mask for all positive lanes to saturate (i.e. greater than // or equal to the maxmimum allowable unsigned int). let cond = FcmpImm::from(FloatCC::LessThanOrEqual); ctx.emit(Inst::xmm_rm_r_imm( SseOpcode::Cmpps, RegMem::from(tmp1), tmp2, cond.encode(), OperandSize::Size32, )); // Convert those set of lanes that have the max_signed_int factored out. ctx.emit(Inst::xmm_rm_r( SseOpcode::Cvttps2dq, RegMem::from(tmp1), tmp1, )); // Prepare converted lanes by zeroing negative lanes and prepping lanes // that have positive overflow (based on the mask) by setting these lanes // to 0x7FFFFFFF ctx.emit(Inst::xmm_rm_r(SseOpcode::Pxor, RegMem::from(tmp2), tmp1)); ctx.emit(Inst::xmm_rm_r(SseOpcode::Pxor, RegMem::from(tmp2), tmp2)); ctx.emit(Inst::xmm_rm_r(SseOpcode::Pmaxsd, RegMem::from(tmp2), tmp1)); // Add this second set of converted lanes to the original to properly handle // values greater than max signed int. ctx.emit(Inst::xmm_rm_r(SseOpcode::Paddd, RegMem::from(tmp1), dst)); } else { // Since this branch is also guarded by a check for vector types // neither Opcode::FcvtToUint nor Opcode::FcvtToSint can reach here // due to vector varients not existing. The first two branches will // cover all reachable cases. 
unreachable!(); } } } Opcode::UwidenHigh | Opcode::UwidenLow | Opcode::SwidenHigh | Opcode::SwidenLow => { let input_ty = ctx.input_ty(insn, 0); let output_ty = ctx.output_ty(insn, 0); let src = put_input_in_reg(ctx, inputs[0]); let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); if output_ty.is_vector() { match op { Opcode::SwidenLow => match (input_ty, output_ty) { (types::I8X16, types::I16X8) => { ctx.emit(Inst::xmm_mov(SseOpcode::Pmovsxbw, RegMem::reg(src), dst)); } (types::I16X8, types::I32X4) => { ctx.emit(Inst::xmm_mov(SseOpcode::Pmovsxwd, RegMem::reg(src), dst)); } _ => unreachable!(), }, Opcode::SwidenHigh => match (input_ty, output_ty) { (types::I8X16, types::I16X8) => { ctx.emit(Inst::gen_move(dst, src, output_ty)); ctx.emit(Inst::xmm_rm_r_imm( SseOpcode::Palignr, RegMem::reg(src), dst, 8, OperandSize::Size32, )); ctx.emit(Inst::xmm_mov(SseOpcode::Pmovsxbw, RegMem::from(dst), dst)); } (types::I16X8, types::I32X4) => { ctx.emit(Inst::gen_move(dst, src, output_ty)); ctx.emit(Inst::xmm_rm_r_imm( SseOpcode::Palignr, RegMem::reg(src), dst, 8, OperandSize::Size32, )); ctx.emit(Inst::xmm_mov(SseOpcode::Pmovsxwd, RegMem::from(dst), dst)); } _ => unreachable!(), }, Opcode::UwidenLow => match (input_ty, output_ty) { (types::I8X16, types::I16X8) => { ctx.emit(Inst::xmm_mov(SseOpcode::Pmovzxbw, RegMem::reg(src), dst)); } (types::I16X8, types::I32X4) => { ctx.emit(Inst::xmm_mov(SseOpcode::Pmovzxwd, RegMem::reg(src), dst)); } _ => unreachable!(), }, Opcode::UwidenHigh => match (input_ty, output_ty) { (types::I8X16, types::I16X8) => { ctx.emit(Inst::gen_move(dst, src, output_ty)); ctx.emit(Inst::xmm_rm_r_imm( SseOpcode::Palignr, RegMem::reg(src), dst, 8, OperandSize::Size32, )); ctx.emit(Inst::xmm_mov(SseOpcode::Pmovzxbw, RegMem::from(dst), dst)); } (types::I16X8, types::I32X4) => { ctx.emit(Inst::gen_move(dst, src, output_ty)); ctx.emit(Inst::xmm_rm_r_imm( SseOpcode::Palignr, RegMem::reg(src), dst, 8, OperandSize::Size32, )); 
ctx.emit(Inst::xmm_mov(SseOpcode::Pmovzxwd, RegMem::from(dst), dst)); } _ => unreachable!(), }, _ => unreachable!(), } } else { panic!("Unsupported non-vector type for widen instruction {:?}", ty); } } Opcode::Snarrow | Opcode::Unarrow => { let input_ty = ctx.input_ty(insn, 0); let output_ty = ctx.output_ty(insn, 0); let src1 = put_input_in_reg(ctx, inputs[0]); let src2 = put_input_in_reg(ctx, inputs[1]); let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); if output_ty.is_vector() { match op { Opcode::Snarrow => match (input_ty, output_ty) { (types::I16X8, types::I8X16) => { ctx.emit(Inst::gen_move(dst, src1, input_ty)); ctx.emit(Inst::xmm_rm_r(SseOpcode::Packsswb, RegMem::reg(src2), dst)); } (types::I32X4, types::I16X8) => { ctx.emit(Inst::gen_move(dst, src1, input_ty)); ctx.emit(Inst::xmm_rm_r(SseOpcode::Packssdw, RegMem::reg(src2), dst)); } _ => unreachable!(), }, Opcode::Unarrow => match (input_ty, output_ty) { (types::I16X8, types::I8X16) => { ctx.emit(Inst::gen_move(dst, src1, input_ty)); ctx.emit(Inst::xmm_rm_r(SseOpcode::Packuswb, RegMem::reg(src2), dst)); } (types::I32X4, types::I16X8) => { ctx.emit(Inst::gen_move(dst, src1, input_ty)); ctx.emit(Inst::xmm_rm_r(SseOpcode::Packusdw, RegMem::reg(src2), dst)); } _ => unreachable!(), }, _ => unreachable!(), } } else { panic!("Unsupported non-vector type for widen instruction {:?}", ty); } } Opcode::Bitcast => { let input_ty = ctx.input_ty(insn, 0); let output_ty = ctx.output_ty(insn, 0); match (input_ty, output_ty) { (types::F32, types::I32) => { let src = put_input_in_reg(ctx, inputs[0]); let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); ctx.emit(Inst::xmm_to_gpr( SseOpcode::Movd, src, dst, OperandSize::Size32, )); } (types::I32, types::F32) => { let src = input_to_reg_mem(ctx, inputs[0]); let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); ctx.emit(Inst::gpr_to_xmm( SseOpcode::Movd, src, OperandSize::Size32, dst, )); } (types::F64, types::I64) => { let src = 
put_input_in_reg(ctx, inputs[0]); let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); ctx.emit(Inst::xmm_to_gpr( SseOpcode::Movq, src, dst, OperandSize::Size64, )); } (types::I64, types::F64) => { let src = input_to_reg_mem(ctx, inputs[0]); let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); ctx.emit(Inst::gpr_to_xmm( SseOpcode::Movq, src, OperandSize::Size64, dst, )); } _ => unreachable!("invalid bitcast from {:?} to {:?}", input_ty, output_ty), } } Opcode::Fabs | Opcode::Fneg => { let src = RegMem::reg(put_input_in_reg(ctx, inputs[0])); let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); // In both cases, generate a constant and apply a single binary instruction: // - to compute the absolute value, set all bits to 1 but the MSB to 0, and bit-AND the // src with it. // - to compute the negated value, set all bits to 0 but the MSB to 1, and bit-XOR the // src with it. let output_ty = ty.unwrap(); if !output_ty.is_vector() { let (val, opcode): (u64, _) = match output_ty { types::F32 => match op { Opcode::Fabs => (0x7fffffff, SseOpcode::Andps), Opcode::Fneg => (0x80000000, SseOpcode::Xorps), _ => unreachable!(), }, types::F64 => match op { Opcode::Fabs => (0x7fffffffffffffff, SseOpcode::Andpd), Opcode::Fneg => (0x8000000000000000, SseOpcode::Xorpd), _ => unreachable!(), }, _ => panic!("unexpected type {:?} for Fabs", output_ty), }; for inst in Inst::gen_constant(ValueRegs::one(dst), val as u128, output_ty, |ty| { ctx.alloc_tmp(ty).only_reg().unwrap() }) { ctx.emit(inst); } ctx.emit(Inst::xmm_rm_r(opcode, src, dst)); } else { // Eventually vector constants should be available in `gen_constant` and this block // can be merged with the one above (TODO). if output_ty.bits() == 128 { // Move the `lhs` to the same register as `dst`; this may not emit an actual move // but ensures that the registers are the same to match x86's read-write operand // encoding. 
let src = put_input_in_reg(ctx, inputs[0]); ctx.emit(Inst::gen_move(dst, src, output_ty)); // Generate an all 1s constant in an XMM register. This uses CMPPS but could // have used CMPPD with the same effect. Note, we zero the temp we allocate // because if not, there is a chance that the register we use could be initialized // with NaN .. in which case the CMPPS would fail since NaN != NaN. let tmp = ctx.alloc_tmp(output_ty).only_reg().unwrap(); ctx.emit(Inst::xmm_rm_r(SseOpcode::Xorps, RegMem::from(tmp), tmp)); let cond = FcmpImm::from(FloatCC::Equal); let cmpps = Inst::xmm_rm_r_imm( SseOpcode::Cmpps, RegMem::reg(tmp.to_reg()), tmp, cond.encode(), OperandSize::Size32, ); ctx.emit(cmpps); // Shift the all 1s constant to generate the mask. let lane_bits = output_ty.lane_bits(); let (shift_opcode, opcode, shift_by) = match (op, lane_bits) { (Opcode::Fabs, 32) => (SseOpcode::Psrld, SseOpcode::Andps, 1), (Opcode::Fabs, 64) => (SseOpcode::Psrlq, SseOpcode::Andpd, 1), (Opcode::Fneg, 32) => (SseOpcode::Pslld, SseOpcode::Xorps, 31), (Opcode::Fneg, 64) => (SseOpcode::Psllq, SseOpcode::Xorpd, 63), _ => unreachable!( "unexpected opcode and lane size: {:?}, {} bits", op, lane_bits ), }; let shift = Inst::xmm_rmi_reg(shift_opcode, RegMemImm::imm(shift_by), tmp); ctx.emit(shift); // Apply shifted mask (XOR or AND). 
let mask = Inst::xmm_rm_r(opcode, RegMem::reg(tmp.to_reg()), dst); ctx.emit(mask); } else { panic!("unexpected type {:?} for Fabs", output_ty); } } } Opcode::Fcopysign => { let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); let lhs = put_input_in_reg(ctx, inputs[0]); let rhs = put_input_in_reg(ctx, inputs[1]); let ty = ty.unwrap(); // We're going to generate the following sequence: // // movabs $INT_MIN, tmp_gpr1 // mov{d,q} tmp_gpr1, tmp_xmm1 // movap{s,d} tmp_xmm1, dst // andnp{s,d} src_1, dst // movap{s,d} src_2, tmp_xmm2 // andp{s,d} tmp_xmm1, tmp_xmm2 // orp{s,d} tmp_xmm2, dst let tmp_xmm1 = ctx.alloc_tmp(types::F32).only_reg().unwrap(); let tmp_xmm2 = ctx.alloc_tmp(types::F32).only_reg().unwrap(); let (sign_bit_cst, mov_op, and_not_op, and_op, or_op) = match ty { types::F32 => ( 0x8000_0000, SseOpcode::Movaps, SseOpcode::Andnps, SseOpcode::Andps, SseOpcode::Orps, ), types::F64 => ( 0x8000_0000_0000_0000, SseOpcode::Movapd, SseOpcode::Andnpd, SseOpcode::Andpd, SseOpcode::Orpd, ), _ => { panic!("unexpected type {:?} for copysign", ty); } }; for inst in Inst::gen_constant(ValueRegs::one(tmp_xmm1), sign_bit_cst, ty, |ty| { ctx.alloc_tmp(ty).only_reg().unwrap() }) { ctx.emit(inst); } ctx.emit(Inst::xmm_mov(mov_op, RegMem::reg(tmp_xmm1.to_reg()), dst)); ctx.emit(Inst::xmm_rm_r(and_not_op, RegMem::reg(lhs), dst)); ctx.emit(Inst::xmm_mov(mov_op, RegMem::reg(rhs), tmp_xmm2)); ctx.emit(Inst::xmm_rm_r( and_op, RegMem::reg(tmp_xmm1.to_reg()), tmp_xmm2, )); ctx.emit(Inst::xmm_rm_r(or_op, RegMem::reg(tmp_xmm2.to_reg()), dst)); } Opcode::Ceil | Opcode::Floor | Opcode::Nearest | Opcode::Trunc => { let ty = ty.unwrap(); if isa_flags.use_sse41() { let mode = match op { Opcode::Ceil => RoundImm::RoundUp, Opcode::Floor => RoundImm::RoundDown, Opcode::Nearest => RoundImm::RoundNearest, Opcode::Trunc => RoundImm::RoundZero, _ => panic!("unexpected opcode {:?} in Ceil/Floor/Nearest/Trunc", op), }; let op = match ty { types::F32 => SseOpcode::Roundss, types::F64 => 
SseOpcode::Roundsd, types::F32X4 => SseOpcode::Roundps, types::F64X2 => SseOpcode::Roundpd, _ => panic!("unexpected type {:?} in Ceil/Floor/Nearest/Trunc", ty), }; let src = input_to_reg_mem(ctx, inputs[0]); let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); ctx.emit(Inst::xmm_rm_r_imm( op, src, dst, mode.encode(), OperandSize::Size32, )); } else { // Lower to VM calls when there's no access to SSE4.1. // Note, for vector types on platforms that don't support sse41 // the execution will panic here. let libcall = match (op, ty) { (Opcode::Ceil, types::F32) => LibCall::CeilF32, (Opcode::Ceil, types::F64) => LibCall::CeilF64, (Opcode::Floor, types::F32) => LibCall::FloorF32, (Opcode::Floor, types::F64) => LibCall::FloorF64, (Opcode::Nearest, types::F32) => LibCall::NearestF32, (Opcode::Nearest, types::F64) => LibCall::NearestF64, (Opcode::Trunc, types::F32) => LibCall::TruncF32, (Opcode::Trunc, types::F64) => LibCall::TruncF64, _ => panic!( "unexpected type/opcode {:?}/{:?} in Ceil/Floor/Nearest/Trunc", ty, op ), }; emit_vm_call(ctx, flags, triple, libcall, insn, inputs, outputs)?; } } Opcode::Load | Opcode::Uload8 | Opcode::Sload8 | Opcode::Uload16 | Opcode::Sload16 | Opcode::Uload32 | Opcode::Sload32 | Opcode::LoadComplex | Opcode::Uload8Complex | Opcode::Sload8Complex | Opcode::Uload16Complex | Opcode::Sload16Complex | Opcode::Uload32Complex | Opcode::Sload32Complex | Opcode::Sload8x8 | Opcode::Uload8x8 | Opcode::Sload16x4 | Opcode::Uload16x4 | Opcode::Sload32x2 | Opcode::Uload32x2 => { let offset = ctx.data(insn).load_store_offset().unwrap(); let elem_ty = match op { Opcode::Sload8 | Opcode::Uload8 | Opcode::Sload8Complex | Opcode::Uload8Complex => { types::I8 } Opcode::Sload16 | Opcode::Uload16 | Opcode::Sload16Complex | Opcode::Uload16Complex => types::I16, Opcode::Sload32 | Opcode::Uload32 | Opcode::Sload32Complex | Opcode::Uload32Complex => types::I32, Opcode::Sload8x8 | Opcode::Uload8x8 | Opcode::Sload8x8Complex | Opcode::Uload8x8Complex => 
types::I8X8, Opcode::Sload16x4 | Opcode::Uload16x4 | Opcode::Sload16x4Complex | Opcode::Uload16x4Complex => types::I16X4, Opcode::Sload32x2 | Opcode::Uload32x2 | Opcode::Sload32x2Complex | Opcode::Uload32x2Complex => types::I32X2, Opcode::Load | Opcode::LoadComplex => ctx.output_ty(insn, 0), _ => unimplemented!(), }; let ext_mode = ExtMode::new(elem_ty.bits(), 64); let sign_extend = match op { Opcode::Sload8 | Opcode::Sload8Complex | Opcode::Sload16 | Opcode::Sload16Complex | Opcode::Sload32 | Opcode::Sload32Complex | Opcode::Sload8x8 | Opcode::Sload8x8Complex | Opcode::Sload16x4 | Opcode::Sload16x4Complex | Opcode::Sload32x2 | Opcode::Sload32x2Complex => true, _ => false, }; let amode = match op { Opcode::Load | Opcode::Uload8 | Opcode::Sload8 | Opcode::Uload16 | Opcode::Sload16 | Opcode::Uload32 | Opcode::Sload32 | Opcode::Sload8x8 | Opcode::Uload8x8 | Opcode::Sload16x4 | Opcode::Uload16x4 | Opcode::Sload32x2 | Opcode::Uload32x2 => { assert_eq!(inputs.len(), 1, "only one input for load operands"); lower_to_amode(ctx, inputs[0], offset) } Opcode::LoadComplex | Opcode::Uload8Complex | Opcode::Sload8Complex | Opcode::Uload16Complex | Opcode::Sload16Complex | Opcode::Uload32Complex | Opcode::Sload32Complex | Opcode::Sload8x8Complex | Opcode::Uload8x8Complex | Opcode::Sload16x4Complex | Opcode::Uload16x4Complex | Opcode::Sload32x2Complex | Opcode::Uload32x2Complex => { assert_eq!( inputs.len(), 2, "can't handle more than two inputs in complex load" ); let base = put_input_in_reg(ctx, inputs[0]); let index = put_input_in_reg(ctx, inputs[1]); let shift = 0; let flags = ctx.memflags(insn).expect("load should have memflags"); Amode::imm_reg_reg_shift(offset as u32, base, index, shift).with_flags(flags) } _ => unreachable!(), }; if elem_ty == types::I128 { let dsts = get_output_reg(ctx, outputs[0]); ctx.emit(Inst::mov64_m_r(amode.clone(), dsts.regs()[0])); ctx.emit(Inst::mov64_m_r(amode.offset(8), dsts.regs()[1])); } else { let dst = get_output_reg(ctx, 
outputs[0]).only_reg().unwrap(); let is_xmm = elem_ty.is_float() || elem_ty.is_vector(); match (sign_extend, is_xmm) { (true, false) => { // The load is sign-extended only when the output size is lower than 64 bits, // so ext-mode is defined in this case. ctx.emit(Inst::movsx_rm_r(ext_mode.unwrap(), RegMem::mem(amode), dst)); } (false, false) => { if elem_ty.bytes() == 8 { // Use a plain load. ctx.emit(Inst::mov64_m_r(amode, dst)) } else { // Use a zero-extended load. ctx.emit(Inst::movzx_rm_r(ext_mode.unwrap(), RegMem::mem(amode), dst)) } } (_, true) => { ctx.emit(match elem_ty { types::F32 => Inst::xmm_mov(SseOpcode::Movss, RegMem::mem(amode), dst), types::F64 => Inst::xmm_mov(SseOpcode::Movsd, RegMem::mem(amode), dst), types::I8X8 => { if sign_extend == true { Inst::xmm_mov(SseOpcode::Pmovsxbw, RegMem::mem(amode), dst) } else { Inst::xmm_mov(SseOpcode::Pmovzxbw, RegMem::mem(amode), dst) } } types::I16X4 => { if sign_extend == true { Inst::xmm_mov(SseOpcode::Pmovsxwd, RegMem::mem(amode), dst) } else { Inst::xmm_mov(SseOpcode::Pmovzxwd, RegMem::mem(amode), dst) } } types::I32X2 => { if sign_extend == true { Inst::xmm_mov(SseOpcode::Pmovsxdq, RegMem::mem(amode), dst) } else { Inst::xmm_mov(SseOpcode::Pmovzxdq, RegMem::mem(amode), dst) } } _ if elem_ty.is_vector() && elem_ty.bits() == 128 => { Inst::xmm_mov(SseOpcode::Movups, RegMem::mem(amode), dst) } // TODO Specialize for different types: MOVUPD, MOVDQU _ => unreachable!( "unexpected type for load: {:?} - {:?}", elem_ty, elem_ty.bits() ), }); } } } } Opcode::Store | Opcode::Istore8 | Opcode::Istore16 | Opcode::Istore32 | Opcode::StoreComplex | Opcode::Istore8Complex | Opcode::Istore16Complex | Opcode::Istore32Complex => { let offset = ctx.data(insn).load_store_offset().unwrap(); let elem_ty = match op { Opcode::Istore8 | Opcode::Istore8Complex => types::I8, Opcode::Istore16 | Opcode::Istore16Complex => types::I16, Opcode::Istore32 | Opcode::Istore32Complex => types::I32, Opcode::Store | Opcode::StoreComplex => 
ctx.input_ty(insn, 0), _ => unreachable!(), }; let addr = match op { Opcode::Store | Opcode::Istore8 | Opcode::Istore16 | Opcode::Istore32 => { assert_eq!(inputs.len(), 2, "only one input for store memory operands"); lower_to_amode(ctx, inputs[1], offset) } Opcode::StoreComplex | Opcode::Istore8Complex | Opcode::Istore16Complex | Opcode::Istore32Complex => { assert_eq!( inputs.len(), 3, "can't handle more than two inputs in complex store" ); let base = put_input_in_reg(ctx, inputs[1]); let index = put_input_in_reg(ctx, inputs[2]); let shift = 0; let flags = ctx.memflags(insn).expect("store should have memflags"); Amode::imm_reg_reg_shift(offset as u32, base, index, shift).with_flags(flags) } _ => unreachable!(), }; if elem_ty == types::I128 { let srcs = put_input_in_regs(ctx, inputs[0]); ctx.emit(Inst::store(types::I64, srcs.regs()[0], addr.clone())); ctx.emit(Inst::store(types::I64, srcs.regs()[1], addr.offset(8))); } else { let src = put_input_in_reg(ctx, inputs[0]); ctx.emit(Inst::store(elem_ty, src, addr)); } } Opcode::AtomicRmw => { // This is a simple, general-case atomic update, based on a loop involving // `cmpxchg`. Note that we could do much better than this in the case where the old // value at the location (that is to say, the SSA `Value` computed by this CLIF // instruction) is not required. In that case, we could instead implement this // using a single `lock`-prefixed x64 read-modify-write instruction. Also, even in // the case where the old value is required, for the `add` and `sub` cases, we can // use the single instruction `lock xadd`. However, those improvements have been // left for another day. 
// TODO: filed as https://github.com/bytecodealliance/wasmtime/issues/2153 let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); let mut addr = put_input_in_reg(ctx, inputs[0]); let mut arg2 = put_input_in_reg(ctx, inputs[1]); let ty_access = ty.unwrap(); assert!(is_valid_atomic_transaction_ty(ty_access)); // Make sure that both args are in virtual regs, since in effect we have to do a // parallel copy to get them safely to the AtomicRmwSeq input regs, and that's not // guaranteed safe if either is in a real reg. addr = ctx.ensure_in_vreg(addr, types::I64); arg2 = ctx.ensure_in_vreg(arg2, types::I64); // Move the args to the preordained AtomicRMW input regs. Note that `AtomicRmwSeq` // operates at whatever width is specified by `ty`, so there's no need to // zero-extend `arg2` in the case of `ty` being I8/I16/I32. ctx.emit(Inst::gen_move( Writable::from_reg(regs::r9()), addr, types::I64, )); ctx.emit(Inst::gen_move( Writable::from_reg(regs::r10()), arg2, types::I64, )); // Now the AtomicRmwSeq (pseudo-) instruction itself let op = inst_common::AtomicRmwOp::from(ctx.data(insn).atomic_rmw_op().unwrap()); ctx.emit(Inst::AtomicRmwSeq { ty: ty_access, op }); // And finally, copy the preordained AtomicRmwSeq output reg to its destination. ctx.emit(Inst::gen_move(dst, regs::rax(), types::I64)); } Opcode::AtomicCas => { // This is very similar to, but not identical to, the `AtomicRmw` case. As with // `AtomicRmw`, there's no need to zero-extend narrow values here. let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); let addr = lower_to_amode(ctx, inputs[0], 0); let expected = put_input_in_reg(ctx, inputs[1]); let replacement = put_input_in_reg(ctx, inputs[2]); let ty_access = ty.unwrap(); assert!(is_valid_atomic_transaction_ty(ty_access)); // Move the expected value into %rax. Because there's only one fixed register on // the input side, we don't have to use `ensure_in_vreg`, as is necessary in the // `AtomicRmw` case. 
    // Finish `Opcode::AtomicCas`: CMPXCHG compares against %rax and leaves the
    // previous memory value there, which is exactly the CLIF-visible result.
    ctx.emit(Inst::gen_move(
        Writable::from_reg(regs::rax()),
        expected,
        types::I64,
    ));
    ctx.emit(Inst::LockCmpxchg {
        ty: ty_access,
        src: replacement,
        dst: addr.into(),
    });
    // And finally, copy the old value at the location to its destination reg.
    ctx.emit(Inst::gen_move(dst, regs::rax(), types::I64));
}
Opcode::AtomicLoad => {
    // This is a normal load. The x86-TSO memory model provides sufficient sequencing
    // to satisfy the CLIF synchronisation requirements for `AtomicLoad` without the
    // need for any fence instructions.
    let data = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
    let addr = lower_to_amode(ctx, inputs[0], 0);
    let ty_access = ty.unwrap();
    assert!(is_valid_atomic_transaction_ty(ty_access));
    let rm = RegMem::mem(addr);
    if ty_access == types::I64 {
        ctx.emit(Inst::mov64_rm_r(rm, data));
    } else {
        // Narrow atomic loads are zero-extended up to 64 bits.
        let ext_mode = ExtMode::new(ty_access.bits(), 64).expect(&format!(
            "invalid extension during AtomicLoad: {} -> {}",
            ty_access.bits(),
            64
        ));
        ctx.emit(Inst::movzx_rm_r(ext_mode, rm, data));
    }
}
Opcode::AtomicStore => {
    // This is a normal store, followed by an `mfence` instruction.
    // Body of `Opcode::AtomicStore`: a plain store followed by MFENCE (see the
    // x86-TSO note on `AtomicLoad` above for the memory-model reasoning).
    let data = put_input_in_reg(ctx, inputs[0]);
    let addr = lower_to_amode(ctx, inputs[1], 0);
    let ty_access = ctx.input_ty(insn, 0);
    assert!(is_valid_atomic_transaction_ty(ty_access));
    ctx.emit(Inst::store(ty_access, data, addr));
    ctx.emit(Inst::Fence {
        kind: FenceKind::MFence,
    });
}
Opcode::Fence => {
    // A CLIF `fence` lowers directly to MFENCE.
    ctx.emit(Inst::Fence {
        kind: FenceKind::MFence,
    });
}
Opcode::FuncAddr => {
    // Materialize the address of an external function into `dst`.
    let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
    let (extname, _) = ctx.call_target(insn).unwrap();
    let extname = extname.clone();
    ctx.emit(Inst::LoadExtName {
        dst,
        name: Box::new(extname),
        offset: 0,
    });
}
Opcode::SymbolValue => {
    // Like `FuncAddr`, but for an arbitrary external symbol plus an offset.
    let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
    let (extname, _, offset) = ctx.symbol_value(insn).unwrap();
    let extname = extname.clone();
    ctx.emit(Inst::LoadExtName {
        dst,
        name: Box::new(extname),
        offset,
    });
}
Opcode::StackAddr => {
    // Compute the address of a stack slot (plus immediate offset) via the ABI
    // implementation.
    let (stack_slot, offset) = match *ctx.data(insn) {
        InstructionData::StackLoad {
            opcode: Opcode::StackAddr,
            stack_slot,
            offset,
        } => (stack_slot, offset),
        _ => unreachable!(),
    };
    let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
    let offset: i32 = offset.into();
    let inst = ctx
        .abi()
        .stackslot_addr(stack_slot, u32::try_from(offset).unwrap(), dst);
    ctx.emit(inst);
}
Opcode::Select => {
    let flag_input = inputs[0];
    if let Some(fcmp) = matches_input(ctx, flag_input, Opcode::Fcmp) {
        let cond_code = ctx.data(fcmp).fp_cond_code().unwrap();
        // For equal, we flip the operands, because we can't test a conjunction of
        // CPU flags with a single cmove; see InvertedEqualOrConditions doc comment.
let (lhs_input, rhs_input) = match cond_code { FloatCC::Equal => (inputs[2], inputs[1]), _ => (inputs[1], inputs[2]), }; let ty = ctx.output_ty(insn, 0); let rhs = put_input_in_regs(ctx, rhs_input); let dst = get_output_reg(ctx, outputs[0]); let lhs = put_input_in_regs(ctx, lhs_input); // We request inversion of Equal to NotEqual here: taking LHS if equal would mean // take it if both CC::NP and CC::Z are set, the conjunction of which can't be // modeled with a single cmov instruction. Instead, we'll swap LHS and RHS in the // select operation, and invert the equal to a not-equal here. let fcmp_results = emit_fcmp(ctx, fcmp, cond_code, FcmpSpec::InvertEqual); if let FcmpCondResult::InvertedEqualOrConditions(_, _) = &fcmp_results { // Keep this sync'd with the lowering of the select inputs above. assert_eq!(cond_code, FloatCC::Equal); } emit_moves(ctx, dst, rhs, ty); let operand_size = if ty == types::F64 { OperandSize::Size64 } else { OperandSize::Size32 }; match fcmp_results { FcmpCondResult::Condition(cc) => { if is_int_or_ref_ty(ty) || ty == types::I128 || ty == types::B128 { let size = ty.bytes() as u8; emit_cmoves(ctx, size, cc, lhs, dst); } else { ctx.emit(Inst::xmm_cmove( operand_size, cc, RegMem::reg(lhs.only_reg().unwrap()), dst.only_reg().unwrap(), )); } } FcmpCondResult::AndConditions(_, _) => { unreachable!( "can't AND with select; see above comment about inverting equal" ); } FcmpCondResult::InvertedEqualOrConditions(cc1, cc2) | FcmpCondResult::OrConditions(cc1, cc2) => { if is_int_or_ref_ty(ty) || ty == types::I128 { let size = ty.bytes() as u8; emit_cmoves(ctx, size, cc1, lhs.clone(), dst); emit_cmoves(ctx, size, cc2, lhs, dst); } else { ctx.emit(Inst::xmm_cmove( operand_size, cc1, RegMem::reg(lhs.only_reg().unwrap()), dst.only_reg().unwrap(), )); ctx.emit(Inst::xmm_cmove( operand_size, cc2, RegMem::reg(lhs.only_reg().unwrap()), dst.only_reg().unwrap(), )); } } } } else { let ty = ty.unwrap(); let size = ty.bytes() as u8; let lhs = 
put_input_in_regs(ctx, inputs[1]); let rhs = put_input_in_regs(ctx, inputs[2]); let dst = get_output_reg(ctx, outputs[0]); let cc = if let Some(icmp) = matches_input(ctx, flag_input, Opcode::Icmp) { let cond_code = ctx.data(icmp).cond_code().unwrap(); let cond_code = emit_cmp(ctx, icmp, cond_code); CC::from_intcc(cond_code) } else { let sel_ty = ctx.input_ty(insn, 0); let size = OperandSize::from_ty(ctx.input_ty(insn, 0)); let test = put_input_in_reg(ctx, flag_input); let test_input = if sel_ty == types::B1 { // The input is a boolean value; test the LSB for nonzero with: // test reg, 1 RegMemImm::imm(1) } else { // The input is an integer; test the whole value for // nonzero with: // test reg, reg // // (It doesn't make sense to have a boolean wider than // one bit here -- which bit would cause us to select an // input?) assert!(!is_bool_ty(sel_ty)); RegMemImm::reg(test) }; ctx.emit(Inst::test_rmi_r(size, test_input, test)); CC::NZ }; // This doesn't affect the flags. emit_moves(ctx, dst, rhs, ty); if is_int_or_ref_ty(ty) || ty == types::I128 { emit_cmoves(ctx, size, cc, lhs, dst); } else { debug_assert!(ty == types::F32 || ty == types::F64); ctx.emit(Inst::xmm_cmove( if ty == types::F64 { OperandSize::Size64 } else { OperandSize::Size32 }, cc, RegMem::reg(lhs.only_reg().unwrap()), dst.only_reg().unwrap(), )); } } } Opcode::Selectif | Opcode::SelectifSpectreGuard => { let lhs = put_input_in_regs(ctx, inputs[1]); let rhs = put_input_in_regs(ctx, inputs[2]); let dst = get_output_reg(ctx, outputs[0]); let ty = ctx.output_ty(insn, 0); // Verification ensures that the input is always a single-def ifcmp. 
let cmp_insn = ctx .get_input_as_source_or_const(inputs[0].insn, inputs[0].input) .inst .unwrap() .0; debug_assert_eq!(ctx.data(cmp_insn).opcode(), Opcode::Ifcmp); let cond_code = ctx.data(insn).cond_code().unwrap(); let cond_code = emit_cmp(ctx, cmp_insn, cond_code); let cc = CC::from_intcc(cond_code); if is_int_or_ref_ty(ty) || ty == types::I128 { let size = ty.bytes() as u8; emit_moves(ctx, dst, rhs, ty); emit_cmoves(ctx, size, cc, lhs, dst); } else { debug_assert!(ty == types::F32 || ty == types::F64); emit_moves(ctx, dst, rhs, ty); ctx.emit(Inst::xmm_cmove( if ty == types::F64 { OperandSize::Size64 } else { OperandSize::Size32 }, cc, RegMem::reg(lhs.only_reg().unwrap()), dst.only_reg().unwrap(), )); } } Opcode::Udiv | Opcode::Urem | Opcode::Sdiv | Opcode::Srem => { let kind = match op { Opcode::Udiv => DivOrRemKind::UnsignedDiv, Opcode::Sdiv => DivOrRemKind::SignedDiv, Opcode::Urem => DivOrRemKind::UnsignedRem, Opcode::Srem => DivOrRemKind::SignedRem, _ => unreachable!(), }; let is_div = kind.is_div(); let input_ty = ctx.input_ty(insn, 0); let size = OperandSize::from_ty(input_ty); let dividend = put_input_in_reg(ctx, inputs[0]); let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); ctx.emit(Inst::gen_move( Writable::from_reg(regs::rax()), dividend, input_ty, )); // Always do explicit checks for `srem`: otherwise, INT_MIN % -1 is not handled properly. if flags.avoid_div_traps() || op == Opcode::Srem { // A vcode meta-instruction is used to lower the inline checks, since they embed // pc-relative offsets that must not change, thus requiring regalloc to not // interfere by introducing spills and reloads. // // Note it keeps the result in $rax (for divide) or $rdx (for rem), so that // regalloc is aware of the coalescing opportunity between rax/rdx and the // destination register. 
let divisor = put_input_in_reg(ctx, inputs[1]); let divisor_copy = ctx.alloc_tmp(types::I64).only_reg().unwrap(); ctx.emit(Inst::gen_move(divisor_copy, divisor, types::I64)); let tmp = if op == Opcode::Sdiv && size == OperandSize::Size64 { Some(ctx.alloc_tmp(types::I64).only_reg().unwrap()) } else { None }; // TODO use xor ctx.emit(Inst::imm( OperandSize::Size32, 0, Writable::from_reg(regs::rdx()), )); ctx.emit(Inst::checked_div_or_rem_seq(kind, size, divisor_copy, tmp)); } else { // We don't want more than one trap record for a single instruction, // so let's not allow the "mem" case (load-op merging) here; force // divisor into a register instead. let divisor = RegMem::reg(put_input_in_reg(ctx, inputs[1])); // Fill in the high parts: if kind.is_signed() { // sign-extend the sign-bit of al into ah for size 1, or rax into rdx, for // signed opcodes. ctx.emit(Inst::sign_extend_data(size)); } else if input_ty == types::I8 { ctx.emit(Inst::movzx_rm_r( ExtMode::BL, RegMem::reg(regs::rax()), Writable::from_reg(regs::rax()), )); } else { // zero for unsigned opcodes. ctx.emit(Inst::imm( OperandSize::Size64, 0, Writable::from_reg(regs::rdx()), )); } // Emit the actual idiv. ctx.emit(Inst::div(size, kind.is_signed(), divisor)); } // Move the result back into the destination reg. if is_div { // The quotient is in rax. ctx.emit(Inst::gen_move(dst, regs::rax(), input_ty)); } else { if size == OperandSize::Size8 { // The remainder is in AH. Right-shift by 8 bits then move from rax. ctx.emit(Inst::shift_r( OperandSize::Size64, ShiftKind::ShiftRightLogical, Some(8), Writable::from_reg(regs::rax()), )); ctx.emit(Inst::gen_move(dst, regs::rax(), input_ty)); } else { // The remainder is in rdx. 
            // (End of the division/remainder lowering: for non-8-bit remainders
            // the divide instruction leaves the remainder in %rdx.)
            ctx.emit(Inst::gen_move(dst, regs::rdx(), input_ty));
        }
    }
}
Opcode::Umulhi | Opcode::Smulhi => {
    // High half of a full-width multiply: x86 MUL/IMUL take one operand in %rax
    // and write the high half to %rdx.
    let input_ty = ctx.input_ty(insn, 0);
    let lhs = put_input_in_reg(ctx, inputs[0]);
    let rhs = input_to_reg_mem(ctx, inputs[1]);
    let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
    // Move lhs in %rax.
    ctx.emit(Inst::gen_move(
        Writable::from_reg(regs::rax()),
        lhs,
        input_ty,
    ));
    // Emit the actual mul or imul.
    let signed = op == Opcode::Smulhi;
    ctx.emit(Inst::mul_hi(OperandSize::from_ty(input_ty), signed, rhs));
    // Read the result from the high part (stored in %rdx).
    ctx.emit(Inst::gen_move(dst, regs::rdx(), input_ty));
}
Opcode::GetPinnedReg => {
    // Read the VM's pinned register into `dst`.
    let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
    ctx.emit(Inst::gen_move(dst, regs::pinned_reg(), types::I64));
}
Opcode::SetPinnedReg => {
    // Write `src` into the VM's pinned register.
    let src = put_input_in_reg(ctx, inputs[0]);
    ctx.emit(Inst::gen_move(
        Writable::from_reg(regs::pinned_reg()),
        src,
        types::I64,
    ));
}
Opcode::Vconst => {
    // Load a 128-bit vector constant from the constant pool.
    let used_constant = if let &InstructionData::UnaryConst {
        constant_handle, ..
    } = ctx.data(insn)
    {
        ctx.use_constant(VCodeConstantData::Pool(
            constant_handle,
            ctx.get_constant_data(constant_handle).clone(),
        ))
    } else {
        unreachable!("vconst should always have unary_const format")
    };
    // TODO use Inst::gen_constant() instead.
    let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
    let ty = ty.unwrap();
    ctx.emit(Inst::xmm_load_const(used_constant, dst, ty));
}
Opcode::RawBitcast => {
    // A raw_bitcast is just a mechanism for correcting the type of V128 values (see
    // https://github.com/bytecodealliance/wasmtime/issues/1147). As such, this IR
    // instruction should emit no machine code but a move is necessary to give the register
    // allocator a definition for the output virtual register.
let src = put_input_in_reg(ctx, inputs[0]); let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); let ty = ty.unwrap(); ctx.emit(Inst::gen_move(dst, src, ty)); } Opcode::Shuffle => { let ty = ty.unwrap(); let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); let lhs_ty = ctx.input_ty(insn, 0); let lhs = put_input_in_reg(ctx, inputs[0]); let rhs = put_input_in_reg(ctx, inputs[1]); let mask = match ctx.get_immediate(insn) { Some(DataValue::V128(bytes)) => bytes.to_vec(), _ => unreachable!("shuffle should always have a 16-byte immediate"), }; // A mask-building helper: in 128-bit SIMD, 0-15 indicate which lane to read from and a // 1 in the most significant position zeroes the lane. let zero_unknown_lane_index = |b: u8| if b > 15 { 0b10000000 } else { b }; ctx.emit(Inst::gen_move(dst, rhs, ty)); if rhs == lhs { // If `lhs` and `rhs` are the same we can use a single PSHUFB to shuffle the XMM // register. We statically build `constructed_mask` to zero out any unknown lane // indices (may not be completely necessary: verification could fail incorrect mask // values) and fix the indexes to all point to the `dst` vector. let constructed_mask = mask .iter() // If the mask is greater than 15 it still may be referring to a lane in b. .map(|&b| if b > 15 { b.wrapping_sub(16) } else { b }) .map(zero_unknown_lane_index) .collect(); let constant = ctx.use_constant(VCodeConstantData::Generated(constructed_mask)); let tmp = ctx.alloc_tmp(types::I8X16).only_reg().unwrap(); ctx.emit(Inst::xmm_load_const(constant, tmp, ty)); // After loading the constructed mask in a temporary register, we use this to // shuffle the `dst` register (remember that, in this case, it is the same as // `src` so we disregard this register). ctx.emit(Inst::xmm_rm_r(SseOpcode::Pshufb, RegMem::from(tmp), dst)); } else { // If `lhs` and `rhs` are different, we must shuffle each separately and then OR // them together. This is necessary due to PSHUFB semantics. 
As in the case above, // we build the `constructed_mask` for each case statically. // PSHUFB the `lhs` argument into `tmp0`, placing zeroes for unused lanes. let tmp0 = ctx.alloc_tmp(lhs_ty).only_reg().unwrap(); ctx.emit(Inst::gen_move(tmp0, lhs, lhs_ty)); let constructed_mask = mask.iter().cloned().map(zero_unknown_lane_index).collect(); let constant = ctx.use_constant(VCodeConstantData::Generated(constructed_mask)); let tmp1 = ctx.alloc_tmp(types::I8X16).only_reg().unwrap(); ctx.emit(Inst::xmm_load_const(constant, tmp1, ty)); ctx.emit(Inst::xmm_rm_r(SseOpcode::Pshufb, RegMem::from(tmp1), tmp0)); // PSHUFB the second argument, placing zeroes for unused lanes. let constructed_mask = mask .iter() .map(|b| b.wrapping_sub(16)) .map(zero_unknown_lane_index) .collect(); let constant = ctx.use_constant(VCodeConstantData::Generated(constructed_mask)); let tmp2 = ctx.alloc_tmp(types::I8X16).only_reg().unwrap(); ctx.emit(Inst::xmm_load_const(constant, tmp2, ty)); ctx.emit(Inst::xmm_rm_r(SseOpcode::Pshufb, RegMem::from(tmp2), dst)); // OR the shuffled registers (the mechanism and lane-size for OR-ing the registers // is not important). ctx.emit(Inst::xmm_rm_r(SseOpcode::Orps, RegMem::from(tmp0), dst)); // TODO when AVX512 is enabled we should replace this sequence with a single VPERMB } } Opcode::Swizzle => { // SIMD swizzle; the following inefficient implementation is due to the Wasm SIMD spec // requiring mask indexes greater than 15 to have the same semantics as a 0 index. For // the spec discussion, see https://github.com/WebAssembly/simd/issues/93. The CLIF // semantics match the Wasm SIMD semantics for this instruction. // The instruction format maps to variables like: %dst = swizzle %src, %mask let ty = ty.unwrap(); let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); let src = put_input_in_reg(ctx, inputs[0]); let swizzle_mask = put_input_in_reg(ctx, inputs[1]); // Inform the register allocator that `src` and `dst` should be in the same register. 
ctx.emit(Inst::gen_move(dst, src, ty)); // Create a mask for zeroing out-of-bounds lanes of the swizzle mask. let zero_mask = ctx.alloc_tmp(types::I8X16).only_reg().unwrap(); static ZERO_MASK_VALUE: [u8; 16] = [ 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, ]; let constant = ctx.use_constant(VCodeConstantData::WellKnown(&ZERO_MASK_VALUE)); ctx.emit(Inst::xmm_load_const(constant, zero_mask, ty)); // Use the `zero_mask` on a writable `swizzle_mask`. let swizzle_mask = Writable::from_reg(swizzle_mask); ctx.emit(Inst::xmm_rm_r( SseOpcode::Paddusb, RegMem::from(zero_mask), swizzle_mask, )); // Shuffle `dst` using the fixed-up `swizzle_mask`. ctx.emit(Inst::xmm_rm_r( SseOpcode::Pshufb, RegMem::from(swizzle_mask), dst, )); } Opcode::Insertlane => { // The instruction format maps to variables like: %dst = insertlane %in_vec, %src, %lane let ty = ty.unwrap(); let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); let in_vec = put_input_in_reg(ctx, inputs[0]); let src_ty = ctx.input_ty(insn, 1); debug_assert!(!src_ty.is_vector()); let src = input_to_reg_mem(ctx, inputs[1]); let lane = if let InstructionData::TernaryImm8 { imm, .. } = ctx.data(insn) { *imm } else { unreachable!(); }; debug_assert!(lane < ty.lane_count() as u8); ctx.emit(Inst::gen_move(dst, in_vec, ty)); emit_insert_lane(ctx, src, dst, lane, ty.lane_type()); } Opcode::Extractlane => { // The instruction format maps to variables like: %dst = extractlane %src, %lane let ty = ty.unwrap(); let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); let src_ty = ctx.input_ty(insn, 0); assert_eq!(src_ty.bits(), 128); let src = put_input_in_reg(ctx, inputs[0]); let lane = if let InstructionData::BinaryImm8 { imm, .. 
} = ctx.data(insn) { *imm } else { unreachable!(); }; debug_assert!(lane < src_ty.lane_count() as u8); emit_extract_lane(ctx, src, dst, lane, ty); } Opcode::ScalarToVector => { // When moving a scalar value to a vector register, we must be handle several // situations: // 1. a scalar float is already in an XMM register, so we simply move it // 2. a scalar of any other type resides in a GPR register: MOVD moves the bits to an // XMM register and zeroes the upper bits // 3. a scalar (float or otherwise) that has previously been loaded from memory (e.g. // the default lowering of Wasm's `load[32|64]_zero`) can be lowered to a single // MOVSS/MOVSD instruction; to do this, we rely on `input_to_reg_mem` to sink the // unused load. let src = input_to_reg_mem(ctx, inputs[0]); let src_ty = ctx.input_ty(insn, 0); let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); let dst_ty = ty.unwrap(); assert!(src_ty == dst_ty.lane_type() && dst_ty.bits() == 128); match src { RegMem::Reg { reg } => { if src_ty.is_float() { // Case 1: when moving a scalar float, we simply move from one XMM register // to another, expecting the register allocator to elide this. Here we // assume that the upper bits of a scalar float have not been munged with // (the same assumption the old backend makes). ctx.emit(Inst::gen_move(dst, reg, dst_ty)); } else { // Case 2: when moving a scalar value of any other type, use MOVD to zero // the upper lanes. let src_size = match src_ty.bits() { 32 => OperandSize::Size32, 64 => OperandSize::Size64, _ => unimplemented!("invalid source size for type: {}", src_ty), }; ctx.emit(Inst::gpr_to_xmm(SseOpcode::Movd, src, src_size, dst)); } } RegMem::Mem { .. } => { // Case 3: when presented with `load + scalar_to_vector`, coalesce into a single // MOVSS/MOVSD instruction. 
let opcode = match src_ty.bits() { 32 => SseOpcode::Movss, 64 => SseOpcode::Movsd, _ => unimplemented!("unable to move scalar to vector for type: {}", src_ty), }; ctx.emit(Inst::xmm_mov(opcode, src, dst)); } } } Opcode::Splat => { let ty = ty.unwrap(); assert_eq!(ty.bits(), 128); let src_ty = ctx.input_ty(insn, 0); assert!(src_ty.bits() < 128); let src = input_to_reg_mem(ctx, inputs[0]); let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); // We know that splat will overwrite all of the lanes of `dst` but it takes several // instructions to do so. Because of the multiple instructions, there is no good way to // declare `dst` a `def` except with the following pseudo-instruction. ctx.emit(Inst::xmm_uninit_value(dst)); // TODO: eventually many of these sequences could be optimized with AVX's VBROADCAST* // and VPBROADCAST*. match ty.lane_bits() { 8 => { emit_insert_lane(ctx, src, dst, 0, ty.lane_type()); // Initialize a register with all 0s. let tmp = ctx.alloc_tmp(ty).only_reg().unwrap(); ctx.emit(Inst::xmm_rm_r(SseOpcode::Pxor, RegMem::from(tmp), tmp)); // Shuffle the lowest byte lane to all other lanes. ctx.emit(Inst::xmm_rm_r(SseOpcode::Pshufb, RegMem::from(tmp), dst)) } 16 => { emit_insert_lane(ctx, src.clone(), dst, 0, ty.lane_type()); emit_insert_lane(ctx, src, dst, 1, ty.lane_type()); // Shuffle the lowest two lanes to all other lanes. ctx.emit(Inst::xmm_rm_r_imm( SseOpcode::Pshufd, RegMem::from(dst), dst, 0, OperandSize::Size32, )) } 32 => { emit_insert_lane(ctx, src, dst, 0, ty.lane_type()); // Shuffle the lowest lane to all other lanes. 
ctx.emit(Inst::xmm_rm_r_imm( SseOpcode::Pshufd, RegMem::from(dst), dst, 0, OperandSize::Size32, )) } 64 => { emit_insert_lane(ctx, src.clone(), dst, 0, ty.lane_type()); emit_insert_lane(ctx, src, dst, 1, ty.lane_type()); } _ => panic!("Invalid type to splat: {}", ty), } } Opcode::VanyTrue => { let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); let src_ty = ctx.input_ty(insn, 0); assert_eq!(src_ty.bits(), 128); let src = put_input_in_reg(ctx, inputs[0]); // Set the ZF if the result is all zeroes. ctx.emit(Inst::xmm_cmp_rm_r(SseOpcode::Ptest, RegMem::reg(src), src)); // If the ZF is not set, place a 1 in `dst`. ctx.emit(Inst::setcc(CC::NZ, dst)); } Opcode::VallTrue => { let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); let src_ty = ctx.input_ty(insn, 0); assert_eq!(src_ty.bits(), 128); let src = input_to_reg_mem(ctx, inputs[0]); let eq = |ty: Type| match ty.lane_bits() { 8 => SseOpcode::Pcmpeqb, 16 => SseOpcode::Pcmpeqw, 32 => SseOpcode::Pcmpeqd, 64 => SseOpcode::Pcmpeqq, _ => panic!("Unable to find an instruction for {} for type: {}", op, ty), }; // Initialize a register with all 0s. let tmp = ctx.alloc_tmp(src_ty).only_reg().unwrap(); ctx.emit(Inst::xmm_rm_r(SseOpcode::Pxor, RegMem::from(tmp), tmp)); // Compare to see what lanes are filled with all 1s. ctx.emit(Inst::xmm_rm_r(eq(src_ty), src, tmp)); // Set the ZF if the result is all zeroes. ctx.emit(Inst::xmm_cmp_rm_r( SseOpcode::Ptest, RegMem::from(tmp), tmp.to_reg(), )); // If the ZF is set, place a 1 in `dst`. ctx.emit(Inst::setcc(CC::Z, dst)); } Opcode::VhighBits => { let src = put_input_in_reg(ctx, inputs[0]); let src_ty = ctx.input_ty(insn, 0); debug_assert!(src_ty.is_vector() && src_ty.bits() == 128); let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); debug_assert!(dst.to_reg().get_class() == RegClass::I64); // The Intel specification allows using both 32-bit and 64-bit GPRs as destination for // the "move mask" instructions. 
This is controlled by the REX.R bit: "In 64-bit mode, // the instruction can access additional registers when used with a REX.R prefix. The // default operand size is 64-bit in 64-bit mode" (PMOVMSKB in IA Software Development // Manual, vol. 2). This being the case, we will always clear REX.W since its use is // unnecessary (`OperandSize` is used for setting/clearing REX.W). let size = OperandSize::Size32; match src_ty { types::I8X16 | types::B8X16 => { ctx.emit(Inst::xmm_to_gpr(SseOpcode::Pmovmskb, src, dst, size)) } types::I32X4 | types::B32X4 | types::F32X4 => { ctx.emit(Inst::xmm_to_gpr(SseOpcode::Movmskps, src, dst, size)) } types::I64X2 | types::B64X2 | types::F64X2 => { ctx.emit(Inst::xmm_to_gpr(SseOpcode::Movmskpd, src, dst, size)) } types::I16X8 | types::B16X8 => { // There is no x86 instruction for extracting the high bit of 16-bit lanes so // here we: // - duplicate the 16-bit lanes of `src` into 8-bit lanes: // PACKSSWB([x1, x2, ...], [x1, x2, ...]) = [x1', x2', ..., x1', x2', ...] // - use PMOVMSKB to gather the high bits; now we have duplicates, though // - shift away the bottom 8 high bits to remove the duplicates. 
let tmp = ctx.alloc_tmp(src_ty).only_reg().unwrap(); ctx.emit(Inst::gen_move(tmp, src, src_ty)); ctx.emit(Inst::xmm_rm_r(SseOpcode::Packsswb, RegMem::reg(src), tmp)); ctx.emit(Inst::xmm_to_gpr( SseOpcode::Pmovmskb, tmp.to_reg(), dst, size, )); ctx.emit(Inst::shift_r( OperandSize::Size64, ShiftKind::ShiftRightLogical, Some(8), dst, )); } _ => unimplemented!("unknown input type {} for {}", src_ty, op), } } Opcode::Iconcat => { let ty = ctx.output_ty(insn, 0); assert_eq!( ty, types::I128, "Iconcat not expected to be used for non-128-bit type" ); assert_eq!(ctx.input_ty(insn, 0), types::I64); assert_eq!(ctx.input_ty(insn, 1), types::I64); let lo = put_input_in_reg(ctx, inputs[0]); let hi = put_input_in_reg(ctx, inputs[1]); let dst = get_output_reg(ctx, outputs[0]); ctx.emit(Inst::gen_move(dst.regs()[0], lo, types::I64)); ctx.emit(Inst::gen_move(dst.regs()[1], hi, types::I64)); } Opcode::Isplit => { let ty = ctx.input_ty(insn, 0); assert_eq!( ty, types::I128, "Iconcat not expected to be used for non-128-bit type" ); assert_eq!(ctx.output_ty(insn, 0), types::I64); assert_eq!(ctx.output_ty(insn, 1), types::I64); let src = put_input_in_regs(ctx, inputs[0]); let dst_lo = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); let dst_hi = get_output_reg(ctx, outputs[1]).only_reg().unwrap(); ctx.emit(Inst::gen_move(dst_lo, src.regs()[0], types::I64)); ctx.emit(Inst::gen_move(dst_hi, src.regs()[1], types::I64)); } Opcode::TlsValue => match flags.tls_model() { TlsModel::ElfGd => { let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); let (name, _, _) = ctx.symbol_value(insn).unwrap(); let symbol = name.clone(); ctx.emit(Inst::ElfTlsGetAddr { symbol }); ctx.emit(Inst::gen_move(dst, regs::rax(), types::I64)); } TlsModel::Macho => { let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); let (name, _, _) = ctx.symbol_value(insn).unwrap(); let symbol = name.clone(); ctx.emit(Inst::MachOTlsGetAddr { symbol }); ctx.emit(Inst::gen_move(dst, regs::rax(), types::I64)); } _ 
=> { todo!( "Unimplemented TLS model in x64 backend: {:?}", flags.tls_model() ); } }, Opcode::IaddImm | Opcode::ImulImm | Opcode::UdivImm | Opcode::SdivImm | Opcode::UremImm | Opcode::SremImm | Opcode::IrsubImm | Opcode::IaddCin | Opcode::IaddIfcin | Opcode::IaddCout | Opcode::IaddCarry | Opcode::IaddIfcarry | Opcode::IsubBin | Opcode::IsubIfbin | Opcode::IsubBout | Opcode::IsubIfbout | Opcode::IsubBorrow | Opcode::IsubIfborrow | Opcode::BandImm | Opcode::BorImm | Opcode::BxorImm | Opcode::RotlImm | Opcode::RotrImm | Opcode::IshlImm | Opcode::UshrImm | Opcode::SshrImm => { panic!("ALU+imm and ALU+carry ops should not appear here!"); } _ => unimplemented!("unimplemented lowering for opcode {:?}", op), } Ok(()) } //============================================================================= // Lowering-backend trait implementation. impl LowerBackend for X64Backend { type MInst = Inst; fn lower<C: LowerCtx<I = Inst>>(&self, ctx: &mut C, ir_inst: IRInst) -> CodegenResult<()> { lower_insn_to_regs(ctx, ir_inst, &self.flags, &self.x64_flags, &self.triple) } fn lower_branch_group<C: LowerCtx<I = Inst>>( &self, ctx: &mut C, branches: &[IRInst], targets: &[MachLabel], ) -> CodegenResult<()> { // A block should end with at most two branches. The first may be a // conditional branch; a conditional branch can be followed only by an // unconditional branch or fallthrough. Otherwise, if only one branch, // it may be an unconditional branch, a fallthrough, a return, or a // trap. These conditions are verified by `is_ebb_basic()` during the // verifier pass. assert!(branches.len() <= 2); if branches.len() == 2 { // Must be a conditional branch followed by an unconditional branch. 
let op0 = ctx.data(branches[0]).opcode(); let op1 = ctx.data(branches[1]).opcode(); trace!( "lowering two-branch group: opcodes are {:?} and {:?}", op0, op1 ); assert!(op1 == Opcode::Jump || op1 == Opcode::Fallthrough); let taken = targets[0]; // not_taken target is the target of the second branch, even if it is a Fallthrough // instruction: because we reorder blocks while we lower, the fallthrough in the new // order is not (necessarily) the same as the fallthrough in CLIF. So we use the // explicitly-provided target. let not_taken = targets[1]; match op0 { Opcode::Brz | Opcode::Brnz => { let flag_input = InsnInput { insn: branches[0], input: 0, }; let src_ty = ctx.input_ty(branches[0], 0); if let Some(icmp) = matches_input(ctx, flag_input, Opcode::Icmp) { let cond_code = ctx.data(icmp).cond_code().unwrap(); let cond_code = emit_cmp(ctx, icmp, cond_code); let cond_code = if op0 == Opcode::Brz { cond_code.inverse() } else { cond_code }; let cc = CC::from_intcc(cond_code); ctx.emit(Inst::jmp_cond(cc, taken, not_taken)); } else if let Some(fcmp) = matches_input(ctx, flag_input, Opcode::Fcmp) { let cond_code = ctx.data(fcmp).fp_cond_code().unwrap(); let cond_code = if op0 == Opcode::Brz { cond_code.inverse() } else { cond_code }; match emit_fcmp(ctx, fcmp, cond_code, FcmpSpec::Normal) { FcmpCondResult::Condition(cc) => { ctx.emit(Inst::jmp_cond(cc, taken, not_taken)); } FcmpCondResult::AndConditions(cc1, cc2) => { ctx.emit(Inst::jmp_if(cc1.invert(), not_taken)); ctx.emit(Inst::jmp_cond(cc2.invert(), not_taken, taken)); } FcmpCondResult::OrConditions(cc1, cc2) => { ctx.emit(Inst::jmp_if(cc1, taken)); ctx.emit(Inst::jmp_cond(cc2, taken, not_taken)); } FcmpCondResult::InvertedEqualOrConditions(_, _) => unreachable!(), } } else if src_ty == types::I128 { let src = put_input_in_regs( ctx, InsnInput { insn: branches[0], input: 0, }, ); let (half_cc, comb_op) = match op0 { Opcode::Brz => (CC::Z, AluRmiROpcode::And8), Opcode::Brnz => (CC::NZ, AluRmiROpcode::Or8), _ => 
unreachable!(), }; let tmp1 = ctx.alloc_tmp(types::I64).only_reg().unwrap(); let tmp2 = ctx.alloc_tmp(types::I64).only_reg().unwrap(); ctx.emit(Inst::cmp_rmi_r( OperandSize::Size64, RegMemImm::imm(0), src.regs()[0], )); ctx.emit(Inst::setcc(half_cc, tmp1)); ctx.emit(Inst::cmp_rmi_r( OperandSize::Size64, RegMemImm::imm(0), src.regs()[1], )); ctx.emit(Inst::setcc(half_cc, tmp2)); ctx.emit(Inst::alu_rmi_r( OperandSize::Size32, comb_op, RegMemImm::reg(tmp1.to_reg()), tmp2, )); ctx.emit(Inst::jmp_cond(CC::NZ, taken, not_taken)); } else if is_int_or_ref_ty(src_ty) || is_bool_ty(src_ty) { let src = put_input_in_reg( ctx, InsnInput { insn: branches[0], input: 0, }, ); let cc = match op0 { Opcode::Brz => CC::Z, Opcode::Brnz => CC::NZ, _ => unreachable!(), }; // See case for `Opcode::Select` above re: testing the // boolean input. let test_input = if src_ty == types::B1 { // test src, 1 RegMemImm::imm(1) } else { assert!(!is_bool_ty(src_ty)); // test src, src RegMemImm::reg(src) }; ctx.emit(Inst::test_rmi_r( OperandSize::from_ty(src_ty), test_input, src, )); ctx.emit(Inst::jmp_cond(cc, taken, not_taken)); } else { unimplemented!("brz/brnz with non-int type {:?}", src_ty); } } Opcode::BrIcmp => { let src_ty = ctx.input_ty(branches[0], 0); if is_int_or_ref_ty(src_ty) || is_bool_ty(src_ty) { let lhs = put_input_in_reg( ctx, InsnInput { insn: branches[0], input: 0, }, ); let rhs = input_to_reg_mem_imm( ctx, InsnInput { insn: branches[0], input: 1, }, ); let cc = CC::from_intcc(ctx.data(branches[0]).cond_code().unwrap()); // Cranelift's icmp semantics want to compare lhs - rhs, while Intel gives // us dst - src at the machine instruction level, so invert operands. 
ctx.emit(Inst::cmp_rmi_r(OperandSize::from_ty(src_ty), rhs, lhs)); ctx.emit(Inst::jmp_cond(cc, taken, not_taken)); } else { unimplemented!("bricmp with non-int type {:?}", src_ty); } } Opcode::Brif => { let flag_input = InsnInput { insn: branches[0], input: 0, }; if let Some(ifcmp) = matches_input(ctx, flag_input, Opcode::Ifcmp) { let cond_code = ctx.data(branches[0]).cond_code().unwrap(); let cond_code = emit_cmp(ctx, ifcmp, cond_code); let cc = CC::from_intcc(cond_code); ctx.emit(Inst::jmp_cond(cc, taken, not_taken)); } else if let Some(ifcmp_sp) = matches_input(ctx, flag_input, Opcode::IfcmpSp) { let operand = put_input_in_reg( ctx, InsnInput { insn: ifcmp_sp, input: 0, }, ); let ty = ctx.input_ty(ifcmp_sp, 0); ctx.emit(Inst::cmp_rmi_r( OperandSize::from_ty(ty), RegMemImm::reg(regs::rsp()), operand, )); let cond_code = ctx.data(branches[0]).cond_code().unwrap(); let cc = CC::from_intcc(cond_code); ctx.emit(Inst::jmp_cond(cc, taken, not_taken)); } else { // Should be disallowed by flags checks in verifier. unimplemented!("Brif with non-ifcmp input"); } } Opcode::Brff => { let flag_input = InsnInput { insn: branches[0], input: 0, }; if let Some(ffcmp) = matches_input(ctx, flag_input, Opcode::Ffcmp) { let cond_code = ctx.data(branches[0]).fp_cond_code().unwrap(); match emit_fcmp(ctx, ffcmp, cond_code, FcmpSpec::Normal) { FcmpCondResult::Condition(cc) => { ctx.emit(Inst::jmp_cond(cc, taken, not_taken)); } FcmpCondResult::AndConditions(cc1, cc2) => { ctx.emit(Inst::jmp_if(cc1.invert(), not_taken)); ctx.emit(Inst::jmp_cond(cc2.invert(), not_taken, taken)); } FcmpCondResult::OrConditions(cc1, cc2) => { ctx.emit(Inst::jmp_if(cc1, taken)); ctx.emit(Inst::jmp_cond(cc2, taken, not_taken)); } FcmpCondResult::InvertedEqualOrConditions(_, _) => unreachable!(), } } else { // Should be disallowed by flags checks in verifier. 
unimplemented!("Brff with input not from ffcmp"); } } _ => panic!("unexpected branch opcode: {:?}", op0), } } else { assert_eq!(branches.len(), 1); // Must be an unconditional branch or trap. let op = ctx.data(branches[0]).opcode(); match op { Opcode::Jump | Opcode::Fallthrough => { ctx.emit(Inst::jmp_known(targets[0])); } Opcode::BrTable => { let jt_size = targets.len() - 1; assert!(jt_size <= u32::max_value() as usize); let jt_size = jt_size as u32; let idx = extend_input_to_reg( ctx, InsnInput { insn: branches[0], input: 0, }, ExtSpec::ZeroExtendTo32, ); // Bounds-check (compute flags from idx - jt_size) and branch to default. ctx.emit(Inst::cmp_rmi_r( OperandSize::Size32, RegMemImm::imm(jt_size), idx, )); // Emit the compound instruction that does: // // lea $jt, %rA // movsbl [%rA, %rIndex, 2], %rB // add %rB, %rA // j *%rA // [jt entries] // // This must be *one* instruction in the vcode because we cannot allow regalloc // to insert any spills/fills in the middle of the sequence; otherwise, the // lea PC-rel offset to the jumptable would be incorrect. (The alternative // is to introduce a relocation pass for inlined jumptables, which is much // worse.) // This temporary is used as a signed integer of 64-bits (to hold addresses). let tmp1 = ctx.alloc_tmp(types::I64).only_reg().unwrap(); // This temporary is used as a signed integer of 32-bits (for the wasm-table // index) and then 64-bits (address addend). The small lie about the I64 type // is benign, since the temporary is dead after this instruction (and its // Cranelift type is thus unused). 
let tmp2 = ctx.alloc_tmp(types::I64).only_reg().unwrap(); let targets_for_term: Vec<MachLabel> = targets.to_vec(); let default_target = targets[0]; let jt_targets: Vec<MachLabel> = targets.iter().skip(1).cloned().collect(); ctx.emit(Inst::JmpTableSeq { idx, tmp1, tmp2, default_target, targets: jt_targets, targets_for_term, }); } _ => panic!("Unknown branch type {:?}", op), } } Ok(()) } fn maybe_pinned_reg(&self) -> Option<Reg> { Some(regs::pinned_reg()) } }
43.882532
135
0.464117
9b3874aa6341b141411ec99e599985cd54918c3a
500
use super::pattern::Pattern; use crate::Cell; pub struct Acorn {} impl Acorn { pub fn new() -> Pattern { let mut pattern = Pattern::rect(11, 7); pattern.set_cell(2, 2, Cell::Alive); pattern.set_cell(3, 2, Cell::Alive); pattern.set_cell(4, 2, Cell::Alive); pattern.set_cell(7, 2, Cell::Alive); pattern.set_cell(8, 2, Cell::Alive); pattern.set_cell(5, 3, Cell::Alive); pattern.set_cell(7, 4, Cell::Alive); pattern } }
23.809524
47
0.576
eb785e9a0920deca905a77b2a81cd05f3d1cb298
885
#encoding iso8859-1 ; ; Launcher: Esperanto 'Afrikaans' 'Afrikansa' 'Arabic' 'Araba' 'Chinese' 'Cina' 'Chinese (simplified)' 'Cina (simplified)' 'Chinese (traditional)' 'Cina (traditional)' 'Croatian' 'Kroata' ; 'Czech' 'Czech' 'Danish' 'Danska' 'Dutch' 'Nederlanda' 'English (US)' 'Angla (US)' 'English (UK)' 'Angla (UK)' ; 'Esperanto' 'Esperanto' 'Finnish' 'Finna' 'French' 'Franca' 'German' 'Germana' 'Greek' 'Greka' 'Italian' 'Itala' 'Latin' 'Latina' 'Japanese' 'Japana' 'Norwegian' 'Norvega' 'Portuguese' 'Portugala' 'Portuguese (Brazilian)' 'Portugala (brazila)' 'Russian' 'Rusa' 'Spanish' 'Hispana' 'Swedish' 'Sveda' 'Turkish' 'Turka'
27.65625
52
0.498305
0a9c5cc5e88d91510d405fee9d53f8e61ba78bb5
19,735
//! Route Origin Authorizations. //! //! For details, see RFC 6482. use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; use bcder::{decode, encode}; use bcder::{Captured, Mode, OctetString, Oid, Tag, xerr}; use bcder::encode::{PrimitiveContent, Values}; use super::oid; use super::cert::{Cert, ResourceCert}; use super::crypto::{Signer, SigningError}; use super::resources::{Addr, AddressFamily, AsId, IpResources, Prefix}; use super::sigobj::{SignedObject, SignedObjectBuilder}; use super::x509::ValidationError; //------------ Roa ----------------------------------------------------------- #[derive(Clone, Debug)] pub struct Roa { signed: SignedObject, content: RouteOriginAttestation, } impl Roa { pub fn decode<S: decode::Source>( source: S, strict: bool ) -> Result<Self, S::Err> { let signed = SignedObject::decode(source, strict)?; if signed.content_type().ne(&oid::ROUTE_ORIGIN_AUTHZ) { return Err(decode::Malformed.into()) } let content = signed.decode_content(|cons| { RouteOriginAttestation::take_from(cons) })?; Ok(Roa { signed, content }) } pub fn process<F>( mut self, issuer: &ResourceCert, strict: bool, check_crl: F ) -> Result<(ResourceCert, RouteOriginAttestation), ValidationError> where F: FnOnce(&Cert) -> Result<(), ValidationError> { let cert = self.signed.validate(issuer, strict)?; check_crl(cert.as_ref())?; self.content.validate(&cert)?; Ok((cert, self.content)) } /// Returns a value encoder for a reference to a ROA. pub fn encode_ref(&self) -> impl encode::Values + '_ { self.signed.encode_ref() } /// Returns a DER encoded Captured for this ROA. pub fn to_captured(&self) -> Captured { self.encode_ref().to_captured(Mode::Der) } /// Returns a reference to the EE certificate of this ROA. 
pub fn cert(&self) -> &Cert { self.signed.cert() } } //--- Deserialize and Serialize #[cfg(feature = "serde")] impl serde::Serialize for Roa { fn serialize<S: serde::Serializer>( &self, serializer: S ) -> Result<S::Ok, S::Error> { let bytes = self.to_captured().into_bytes(); let b64 = base64::encode(&bytes); b64.serialize(serializer) } } #[cfg(feature = "serde")] impl<'de> serde::Deserialize<'de> for Roa { fn deserialize<D: serde::Deserializer<'de>>( deserializer: D ) -> Result<Self, D::Error> { use serde::de; let string = String::deserialize(deserializer)?; let decoded = base64::decode(&string).map_err(de::Error::custom)?; let bytes = bytes::Bytes::from(decoded); Roa::decode(bytes, true).map_err(de::Error::custom) } } //------------ RouteOriginAttestation ---------------------------------------- #[derive(Clone, Debug)] pub struct RouteOriginAttestation { as_id: AsId, v4_addrs: RoaIpAddresses, v6_addrs: RoaIpAddresses, } impl RouteOriginAttestation { pub fn as_id(&self) -> AsId { self.as_id } pub fn v4_addrs(&self) -> &RoaIpAddresses { &self.v4_addrs } pub fn v6_addrs(&self) -> &RoaIpAddresses { &self.v6_addrs } pub fn iter( &self ) -> impl Iterator<Item=FriendlyRoaIpAddress> + '_ { self.v4_addrs.iter().map(|addr| FriendlyRoaIpAddress::new(addr, true)) .chain( self.v6_addrs.iter() .map(|addr| FriendlyRoaIpAddress::new(addr, false)) ) } } impl RouteOriginAttestation { fn take_from<S: decode::Source>( cons: &mut decode::Constructed<S> ) -> Result<Self, S::Err> { cons.take_sequence(|cons| { // version [0] EXPLICIT INTEGER DEFAULT 0 cons.take_opt_constructed_if(Tag::CTX_0, |c| c.skip_u8_if(0))?; let as_id = AsId::take_from(cons)?; let mut v4 = None; let mut v6 = None; cons.take_sequence(|cons| { while let Some(()) = cons.take_opt_sequence(|cons| { match AddressFamily::take_from(cons)? 
{ AddressFamily::Ipv4 => { if v4.is_some() { xerr!(return Err(decode::Malformed.into())); } v4 = Some(RoaIpAddresses::take_from(cons)?); } AddressFamily::Ipv6 => { if v6.is_some() { xerr!(return Err(decode::Malformed.into())); } v6 = Some(RoaIpAddresses::take_from(cons)?); } } Ok(()) })? { } Ok(()) })?; Ok(RouteOriginAttestation { as_id, v4_addrs: match v4 { Some(addrs) => addrs, None => RoaIpAddresses(Captured::empty(Mode::Der)) }, v6_addrs: match v6 { Some(addrs) => addrs, None => RoaIpAddresses(Captured::empty(Mode::Der)) }, }) }) } fn validate( &mut self, cert: &ResourceCert ) -> Result<(), ValidationError> { if !self.v4_addrs.is_empty() { let blocks = cert.v4_resources(); if blocks.is_empty() { return Err(ValidationError) } for addr in self.v4_addrs.iter() { if !blocks.contains_roa(&addr) { return Err(ValidationError) } } } if !self.v6_addrs.is_empty() { let blocks = cert.v6_resources(); if blocks.is_empty() { return Err(ValidationError) } for addr in self.v6_addrs.iter() { if !blocks.contains_roa(&addr) { return Err(ValidationError) } } } Ok(()) } pub fn encode_ref(&self) -> impl encode::Values + '_ { encode::sequence(( // version is DEFAULT self.as_id.encode(), encode::sequence(( self.v4_addrs.encode_ref_family([0x00, 0x01]), self.v6_addrs.encode_ref_family([0x00, 0x02]), )) )) } } //------------ RoaIpAddresses ------------------------------------------------ #[derive(Clone, Debug)] pub struct RoaIpAddresses(Captured); impl RoaIpAddresses { fn take_from<S: decode::Source>( cons: &mut decode::Constructed<S> ) -> Result<Self, S::Err> { cons.take_sequence(|cons| { cons.capture(|cons| { while let Some(()) = RoaIpAddress::skip_opt_in(cons)? 
{ } Ok(()) }) }).map(RoaIpAddresses) } pub fn is_empty(&self) -> bool { self.0.is_empty() } pub fn iter(&self) -> RoaIpAddressIter { RoaIpAddressIter(self.0.as_ref()) } fn encode_ref_family( &self, family: [u8; 2] ) -> Option<impl encode::Values + '_> { if self.0.is_empty() { None } else { Some(encode::sequence(( OctetString::encode_slice(family), &self.0 ))) } } } //------------ RoaIpAddressIter ---------------------------------------------- #[derive(Clone, Debug)] pub struct RoaIpAddressIter<'a>(&'a [u8]); impl<'a> Iterator for RoaIpAddressIter<'a> { type Item = RoaIpAddress; fn next(&mut self) -> Option<Self::Item> { if self.0.is_empty() { None } else { Mode::Der.decode(&mut self.0, |cons| { RoaIpAddress::take_opt_from(cons) }).unwrap() } } } //------------ RoaIpAddress -------------------------------------------------- #[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)] pub struct RoaIpAddress { prefix: Prefix, max_length: Option<u8> } impl RoaIpAddress { pub fn new(prefix: Prefix, max_length: Option<u8>) -> Self { RoaIpAddress { prefix, max_length } } pub fn new_addr(addr: IpAddr, len: u8, max_len: Option<u8>) -> Self { RoaIpAddress::new(Prefix::new(addr, len), max_len) } pub fn prefix(self) -> Prefix { self.prefix } pub fn range(self) -> (Addr, Addr) { self.prefix.range() } } impl RoaIpAddress { // Section 3 of RFC 6482 defines ROAIPAddress as // // ```txt // ROAIPAddress ::= SEQUENCE { // address IPAddress, // maxLength INTEGER OPTIONAL } // // IPAddress ::= BIT STRING // ``` // // The address is the same as in section 2.1.1 of RFC 3779, that is, it // is a bit string with all the bits of the prefix. 
fn take_opt_from<S: decode::Source>( cons: &mut decode::Constructed<S> ) -> Result<Option<Self>, S::Err> { cons.take_opt_sequence(|cons| { Ok(RoaIpAddress { prefix: Prefix::take_from(cons)?, max_length: cons.take_opt_u8()?, }) }) } fn skip_opt_in<S: decode::Source>( cons: &mut decode::Constructed<S> ) -> Result<Option<()>, S::Err> { Self::take_opt_from(cons).map(|res| res.map(|_| ())) } fn encode(&self) -> impl encode::Values { encode::sequence(( self.prefix.encode(), self.max_length.map(|v| v.encode()) )) } } //------------ FriendlyRoaIpAddress ------------------------------------------ #[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)] pub struct FriendlyRoaIpAddress { addr: RoaIpAddress, v4: bool } impl FriendlyRoaIpAddress { fn new(addr: RoaIpAddress, v4: bool) -> Self { FriendlyRoaIpAddress { addr, v4 } } pub fn prefix(self) -> Prefix { self.addr.prefix } pub fn is_v4(self) -> bool { self.v4 } pub fn address(self) -> IpAddr { if self.v4 { self.addr.prefix.to_v4().into() } else { self.addr.prefix.to_v6().into() } } pub fn address_length(self) -> u8 { self.addr.prefix.addr_len() } pub fn max_length(self) -> u8 { self.addr.max_length.unwrap_or_else(|| self.addr.prefix.addr_len() ) } } //------------ RoaBuilder ---------------------------------------------------- pub struct RoaBuilder { as_id: AsId, v4: RoaIpAddressesBuilder, v6: RoaIpAddressesBuilder, } impl RoaBuilder { pub fn new(as_id: AsId) -> Self { Self::with_addresses( as_id, RoaIpAddressesBuilder::new(), RoaIpAddressesBuilder::new(), ) } pub fn with_addresses( as_id: AsId, v4: RoaIpAddressesBuilder, v6: RoaIpAddressesBuilder ) -> Self { Self { as_id, v4, v6 } } pub fn as_id(&self) -> AsId { self.as_id } pub fn set_as_id(&mut self, as_id: AsId) { self.as_id = as_id } pub fn v4(&self) -> &RoaIpAddressesBuilder { &self.v4 } pub fn v4_mut(&mut self) -> &mut RoaIpAddressesBuilder { &mut self.v4 } pub fn v6(&self) -> &RoaIpAddressesBuilder { &self.v6 } pub fn v6_mut(&mut self) -> &mut RoaIpAddressesBuilder { 
&mut self.v6 } pub fn push_addr( &mut self, addr: IpAddr, len: u8, max_len: Option<u8> ) { match addr { IpAddr::V4(addr) => self.push_v4_addr(addr, len, max_len), IpAddr::V6(addr) => self.push_v6_addr(addr, len, max_len) } } pub fn push_v4(&mut self, addr: RoaIpAddress) { self.v4_mut().push(addr) } pub fn push_v4_addr( &mut self, addr: Ipv4Addr, len: u8, max_len: Option<u8> ) { self.v4_mut().push_addr(IpAddr::V4(addr), len, max_len) } pub fn extend_v4_from_slice(&mut self, addrs: &[RoaIpAddress]) { self.v4_mut().extend_from_slice(addrs) } pub fn push_v6(&mut self, addr: RoaIpAddress) { self.v6_mut().push(addr) } pub fn push_v6_addr( &mut self, addr: Ipv6Addr, len: u8, max_len: Option<u8> ) { self.v6_mut().push_addr(IpAddr::V6(addr), len, max_len) } pub fn extend_v6_from_slice(&mut self, addrs: &[RoaIpAddress]) { self.v6_mut().extend_from_slice(addrs) } pub fn to_attestation(&self) -> RouteOriginAttestation { RouteOriginAttestation { as_id: self.as_id, v4_addrs: self.v4.to_addresses(), v6_addrs: self.v6.to_addresses(), } } /// Finalizes the builder into a ROA. /// /// # Panic /// /// This method will panic if both the IPv4 and IPv6 addresses are empty /// as that is not allowed and would lead to a malformed ROA. pub fn finalize<S: Signer>( self, mut sigobj: SignedObjectBuilder, signer: &S, issuer_key: &S::KeyId, ) -> Result<Roa, SigningError<S::Error>> { let content = self.to_attestation(); let v4 = self.v4.to_resources(); let v6 = self.v6.to_resources(); // There must be some resources in order to make a valid ROA. 
assert!(v4.is_present() || v6.is_present()); sigobj.set_v4_resources(v4); sigobj.set_v6_resources(v6); let signed = sigobj.finalize( Oid(oid::ROUTE_ORIGIN_AUTHZ.0.into()), content.encode_ref().to_captured(Mode::Der).into_bytes(), signer, issuer_key, )?; Ok(Roa { signed, content }) } } //------------ RoaIpAddressesBuilder ----------------------------------------- #[derive(Clone, Debug)] pub struct RoaIpAddressesBuilder { addrs: Vec<RoaIpAddress>, } impl RoaIpAddressesBuilder { pub fn new() -> Self { RoaIpAddressesBuilder { addrs: Vec::new() } } pub fn push(&mut self, addr: RoaIpAddress) { self.addrs.push(addr) } pub fn push_addr(&mut self, addr: IpAddr, len: u8, max_len: Option<u8>) { self.push(RoaIpAddress::new_addr(addr, len, max_len)) } pub fn extend_from_slice(&mut self, addrs: &[RoaIpAddress]) { self.addrs.extend_from_slice(addrs) } pub fn to_addresses(&self) -> RoaIpAddresses { RoaIpAddresses( if self.addrs.is_empty() { Captured::empty(Mode::Der) } else { Captured::from_values(Mode::Der, self.encode_ref()) } ) } pub fn to_resources(&self) -> IpResources { IpResources::blocks( self.addrs.iter().map(|addr| addr.prefix.into()).collect() ) } pub fn encode_ref(&self) -> impl encode::Values + '_ { encode::sequence( encode::slice(self.addrs.as_slice(), |v: &RoaIpAddress| v.encode()) ) } } impl Default for RoaIpAddressesBuilder { fn default() -> Self { Self::new() } } impl Extend<RoaIpAddress> for RoaIpAddressesBuilder { fn extend<T>(&mut self, iter: T) where T: IntoIterator<Item=RoaIpAddress> { self.addrs.extend(iter) } } //============ Tests ========================================================= #[cfg(test)] mod test { use super::*; #[test] fn decode_roa() { assert!( Roa::decode( include_bytes!("../../test-data/example-ripe.roa").as_ref(), false ).is_ok() ) } } #[cfg(all(test, feature="softkeys"))] mod signer_test { use std::str::FromStr; use bcder::encode::Values; use crate::uri; use crate::repository::cert::{KeyUsage, Overclaim, TbsCert}; use 
crate::repository::crypto::{PublicKeyFormat, Signer}; use crate::repository::crypto::softsigner::OpenSslSigner; use crate::repository::resources::{AsId, Prefix}; use crate::repository::tal::TalInfo; use crate::repository::x509::Validity; use super::*; fn make_roa() -> Roa { let mut signer = OpenSslSigner::new(); let key = signer.create_key(PublicKeyFormat::Rsa).unwrap(); let pubkey = signer.get_key_info(&key).unwrap(); let uri = uri::Rsync::from_str("rsync://example.com/m/p").unwrap(); let mut cert = TbsCert::new( 12u64.into(), pubkey.to_subject_name(), Validity::from_secs(86400), None, pubkey, KeyUsage::Ca, Overclaim::Trim ); cert.set_basic_ca(Some(true)); cert.set_ca_repository(Some(uri.clone())); cert.set_rpki_manifest(Some(uri.clone())); cert.build_v4_resource_blocks(|b| b.push(Prefix::new(0, 0))); cert.build_v6_resource_blocks(|b| b.push(Prefix::new(0, 0))); cert.build_as_resource_blocks(|b| b.push((AsId::MIN, AsId::MAX))); let cert = cert.into_cert(&signer, &key).unwrap(); let mut roa = RoaBuilder::new(64496.into()); roa.push_v4_addr(Ipv4Addr::new(192, 0, 2, 0), 24, None); let roa = roa.finalize( SignedObjectBuilder::new( 12u64.into(), Validity::from_secs(86400), uri.clone(), uri.clone(), uri.clone() ), &signer, &key ).unwrap(); let roa = roa.encode_ref().to_captured(Mode::Der); let roa = Roa::decode(roa.as_slice(), true).unwrap(); let cert = cert.validate_ta( TalInfo::from_name("foo".into()).into_arc(), true ).unwrap(); roa.clone().process(&cert, true, |_| Ok(())).unwrap(); roa } #[test] fn encode_roa() { make_roa(); } #[test] #[cfg(feature = "serde")] fn serde_roa() { let roa = make_roa(); let serialized = serde_json::to_string(&roa).unwrap(); let deser_roa: Roa = serde_json::from_str(&serialized).unwrap(); assert_eq!( roa.to_captured().into_bytes(), deser_roa.to_captured().into_bytes() ) } } //============ Specification Documentation =================================== /// ROA Specification. /// /// This is a documentation-only module. 
It summarizes the specification for /// ROAs, how they are parsed and constructed. /// /// A Route Origin Authorization (ROA) is a [signed object] that assigns a /// number of route prefixes to an AS number. It is specified in [RFC 6482]. /// /// The content of a ROA signed object is of type `RouteOriginAttestation` /// which is defined as follows: /// /// ```txt /// RouteOriginAttestation ::= SEQUENCE { /// version [0] INTEGER DEFAULT 0, /// asID ASID, /// ipAddrBlocks SEQUENCE (SIZE(1..MAX)) OF ROAIPAddressFamily /// } /// /// ASID ::= INTEGER /// /// ROAIPAddressFamily ::= SEQUENCE { /// addressFamily OCTET STRING (SIZE (2..3)), /// addresses SEQUENCE (SIZE (1..MAX)) OF ROAIPAddress /// } /// /// ROAIPAddress ::= SEQUENCE { /// address IPAddress, /// maxLength INTEGER OPTIONAL /// } /// /// IPAddress ::= BIT STRING /// ``` /// /// The _version_ must be 0. The _addressFamily_ is identical to the field /// used in RPKI certificate IP resources, i.e, `"\0\x01"` for IPv4 and /// `"\0\x02"` for IPv6. /// /// [signed object]: ../../sigobj/spec/index.html /// [RFC 6482]: https://tools.ietf.org/html/rfc6482 pub mod spec { }
27.717697
79
0.528604
f54e3fdd3cdd8eba7dfe3572ff9d590b4a886e85
15,838
//! Driver for the Taos TSL2561 light sensor. //! //! <http://www.digikey.com/product-detail/en/ams-taos-usa-inc/TSL2561FN/TSL2561-FNCT-ND/3095298> //! //! > The TSL2560 and TSL2561 are light-to-digital converters that transform //! > light intensity to a digital signal output capable of direct I2C //! > interface. Each device combines one broadband photodiode (visible plus //! > infrared) and one infrared-responding photodiode on a single CMOS //! > integrated circuit capable of providing a near-photopic response over an //! > effective 20-bit dynamic range (16-bit resolution). Two integrating ADCs //! > convert the photodiode currents to a digital output that represents the //! > irradiance measured on each channel. This digital output can be input to a //! > microprocessor where illuminance (ambient light level) in lux is derived //! > using an empirical formula to approximate the human eye response. use core::cell::Cell; use kernel::common::cells::{OptionalCell, TakeCell}; use kernel::hil::gpio; use kernel::hil::i2c; use kernel::{AppId, Callback, Driver, ReturnCode}; /// Syscall driver number. 
use crate::driver; pub const DRIVER_NUM: usize = driver::NUM::Tsl2561 as usize; // Buffer to use for I2C messages pub static mut BUFFER: [u8; 4] = [0; 4]; /// Command register defines const COMMAND_REG: u8 = 0x80; const WORD_PROTOCOL: u8 = 0x20; /// Control_Reg defines const POWER_ON: u8 = 0x03; const POWER_OFF: u8 = 0x00; /// Timing_Reg defines const INTEGRATE_TIME_101_MS: u8 = 0x01; const LOW_GAIN_MODE: u8 = 0x00; // Interrupt_Control_Reg defines const INTERRUPT_CONTROL_LEVEL: u8 = 0x10; const INTERRUPT_ON_ADC_DONE: u8 = 0x0; // ADC counts to Lux value conversion copied from TSL2561 manual // −−−−------------------------------ // Value scaling factors // −−−−−−−−−−−−−−−------------------- const LUX_SCALE: u16 = 14; // scale by 2^14 const RATIO_SCALE: u16 = 9; // scale ratio by 2^9 // −−−−−−−−−−−−−−−−−−−−−−−----------- // Integration time scaling factors // −−−−−−−−−−−−−−−−−−−−−−−−−−−−------ const CH_SCALE: u16 = 10; // scale channel values by 2^10 #[allow(dead_code)] const CHSCALE_TINT0: u16 = 0x7517; // 322/11 * 2^CH_SCALE const CHSCALE_TINT1: u16 = 0x0fe7; // 322/81 * 2^CH_SCALE // −−−−−−−−−−−−−−−−−−−−−−−−−−−−------ // T, FN, and CL Package coefficients // −−−−−−−−−−−−−−−−−−−−−−−−−−−−------ // For Ch1/Ch0=0.00 to 0.50 // Lux/Ch0=0.0304−0.062*((Ch1/Ch0)^1.4) // piecewise approximation // For Ch1/Ch0=0.00 to 0.125: // Lux/Ch0=0.0304−0.0272*(Ch1/Ch0) // // For Ch1/Ch0=0.125 to 0.250: // Lux/Ch0=0.0325−0.0440*(Ch1/Ch0) // // For Ch1/Ch0=0.250 to 0.375: // Lux/Ch0=0.0351−0.0544*(Ch1/Ch0) // // For Ch1/Ch0=0.375 to 0.50: // Lux/Ch0=0.0381−0.0624*(Ch1/Ch0) // // For Ch1/Ch0=0.50 to 0.61: // Lux/Ch0=0.0224−0.031*(Ch1/Ch0) // // For Ch1/Ch0=0.61 to 0.80: // Lux/Ch0=0.0128−0.0153*(Ch1/Ch0) // // For Ch1/Ch0=0.80 to 1.30: // Lux/Ch0=0.00146−0.00112*(Ch1/Ch0) // // For Ch1/Ch0>1.3: // Lux/Ch0=0 // −−−−−−−−−−−−−−−−−−−−−−−−−−−−------ const K1T: usize = 0x0040; // 0.125 * 2^RATIO_SCALE const B1T: usize = 0x01f2; // 0.0304 * 2^LUX_SCALE const M1T: usize = 0x01be; // 0.0272 * 
2^LUX_SCALE const K2T: usize = 0x0080; // 0.250 * 2^RATIO_SCALE const B2T: usize = 0x0214; // 0.0325 * 2^LUX_SCALE const M2T: usize = 0x02d1; // 0.0440 * 2^LUX_SCALE const K3T: usize = 0x00c0; // 0.375 * 2^RATIO_SCALE const B3T: usize = 0x023f; // 0.0351 * 2^LUX_SCALE const M3T: usize = 0x037b; // 0.0544 * 2^LUX_SCALE const K4T: usize = 0x0100; // 0.50 * 2^RATIO_SCALE const B4T: usize = 0x0270; // 0.0381 * 2^LUX_SCALE const M4T: usize = 0x03fe; // 0.0624 * 2^LUX_SCALE const K5T: usize = 0x0138; // 0.61 * 2^RATIO_SCALE const B5T: usize = 0x016f; // 0.0224 * 2^LUX_SCALE const M5T: usize = 0x01fc; // 0.0310 * 2^LUX_SCALE const K6T: usize = 0x019a; // 0.80 * 2^RATIO_SCALE const B6T: usize = 0x00d2; // 0.0128 * 2^LUX_SCALE const M6T: usize = 0x00fb; // 0.0153 * 2^LUX_SCALE const K7T: usize = 0x029a; // 1.3 * 2^RATIO_SCALE const B7T: usize = 0x0018; // 0.00146 * 2^LUX_SCALE const M7T: usize = 0x0012; // 0.00112 * 2^LUX_SCALE const K8T: usize = 0x029a; // 1.3 * 2^RATIO_SCALE const B8T: usize = 0x0000; // 0.000 * 2^LUX_SCALE const M8T: usize = 0x0000; // 0.000 * 2^LUX_SCALE // −−−−−−−−−−−−−−−−−−−−−−−−−−−−------ // CS package coefficients // −−−−−−−−−−−−−−−−−−−−−−−−−−−−------ // For 0 <= Ch1/Ch0 <= 0.52 // Lux/Ch0 = 0.0315−0.0593*((Ch1/Ch0)^1.4) // piecewise approximation // For 0 <= Ch1/Ch0 <= 0.13 // Lux/Ch0 = 0.0315−0.0262*(Ch1/Ch0) // For 0.13 <= Ch1/Ch0 <= 0.26 // Lux/Ch0 = 0.0337−0.0430*(Ch1/Ch0) // For 0.26 <= Ch1/Ch0 <= 0.39 // Lux/Ch0 = 0.0363−0.0529*(Ch1/Ch0) // For 0.39 <= Ch1/Ch0 <= 0.52 // Lux/Ch0 = 0.0392−0.0605*(Ch1/Ch0) // For 0.52 < Ch1/Ch0 <= 0.65 // Lux/Ch0 = 0.0229−0.0291*(Ch1/Ch0) // For 0.65 < Ch1/Ch0 <= 0.80 // Lux/Ch0 = 0.00157−0.00180*(Ch1/Ch0) // For 0.80 < Ch1/Ch0 <= 1.30 // Lux/Ch0 = 0.00338−0.00260*(Ch1/Ch0) // For Ch1/Ch0 > 1.30 // Lux = 0 // −−−−−−−−−−−−−−−−−−−−−−−−−−−−------ // const K1C: usize = 0x0043; // 0.130 * 2^RATIO_SCALE // const B1C: usize = 0x0204; // 0.0315 * 2^LUX_SCALE // const M1C: usize = 0x01ad; // 0.0262 * 2^LUX_SCALE // 
const K2C: usize = 0x0085; // 0.260 * 2^RATIO_SCALE // const B2C: usize = 0x0228; // 0.0337 * 2^LUX_SCALE // const M2C: usize = 0x02c1; // 0.0430 * 2^LUX_SCALE // const K3C: usize = 0x00c8; // 0.390 * 2^RATIO_SCALE // const B3C: usize = 0x0253; // 0.0363 * 2^LUX_SCALE // const M3C: usize = 0x0363; // 0.0529 * 2^LUX_SCALE // const K4C: usize = 0x010a; // 0.520 * 2^RATIO_SCALE // const B4C: usize = 0x0282; // 0.0392 * 2^LUX_SCALE // const M4C: usize = 0x03df; // 0.0605 * 2^LUX_SCALE // const K5C: usize = 0x014d; // 0.65 * 2^RATIO_SCALE // const B5C: usize = 0x0177; // 0.0229 * 2^LUX_SCALE // const M5C: usize = 0x01dd; // 0.0291 * 2^LUX_SCALE // const K6C: usize = 0x019a; // 0.80 * 2^RATIO_SCALE // const B6C: usize = 0x0101; // 0.0157 * 2^LUX_SCALE // const M6C: usize = 0x0127; // 0.0180 * 2^LUX_SCALE // const K7C: usize = 0x029a; // 1.3 * 2^RATIO_SCALE // const B7C: usize = 0x0037; // 0.00338 * 2^LUX_SCALE // const M7C: usize = 0x002b; // 0.00260 * 2^LUX_SCALE // const K8C: usize = 0x029a; // 1.3 * 2^RATIO_SCALE // const B8C: usize = 0x0000; // 0.000 * 2^LUX_SCALE // const M8C: usize = 0x0000; // 0.000 * 2^LUX_SCALE #[allow(dead_code)] enum Registers { Control = 0x00, Timing = 0x01, ThresholdLowLow = 0x02, ThresholdLowHigh = 0x03, ThresholdHighLow = 0x04, ThresholdHighHigh = 0x05, Interrupt = 0x06, Id = 0x0a, Data0Low = 0x0c, Data0High = 0x0d, Data1Low = 0x0e, Data1High = 0x0f, } #[derive(Clone, Copy, PartialEq)] enum State { Idle, /// Read the Id register. SelectId, ReadingId, /// Process of taking a light measurement. TakeMeasurementTurnOn, TakeMeasurementConfigMeasurement, TakeMeasurementReset1, TakeMeasurementReset2, /// Read the ADC registers. ReadMeasurement1, ReadMeasurement2, ReadMeasurement3, /// Calculate light and call the callback with the value. 
GotMeasurement, /// Disable I2C and release buffer Done, } pub struct TSL2561<'a> { i2c: &'a dyn i2c::I2CDevice, interrupt_pin: &'a dyn gpio::InterruptPin<'a>, callback: OptionalCell<Callback>, state: Cell<State>, buffer: TakeCell<'static, [u8]>, } impl<'a> TSL2561<'a> { pub fn new( i2c: &'a dyn i2c::I2CDevice, interrupt_pin: &'a dyn gpio::InterruptPin<'a>, buffer: &'static mut [u8], ) -> TSL2561<'a> { // setup and return struct TSL2561 { i2c: i2c, interrupt_pin: interrupt_pin, callback: OptionalCell::empty(), state: Cell::new(State::Idle), buffer: TakeCell::new(buffer), } } pub fn read_id(&self) { self.buffer.take().map(|buffer| { // turn on i2c to send commands self.i2c.enable(); buffer[0] = Registers::Id as u8 | COMMAND_REG; // buffer[0] = Registers::Id as u8; self.i2c.write(buffer, 1); self.state.set(State::SelectId); }); } pub fn take_measurement(&self) { // Need pull up on interrupt pin self.interrupt_pin.make_input(); self.interrupt_pin .enable_interrupts(gpio::InterruptEdge::FallingEdge); self.buffer.take().map(|buf| { // Turn on i2c to send commands self.i2c.enable(); buf[0] = Registers::Control as u8 | COMMAND_REG; buf[1] = POWER_ON; self.i2c.write(buf, 2); self.state.set(State::TakeMeasurementTurnOn); }); } fn calculate_lux(&self, chan0: u16, chan1: u16) -> usize { // First, scale the channel values depending on the gain and integration // time. 16X, 402mS is nominal. Scale if integration time is NOT 402 msec. // let mut ch_scale = CHSCALE_TINT0 as usize; // 13.7ms let mut ch_scale = CHSCALE_TINT1 as usize; // 101ms // let mut ch_scale: usize = 1 << CH_SCALE; // Default // Scale if gain is NOT 16X ch_scale = ch_scale << 4; // scale 1X to 16X // scale the channel values let channel0 = (chan0 as usize * ch_scale) >> CH_SCALE; let channel1 = (chan1 as usize * ch_scale) >> CH_SCALE; // Find the ratio of the channel values (Channel1/Channel0). // Protect against divide by zero. 
let mut ratio1 = 0; if channel0 != 0 { ratio1 = (channel1 << (RATIO_SCALE + 1)) / channel0; } // round the ratio value let ratio = (ratio1 + 1) >> 1; // is ratio <= eachBreak ? let mut b = 0; let mut m = 0; // T, FN, and CL package if ratio <= K1T { b = B1T; m = M1T; } else if ratio <= K2T { b = B2T; m = M2T; } else if ratio <= K3T { b = B3T; m = M3T; } else if ratio <= K4T { b = B4T; m = M4T; } else if ratio <= K5T { b = B5T; m = M5T; } else if ratio <= K6T { b = B6T; m = M6T; } else if ratio <= K7T { b = B7T; m = M7T; } else if ratio > K8T { b = B8T; m = M8T; } // CS package // if ratio <= K1C { // b=B1C; m=M1C; // } else if ratio <= K2C { // b=B2C; m=M2C; // } else if ratio <= K3C { // b=B3C; m=M3C; // } else if ratio <= K4C { // b=B4C; m=M4C; // } else if ratio <= K5C { // b=B5C; m=M5C; // } else if ratio <= K6C { // b=B6C; m=M6C; // } else if ratio <= K7C { // b=B7C; m=M7C; // } else if ratio > K8C { // b=B8C; m=M8C; // } // Calculate actual lux value let mut val = ((channel0 * b) as isize) - ((channel1 * m) as isize); // Do not allow negative lux value if val < 0 { val = 0; } // round lsb (2^(LUX_SCALE−1)) // val += (1 << (LUX_SCALE−1)); val += 1 << (LUX_SCALE - 1); // strip off fractional portion and return lux let lux = val >> LUX_SCALE; lux as usize } } impl i2c::I2CClient for TSL2561<'_> { fn command_complete(&self, buffer: &'static mut [u8], _error: i2c::Error) { match self.state.get() { State::SelectId => { self.i2c.read(buffer, 1); self.state.set(State::ReadingId); } State::ReadingId => { self.buffer.replace(buffer); self.i2c.disable(); self.state.set(State::Idle); } State::TakeMeasurementTurnOn => { buffer[0] = Registers::Timing as u8 | COMMAND_REG; buffer[1] = INTEGRATE_TIME_101_MS | LOW_GAIN_MODE; self.i2c.write(buffer, 2); self.state.set(State::TakeMeasurementConfigMeasurement); } State::TakeMeasurementConfigMeasurement => { buffer[0] = Registers::Interrupt as u8 | COMMAND_REG; buffer[1] = INTERRUPT_CONTROL_LEVEL | INTERRUPT_ON_ADC_DONE; 
self.i2c.write(buffer, 2); self.state.set(State::TakeMeasurementReset1); } State::TakeMeasurementReset1 => { buffer[0] = Registers::Control as u8 | COMMAND_REG; buffer[1] = POWER_OFF; self.i2c.write(buffer, 2); self.state.set(State::TakeMeasurementReset2); } State::TakeMeasurementReset2 => { buffer[0] = Registers::Control as u8 | COMMAND_REG; buffer[1] = POWER_ON; self.i2c.write(buffer, 2); self.state.set(State::Done); } State::ReadMeasurement1 => { self.i2c.read(buffer, 2); self.state.set(State::ReadMeasurement2); } State::ReadMeasurement2 => { // Store the previous readings in the buffer where they // won't get overwritten. buffer[2] = buffer[0]; buffer[3] = buffer[1]; buffer[0] = Registers::Data0Low as u8 | COMMAND_REG | WORD_PROTOCOL; self.i2c.write(buffer, 2); self.state.set(State::ReadMeasurement3); } State::ReadMeasurement3 => { self.i2c.read(buffer, 2); self.state.set(State::GotMeasurement); } State::GotMeasurement => { let chan0 = ((buffer[1] as u16) << 8) | (buffer[0] as u16); let chan1 = ((buffer[3] as u16) << 8) | (buffer[2] as u16); let lux = self.calculate_lux(chan0, chan1); self.callback.map(|cb| cb.schedule(0, lux, 0)); buffer[0] = Registers::Control as u8 | COMMAND_REG; buffer[1] = POWER_OFF; self.i2c.write(buffer, 2); self.interrupt_pin.disable_interrupts(); self.state.set(State::Done); } State::Done => { self.buffer.replace(buffer); self.i2c.disable(); self.state.set(State::Idle); } _ => {} } } } impl gpio::Client for TSL2561<'_> { fn fired(&self) { self.buffer.take().map(|buffer| { // turn on i2c to send commands self.i2c.enable(); // Read the first of the ADC registers. 
buffer[0] = Registers::Data1Low as u8 | COMMAND_REG | WORD_PROTOCOL; self.i2c.write(buffer, 1); self.state.set(State::ReadMeasurement1); }); } } impl Driver for TSL2561<'_> { fn subscribe( &self, subscribe_num: usize, callback: Option<Callback>, _app_id: AppId, ) -> ReturnCode { match subscribe_num { // Set a callback 0 => { // Set callback function self.callback.insert(callback); ReturnCode::SUCCESS } // default _ => ReturnCode::ENOSUPPORT, } } fn command(&self, command_num: usize, _: usize, _: usize, _: AppId) -> ReturnCode { match command_num { 0 /* check if present */ => ReturnCode::SUCCESS, // Take a measurement 1 => { self.take_measurement(); ReturnCode::SUCCESS } // default _ => ReturnCode::ENOSUPPORT, } } }
33.626327
105
0.546976
d6aa86e335e161f0f432179361c83c4dfea71d5f
331
extern crate protoc_rust; use protoc_rust::Customize; fn main() { protoc_rust::run(protoc_rust::Args { out_dir: "src/messages", includes: &["./protobuf"], input: &["./protobuf/messages.proto"], customize: Customize { ..Default::default() }, }) .expect("protoc"); }
20.6875
46
0.555891
50fdb881e79d2b5eb483f80648e4f451647c231a
3,523
// MIT/Apache2 License use super::AuthInfo; use std::{ io::{self, Read}, vec, vec::Vec, }; #[cfg(feature = "async")] use futures_lite::{ io as aio, stream::{self, Stream}, AsyncRead, AsyncReadExt, }; #[cfg(feature = "async")] use std::{boxed::Box, pin::Pin}; /// Create an iterator over the authorization information in an Authority file. #[inline] pub(crate) fn auth_info_reader<R: Read>(r: R) -> impl Iterator<Item = Result<AuthInfo, io::Error>> { AuthInfoReader { reader: io::BufReader::new(r), } } #[derive(Debug)] struct AuthInfoReader<R> { reader: io::BufReader<R>, } impl<R: Read> Iterator for AuthInfoReader<R> { type Item = Result<AuthInfo, io::Error>; #[inline] fn next(&mut self) -> Option<Result<AuthInfo, io::Error>> { read_auth(&mut self.reader).transpose() } } #[inline] fn read_auth<R: Read>(r: &mut R) -> Result<Option<AuthInfo>, io::Error> { let family = match read_short(r) { Ok(family) => family, Err(e) if e.kind() == io::ErrorKind::UnexpectedEof => return Ok(None), Err(e) => return Err(e), }; let address = read_counted_string(r)?; let number = read_counted_string(r)?; let name = read_counted_string(r)?; let data = read_counted_string(r)?; Ok(Some(AuthInfo { family, data, number, name, address, })) } #[inline] fn read_counted_string<R: Read>(r: &mut R) -> Result<Vec<u8>, io::Error> { let len = read_short(r)? as usize; let mut s: Vec<u8> = vec![0u8; len]; r.read_exact(&mut s)?; Ok(s) } #[inline] fn read_short<R: Read>(r: &mut R) -> Result<u16, io::Error> { let mut buffer = [0u8; 2]; r.read_exact(&mut buffer)?; Ok(u16::from_be_bytes(buffer)) } // Async variant /// Create an iterator over the authorization information in an Authority file, async redox. 
#[cfg(feature = "async")] #[inline] pub(crate) fn auth_info_reader_async<'a, R: AsyncRead + Unpin + 'a>( r: R, ) -> Pin<Box<dyn Stream<Item = Result<AuthInfo, io::Error>> + 'a>> { let r = aio::BufReader::new(r); Box::pin(stream::try_unfold(r, move |mut r| async move { let res = read_auth_async(&mut r).await; match res { Ok(Some(a)) => Ok(Some((a, r))), Ok(None) => Ok(None), Err(e) => Err(e), } })) } #[cfg(feature = "async")] #[inline] async fn read_auth_async<R: AsyncRead + Unpin>(r: &mut R) -> Result<Option<AuthInfo>, io::Error> { let family = match read_short_async(r).await { Ok(family) => family, Err(e) if e.kind() == io::ErrorKind::UnexpectedEof => return Ok(None), Err(e) => return Err(e), }; let address = read_counted_string_async(r).await?; let number = read_counted_string_async(r).await?; let name = read_counted_string_async(r).await?; let data = read_counted_string_async(r).await?; Ok(Some(AuthInfo { family, name, data, number, address, })) } #[cfg(feature = "async")] #[inline] async fn read_counted_string_async<R: AsyncRead + Unpin>(r: &mut R) -> Result<Vec<u8>, io::Error> { let len = read_short_async(r).await? as usize; let mut s = vec![0; len]; r.read_exact(&mut s).await?; Ok(s) } #[cfg(feature = "async")] #[inline] async fn read_short_async<R: AsyncRead + Unpin>(r: &mut R) -> Result<u16, io::Error> { let mut buffer = [0u8; 2]; r.read_exact(&mut buffer).await?; Ok(u16::from_be_bytes(buffer)) }
25.904412
100
0.596651
d97b9d44d445840d963bd16a07efd426d21a6691
2,446
use std::fmt; use std::hash::Hash; use std::collections::hash_set::{HashSet, Iter}; use std::collections::hash_map::{HashMap, Keys}; pub struct Graph<N> { nodes: HashMap<N, HashSet<N>> } enum Mark { InProgress, Done } pub type Nodes<'a, N> = Keys<'a, N, HashSet<N>>; pub type Edges<'a, N> = Iter<'a, N>; impl<N: Eq + Hash + Clone> Graph<N> { pub fn new() -> Graph<N> { Graph { nodes: HashMap::new() } } pub fn add(&mut self, node: N, children: &[N]) { self.nodes.entry(node) .or_insert_with(HashSet::new) .extend(children.iter().cloned()); } pub fn link(&mut self, node: N, child: N) { self.nodes.entry(node).or_insert_with(HashSet::new).insert(child); } pub fn get_nodes(&self) -> &HashMap<N, HashSet<N>> { &self.nodes } pub fn edges(&self, node: &N) -> Option<Edges<N>> { self.nodes.get(node).map(|set| set.iter()) } pub fn sort(&self) -> Option<Vec<N>> { let mut ret = Vec::new(); let mut marks = HashMap::new(); for node in self.nodes.keys() { self.visit(node, &mut ret, &mut marks); } Some(ret) } fn visit(&self, node: &N, dst: &mut Vec<N>, marks: &mut HashMap<N, Mark>) { if marks.contains_key(node) { return; } marks.insert(node.clone(), Mark::InProgress); for child in &self.nodes[node] { self.visit(child, dst, marks); } dst.push(node.clone()); marks.insert(node.clone(), Mark::Done); } pub fn iter(&self) -> Nodes<N> { self.nodes.keys() } } impl<N: Eq + Hash + Clone> Default for Graph<N> { fn default() -> Graph<N> { Graph::new() } } impl<N: fmt::Display + Eq + Hash> fmt::Debug for Graph<N> { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { writeln!(fmt, "Graph {{")?; for (n, e) in &self.nodes { writeln!(fmt, " - {}", n)?; for n in e.iter() { writeln!(fmt, " - {}", n)?; } } write!(fmt, "}}")?; Ok(()) } } impl<N: Eq + Hash> PartialEq for Graph<N> { fn eq(&self, other: &Graph<N>) -> bool { self.nodes.eq(&other.nodes) } } impl<N: Eq + Hash> Eq for Graph<N> {} impl<N: Eq + Hash + Clone> Clone for Graph<N> { fn clone(&self) -> Graph<N> { Graph { nodes: self.nodes.clone() } } 
}
23.075472
79
0.51063
db7b7c9e17d002077171f2c1f0887598c15e8923
8,174
use glow::*; #[cfg(all(target_arch = "wasm32", feature = "stdweb"))] use std_web::{ traits::*, unstable::TryInto, web::{document, html_element::*}, }; #[cfg(all(target_arch = "wasm32", feature = "stdweb"))] use webgl_stdweb::WebGL2RenderingContext; fn main() { unsafe { // Create a context from a WebGL2 context on wasm32 targets #[cfg(all(target_arch = "wasm32", feature = "web-sys"))] let (_window, gl, _events_loop, render_loop, shader_version) = { use wasm_bindgen::JsCast; let canvas = web_sys::window() .unwrap() .document() .unwrap() .get_element_by_id("canvas") .unwrap() .dyn_into::<web_sys::HtmlCanvasElement>() .unwrap(); let webgl2_context = canvas .get_context("webgl2") .unwrap() .unwrap() .dyn_into::<web_sys::WebGl2RenderingContext>() .unwrap(); ( (), glow::Context::from_webgl2_context(webgl2_context), (), glow::RenderLoop::from_request_animation_frame(), "#version 300 es", ) }; #[cfg(all(target_arch = "wasm32", feature = "stdweb"))] let (_window, gl, _events_loop, render_loop, shader_version) = { let canvas: CanvasElement = document() .create_element("canvas") .unwrap() .try_into() .unwrap(); document().body().unwrap().append_child(&canvas); canvas.set_width(640); canvas.set_height(480); let webgl2_context: WebGL2RenderingContext = canvas.get_context().unwrap(); ( (), glow::Context::from_webgl2_context(webgl2_context), (), glow::RenderLoop::from_request_animation_frame(), "#version 300 es", ) }; // Create a context from a glutin window on non-wasm32 targets #[cfg(feature = "window-glutin")] let (gl, event_loop, windowed_context, shader_version) = { let el = glutin::event_loop::EventLoop::new(); let wb = glutin::window::WindowBuilder::new() .with_title("Hello triangle!") .with_inner_size(glutin::dpi::LogicalSize::new(1024.0, 768.0)); let windowed_context = glutin::ContextBuilder::new() .with_vsync(true) .build_windowed(wb, &el) .unwrap(); let windowed_context = windowed_context.make_current().unwrap(); let context = unsafe { 
glow::Context::from_loader_function(|s| { windowed_context.get_proc_address(s) as *const _ }) }; (context, el, windowed_context, "#version 410") }; // Create a context from a sdl2 window #[cfg(feature = "window-sdl2")] let (gl, mut events_loop, render_loop, shader_version, _gl_context) = { let sdl = sdl2::init().unwrap(); let video = sdl.video().unwrap(); let gl_attr = video.gl_attr(); gl_attr.set_context_profile(sdl2::video::GLProfile::Core); gl_attr.set_context_version(3, 0); let window = video .window("Hello triangle!", 1024, 769) .opengl() .resizable() .build() .unwrap(); let gl_context = window.gl_create_context().unwrap(); let context = unsafe { glow::Context::from_loader_function(|s| video.gl_get_proc_address(s) as *const _) }; let render_loop = glow::RenderLoop::<sdl2::video::Window>::from_sdl_window(window); let event_loop = sdl.event_pump().unwrap(); (context, event_loop, render_loop, "#version 410", gl_context) }; let vertex_array = gl .create_vertex_array() .expect("Cannot create vertex array"); gl.bind_vertex_array(Some(vertex_array)); let program = gl.create_program().expect("Cannot create program"); let (vertex_shader_source, fragment_shader_source) = ( r#"const vec2 verts[3] = vec2[3]( vec2(0.5f, 1.0f), vec2(0.0f, 0.0f), vec2(1.0f, 0.0f) ); out vec2 vert; void main() { vert = verts[gl_VertexID]; gl_Position = vec4(vert - 0.5, 0.0, 1.0); }"#, r#"precision mediump float; in vec2 vert; out vec4 color; void main() { color = vec4(vert, 0.5, 1.0); }"#, ); let shader_sources = [ (glow::VERTEX_SHADER, vertex_shader_source), (glow::FRAGMENT_SHADER, fragment_shader_source), ]; let mut shaders = Vec::with_capacity(shader_sources.len()); for (shader_type, shader_source) in shader_sources.iter() { let shader = gl .create_shader(*shader_type) .expect("Cannot create shader"); gl.shader_source(shader, &format!("{}\n{}", shader_version, shader_source)); gl.compile_shader(shader); if !gl.get_shader_compile_status(shader) { panic!(gl.get_shader_info_log(shader)); } 
gl.attach_shader(program, shader); shaders.push(shader); } gl.link_program(program); if !gl.get_program_link_status(program) { panic!(gl.get_program_info_log(program)); } for shader in shaders { gl.detach_shader(program, shader); gl.delete_shader(shader); } gl.use_program(Some(program)); gl.clear_color(0.1, 0.2, 0.3, 1.0); // We handle events differently between targets #[cfg(feature = "window-glutin")] { use glutin::event::{Event, WindowEvent}; use glutin::event_loop::ControlFlow; event_loop.run(move |event, _, control_flow| { *control_flow = ControlFlow::Wait; match event { Event::LoopDestroyed => { return; } Event::MainEventsCleared => { windowed_context.window().request_redraw(); } Event::RedrawRequested(_) => { gl.clear(glow::COLOR_BUFFER_BIT); gl.draw_arrays(glow::TRIANGLES, 0, 3); windowed_context.swap_buffers().unwrap(); } Event::WindowEvent { ref event, .. } => match event { WindowEvent::Resized(physical_size) => { windowed_context.resize(*physical_size); } WindowEvent::CloseRequested => { gl.delete_program(program); gl.delete_vertex_array(vertex_array); *control_flow = ControlFlow::Exit } _ => (), }, _ => (), } }); } #[cfg(not(feature = "window-glutin"))] render_loop.run(move |running: &mut bool| { #[cfg(feature = "window-sdl2")] { for event in events_loop.poll_iter() { match event { sdl2::event::Event::Quit { .. } => *running = false, _ => {} } } } gl.clear(glow::COLOR_BUFFER_BIT); gl.draw_arrays(glow::TRIANGLES, 0, 3); if !*running { gl.delete_program(program); gl.delete_vertex_array(vertex_array); } }); } }
36.491071
97
0.488255
d69f2c1b0e2d13f6d1973f9ceae4752b949853aa
5,019
use std::iter; use ra_syntax::{ ast::{ self, edit::{AstNodeEdit, IndentLevel}, make, }, AstNode, }; use crate::{ utils::{render_snippet, Cursor, TryEnum}, AssistContext, AssistId, AssistKind, Assists, }; // Assist: replace_unwrap_with_match // // Replaces `unwrap` a `match` expression. Works for Result and Option. // // ``` // enum Result<T, E> { Ok(T), Err(E) } // fn main() { // let x: Result<i32, i32> = Result::Ok(92); // let y = x.<|>unwrap(); // } // ``` // -> // ``` // enum Result<T, E> { Ok(T), Err(E) } // fn main() { // let x: Result<i32, i32> = Result::Ok(92); // let y = match x { // Ok(a) => a, // $0_ => unreachable!(), // }; // } // ``` pub(crate) fn replace_unwrap_with_match(acc: &mut Assists, ctx: &AssistContext) -> Option<()> { let method_call: ast::MethodCallExpr = ctx.find_node_at_offset()?; let name = method_call.name_ref()?; if name.text() != "unwrap" { return None; } let caller = method_call.expr()?; let ty = ctx.sema.type_of_expr(&caller)?; let happy_variant = TryEnum::from_ty(&ctx.sema, &ty)?.happy_case(); let target = method_call.syntax().text_range(); acc.add( AssistId("replace_unwrap_with_match", AssistKind::RefactorRewrite), "Replace unwrap with match", target, |builder| { let ok_path = make::path_unqualified(make::path_segment(make::name_ref(happy_variant))); let it = make::ident_pat(make::name("a")).into(); let ok_tuple = make::tuple_struct_pat(ok_path, iter::once(it)).into(); let bind_path = make::path_unqualified(make::path_segment(make::name_ref("a"))); let ok_arm = make::match_arm(iter::once(ok_tuple), make::expr_path(bind_path)); let unreachable_call = make::expr_unreachable(); let err_arm = make::match_arm(iter::once(make::wildcard_pat().into()), unreachable_call); let match_arm_list = make::match_arm_list(vec![ok_arm, err_arm]); let match_expr = make::expr_match(caller.clone(), match_arm_list) .indent(IndentLevel::from_node(method_call.syntax())); let range = method_call.syntax().text_range(); match ctx.config.snippet_cap { 
Some(cap) => { let err_arm = match_expr .syntax() .descendants() .filter_map(ast::MatchArm::cast) .last() .unwrap(); let snippet = render_snippet(cap, match_expr.syntax(), Cursor::Before(err_arm.syntax())); builder.replace_snippet(cap, range, snippet) } None => builder.replace(range, match_expr.to_string()), } }, ) } #[cfg(test)] mod tests { use crate::tests::{check_assist, check_assist_target}; use super::*; #[test] fn test_replace_result_unwrap_with_match() { check_assist( replace_unwrap_with_match, r" enum Result<T, E> { Ok(T), Err(E) } fn i<T>(a: T) -> T { a } fn main() { let x: Result<i32, i32> = Result::Ok(92); let y = i(x).<|>unwrap(); } ", r" enum Result<T, E> { Ok(T), Err(E) } fn i<T>(a: T) -> T { a } fn main() { let x: Result<i32, i32> = Result::Ok(92); let y = match i(x) { Ok(a) => a, $0_ => unreachable!(), }; } ", ) } #[test] fn test_replace_option_unwrap_with_match() { check_assist( replace_unwrap_with_match, r" enum Option<T> { Some(T), None } fn i<T>(a: T) -> T { a } fn main() { let x = Option::Some(92); let y = i(x).<|>unwrap(); } ", r" enum Option<T> { Some(T), None } fn i<T>(a: T) -> T { a } fn main() { let x = Option::Some(92); let y = match i(x) { Some(a) => a, $0_ => unreachable!(), }; } ", ); } #[test] fn test_replace_result_unwrap_with_match_chaining() { check_assist( replace_unwrap_with_match, r" enum Result<T, E> { Ok(T), Err(E) } fn i<T>(a: T) -> T { a } fn main() { let x: Result<i32, i32> = Result::Ok(92); let y = i(x).<|>unwrap().count_zeroes(); } ", r" enum Result<T, E> { Ok(T), Err(E) } fn i<T>(a: T) -> T { a } fn main() { let x: Result<i32, i32> = Result::Ok(92); let y = match i(x) { Ok(a) => a, $0_ => unreachable!(), }.count_zeroes(); } ", ) } #[test] fn replace_unwrap_with_match_target() { check_assist_target( replace_unwrap_with_match, r" enum Option<T> { Some(T), None } fn i<T>(a: T) -> T { a } fn main() { let x = Option::Some(92); let y = i(x).<|>unwrap(); } ", r"i(x).unwrap()", ); } }
26.696809
100
0.508468
01f8fcaf798f416b2e65811d4bfa36b54908b751
13,233
// Copyright 2020-2021 The Datafuse Authors. // // SPDX-License-Identifier: Apache-2.0. use std::sync::Arc; use async_raft::RaftMetrics; use async_raft::State; use pretty_assertions::assert_eq; use tokio::sync::watch::Receiver; use crate::meta_service::ClientRequest; use crate::meta_service::ClientResponse; use crate::meta_service::Cmd; use crate::meta_service::GetReq; use crate::meta_service::MemStoreStateMachine; use crate::meta_service::MetaNode; use crate::meta_service::MetaServiceClient; use crate::meta_service::NodeId; use crate::meta_service::RaftTxId; use crate::tests::Seq; #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn test_state_machine_apply_add() -> anyhow::Result<()> { crate::tests::init_tracing(); let mut sm = MemStoreStateMachine::default(); let cases: Vec<( &str, Option<RaftTxId>, &str, &str, Option<String>, Option<String> )> = vec![ ( "add on none", Some(RaftTxId::new("foo", 1)), "k1", "v1", None, Some("v1".to_string()) ), ( "add on existent", Some(RaftTxId::new("foo", 2)), "k1", "v2", Some("v1".to_string()), None ), ( "dup set with same serial, even with diff key, got the previous result", Some(RaftTxId::new("foo", 2)), "k2", "v3", Some("v1".to_string()), None ), ( "diff client, same serial", Some(RaftTxId::new("bar", 2)), "k2", "v3", None, Some("v3".to_string()) ), ("no txid", None, "k3", "v4", None, Some("v4".to_string())), ]; for (name, txid, k, v, want_prev, want_result) in cases.iter() { let resp = sm.apply(5, &ClientRequest { txid: txid.clone(), cmd: Cmd::AddFile { key: k.to_string(), value: v.to_string() } }); assert_eq!( ClientResponse::String { prev: want_prev.clone(), result: want_result.clone() }, resp.unwrap(), "{}", name ); } Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn test_state_machine_apply_set() -> anyhow::Result<()> { crate::tests::init_tracing(); let mut sm = MemStoreStateMachine::default(); let cases: Vec<( &str, Option<RaftTxId>, &str, &str, Option<String>, Option<String> 
)> = vec![ ( "set on none", Some(RaftTxId::new("foo", 1)), "k1", "v1", None, Some("v1".to_string()) ), ( "set on existent", Some(RaftTxId::new("foo", 2)), "k1", "v2", Some("v1".to_string()), Some("v2".to_string()) ), ( "dup set with same serial, even with diff key, got the previous result", Some(RaftTxId::new("foo", 2)), "k2", "v3", Some("v1".to_string()), Some("v2".to_string()) ), ( "diff client, same serial", Some(RaftTxId::new("bar", 2)), "k2", "v3", None, Some("v3".to_string()) ), ( "no txid", None, "k2", "v4", Some("v3".to_string()), Some("v4".to_string()) ), ]; for (name, txid, k, v, want_prev, want_result) in cases.iter() { let resp = sm.apply(5, &ClientRequest { txid: txid.clone(), cmd: Cmd::SetFile { key: k.to_string(), value: v.to_string() } }); assert_eq!( ClientResponse::String { prev: want_prev.clone(), result: want_result.clone() }, resp.unwrap(), "{}", name ); } Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn test_meta_node_boot() -> anyhow::Result<()> { crate::tests::init_tracing(); // - Start a single node meta service cluster. // - Test the single node is recorded by this cluster. let addr = new_addr(); let resp = MetaNode::boot(0, addr.clone()).await; assert!(resp.is_ok()); let mn = resp.unwrap(); let got = mn.get_node(&0).await; assert_eq!(addr, got.unwrap().address); mn.stop().await?; Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn test_meta_node_graceful_shutdown() -> anyhow::Result<()> { // - Start a leader then shutdown. 
crate::tests::init_tracing(); let (_nid0, mn0) = setup_leader().await?; let mut rx0 = mn0.raft.metrics(); let joined = mn0.stop().await?; assert_eq!(3, joined); // tx closed: loop { let r = rx0.changed().await; if r.is_err() { tracing::info!("done!!!"); break; } tracing::info!("st: {:?}", rx0.borrow()); } assert!(rx0.changed().await.is_err()); Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn test_meta_node_sync_to_non_voter() -> anyhow::Result<()> { // - Start a leader and a non-voter; // - Write to leader, check on non-voter. crate::tests::init_tracing(); let (_nid0, mn0) = setup_leader().await?; let (_nid1, mn1) = setup_non_voter(mn0.clone(), 1, &new_addr()).await?; assert_write_synced(vec![mn0.clone(), mn1.clone()], "metakey2").await?; Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn test_meta_node_3_members() -> anyhow::Result<()> { // - Start a cluster with 3 replicas // - Write to leader, check data is replicated. // crate::tests::init_tracing(); // // let (_nid0, mn0) = setup_leader().await?; // let (_nid1, mn1) = setup_non_voter(mn0.clone(), 1, "127.0.0.1:19007").await?; // // assert_write_synced(vec![mn0.clone(), mn1.clone()], "metakey2").await?; Ok(()) } #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn test_meta_node_restart() -> anyhow::Result<()> { // TODO check restarted follower. // Start a leader and a non-voter; // Restart them. // Check old data an new written data. 
crate::tests::init_tracing(); let (_nid0, mn0) = setup_leader().await?; let (_nid1, mn1) = setup_non_voter(mn0.clone(), 1, &new_addr()).await?; let sto0 = mn0.sto.clone(); let sto1 = mn1.sto.clone(); let meta_nodes = vec![mn0.clone(), mn1.clone()]; assert_write_synced(meta_nodes.clone(), "key1").await?; // stop tracing::info!("shutting down all"); let n = mn0.stop().await?; assert_eq!(3, n); let n = mn1.stop().await?; assert_eq!(3, n); tracing::info!("restart all"); // restart let mn0 = MetaNode::builder().node_id(0).sto(sto0).build().await?; let mut rx0 = mn0.raft.metrics(); let mn1 = MetaNode::builder().node_id(1).sto(sto1).build().await?; let mut rx1 = mn1.raft.metrics(); let meta_nodes = vec![mn0.clone(), mn1.clone()]; wait_for("n0 -> leader", &mut rx0, |x| x.state == State::Leader).await; wait_for("n1 -> non-voter", &mut rx1, |x| x.state == State::NonVoter).await; wait_for("n1.current_leader -> 0", &mut rx1, |x| { x.current_leader == Some(0) }) .await; assert_write_synced(meta_nodes.clone(), "key2").await?; // check old data assert_get_file(meta_nodes, "key1", "key1").await?; Ok(()) } async fn setup_leader() -> anyhow::Result<(NodeId, Arc<MetaNode>)> { // Setup a cluster in which there is a leader and a non-voter. // asserts states are consistent // node-0: voter, becomes leader. let nid0 = 0; let addr0 = new_addr(); // boot up a single-node cluster let mn0 = MetaNode::boot(nid0, addr0.clone()).await?; let mut rx0 = mn0.raft.metrics(); { // ensure n0 is ready assert_connection(&addr0).await?; // assert that boot() adds the node to meta. 
let got = mn0.get_node(&nid0).await; assert_eq!(addr0, got.unwrap().address, "nid0 is added"); wait_for("n0 -> leader", &mut rx0, |x| x.state == State::Leader).await; wait_for("n0.current_leader -> 0", &mut rx0, |x| { x.current_leader == Some(0) }) .await; } Ok((nid0, mn0)) } async fn setup_non_voter( leader: Arc<MetaNode>, id: NodeId, addr: &str ) -> anyhow::Result<(NodeId, Arc<MetaNode>)> { let mn = MetaNode::boot_non_voter(id, addr).await?; let mut rx = mn.raft.metrics(); { // add node-1 to cluster as non-voter let resp = leader.add_node(id, addr.to_string()).await; match resp.unwrap() { ClientResponse::Node { prev: _, result } => { assert_eq!(addr.to_string(), result.unwrap().address); } _ => { panic!("expect node") } } } { // ensure the MetaNode is ready assert_connection(addr).await?; wait_for(&format!("n{} -> non-voter", id), &mut rx, |x| { x.state == State::NonVoter }) .await; wait_for(&format!("n{}.current_leader -> 0", id), &mut rx, |x| { x.current_leader == Some(0) }) .await; } Ok((id, mn)) } async fn assert_write_synced(meta_nodes: Vec<Arc<MetaNode>>, key: &str) -> anyhow::Result<()> { let leader = meta_nodes[0].clone(); let last_applied = leader.raft.metrics().borrow().last_applied; tracing::info!("leader: last_applied={}", last_applied); { // write a 2nd key to leader leader .write_to_local_leader(ClientRequest { txid: None, cmd: Cmd::SetFile { key: key.to_string(), value: key.to_string() } }) .await?; } assert_applied_index(meta_nodes.clone(), last_applied + 1).await?; assert_get_file(meta_nodes.clone(), key, key).await?; Ok(()) } async fn assert_applied_index(meta_nodes: Vec<Arc<MetaNode>>, at_least: u64) -> anyhow::Result<()> { // wait for nodes for applied index to be upto date: applied >= at_least. for i in 0..meta_nodes.len() { let mn = meta_nodes[i].clone(); // raft.metrics is the status of the cluster, not the status about a node. 
// E.g., if leader applied 4th log, the next append_entry request updates the applied index to 4 on a follower or non-voter, // no matter whether it actually applied the 4th log. // Thus we check the applied_rx, which is the actually applied index. wait_for_applied( &format!("n{}", i,), &mut mn.sto.applied_rx.clone(), at_least ) .await; } Ok(()) } async fn assert_get_file( meta_nodes: Vec<Arc<MetaNode>>, key: &str, value: &str ) -> anyhow::Result<()> { for i in 0..meta_nodes.len() { let mn = meta_nodes[i].clone(); let got = mn.get_file(key).await; assert_eq!(value.to_string(), got.unwrap(), "n{} applied value", i); } Ok(()) } async fn assert_connection(addr: &str) -> anyhow::Result<()> { tokio::time::sleep(tokio::time::Duration::from_millis(200)).await; let mut client = MetaServiceClient::connect(format!("http://{}", addr)).await?; let req = tonic::Request::new(GetReq { key: "foo".into() }); let rst = client.get(req).await?.into_inner(); assert_eq!("", rst.value, "connected"); Ok(()) } // wait for raft metrics to a state that satisfies `func`. #[tracing::instrument(level = "info", skip(func, mrx))] async fn wait_for<T>(msg: &str, mrx: &mut Receiver<RaftMetrics>, func: T) -> RaftMetrics where T: Fn(&RaftMetrics) -> bool { loop { let latest = mrx.borrow().clone(); tracing::info!("start wait for {:} metrics: {:?}", msg, latest); if func(&latest) { tracing::info!("done wait for {:} metrics: {:?}", msg, latest); return latest; } let changed = mrx.changed().await; assert!(changed.is_ok()); } } // wait for the applied index to be >= `at_least`. 
#[tracing::instrument(level = "info", skip(rx))] async fn wait_for_applied(msg: &str, rx: &mut Receiver<u64>, at_least: u64) -> u64 { loop { let latest = rx.borrow().clone(); tracing::info!("start wait for {:} latest: {:?}", msg, latest); if latest >= at_least { tracing::info!("done wait for {:} latest: {:?}", msg, latest); return latest; } let changed = rx.changed().await; assert!(changed.is_ok()); } } fn new_addr() -> String { let addr = format!("127.0.0.1:{}", 19000 + *Seq::default()); tracing::info!("new_addr: {}", addr); addr }
28.275641
132
0.536008
113883bdde13af546a897832eaae4f4627c91b25
334
mod caps; mod cargo_target_spec; mod conv; mod main_loop; mod markdown; mod project_model; mod vfs_filter; pub mod req; pub mod init; mod server_world; pub type Result<T> = ::std::result::Result<T, ::failure::Error>; pub use crate::{caps::server_capabilities, main_loop::main_loop, main_loop::LspError, init::InitializationOptions};
23.857143
115
0.766467
e987639b2252e12e055078c09144b627336eb8d9
2,206
//! Function to initialize a new project. use core::errors::Result; use core::{Handle, RelativePath}; const EXAMPLE: &'static [u8] = include_bytes!("example.reproto"); pub fn initialize(handle: &Handle) -> Result<()> { let mut path = RelativePath::new("proto"); let manifest = RelativePath::new("reproto.toml"); let mut with_output = true; let mut maven = false; let mut swift = false; let package = vec!["io", "reproto", "example"]; // looks like a maven project if handle.is_file(RelativePath::new("pom.xml")) { with_output = false; maven = true; } // looks like a swift project if handle.is_file(RelativePath::new("Package.swift")) { with_output = false; swift = true; } if !handle.is_file(manifest) { info!("Writing Manifest: {}", manifest.display()); let mut manifest = handle.create(manifest)?; if with_output { writeln!(manifest, "paths = [")?; writeln!(manifest, " \"{}\"", path.display())?; writeln!(manifest, "]")?; writeln!(manifest, "output = \"target\"")?; } if maven { writeln!(manifest, "[presets.maven]")?; path = RelativePath::new("src/main/reproto"); } if swift { writeln!(manifest, "[presets.swift]")?; } writeln!(manifest, "")?; writeln!(manifest, "[packages]")?; writeln!( manifest, "# File: {}/{}.reproto", path.display(), package.join("/") )?; writeln!(manifest, "\"{}\" = \"*\"", package.join("."))?; } let example = package .iter() .cloned() .fold(path.to_owned(), |p, part| p.join(part)) .with_extension("reproto"); if let Some(parent) = example.parent() { if !handle.is_dir(parent) { info!("Creating: {}", parent.display()); handle.create_dir_all(parent)?; } } if !handle.is_file(&example) { info!("Writing: {}", example.display()); let mut example = handle.create(&example)?; example.write_all(EXAMPLE)?; } Ok(()) }
26.578313
65
0.526745
0ac6153a8f7764ac5844fad43c74476999210245
63,177
//---------------------------------------------------------------------------// // Copyright (c) 2017-2020 Ismael Gutiérrez González. All rights reserved. // // This file is part of the Rusted PackFile Manager (RPFM) project, // which can be found here: https://github.com/Frodo45127/rpfm. // // This file is licensed under the MIT license, which can be found here: // https://github.com/Frodo45127/rpfm/blob/master/LICENSE. //---------------------------------------------------------------------------// /*! Module with the background loop. Basically, this does the heavy load of the program. !*/ use open::that_in_background; use rayon::prelude::*; use uuid::Uuid; use std::collections::BTreeMap; use std::env::temp_dir; use std::fs::File; use std::io::{BufWriter, Read, Write}; use std::path::PathBuf; use std::thread; use rpfm_error::{Error, ErrorKind}; use rpfm_lib::assembly_kit::*; use rpfm_lib::common::*; use rpfm_lib::diagnostics::Diagnostics; use rpfm_lib::dependencies::Dependencies; use rpfm_lib::GAME_SELECTED; use rpfm_lib::packfile::PFHFileType; use rpfm_lib::packedfile::*; use rpfm_lib::packedfile::animpack::AnimPack; use rpfm_lib::packedfile::table::db::DB; use rpfm_lib::packedfile::table::loc::{Loc, TSV_NAME_LOC}; use rpfm_lib::packedfile::text::{Text, TextType}; use rpfm_lib::packfile::{PackFile, PackFileInfo, packedfile::{PackedFile, PackedFileInfo}, PathType, PFHFlags, RESERVED_NAME_NOTES}; use rpfm_lib::schema::*; use rpfm_lib::SCHEMA; use rpfm_lib::SETTINGS; use rpfm_lib::SUPPORTED_GAMES; use rpfm_lib::template::Template; use crate::app_ui::NewPackedFile; use crate::CENTRAL_COMMAND; use crate::communications::{Command, Notification, Response, THREADS_COMMUNICATION_ERROR}; use crate::locale::{tr, tre}; use crate::RPFM_PATH; use crate::views::table::TableType; /// This is the background loop that's going to be executed in a parallel thread to the UI. No UI or "Unsafe" stuff here. 
/// /// All communication between this and the UI thread is done use the `CENTRAL_COMMAND` static. pub fn background_loop() { //---------------------------------------------------------------------------------------// // Initializing stuff... //---------------------------------------------------------------------------------------// // We need two PackFiles: // - `pack_file_decoded`: This one will hold our opened PackFile. // - `pack_files_decoded_extra`: This one will hold the PackFiles opened for the `add_from_packfile` feature, using their paths as keys. let mut pack_file_decoded = PackFile::new(); let mut pack_files_decoded_extra = BTreeMap::new(); let mut dependencies = Dependencies::default(); //---------------------------------------------------------------------------------------// // Looping forever and ever... //---------------------------------------------------------------------------------------// loop { // Wait until you get something through the channel. This hangs the thread until we got something, // so it doesn't use processing power until we send it a message. let response = CENTRAL_COMMAND.recv_message_rust(); match response { // In case we want to reset the PackFile to his original state (dummy)... Command::ResetPackFile => pack_file_decoded = PackFile::new(), // In case we want to remove a Secondary Packfile from memory... Command::RemovePackFileExtra(path) => { pack_files_decoded_extra.remove(&path); }, // In case we want to create a "New PackFile"... Command::NewPackFile => { let game_selected = GAME_SELECTED.read().unwrap(); let pack_version = SUPPORTED_GAMES.get(&**game_selected).unwrap().pfh_version[0]; pack_file_decoded = PackFile::new_with_name("unknown.pack", pack_version); if let Ok(version_number) = get_game_selected_exe_version_number() { pack_file_decoded.set_game_version(version_number); } } // In case we want to "Open one or more PackFiles"... 
Command::OpenPackFiles(paths) => { match PackFile::open_packfiles(&paths, SETTINGS.read().unwrap().settings_bool["use_lazy_loading"], false, false) { Ok(pack_file) => { pack_file_decoded = pack_file; // Force decoding of table/locs, so they're in memory for the diagnostics to work. if let Some(ref schema) = *SCHEMA.read().unwrap() { let mut packed_files = pack_file_decoded.get_ref_mut_packed_files_by_types(&[PackedFileType::DB, PackedFileType::Loc], false); packed_files.par_iter_mut().for_each(|x| { let _ = x.decode_no_locks(schema); }); } CENTRAL_COMMAND.send_message_rust(Response::PackFileInfo(PackFileInfo::from(&pack_file_decoded))); } Err(error) => CENTRAL_COMMAND.send_message_rust(Response::Error(error)), } } // In case we want to "Open an Extra PackFile" (for "Add from PackFile")... Command::OpenPackFileExtra(path) => { match pack_files_decoded_extra.get(&path) { Some(pack_file) => CENTRAL_COMMAND.send_message_rust(Response::PackFileInfo(PackFileInfo::from(pack_file))), None => match PackFile::open_packfiles(&[path.to_path_buf()], true, false, true) { Ok(pack_file) => { CENTRAL_COMMAND.send_message_rust(Response::PackFileInfo(PackFileInfo::from(&pack_file))); pack_files_decoded_extra.insert(path.to_path_buf(), pack_file); } Err(error) => CENTRAL_COMMAND.send_message_rust(Response::Error(error)), } } } // In case we want to "Load All CA PackFiles"... Command::LoadAllCAPackFiles => { match PackFile::open_all_ca_packfiles() { Ok(pack_file) => { pack_file_decoded = pack_file; CENTRAL_COMMAND.send_message_rust(Response::PackFileInfo(PackFileInfo::from(&pack_file_decoded))); } Err(error) => CENTRAL_COMMAND.send_message_rust(Response::Error(error)), } } // In case we want to "Save a PackFile"... 
Command::SavePackFile => { match pack_file_decoded.save(None) { Ok(_) => CENTRAL_COMMAND.send_message_rust(Response::PackFileInfo(From::from(&pack_file_decoded))), Err(error) => CENTRAL_COMMAND.send_message_rust(Response::Error(Error::from(ErrorKind::SavePackFileGeneric(error.to_string())))), } } // In case we want to "Save a PackFile As"... Command::SavePackFileAs(path) => { match pack_file_decoded.save(Some(path.to_path_buf())) { Ok(_) => CENTRAL_COMMAND.send_message_rust(Response::PackFileInfo(From::from(&pack_file_decoded))), Err(error) => CENTRAL_COMMAND.send_message_rust(Response::Error(Error::from(ErrorKind::SavePackFileGeneric(error.to_string())))), } } // In case we want to change the current settings... Command::SetSettings(settings) => { *SETTINGS.write().unwrap() = settings; match SETTINGS.read().unwrap().save() { Ok(()) => CENTRAL_COMMAND.send_message_rust(Response::Success), Err(error) => CENTRAL_COMMAND.send_message_rust(Response::Error(error)), } } // In case we want to change the current shortcuts... Command::SetShortcuts(shortcuts) => { match shortcuts.save() { Ok(()) => CENTRAL_COMMAND.send_message_rust(Response::Success), Err(error) => CENTRAL_COMMAND.send_message_rust(Response::Error(error)), } } // In case we want to get the data of a PackFile needed to form the TreeView... Command::GetPackFileDataForTreeView => { // Get the name and the PackedFile list, and send it. CENTRAL_COMMAND.send_message_rust(Response::PackFileInfoVecPackedFileInfo(( From::from(&pack_file_decoded), pack_file_decoded.get_packed_files_all_info(), ))); } // In case we want to get the data of a Secondary PackFile needed to form the TreeView... Command::GetPackFileExtraDataForTreeView(path) => { // Get the name and the PackedFile list, and serialize it. 
match pack_files_decoded_extra.get(&path) { Some(pack_file) => CENTRAL_COMMAND.send_message_rust(Response::PackFileInfoVecPackedFileInfo(( From::from(pack_file), pack_file.get_packed_files_all_info(), ))), None => CENTRAL_COMMAND.send_message_rust(Response::Error(ErrorKind::CannotFindExtraPackFile(path).into())), } } // In case we want to get the info of one PackedFile from the TreeView. Command::GetPackedFileInfo(path) => { CENTRAL_COMMAND.send_message_rust(Response::OptionPackedFileInfo( pack_file_decoded.get_packed_file_info_by_path(&path) )); } // In case we want to get the info of more than one PackedFiles from the TreeView. Command::GetPackedFilesInfo(paths) => { CENTRAL_COMMAND.send_message_rust(Response::VecOptionPackedFileInfo( paths.iter().map(|x| pack_file_decoded.get_packed_file_info_by_path(x)).collect() )); } // In case we want to launch a global search on a `PackFile`... Command::GlobalSearch(mut global_search) => { global_search.search(&mut pack_file_decoded); let packed_files_info = global_search.get_results_packed_file_info(&mut pack_file_decoded); CENTRAL_COMMAND.send_message_global_search_update_to_qt((global_search, packed_files_info)); } // In case we want to update the results of a global search on a `PackFile`... Command::GlobalSearchUpdate(mut global_search, path_types) => { global_search.update(&mut pack_file_decoded, &path_types); let packed_files_info = global_search.get_update_paths_packed_file_info(&mut pack_file_decoded, &path_types); CENTRAL_COMMAND.send_message_global_search_update_to_qt((global_search, packed_files_info)); } // In case we want to change the current `Game Selected`... Command::SetGameSelected(game_selected) => { *GAME_SELECTED.write().unwrap() = game_selected.to_owned(); // Try to load the Schema for this game but, before it, PURGE THE DAMN SCHEMA-RELATED CACHE AND REBUIILD IT AFTERWARDS. 
pack_file_decoded.get_ref_mut_packed_files_by_type(PackedFileType::DB, false).par_iter_mut().for_each(|x| { let _ = x.encode_and_clean_cache(); }); *SCHEMA.write().unwrap() = Schema::load(&SUPPORTED_GAMES.get(&*game_selected).unwrap().schema).ok(); if let Some(ref schema) = *SCHEMA.read().unwrap() { pack_file_decoded.get_ref_mut_packed_files_by_type(PackedFileType::DB, false).par_iter_mut().for_each(|x| { let _ = x.decode_no_locks(&schema); }); } // Send a response, so we can unlock the UI. CENTRAL_COMMAND.send_message_rust(Response::Success); // If there is a PackFile open, change his id to match the one of the new `Game Selected`. if !pack_file_decoded.get_file_name().is_empty() { pack_file_decoded.set_pfh_version(SUPPORTED_GAMES.get(&**GAME_SELECTED.read().unwrap()).unwrap().pfh_version[0]); if let Ok(version_number) = get_game_selected_exe_version_number() { pack_file_decoded.set_game_version(version_number); } } } // In case we want to generate a new Pak File for our Game Selected... Command::GeneratePakFile(path, version) => { match generate_pak_file(&path, version, &dependencies) { Ok(_) => CENTRAL_COMMAND.send_message_rust(Response::Success), Err(error) => CENTRAL_COMMAND.send_message_rust(Response::Error(error)), } // Reload the `fake dependency_database` for that game. *dependencies.get_ref_mut_fake_dependency_database() = DB::read_pak_file(); } // In case we want to update the Schema for our Game Selected... Command::UpdateCurrentSchemaFromAssKit(path) => { match update_schema_from_raw_files(path, &dependencies) { Ok(_) => CENTRAL_COMMAND.send_message_rust(Response::Success), Err(error) => CENTRAL_COMMAND.send_message_rust(Response::Error(error)), } } // In case we want to optimize our PackFile... Command::OptimizePackFile => { CENTRAL_COMMAND.send_message_rust(Response::VecVecString(pack_file_decoded.optimize(&dependencies))); } // In case we want to Patch the SiegeAI of a PackFile... 
Command::PatchSiegeAI => { match pack_file_decoded.patch_siege_ai() { Ok(result) => CENTRAL_COMMAND.send_message_rust(Response::StringVecVecString(result)), Err(error) => CENTRAL_COMMAND.send_message_rust(Response::Error(error)) } } // In case we want to change the PackFile's Type... Command::SetPackFileType(new_type) => pack_file_decoded.set_pfh_file_type(new_type), // In case we want to change the "Include Last Modified Date" setting of the PackFile... Command::ChangeIndexIncludesTimestamp(state) => pack_file_decoded.get_ref_mut_bitmask().set(PFHFlags::HAS_INDEX_WITH_TIMESTAMPS, state), // In case we want to compress/decompress the PackedFiles of the currently open PackFile... Command::ChangeDataIsCompressed(state) => pack_file_decoded.toggle_compression(state), // In case we want to get the path of the currently open `PackFile`. Command::GetPackFilePath => CENTRAL_COMMAND.send_message_rust(Response::PathBuf(pack_file_decoded.get_file_path().to_path_buf())), // In case we want to get the Dependency PackFiles of our PackFile... Command::GetDependencyPackFilesList => CENTRAL_COMMAND.send_message_rust(Response::VecString(pack_file_decoded.get_packfiles_list().to_vec())), // In case we want to set the Dependency PackFiles of our PackFile... Command::SetDependencyPackFilesList(pack_files) => pack_file_decoded.set_packfiles_list(&pack_files), // In case we want to check if there is a Dependency Database loaded... Command::IsThereADependencyDatabase => CENTRAL_COMMAND.send_message_rust(Response::Bool(!dependencies.get_ref_dependency_database().is_empty())), // In case we want to check if there is a Schema loaded... Command::IsThereASchema => CENTRAL_COMMAND.send_message_rust(Response::Bool(SCHEMA.read().unwrap().is_some())), // In case we want to create a PackedFile from scratch... 
Command::NewPackedFile(path, new_packed_file) => { if let Some(ref schema) = *SCHEMA.read().unwrap() { let decoded = match new_packed_file { NewPackedFile::AnimPack(_) => { let packed_file = AnimPack::new(); DecodedPackedFile::AnimPack(packed_file) }, NewPackedFile::DB(_, table, version) => { match schema.get_ref_versioned_file_db(&table) { Ok(versioned_file) => { match versioned_file.get_version(version) { Ok(definition) => DecodedPackedFile::DB(DB::new(&table, None, definition)), Err(error) => { CENTRAL_COMMAND.send_message_rust(Response::Error(error)); continue; } } } Err(error) => { CENTRAL_COMMAND.send_message_rust(Response::Error(error)); continue; } } }, NewPackedFile::Loc(_) => { match schema.get_ref_last_definition_loc() { Ok(definition) => DecodedPackedFile::Loc(Loc::new(definition)), Err(error) => { CENTRAL_COMMAND.send_message_rust(Response::Error(error)); continue; } } } NewPackedFile::Text(_, text_type) => { let mut packed_file = Text::new(); packed_file.set_text_type(text_type); DecodedPackedFile::Text(packed_file) }, }; let packed_file = PackedFile::new_from_decoded(&decoded, &path); match pack_file_decoded.add_packed_file(&packed_file, false) { Ok(_) => CENTRAL_COMMAND.send_message_rust(Response::Success), Err(error) => CENTRAL_COMMAND.send_message_rust(Response::Error(error)), } } else { CENTRAL_COMMAND.send_message_rust(Response::Error(ErrorKind::SchemaNotFound.into())); } } // When we want to add one or more PackedFiles to our PackFile. Command::AddPackedFiles((source_paths, destination_paths, paths_to_ignore)) => { let mut added_paths = vec![]; let mut it_broke = None; for (source_path, destination_path) in source_paths.iter().zip(destination_paths.iter()) { // Skip ignored paths. 
if let Some(ref paths_to_ignore) = paths_to_ignore { if paths_to_ignore.iter().any(|x| source_path.starts_with(x)) { continue; } } match pack_file_decoded.add_from_file(source_path, destination_path.to_vec(), true) { Ok(path) => added_paths.push(PathType::File(path.to_vec())), Err(error) => it_broke = Some(error), } } if let Some(error) = it_broke { CENTRAL_COMMAND.send_message_rust(Response::VecPathType(added_paths)); CENTRAL_COMMAND.send_message_rust(Response::Error(error)); } else { CENTRAL_COMMAND.send_message_rust(Response::VecPathType(added_paths)); CENTRAL_COMMAND.send_message_rust(Response::Success); } } // In case we want to add one or more entire folders to our PackFile... Command::AddPackedFilesFromFolder(paths, paths_to_ignore) => { match pack_file_decoded.add_from_folders(&paths, &paths_to_ignore, true) { Ok(paths) => CENTRAL_COMMAND.send_message_rust(Response::VecPathType(paths.iter().map(|x| PathType::File(x.to_vec())).collect())), Err(error) => CENTRAL_COMMAND.send_message_rust(Response::Error(error)), } } // In case we want to move stuff from one PackFile to another... Command::AddPackedFilesFromPackFile((pack_file_path, paths)) => { match pack_files_decoded_extra.get(&pack_file_path) { // Try to add the PackedFile to the main PackFile. Some(pack_file) => match pack_file_decoded.add_from_packfile(&pack_file, &paths, true) { Ok(paths) => CENTRAL_COMMAND.send_message_rust(Response::VecPathType(paths)), Err(error) => CENTRAL_COMMAND.send_message_rust(Response::Error(error)), } None => CENTRAL_COMMAND.send_message_rust(Response::Error(ErrorKind::CannotFindExtraPackFile(pack_file_path).into())), } } // In case we want to move stuff from our PackFile to an Animpack... 
Command::AddPackedFilesFromPackFileToAnimpack((anim_pack_path, paths)) => { let packed_files_to_add = pack_file_decoded.get_packed_files_by_path_type(&paths); match pack_file_decoded.get_ref_mut_packed_file_by_path(&anim_pack_path) { Some(packed_file) => { let packed_file_decoded = packed_file.get_ref_mut_decoded(); match packed_file_decoded { DecodedPackedFile::AnimPack(anim_pack) => match anim_pack.add_packed_files(&packed_files_to_add) { Ok(paths) => CENTRAL_COMMAND.send_message_rust(Response::VecPathType(paths)), Err(error) => CENTRAL_COMMAND.send_message_rust(Response::Error(error)), } _ => CENTRAL_COMMAND.send_message_rust(Response::Error(ErrorKind::PackedFileTypeIsNotWhatWeExpected(PackedFileType::AnimPack.to_string(), PackedFileType::from(&*packed_file_decoded).to_string()).into())), } } None => CENTRAL_COMMAND.send_message_rust(Response::Error(ErrorKind::PackedFileNotFound.into())), } } // In case we want to move stuff from an Animpack to our PackFile... Command::AddPackedFilesFromAnimpack((anim_pack_path, paths)) => { let packed_files_to_add = match pack_file_decoded.get_ref_packed_file_by_path(&anim_pack_path) { Some(ref packed_file) => { let packed_file_decoded = packed_file.get_ref_decoded(); match packed_file_decoded { DecodedPackedFile::AnimPack(anim_pack) => anim_pack.get_anim_packed_as_packed_files(&paths), _ => { CENTRAL_COMMAND.send_message_rust(Response::Error(ErrorKind::PackedFileTypeIsNotWhatWeExpected(PackedFileType::AnimPack.to_string(), PackedFileType::from(&*packed_file_decoded).to_string()).into())); continue; } } } None => { CENTRAL_COMMAND.send_message_rust(Response::Error(ErrorKind::PackedFileNotFound.into())); continue; }, }; let packed_files_to_add = packed_files_to_add.iter().collect::<Vec<&PackedFile>>(); match pack_file_decoded.add_packed_files(&packed_files_to_add, true) { Ok(paths) => CENTRAL_COMMAND.send_message_rust(Response::VecPathType(paths.iter().map(|x| PathType::File(x.to_vec())).collect())), Err(error) => 
CENTRAL_COMMAND.send_message_rust(Response::Error(error)), } } // In case we want to delete files from an Animpack... Command::DeleteFromAnimpack((anim_pack_path, paths)) => { match pack_file_decoded.get_ref_mut_packed_file_by_path(&anim_pack_path) { Some(packed_file) => { let packed_file_decoded = packed_file.get_ref_mut_decoded(); match packed_file_decoded { DecodedPackedFile::AnimPack(anim_pack) => { anim_pack.remove_packed_file_by_path_types(&paths); CENTRAL_COMMAND.send_message_rust(Response::Success); } _ => CENTRAL_COMMAND.send_message_rust(Response::Error(ErrorKind::PackedFileTypeIsNotWhatWeExpected(PackedFileType::AnimPack.to_string(), PackedFileType::from(&*packed_file_decoded).to_string()).into())), } } None => CENTRAL_COMMAND.send_message_rust(Response::Error(ErrorKind::PackedFileNotFound.into())), } } // In case we want to decode a RigidModel PackedFile... Command::DecodePackedFile(path) => { if path == [RESERVED_NAME_NOTES.to_owned()] { let mut note = Text::new(); note.set_text_type(TextType::Markdown); match pack_file_decoded.get_notes() { Some(notes) => { note.set_contents(notes); CENTRAL_COMMAND.send_message_rust(Response::Text(note)); } None => CENTRAL_COMMAND.send_message_rust(Response::Text(note)), } } else { // Find the PackedFile we want and send back the response. 
match pack_file_decoded.get_ref_mut_packed_file_by_path(&path) { Some(ref mut packed_file) => { match packed_file.decode_return_ref() { Ok(packed_file_data) => { match packed_file_data { DecodedPackedFile::AnimFragment(data) => CENTRAL_COMMAND.send_message_rust(Response::AnimFragmentPackedFileInfo((data.clone(), From::from(&**packed_file)))), DecodedPackedFile::AnimPack(data) => CENTRAL_COMMAND.send_message_rust(Response::AnimPackPackedFileInfo((data.get_as_pack_file_info(&path), From::from(&**packed_file)))), DecodedPackedFile::AnimTable(data) => CENTRAL_COMMAND.send_message_rust(Response::AnimTablePackedFileInfo((data.clone(), From::from(&**packed_file)))), DecodedPackedFile::CaVp8(data) => CENTRAL_COMMAND.send_message_rust(Response::CaVp8PackedFileInfo((data.clone(), From::from(&**packed_file)))), DecodedPackedFile::DB(table) => CENTRAL_COMMAND.send_message_rust(Response::DBPackedFileInfo((table.clone(), From::from(&**packed_file)))), DecodedPackedFile::Image(image) => CENTRAL_COMMAND.send_message_rust(Response::ImagePackedFileInfo((image.clone(), From::from(&**packed_file)))), DecodedPackedFile::Loc(table) => CENTRAL_COMMAND.send_message_rust(Response::LocPackedFileInfo((table.clone(), From::from(&**packed_file)))), DecodedPackedFile::MatchedCombat(data) => CENTRAL_COMMAND.send_message_rust(Response::MatchedCombatPackedFileInfo((data.clone(), From::from(&**packed_file)))), DecodedPackedFile::RigidModel(rigid_model) => CENTRAL_COMMAND.send_message_rust(Response::RigidModelPackedFileInfo((rigid_model.clone(), From::from(&**packed_file)))), DecodedPackedFile::Text(text) => CENTRAL_COMMAND.send_message_rust(Response::TextPackedFileInfo((text.clone(), From::from(&**packed_file)))), _ => CENTRAL_COMMAND.send_message_rust(Response::Unknown), } } Err(error) => CENTRAL_COMMAND.send_message_rust(Response::Error(error)), } } None => CENTRAL_COMMAND.send_message_rust(Response::Error(Error::from(ErrorKind::PackedFileNotFound))), } } } // When we want to save a PackedFile 
from the view.... Command::SavePackedFileFromView(path, decoded_packed_file) => { if path == [RESERVED_NAME_NOTES.to_owned()] { if let DecodedPackedFile::Text(data) = decoded_packed_file { let note = if data.get_ref_contents().is_empty() { None } else { Some(data.get_ref_contents().to_owned()) }; pack_file_decoded.set_notes(&note); } } else if let Some(packed_file) = pack_file_decoded.get_ref_mut_packed_file_by_path(&path) { *packed_file.get_ref_mut_decoded() = decoded_packed_file; } CENTRAL_COMMAND.send_message_save_packedfile(Response::Success); } // In case we want to delete PackedFiles from a PackFile... Command::DeletePackedFiles(item_types) => { CENTRAL_COMMAND.send_message_rust(Response::VecPathType(pack_file_decoded.remove_packed_files_by_type(&item_types))); } // In case we want to extract PackedFiles from a PackFile... Command::ExtractPackedFiles(item_types, path) => { match pack_file_decoded.extract_packed_files_by_type(&item_types, &path) { Ok(result) => CENTRAL_COMMAND.send_message_rust(Response::String(tre("files_extracted_success", &[&result.to_string()]))), Err(error) => CENTRAL_COMMAND.send_message_rust(Response::Error(error)), } } // In case we want to rename one or more PackedFiles... Command::RenamePackedFiles(renaming_data) => { CENTRAL_COMMAND.send_message_rust(Response::VecPathTypeVecString(pack_file_decoded.rename_packedfiles(&renaming_data, false))); } // In case we want to Mass-Import TSV Files... Command::MassImportTSV(paths, name) => { match pack_file_decoded.mass_import_tsv(&paths, name, true) { Ok(result) => CENTRAL_COMMAND.send_message_rust(Response::VecVecStringVecVecString(result)), Err(error) => CENTRAL_COMMAND.send_message_rust(Response::Error(error)), } } // In case we want to Mass-Export TSV Files... 
Command::MassExportTSV(path_types, path) => { match pack_file_decoded.mass_export_tsv(&path_types, &path) { Ok(result) => CENTRAL_COMMAND.send_message_rust(Response::String(result)), Err(error) => CENTRAL_COMMAND.send_message_rust(Response::Error(error)), } } // In case we want to know if a Folder exists, knowing his path... Command::FolderExists(path) => { CENTRAL_COMMAND.send_message_rust(Response::Bool(pack_file_decoded.folder_exists(&path))); } // In case we want to know if PackedFile exists, knowing his path... Command::PackedFileExists(path) => { CENTRAL_COMMAND.send_message_rust(Response::Bool(pack_file_decoded.packedfile_exists(&path))); } // In case we want to get the list of tables in the dependency database... Command::GetTableListFromDependencyPackFile => { let tables = (*dependencies.get_ref_dependency_database()).par_iter().filter(|x| x.get_path().len() > 2).filter(|x| x.get_path()[1].ends_with("_tables")).map(|x| x.get_path()[1].to_owned()).collect::<Vec<String>>(); CENTRAL_COMMAND.send_message_rust(Response::VecString(tables)); } // In case we want to get the version of an specific table from the dependency database... Command::GetTableVersionFromDependencyPackFile(table_name) => { if let Some(ref schema) = *SCHEMA.read().unwrap() { match schema.get_ref_last_definition_db(&table_name, &dependencies) { Ok(definition) => CENTRAL_COMMAND.send_message_rust(Response::I32(definition.get_version())), Err(error) => CENTRAL_COMMAND.send_message_rust(Response::Error(error)), } } else { CENTRAL_COMMAND.send_message_rust(Response::Error(ErrorKind::SchemaNotFound.into())); } } // In case we want to merge DB or Loc Tables from a PackFile... Command::MergeTables(paths, name, delete_source_files) => { match pack_file_decoded.merge_tables(&paths, &name, delete_source_files) { Ok(data) => CENTRAL_COMMAND.send_message_rust(Response::VecString(data)), Err(error) => CENTRAL_COMMAND.send_message_rust(Response::Error(error)), } } // In case we want to update a table... 
Command::UpdateTable(path_type) => { if let PathType::File(path) = path_type { if let Some(packed_file) = pack_file_decoded.get_ref_mut_packed_file_by_path(&path) { match packed_file.decode_return_ref_mut() { Ok(packed_file) => match packed_file.update_table(&dependencies) { Ok(data) => CENTRAL_COMMAND.send_message_rust(Response::I32I32(data)), Err(error) => CENTRAL_COMMAND.send_message_rust(Response::Error(error)), } Err(error) => CENTRAL_COMMAND.send_message_rust(Response::Error(error)), } } else { CENTRAL_COMMAND.send_message_rust(Response::Error(ErrorKind::PackedFileNotFound.into())); } } else { CENTRAL_COMMAND.send_message_rust(Response::Error(ErrorKind::PackedFileNotFound.into())); } } // In case we want to replace all matches in a Global Search... Command::GlobalSearchReplaceMatches(mut global_search, matches) => { let _ = global_search.replace_matches(&mut pack_file_decoded, &matches); let packed_files_info = global_search.get_results_packed_file_info(&mut pack_file_decoded); CENTRAL_COMMAND.send_message_rust(Response::GlobalSearchVecPackedFileInfo((global_search, packed_files_info))); } // In case we want to replace all matches in a Global Search... Command::GlobalSearchReplaceAll(mut global_search) => { let _ = global_search.replace_all(&mut pack_file_decoded); let packed_files_info = global_search.get_results_packed_file_info(&mut pack_file_decoded); CENTRAL_COMMAND.send_message_rust(Response::GlobalSearchVecPackedFileInfo((global_search, packed_files_info))); } // In case we want to get the reference data for a definition... Command::GetReferenceDataFromDefinition(table_name, definition, files_to_ignore) => { let dependency_data = DB::get_dependency_data( &pack_file_decoded, &table_name, &definition, &dependencies, &files_to_ignore, ); CENTRAL_COMMAND.send_message_rust(Response::BTreeMapI32DependencyData(dependency_data)); } // In case we want to return an entire PackedFile to the UI. 
Command::GetPackedFile(path) => CENTRAL_COMMAND.send_message_rust(Response::OptionPackedFile(pack_file_decoded.get_packed_file_by_path(&path))), // In case we want to change the format of a ca_vp8 video... Command::SetCaVp8Format((path, format)) => { match pack_file_decoded.get_ref_mut_packed_file_by_path(&path) { Some(ref mut packed_file) => { match packed_file.decode_return_ref_mut() { Ok(data) => { if let DecodedPackedFile::CaVp8(ref mut data) = data { data.set_format(format); } // TODO: Put an error here. } Err(error) => CENTRAL_COMMAND.send_message_rust(Response::Error(error)), } } None => CENTRAL_COMMAND.send_message_rust(Response::Error(Error::from(ErrorKind::PackedFileNotFound))), } }, // In case we want to save an schema to disk... Command::SaveSchema(mut schema) => { match schema.save(&SUPPORTED_GAMES.get(&**GAME_SELECTED.read().unwrap()).unwrap().schema) { Ok(_) => { *SCHEMA.write().unwrap() = Some(schema); CENTRAL_COMMAND.send_message_rust(Response::Success); }, Err(error) => CENTRAL_COMMAND.send_message_rust(Response::Error(error)), } } // In case we want to clean the cache of one or more PackedFiles... Command::CleanCache(paths) => { let mut packed_files = pack_file_decoded.get_ref_mut_packed_files_by_paths(paths.iter().map(|x| x.as_ref()).collect::<Vec<&[String]>>()); packed_files.iter_mut().for_each(|x| { let _ = x.encode_and_clean_cache(); }); } // In case we want to export a PackedFile as a TSV file... 
Command::ExportTSV((internal_path, external_path)) => { match pack_file_decoded.get_ref_mut_packed_file_by_path(&internal_path) { Some(packed_file) => match packed_file.get_decoded() { DecodedPackedFile::DB(data) => match data.export_tsv(&external_path, &internal_path[1]) { Ok(_) => CENTRAL_COMMAND.send_message_rust(Response::Success), Err(error) => CENTRAL_COMMAND.send_message_rust(Response::Error(error)), }, DecodedPackedFile::Loc(data) => match data.export_tsv(&external_path, &TSV_NAME_LOC) { Ok(_) => CENTRAL_COMMAND.send_message_rust(Response::Success), Err(error) => CENTRAL_COMMAND.send_message_rust(Response::Error(error)), }, /* DecodedPackedFile::DependencyPackFileList(data) => match data.export_tsv(&[external_path]) { Ok(_) => CENTRAL_COMMAND.send_message_rust(Response::Success), Err(error) => CENTRAL_COMMAND.send_message_rust(Response::Error(error)), },*/ _ => unimplemented!() } None => CENTRAL_COMMAND.send_message_rust(Response::Error(ErrorKind::PackedFileNotFound.into())), } } // In case we want to import a TSV as a PackedFile... 
Command::ImportTSV((internal_path, external_path)) => { match pack_file_decoded.get_ref_mut_packed_file_by_path(&internal_path) { Some(packed_file) => match packed_file.get_decoded() { DecodedPackedFile::DB(data) => match DB::import_tsv(&data.get_definition(), &external_path, &internal_path[1]) { Ok(data) => CENTRAL_COMMAND.send_message_rust(Response::TableType(TableType::DB(data))), Err(error) => CENTRAL_COMMAND.send_message_rust(Response::Error(error)), }, DecodedPackedFile::Loc(data) => match Loc::import_tsv(&data.get_definition(), &external_path, &TSV_NAME_LOC) { Ok(data) => CENTRAL_COMMAND.send_message_rust(Response::TableType(TableType::Loc(data))), Err(error) => CENTRAL_COMMAND.send_message_rust(Response::Error(error)), }, /* DecodedPackedFile::DependencyPackFileList(data) => match data.export_tsv(&[external_path]) { Ok(_) => CENTRAL_COMMAND.send_message_rust(Response::Success), Err(error) => CENTRAL_COMMAND.send_message_rust(Response::Error(error)), },*/ _ => unimplemented!() } None => CENTRAL_COMMAND.send_message_rust(Response::Error(ErrorKind::PackedFileNotFound.into())), } } // In case we want to open a PackFile's location in the file manager... Command::OpenContainingFolder => { // If the path exists, try to open it. If not, throw an error. if pack_file_decoded.get_file_path().exists() { let mut temp_path = pack_file_decoded.get_file_path().to_path_buf(); temp_path.pop(); if open::that(&temp_path).is_err() { CENTRAL_COMMAND.send_message_rust(Response::Error(ErrorKind::PackFileIsNotAFile.into())); } else { CENTRAL_COMMAND.send_message_rust(Response::Success); } } else { CENTRAL_COMMAND.send_message_rust(Response::Error(ErrorKind::PackFileIsNotAFile.into())); } }, // When we want to open a PackedFile in a external program... 
Command::OpenPackedFileInExternalProgram(path) => { match pack_file_decoded.get_ref_mut_packed_file_by_path(&path) { Some(packed_file) => { let extension = path.last().unwrap().rsplitn(2, '.').next().unwrap(); let name = format!("{}.{}", Uuid::new_v4(), extension); let mut temporal_file_path = temp_dir(); temporal_file_path.push(name); match packed_file.get_packed_file_type_by_path() { // Tables we extract them as TSV. PackedFileType::DB => { match packed_file.decode_return_clean_cache() { Ok(data) => { if let DecodedPackedFile::DB(data) = data { temporal_file_path.set_extension("tsv"); match data.export_tsv(&temporal_file_path, &path[1]) { Ok(_) => { that_in_background(&temporal_file_path); CENTRAL_COMMAND.send_message_rust(Response::PathBuf(temporal_file_path)); } Err(error) => CENTRAL_COMMAND.send_message_rust(Response::Error(error)), } } }, Err(error) => CENTRAL_COMMAND.send_message_rust(Response::Error(error)), } }, PackedFileType::Loc => { match packed_file.decode_return_clean_cache() { Ok(data) => { if let DecodedPackedFile::Loc(data) = data { temporal_file_path.set_extension("tsv"); match data.export_tsv(&temporal_file_path, &TSV_NAME_LOC) { Ok(_) => { that_in_background(&temporal_file_path); CENTRAL_COMMAND.send_message_rust(Response::PathBuf(temporal_file_path)); } Err(error) => CENTRAL_COMMAND.send_message_rust(Response::Error(error)), } } }, Err(error) => CENTRAL_COMMAND.send_message_rust(Response::Error(error)), } }, // The rest of the files, we extract them as we have them. 
_ => { match packed_file.get_raw_data_and_clean_cache() { Ok(data) => { match File::create(&temporal_file_path) { Ok(mut file) => { if file.write_all(&data).is_ok() { that_in_background(&temporal_file_path); CENTRAL_COMMAND.send_message_rust(Response::PathBuf(temporal_file_path)); } else { CENTRAL_COMMAND.send_message_rust(Response::Error(Error::from(ErrorKind::IOGenericWrite(vec![temporal_file_path.display().to_string();1])))); } } Err(_) => CENTRAL_COMMAND.send_message_rust(Response::Error(Error::from(ErrorKind::IOGenericWrite(vec![temporal_file_path.display().to_string();1])))), } } Err(error) => CENTRAL_COMMAND.send_message_rust(Response::Error(error)), } } } } None => CENTRAL_COMMAND.send_message_rust(Response::Error(ErrorKind::PackedFileNotFound.into())), } } // When we want to save a PackedFile from the external view.... Command::SavePackedFileFromExternalView((path, external_path)) => { match pack_file_decoded.get_ref_mut_packed_file_by_path(&path) { Some(packed_file) => { match packed_file.get_packed_file_type_by_path() { // Tables we extract them as TSV. 
PackedFileType::DB | PackedFileType::Loc => { match packed_file.decode_return_ref_mut() { Ok(data) => { if let DecodedPackedFile::DB(ref mut data) = data { match DB::import_tsv(&data.get_definition(), &external_path, &path[1]) { Ok(new_data) => { *data = new_data; match packed_file.encode_and_clean_cache() { Ok(_) => CENTRAL_COMMAND.send_message_save_packedfile(Response::Success), Err(error) => CENTRAL_COMMAND.send_message_save_packedfile(Response::Error(error)), } } Err(error) => CENTRAL_COMMAND.send_message_save_packedfile(Response::Error(error)), } } else if let DecodedPackedFile::Loc(ref mut data) = data { match Loc::import_tsv(&data.get_definition(), &external_path, &TSV_NAME_LOC) { Ok(new_data) => { *data = new_data; match packed_file.encode_and_clean_cache() { Ok(_) => CENTRAL_COMMAND.send_message_save_packedfile(Response::Success), Err(error) => CENTRAL_COMMAND.send_message_save_packedfile(Response::Error(error)), } } Err(error) => CENTRAL_COMMAND.send_message_save_packedfile(Response::Error(error)), } } else { unimplemented!() } }, Err(error) => CENTRAL_COMMAND.send_message_save_packedfile(Response::Error(error)), } }, _ => { match File::open(external_path) { Ok(mut file) => { let mut data = vec![]; match file.read_to_end(&mut data) { Ok(_) => { packed_file.set_raw_data(&data); CENTRAL_COMMAND.send_message_save_packedfile(Response::Success); } Err(_) => CENTRAL_COMMAND.send_message_save_packedfile(Response::Error(ErrorKind::IOGeneric.into())), } } Err(_) => CENTRAL_COMMAND.send_message_save_packedfile(Response::Error(ErrorKind::IOGeneric.into())), } } } } None => CENTRAL_COMMAND.send_message_save_packedfile(Response::Error(ErrorKind::PackedFileNotFound.into())), } } // When we want to apply a template over the open PackFile... 
Command::ApplyTemplate(mut template, options, params, is_custom) => { match template.apply_template(&options, &params, &mut pack_file_decoded, &dependencies, is_custom) { Ok(result) => CENTRAL_COMMAND.send_message_rust(Response::VecVecString(result)), Err(error) => CENTRAL_COMMAND.send_message_rust(Response::Error(error)), } } // When we want to apply a template over the open PackFile... Command::SaveTemplate(mut template) => { match template.save_from_packfile(&mut pack_file_decoded) { Ok(_) => CENTRAL_COMMAND.send_message_rust(Response::Success), Err(error) => CENTRAL_COMMAND.send_message_rust(Response::Error(error)), } } // When we want to update the templates.. Command::UpdateTemplates => { match Template::update() { Ok(_) => CENTRAL_COMMAND.send_message_rust(Response::Success), Err(error) => CENTRAL_COMMAND.send_message_rust(Response::Error(error)), } } // When we want to update our schemas... Command::UpdateSchemas => { match Schema::update_schema_repo() { // If it worked, we have to update the currently open schema with the one we just downloaded and rebuild cache/dependencies with it. Ok(_) => { CENTRAL_COMMAND.send_message_rust(Response::Success); let game_selected = GAME_SELECTED.read().unwrap().to_owned(); pack_file_decoded.get_ref_mut_packed_files_by_type(PackedFileType::DB, false).par_iter_mut().for_each(|x| { let _ = x.encode_and_clean_cache(); }); *SCHEMA.write().unwrap() = Schema::load(&SUPPORTED_GAMES.get(&*game_selected).unwrap().schema).ok(); if let Some(ref schema) = *SCHEMA.read().unwrap() { pack_file_decoded.get_ref_mut_packed_files_by_type(PackedFileType::DB, false).par_iter_mut().for_each(|x| { let _ = x.decode_no_locks(&schema); }); } dependencies.rebuild(pack_file_decoded.get_packfiles_list()); }, Err(error) => CENTRAL_COMMAND.send_message_rust(Response::Error(error)), } } // When we want to update our program... 
Command::UpdateMainProgram => { match rpfm_lib::updater::update_main_program() { Ok(_) => CENTRAL_COMMAND.send_message_rust(Response::Success), Err(error) => CENTRAL_COMMAND.send_message_rust(Response::Error(error)), } } // When we want to update our program... Command::TriggerBackupAutosave => { match get_oldest_file_in_folder(&get_backup_autosave_path().unwrap()) { Ok(file) => match file { Some(file) => { match pack_file_decoded.clone().save(Some(file)) { Ok(_) => CENTRAL_COMMAND.send_message_notification_to_qt(Notification::Done), Err(error) => CENTRAL_COMMAND.send_message_notification_to_qt(Notification::Error(Error::from(ErrorKind::SavePackFileGeneric(error.to_string())))), } } None => CENTRAL_COMMAND.send_message_notification_to_qt(Notification::Error(Error::from(ErrorKind::SavePackFileGeneric("No autosave files found.".to_owned())))), } Err(_) => CENTRAL_COMMAND.send_message_notification_to_qt(Notification::Error(Error::from(ErrorKind::SavePackFileGeneric("No autosave files found.".to_string())))), } } // In case we want to "Open one or more PackFiles"... Command::DiagnosticsCheck => { thread::spawn(clone!( mut dependencies, mut pack_file_decoded => move || { let mut diag = Diagnostics::default(); if pack_file_decoded.get_pfh_file_type() == PFHFileType::Mod || pack_file_decoded.get_pfh_file_type() == PFHFileType::Movie { diag.check(&pack_file_decoded, &dependencies); } CENTRAL_COMMAND.send_message_diagnostics_to_qt(diag); })); } // In case we want to "Open one or more PackFiles"... Command::DiagnosticsUpdate((mut diagnostics, path_types)) => { diagnostics.update(&pack_file_decoded, &path_types, &dependencies); let packed_files_info = diagnostics.get_update_paths_packed_file_info(&pack_file_decoded, &path_types); CENTRAL_COMMAND.send_message_diagnostics_update_to_qt((diagnostics, packed_files_info)); } // In case we want to get the open PackFile's Settings... 
Command::GetPackFileSettings => { CENTRAL_COMMAND.send_message_rust(Response::PackFileSettings(pack_file_decoded.get_settings().clone())); } Command::SetPackFileSettings(settings) => { pack_file_decoded.set_settings(&settings); } Command::GetDefinitionList => { let tables = pack_file_decoded.get_ref_packed_files_by_types(&[PackedFileType::DB, PackedFileType::Loc], false); let definitions = tables.iter().filter_map(|x| x.get_decoded_from_memory().ok()).filter_map(|y| match y { DecodedPackedFile::DB(table) => Some((table.get_table_name(), table.get_definition())), DecodedPackedFile::Loc(table) => Some(("loc".to_string(), table.get_definition())), _ => None, }).collect::<Vec<(String, Definition)>>(); CENTRAL_COMMAND.send_message_rust(Response::VecStringDefinition(definitions)); } Command::GetMissingDefinitions => { // Test to see if every DB Table can be decoded. This is slow and only useful when // a new patch lands and you want to know what tables you need to decode. So, unless you want // to decode new tables, leave the setting as false. if SETTINGS.read().unwrap().settings_bool["check_for_missing_table_definitions"] { let mut counter = 0; let mut table_list = String::new(); if let Some(ref schema) = *SCHEMA.read().unwrap() { for packed_file in pack_file_decoded.get_ref_mut_packed_files_by_type(PackedFileType::DB, false) { if packed_file.decode_return_ref_no_locks(schema).is_err() { if let Ok(raw_data) = packed_file.get_raw_data() { if let Ok((_, _, _, entry_count, _)) = DB::read_header(&raw_data) { if entry_count > 0 { counter += 1; table_list.push_str(&format!("{}, {:?}\n", counter, packed_file.get_path())) } } } } } } // Try to save the file. 
let path = RPFM_PATH.to_path_buf().join(PathBuf::from("missing_table_definitions.txt")); let mut file = BufWriter::new(File::create(path).unwrap()); file.write_all(table_list.as_bytes()).unwrap(); } } Command::RebuildDependencies => dependencies.rebuild(pack_file_decoded.get_packfiles_list()), Command::CascadeEdition(editions) => { let edited_paths = DB::cascade_edition(&editions, &mut pack_file_decoded); let edited_paths_2 = edited_paths.iter().map(|x| &**x).collect::<Vec<&[String]>>(); let packed_files_info = pack_file_decoded.get_ref_packed_files_by_paths(edited_paths_2).iter().map(|x| PackedFileInfo::from(*x)).collect::<Vec<PackedFileInfo>>(); CENTRAL_COMMAND.send_message_rust(Response::VecVecStringVecPackedFileInfo(edited_paths, packed_files_info)); } Command::GoToDefinition(ref_table, ref_column, ref_data) => { let packed_files = pack_file_decoded.get_ref_packed_files_by_path_start(&["db".to_owned(), ref_table + "_tables"]); let mut found = false; for packed_file in &packed_files { if let Ok(DecodedPackedFile::DB(data)) = packed_file.get_decoded_from_memory() { if let Some((column_index, row_index)) = data.get_ref_table().get_source_location_of_reference_data(&ref_column, &ref_data) { CENTRAL_COMMAND.send_message_rust(Response::VecStringUsizeUsize(packed_file.get_path().to_vec(), column_index, row_index)); found = true; break; } } } if !found { CENTRAL_COMMAND.send_message_rust(Response::Error(ErrorKind::GeneticHTMLError(tr("source_data_for_field_not_found")).into())); } }, Command::GoToLoc(loc_key) => { let packed_files = pack_file_decoded.get_ref_packed_files_by_type(PackedFileType::Loc, false); let mut found = false; for packed_file in &packed_files { if let Ok(DecodedPackedFile::Loc(data)) = packed_file.get_decoded_from_memory() { if let Some((column_index, row_index)) = data.get_ref_table().get_source_location_of_reference_data("key", &loc_key) { CENTRAL_COMMAND.send_message_rust(Response::VecStringUsizeUsize(packed_file.get_path().to_vec(), 
column_index, row_index)); found = true; break; } } } if !found { CENTRAL_COMMAND.send_message_rust(Response::Error(ErrorKind::GeneticHTMLError(tr("loc_key_not_found")).into())); } }, Command::GetSourceDataFromLocKey(loc_key) => CENTRAL_COMMAND.send_message_rust(Response::OptionStringStringString(Loc::get_source_location_of_loc_key(&loc_key, &dependencies))), // These two belong to the network thread, not to this one!!!! Command::CheckUpdates | Command::CheckSchemaUpdates | Command::CheckTemplateUpdates => panic!("{}{:?}", THREADS_COMMUNICATION_ERROR, response), } } }
59.265478
232
0.533343
1d87702231d3e8834c56a4df5d2c0e911ed5b8b5
332
// usage: cargo run --release --example ns_btreemap_string use std::collections::BTreeMap; fn main() -> Result<(), fasteval::Error> { let mut map : BTreeMap<String,f64> = BTreeMap::new(); map.insert("x".to_string(), 2.0); let val = fasteval::ez_eval("x * (x + 1)", &mut map)?; assert_eq!(val, 6.0); Ok(()) }
25.538462
59
0.599398
2237663b539df0e29b75bab595794b55348999cf
1,441
use serde::{Deserialize, Serialize}; use std::collections::HashMap; use std::path::{Path, PathBuf}; /// TODO(doc): @doitian #[derive(Clone, Debug, Default, Serialize, Deserialize)] pub struct Config { /// TODO(doc): @doitian #[serde(default)] pub path: PathBuf, /// The capacity of RocksDB cache, which caches uncompressed data blocks, indexes and filters, default is 128MB #[serde(default)] pub cache_size: Option<usize>, /// TODO(doc): @doitian #[serde(default)] pub options: HashMap<String, String>, /// TODO(doc): @doitian pub options_file: Option<PathBuf>, } impl Config { /// TODO(doc): @doitian pub fn adjust<P: AsRef<Path>>(&mut self, root_dir: &Path, data_dir: P, name: &str) { // If path is not set, use the default path if self.path.to_str().is_none() || self.path.to_str() == Some("") { self.path = data_dir.as_ref().to_path_buf().join(name); } else if self.path.is_relative() { // If the path is relative, set the base path to `ckb.toml` self.path = root_dir.to_path_buf().join(&self.path) } // If options file is a relative path, set the base path to `ckb.toml` if let Some(file) = self.options_file.iter_mut().next() { if file.is_relative() { let file_new = root_dir.to_path_buf().join(&file); *file = file_new; } } } }
36.025
115
0.600278
14b53520e3df7b72ace986c261775b8f196f28aa
3,315
//! where it all starts use std::env; use std::fs; use crate::{engine, floor::Floor, profile::Profile, starter, ui, Player}; /// This is exposed to the [`Player`](crate::player::Player) to get things /// started. Their profile is loaded (from .profile) and then the /// [`engine`](crate::engine) is fired up. If the current level is /// completed successfully, then the README.md file and their profile are /// updated. pub struct Game { pub profile: Profile, } impl Default for Game { fn default() -> Game { // TODO: epic mode? let profile = load_profile(); Game { profile } } } impl Game { pub fn new() -> Game { Game::default() } /// The main entry point when playing the game. /// /// After loading the player profile and initializing the current /// level, the game consists of repeatedly calling `play_turn` /// on the player's `Player` instance. pub fn play(player_generator: fn() -> Box<dyn Player + Send + Sync>) { let mut game = Game::new(); game.start(player_generator); } fn start(&mut self, player_generator: fn() -> Box<dyn Player + Send + Sync>) { let level; if self.profile.maximus_oxidus { println!("Now that you have earned the title Maximus Oxidus, you may choose to hone your skills on any level."); level = ui::select_level(); starter::write_readme(&self.profile, level, None); println!("See (updated) README.md for level {} instructions.", level); } else { level = self.profile.level; } println!("Starting Level {}", level); let floor = Floor::load(level); match engine::start( self.profile.name.clone(), self.profile.level, floor, player_generator, ) { Ok(_) => { self.level_completed(); } Err(err) => { println!("{}", err); } } } fn level_completed(&mut self) { // TODO: tally points if self.profile.maximus_oxidus { println!("Success! You have found the stairs."); } else if Floor::exists(self.profile.level + 1) { println!("Success! 
You have found the stairs."); if env::var("NO_PROMPT").is_ok() { return; } if ui::ask("Would you like to continue on to the next level?") { self.profile.increment_level(); starter::write_readme(&self.profile, self.profile.level, None); starter::write_profile(&self.profile, None); println!("See (updated) README.md for your next instructions."); } else { // TODO: "Try to earn more points next time." println!("Staying on current level."); } } else { println!("CONGRATULATIONS! You have climbed to the top of the tower and have earned the title Maximus Oxidus."); self.profile.maximus_oxidus = true; starter::write_profile(&self.profile, None); } } } fn load_profile() -> Profile { let contents = fs::read_to_string(".profile").expect("error loading .profile"); Profile::from_toml(&contents) }
33.826531
124
0.565008
89f774fc0d77be5c36f547f21dddeb92aefab420
2,697
use error_chain::error_chain; error_chain! { types { CommitError, CommitErrorKind, ResultExt, Result; } links { CommsError(super::CommsError, super::CommsErrorKind); ValidationError(super::ValidationError, super::ValidationErrorKind); TransformError(super::TransformError, super::TransformErrorKind); LintError(super::LintError, super::LintErrorKind); TimeError(super::TimeError, super::TimeErrorKind); SinkError(super::SinkError, super::SinkErrorKind); SerializationError(super::SerializationError, super::SerializationErrorKind); } foreign_links { IO(::tokio::io::Error); } errors { Aborted { description("the transaction aborted before it could be completed"), display("the transaction aborted before it could be completed"), } Timeout(elapsed: String) { description("the transaction has timed out"), display("the transaction has timed out after {}", elapsed), } ReadOnly { description("the chain of trust is currently read only"), display("the chain of trust is currently read only") } LockError(err: super::CommsErrorKind) { description("failed to lock the data due to an error in communication"), display("failed to lock the data due to an error in communication - {}", err.to_string()), } NewRootsAreDisabled { description("new root objects are currently not allowed for this chain"), display("new root objects are currently not allowed for this chain"), } PipeError(err: String) { description("failed to commit the data due to an error receiving the result in the interprocess pipe"), display("failed to commit the data due to an error receiving the result in the interprocess pipe - {}", err.to_string()), } RootError(err: String) { description("failed to commit the data due to an error at the root server while processing the events"), display("failed to commit the data due to an error at the root server while processing the events - {}", err.to_string()), } } } impl<T> From<tokio::sync::mpsc::error::SendError<T>> for CommitError { fn from(err: tokio::sync::mpsc::error::SendError<T>) 
-> CommitError { CommitErrorKind::PipeError(err.to_string()).into() } } impl<T> From<tokio::sync::broadcast::error::SendError<T>> for CommitError { fn from(err: tokio::sync::broadcast::error::SendError<T>) -> CommitError { CommitErrorKind::PipeError(err.to_string()).into() } }
43.5
134
0.644049
f4f65481a8b325909881d914f1db5c9f0af03091
151,131
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct UpdateScheduleOutput { /// <p>The name of the schedule that was updated.</p> pub name: std::option::Option<std::string::String>, } impl std::fmt::Debug for UpdateScheduleOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("UpdateScheduleOutput"); formatter.field("name", &self.name); formatter.finish() } } /// See [`UpdateScheduleOutput`](crate::output::UpdateScheduleOutput) pub mod update_schedule_output { /// A builder for [`UpdateScheduleOutput`](crate::output::UpdateScheduleOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) name: std::option::Option<std::string::String>, } impl Builder { /// <p>The name of the schedule that was updated.</p> pub fn name(mut self, input: impl Into<std::string::String>) -> Self { self.name = Some(input.into()); self } pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.name = input; self } /// Consumes the builder and constructs a [`UpdateScheduleOutput`](crate::output::UpdateScheduleOutput) pub fn build(self) -> crate::output::UpdateScheduleOutput { crate::output::UpdateScheduleOutput { name: self.name } } } } impl UpdateScheduleOutput { /// Creates a new builder-style object to manufacture [`UpdateScheduleOutput`](crate::output::UpdateScheduleOutput) pub fn builder() -> crate::output::update_schedule_output::Builder { crate::output::update_schedule_output::Builder::default() } } #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct UpdateRecipeJobOutput { /// <p>The name of the job that you updated.</p> pub name: std::option::Option<std::string::String>, } impl std::fmt::Debug for UpdateRecipeJobOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> 
std::fmt::Result { let mut formatter = f.debug_struct("UpdateRecipeJobOutput"); formatter.field("name", &self.name); formatter.finish() } } /// See [`UpdateRecipeJobOutput`](crate::output::UpdateRecipeJobOutput) pub mod update_recipe_job_output { /// A builder for [`UpdateRecipeJobOutput`](crate::output::UpdateRecipeJobOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) name: std::option::Option<std::string::String>, } impl Builder { /// <p>The name of the job that you updated.</p> pub fn name(mut self, input: impl Into<std::string::String>) -> Self { self.name = Some(input.into()); self } pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.name = input; self } /// Consumes the builder and constructs a [`UpdateRecipeJobOutput`](crate::output::UpdateRecipeJobOutput) pub fn build(self) -> crate::output::UpdateRecipeJobOutput { crate::output::UpdateRecipeJobOutput { name: self.name } } } } impl UpdateRecipeJobOutput { /// Creates a new builder-style object to manufacture [`UpdateRecipeJobOutput`](crate::output::UpdateRecipeJobOutput) pub fn builder() -> crate::output::update_recipe_job_output::Builder { crate::output::update_recipe_job_output::Builder::default() } } #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct UpdateRecipeOutput { /// <p>The name of the recipe that was updated.</p> pub name: std::option::Option<std::string::String>, } impl std::fmt::Debug for UpdateRecipeOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("UpdateRecipeOutput"); formatter.field("name", &self.name); formatter.finish() } } /// See [`UpdateRecipeOutput`](crate::output::UpdateRecipeOutput) pub mod update_recipe_output { /// A builder for [`UpdateRecipeOutput`](crate::output::UpdateRecipeOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, 
std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) name: std::option::Option<std::string::String>, } impl Builder { /// <p>The name of the recipe that was updated.</p> pub fn name(mut self, input: impl Into<std::string::String>) -> Self { self.name = Some(input.into()); self } pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.name = input; self } /// Consumes the builder and constructs a [`UpdateRecipeOutput`](crate::output::UpdateRecipeOutput) pub fn build(self) -> crate::output::UpdateRecipeOutput { crate::output::UpdateRecipeOutput { name: self.name } } } } impl UpdateRecipeOutput { /// Creates a new builder-style object to manufacture [`UpdateRecipeOutput`](crate::output::UpdateRecipeOutput) pub fn builder() -> crate::output::update_recipe_output::Builder { crate::output::update_recipe_output::Builder::default() } } #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct UpdateProjectOutput { /// <p>The date and time that the project was last modified.</p> pub last_modified_date: std::option::Option<smithy_types::Instant>, /// <p>The name of the project that you updated.</p> pub name: std::option::Option<std::string::String>, } impl std::fmt::Debug for UpdateProjectOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("UpdateProjectOutput"); formatter.field("last_modified_date", &self.last_modified_date); formatter.field("name", &self.name); formatter.finish() } } /// See [`UpdateProjectOutput`](crate::output::UpdateProjectOutput) pub mod update_project_output { /// A builder for [`UpdateProjectOutput`](crate::output::UpdateProjectOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) last_modified_date: std::option::Option<smithy_types::Instant>, pub(crate) name: std::option::Option<std::string::String>, } impl Builder { /// 
<p>The date and time that the project was last modified.</p> pub fn last_modified_date(mut self, input: smithy_types::Instant) -> Self { self.last_modified_date = Some(input); self } pub fn set_last_modified_date( mut self, input: std::option::Option<smithy_types::Instant>, ) -> Self { self.last_modified_date = input; self } /// <p>The name of the project that you updated.</p> pub fn name(mut self, input: impl Into<std::string::String>) -> Self { self.name = Some(input.into()); self } pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.name = input; self } /// Consumes the builder and constructs a [`UpdateProjectOutput`](crate::output::UpdateProjectOutput) pub fn build(self) -> crate::output::UpdateProjectOutput { crate::output::UpdateProjectOutput { last_modified_date: self.last_modified_date, name: self.name, } } } } impl UpdateProjectOutput { /// Creates a new builder-style object to manufacture [`UpdateProjectOutput`](crate::output::UpdateProjectOutput) pub fn builder() -> crate::output::update_project_output::Builder { crate::output::update_project_output::Builder::default() } } #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct UpdateProfileJobOutput { /// <p>The name of the job that was updated.</p> pub name: std::option::Option<std::string::String>, } impl std::fmt::Debug for UpdateProfileJobOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("UpdateProfileJobOutput"); formatter.field("name", &self.name); formatter.finish() } } /// See [`UpdateProfileJobOutput`](crate::output::UpdateProfileJobOutput) pub mod update_profile_job_output { /// A builder for [`UpdateProfileJobOutput`](crate::output::UpdateProfileJobOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) name: std::option::Option<std::string::String>, } impl Builder { /// <p>The 
name of the job that was updated.</p> pub fn name(mut self, input: impl Into<std::string::String>) -> Self { self.name = Some(input.into()); self } pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.name = input; self } /// Consumes the builder and constructs a [`UpdateProfileJobOutput`](crate::output::UpdateProfileJobOutput) pub fn build(self) -> crate::output::UpdateProfileJobOutput { crate::output::UpdateProfileJobOutput { name: self.name } } } } impl UpdateProfileJobOutput { /// Creates a new builder-style object to manufacture [`UpdateProfileJobOutput`](crate::output::UpdateProfileJobOutput) pub fn builder() -> crate::output::update_profile_job_output::Builder { crate::output::update_profile_job_output::Builder::default() } } #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct UpdateDatasetOutput { /// <p>The name of the dataset that you updated.</p> pub name: std::option::Option<std::string::String>, } impl std::fmt::Debug for UpdateDatasetOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("UpdateDatasetOutput"); formatter.field("name", &self.name); formatter.finish() } } /// See [`UpdateDatasetOutput`](crate::output::UpdateDatasetOutput) pub mod update_dataset_output { /// A builder for [`UpdateDatasetOutput`](crate::output::UpdateDatasetOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) name: std::option::Option<std::string::String>, } impl Builder { /// <p>The name of the dataset that you updated.</p> pub fn name(mut self, input: impl Into<std::string::String>) -> Self { self.name = Some(input.into()); self } pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.name = input; self } /// Consumes the builder and constructs a [`UpdateDatasetOutput`](crate::output::UpdateDatasetOutput) pub fn 
build(self) -> crate::output::UpdateDatasetOutput { crate::output::UpdateDatasetOutput { name: self.name } } } } impl UpdateDatasetOutput { /// Creates a new builder-style object to manufacture [`UpdateDatasetOutput`](crate::output::UpdateDatasetOutput) pub fn builder() -> crate::output::update_dataset_output::Builder { crate::output::update_dataset_output::Builder::default() } } #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct UntagResourceOutput {} impl std::fmt::Debug for UntagResourceOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("UntagResourceOutput"); formatter.finish() } } /// See [`UntagResourceOutput`](crate::output::UntagResourceOutput) pub mod untag_resource_output { /// A builder for [`UntagResourceOutput`](crate::output::UntagResourceOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder {} impl Builder { /// Consumes the builder and constructs a [`UntagResourceOutput`](crate::output::UntagResourceOutput) pub fn build(self) -> crate::output::UntagResourceOutput { crate::output::UntagResourceOutput {} } } } impl UntagResourceOutput { /// Creates a new builder-style object to manufacture [`UntagResourceOutput`](crate::output::UntagResourceOutput) pub fn builder() -> crate::output::untag_resource_output::Builder { crate::output::untag_resource_output::Builder::default() } } #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct TagResourceOutput {} impl std::fmt::Debug for TagResourceOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("TagResourceOutput"); formatter.finish() } } /// See [`TagResourceOutput`](crate::output::TagResourceOutput) pub mod tag_resource_output { /// A builder for [`TagResourceOutput`](crate::output::TagResourceOutput) #[non_exhaustive] #[derive(std::default::Default, 
std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {}
    impl Builder {
        /// Consumes the builder and constructs a [`TagResourceOutput`](crate::output::TagResourceOutput)
        pub fn build(self) -> crate::output::TagResourceOutput {
            crate::output::TagResourceOutput {}
        }
    }
}
impl TagResourceOutput {
    /// Creates a new builder-style object to manufacture [`TagResourceOutput`](crate::output::TagResourceOutput)
    pub fn builder() -> crate::output::tag_resource_output::Builder {
        crate::output::tag_resource_output::Builder::default()
    }
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct StopJobRunOutput {
    /// <p>The ID of the job run that you stopped.</p>
    pub run_id: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for StopJobRunOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("StopJobRunOutput");
        formatter.field("run_id", &self.run_id);
        formatter.finish()
    }
}
/// See [`StopJobRunOutput`](crate::output::StopJobRunOutput)
pub mod stop_job_run_output {
    /// A builder for [`StopJobRunOutput`](crate::output::StopJobRunOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) run_id: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// <p>The ID of the job run that you stopped.</p>
        pub fn run_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.run_id = Some(input.into());
            self
        }
        /// Sets `run_id` directly; overrides any value previously set via `run_id`.
        pub fn set_run_id(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.run_id = input;
            self
        }
        /// Consumes the builder and constructs a [`StopJobRunOutput`](crate::output::StopJobRunOutput)
        pub fn build(self) -> crate::output::StopJobRunOutput {
            crate::output::StopJobRunOutput {
                run_id: self.run_id,
            }
        }
    }
}
impl StopJobRunOutput {
    /// Creates a new builder-style object to manufacture [`StopJobRunOutput`](crate::output::StopJobRunOutput)
    pub fn builder() -> crate::output::stop_job_run_output::Builder {
        crate::output::stop_job_run_output::Builder::default()
    }
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct StartProjectSessionOutput {
    /// <p>The name of the project to be acted upon.</p>
    pub name: std::option::Option<std::string::String>,
    /// <p>A system-generated identifier for the session.</p>
    pub client_session_id: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for StartProjectSessionOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("StartProjectSessionOutput");
        formatter.field("name", &self.name);
        formatter.field("client_session_id", &self.client_session_id);
        formatter.finish()
    }
}
/// See [`StartProjectSessionOutput`](crate::output::StartProjectSessionOutput)
pub mod start_project_session_output {
    /// A builder for [`StartProjectSessionOutput`](crate::output::StartProjectSessionOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) name: std::option::Option<std::string::String>,
        pub(crate) client_session_id: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// <p>The name of the project to be acted upon.</p>
        pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
            self.name = Some(input.into());
            self
        }
        /// Sets `name` directly; overrides any value previously set via `name`.
        pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.name = input;
            self
        }
        /// <p>A system-generated identifier for the session.</p>
        pub fn client_session_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.client_session_id = Some(input.into());
            self
        }
        /// Sets `client_session_id` directly; overrides any value previously set via `client_session_id`.
        pub fn set_client_session_id(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.client_session_id = input;
            self
        }
        /// Consumes the builder and constructs a [`StartProjectSessionOutput`](crate::output::StartProjectSessionOutput)
        pub fn build(self) -> crate::output::StartProjectSessionOutput {
            crate::output::StartProjectSessionOutput {
                name: self.name,
                client_session_id: self.client_session_id,
            }
        }
    }
}
impl StartProjectSessionOutput {
    /// Creates a new builder-style object to manufacture [`StartProjectSessionOutput`](crate::output::StartProjectSessionOutput)
    pub fn builder() -> crate::output::start_project_session_output::Builder {
        crate::output::start_project_session_output::Builder::default()
    }
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct StartJobRunOutput {
    /// <p>A system-generated identifier for this particular job run.</p>
    pub run_id: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for StartJobRunOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("StartJobRunOutput");
        formatter.field("run_id", &self.run_id);
        formatter.finish()
    }
}
/// See [`StartJobRunOutput`](crate::output::StartJobRunOutput)
pub mod start_job_run_output {
    /// A builder for [`StartJobRunOutput`](crate::output::StartJobRunOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) run_id: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// <p>A system-generated identifier for this particular job run.</p>
        pub fn run_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.run_id = Some(input.into());
            self
        }
        /// Sets `run_id` directly; overrides any value previously set via `run_id`.
        pub fn set_run_id(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.run_id = input;
            self
        }
        /// Consumes the builder and constructs a [`StartJobRunOutput`](crate::output::StartJobRunOutput)
        pub fn build(self) -> crate::output::StartJobRunOutput {
            crate::output::StartJobRunOutput {
                run_id: self.run_id,
            }
        }
    }
}
impl StartJobRunOutput {
    /// Creates a new builder-style object to manufacture [`StartJobRunOutput`](crate::output::StartJobRunOutput)
    pub fn builder() -> 
crate::output::start_job_run_output::Builder {
        crate::output::start_job_run_output::Builder::default()
    }
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct SendProjectSessionActionOutput {
    /// <p>A message indicating the result of performing the action.</p>
    pub result: std::option::Option<std::string::String>,
    /// <p>The name of the project that was affected by the action.</p>
    pub name: std::option::Option<std::string::String>,
    /// <p>A unique identifier for the action that was performed.</p>
    pub action_id: std::option::Option<i32>,
}
impl std::fmt::Debug for SendProjectSessionActionOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("SendProjectSessionActionOutput");
        formatter.field("result", &self.result);
        formatter.field("name", &self.name);
        formatter.field("action_id", &self.action_id);
        formatter.finish()
    }
}
/// See [`SendProjectSessionActionOutput`](crate::output::SendProjectSessionActionOutput)
pub mod send_project_session_action_output {
    /// A builder for [`SendProjectSessionActionOutput`](crate::output::SendProjectSessionActionOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) result: std::option::Option<std::string::String>,
        pub(crate) name: std::option::Option<std::string::String>,
        pub(crate) action_id: std::option::Option<i32>,
    }
    impl Builder {
        /// <p>A message indicating the result of performing the action.</p>
        pub fn result(mut self, input: impl Into<std::string::String>) -> Self {
            self.result = Some(input.into());
            self
        }
        /// Sets `result` directly; overrides any value previously set via `result`.
        pub fn set_result(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.result = input;
            self
        }
        /// <p>The name of the project that was affected by the action.</p>
        pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
            self.name = Some(input.into());
            self
        }
        /// Sets `name` directly; overrides any value previously set via `name`.
        pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.name = input;
            self
        }
        /// <p>A unique identifier for the action that was performed.</p>
        pub fn action_id(mut self, input: i32) -> Self {
            self.action_id = Some(input);
            self
        }
        /// Sets `action_id` directly; overrides any value previously set via `action_id`.
        pub fn set_action_id(mut self, input: std::option::Option<i32>) -> Self {
            self.action_id = input;
            self
        }
        /// Consumes the builder and constructs a [`SendProjectSessionActionOutput`](crate::output::SendProjectSessionActionOutput)
        pub fn build(self) -> crate::output::SendProjectSessionActionOutput {
            crate::output::SendProjectSessionActionOutput {
                result: self.result,
                name: self.name,
                action_id: self.action_id,
            }
        }
    }
}
impl SendProjectSessionActionOutput {
    /// Creates a new builder-style object to manufacture [`SendProjectSessionActionOutput`](crate::output::SendProjectSessionActionOutput)
    pub fn builder() -> crate::output::send_project_session_action_output::Builder {
        crate::output::send_project_session_action_output::Builder::default()
    }
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct PublishRecipeOutput {
    /// <p>The name of the recipe that you published.</p>
    pub name: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for PublishRecipeOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("PublishRecipeOutput");
        formatter.field("name", &self.name);
        formatter.finish()
    }
}
/// See [`PublishRecipeOutput`](crate::output::PublishRecipeOutput)
pub mod publish_recipe_output {
    /// A builder for [`PublishRecipeOutput`](crate::output::PublishRecipeOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) name: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// <p>The name of the recipe that you published.</p>
        pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
            self.name = Some(input.into());
            self
        }
        /// Sets `name` directly; overrides any value previously set via `name`.
        pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.name = input;
            self
        }
        /// Consumes the builder and constructs a [`PublishRecipeOutput`](crate::output::PublishRecipeOutput)
        pub fn build(self) -> crate::output::PublishRecipeOutput {
            crate::output::PublishRecipeOutput { name: self.name }
        }
    }
}
impl PublishRecipeOutput {
    /// Creates a new builder-style object to manufacture [`PublishRecipeOutput`](crate::output::PublishRecipeOutput)
    pub fn builder() -> crate::output::publish_recipe_output::Builder {
        crate::output::publish_recipe_output::Builder::default()
    }
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ListTagsForResourceOutput {
    /// <p>A list of tags associated with the DataBrew resource.</p>
    pub tags:
        std::option::Option<std::collections::HashMap<std::string::String, std::string::String>>,
}
impl std::fmt::Debug for ListTagsForResourceOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("ListTagsForResourceOutput");
        formatter.field("tags", &self.tags);
        formatter.finish()
    }
}
/// See [`ListTagsForResourceOutput`](crate::output::ListTagsForResourceOutput)
pub mod list_tags_for_resource_output {
    /// A builder for [`ListTagsForResourceOutput`](crate::output::ListTagsForResourceOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) tags: std::option::Option<
            std::collections::HashMap<std::string::String, std::string::String>,
        >,
    }
    impl Builder {
        /// Adds a key-value pair to `tags`; to replace the whole map, use `set_tags`.
        pub fn tags(
            mut self,
            k: impl Into<std::string::String>,
            v: impl Into<std::string::String>,
        ) -> Self {
            let mut hash_map = self.tags.unwrap_or_default();
            hash_map.insert(k.into(), v.into());
            self.tags = Some(hash_map);
            self
        }
        /// Sets the entire `tags` map, replacing any pairs added via `tags`.
        pub fn set_tags(
            mut self,
            input: std::option::Option<
                std::collections::HashMap<std::string::String, std::string::String>,
            >,
        ) -> Self {
            self.tags = input;
            self
        }
        /// Consumes the builder and constructs a 
[`ListTagsForResourceOutput`](crate::output::ListTagsForResourceOutput)
        pub fn build(self) -> crate::output::ListTagsForResourceOutput {
            crate::output::ListTagsForResourceOutput { tags: self.tags }
        }
    }
}
impl ListTagsForResourceOutput {
    /// Creates a new builder-style object to manufacture [`ListTagsForResourceOutput`](crate::output::ListTagsForResourceOutput)
    pub fn builder() -> crate::output::list_tags_for_resource_output::Builder {
        crate::output::list_tags_for_resource_output::Builder::default()
    }
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ListSchedulesOutput {
    /// <p>A list of schedules that are defined.</p>
    pub schedules: std::option::Option<std::vec::Vec<crate::model::Schedule>>,
    /// <p>A token that you can use in a subsequent call to retrieve the next set of
    /// results.</p>
    pub next_token: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for ListSchedulesOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("ListSchedulesOutput");
        formatter.field("schedules", &self.schedules);
        formatter.field("next_token", &self.next_token);
        formatter.finish()
    }
}
/// See [`ListSchedulesOutput`](crate::output::ListSchedulesOutput)
pub mod list_schedules_output {
    /// A builder for [`ListSchedulesOutput`](crate::output::ListSchedulesOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) schedules: std::option::Option<std::vec::Vec<crate::model::Schedule>>,
        pub(crate) next_token: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// Appends an item to `schedules`; to replace the whole list, use `set_schedules`.
        pub fn schedules(mut self, input: impl Into<crate::model::Schedule>) -> Self {
            let mut v = self.schedules.unwrap_or_default();
            v.push(input.into());
            self.schedules = Some(v);
            self
        }
        /// Sets the entire `schedules` list, replacing any items added via `schedules`.
        pub fn set_schedules(
            mut self,
            input: std::option::Option<std::vec::Vec<crate::model::Schedule>>,
        ) -> Self {
            self.schedules = input;
            self
        }
        /// <p>A token that you can use in a subsequent call to retrieve the next set of
        /// results.</p>
        pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
            self.next_token = Some(input.into());
            self
        }
        /// Sets `next_token` directly; overrides any value previously set via `next_token`.
        pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.next_token = input;
            self
        }
        /// Consumes the builder and constructs a [`ListSchedulesOutput`](crate::output::ListSchedulesOutput)
        pub fn build(self) -> crate::output::ListSchedulesOutput {
            crate::output::ListSchedulesOutput {
                schedules: self.schedules,
                next_token: self.next_token,
            }
        }
    }
}
impl ListSchedulesOutput {
    /// Creates a new builder-style object to manufacture [`ListSchedulesOutput`](crate::output::ListSchedulesOutput)
    pub fn builder() -> crate::output::list_schedules_output::Builder {
        crate::output::list_schedules_output::Builder::default()
    }
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ListRecipeVersionsOutput {
    /// <p>A token that you can use in a subsequent call to retrieve the next set of
    /// results.</p>
    pub next_token: std::option::Option<std::string::String>,
    /// <p>A list of versions for the specified recipe.</p>
    pub recipes: std::option::Option<std::vec::Vec<crate::model::Recipe>>,
}
impl std::fmt::Debug for ListRecipeVersionsOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("ListRecipeVersionsOutput");
        formatter.field("next_token", &self.next_token);
        formatter.field("recipes", &self.recipes);
        formatter.finish()
    }
}
/// See [`ListRecipeVersionsOutput`](crate::output::ListRecipeVersionsOutput)
pub mod list_recipe_versions_output {
    /// A builder for [`ListRecipeVersionsOutput`](crate::output::ListRecipeVersionsOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) next_token: std::option::Option<std::string::String>,
        pub(crate) recipes: std::option::Option<std::vec::Vec<crate::model::Recipe>>,
    }
    impl Builder {
        /// <p>A token that you can use in a subsequent call to retrieve the next set of
        /// results.</p>
        pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
            self.next_token = Some(input.into());
            self
        }
        /// Sets `next_token` directly; overrides any value previously set via `next_token`.
        pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.next_token = input;
            self
        }
        /// Appends an item to `recipes`; to replace the whole list, use `set_recipes`.
        pub fn recipes(mut self, input: impl Into<crate::model::Recipe>) -> Self {
            let mut v = self.recipes.unwrap_or_default();
            v.push(input.into());
            self.recipes = Some(v);
            self
        }
        /// Sets the entire `recipes` list, replacing any items added via `recipes`.
        pub fn set_recipes(
            mut self,
            input: std::option::Option<std::vec::Vec<crate::model::Recipe>>,
        ) -> Self {
            self.recipes = input;
            self
        }
        /// Consumes the builder and constructs a [`ListRecipeVersionsOutput`](crate::output::ListRecipeVersionsOutput)
        pub fn build(self) -> crate::output::ListRecipeVersionsOutput {
            crate::output::ListRecipeVersionsOutput {
                next_token: self.next_token,
                recipes: self.recipes,
            }
        }
    }
}
impl ListRecipeVersionsOutput {
    /// Creates a new builder-style object to manufacture [`ListRecipeVersionsOutput`](crate::output::ListRecipeVersionsOutput)
    pub fn builder() -> crate::output::list_recipe_versions_output::Builder {
        crate::output::list_recipe_versions_output::Builder::default()
    }
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ListRecipesOutput {
    /// <p>A list of recipes that are defined.</p>
    pub recipes: std::option::Option<std::vec::Vec<crate::model::Recipe>>,
    /// <p>A token that you can use in a subsequent call to retrieve the next set of
    /// results.</p>
    pub next_token: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for ListRecipesOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("ListRecipesOutput");
        formatter.field("recipes", &self.recipes);
        formatter.field("next_token", &self.next_token);
        formatter.finish()
    }
}
/// See 
[`ListRecipesOutput`](crate::output::ListRecipesOutput)
pub mod list_recipes_output {
    /// A builder for [`ListRecipesOutput`](crate::output::ListRecipesOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) recipes: std::option::Option<std::vec::Vec<crate::model::Recipe>>,
        pub(crate) next_token: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// Appends an item to `recipes`; to replace the whole list, use `set_recipes`.
        pub fn recipes(mut self, input: impl Into<crate::model::Recipe>) -> Self {
            let mut v = self.recipes.unwrap_or_default();
            v.push(input.into());
            self.recipes = Some(v);
            self
        }
        /// Sets the entire `recipes` list, replacing any items added via `recipes`.
        pub fn set_recipes(
            mut self,
            input: std::option::Option<std::vec::Vec<crate::model::Recipe>>,
        ) -> Self {
            self.recipes = input;
            self
        }
        /// <p>A token that you can use in a subsequent call to retrieve the next set of
        /// results.</p>
        pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
            self.next_token = Some(input.into());
            self
        }
        /// Sets `next_token` directly; overrides any value previously set via `next_token`.
        pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.next_token = input;
            self
        }
        /// Consumes the builder and constructs a [`ListRecipesOutput`](crate::output::ListRecipesOutput)
        pub fn build(self) -> crate::output::ListRecipesOutput {
            crate::output::ListRecipesOutput {
                recipes: self.recipes,
                next_token: self.next_token,
            }
        }
    }
}
impl ListRecipesOutput {
    /// Creates a new builder-style object to manufacture [`ListRecipesOutput`](crate::output::ListRecipesOutput)
    pub fn builder() -> crate::output::list_recipes_output::Builder {
        crate::output::list_recipes_output::Builder::default()
    }
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ListProjectsOutput {
    /// <p>A list of projects that are defined .</p>
    pub projects: std::option::Option<std::vec::Vec<crate::model::Project>>,
    /// <p>A token that you can use in a subsequent call to retrieve the next set of
    /// results.</p>
    pub next_token: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for ListProjectsOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("ListProjectsOutput");
        formatter.field("projects", &self.projects);
        formatter.field("next_token", &self.next_token);
        formatter.finish()
    }
}
/// See [`ListProjectsOutput`](crate::output::ListProjectsOutput)
pub mod list_projects_output {
    /// A builder for [`ListProjectsOutput`](crate::output::ListProjectsOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) projects: std::option::Option<std::vec::Vec<crate::model::Project>>,
        pub(crate) next_token: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// Appends an item to `projects`; to replace the whole list, use `set_projects`.
        pub fn projects(mut self, input: impl Into<crate::model::Project>) -> Self {
            let mut v = self.projects.unwrap_or_default();
            v.push(input.into());
            self.projects = Some(v);
            self
        }
        /// Sets the entire `projects` list, replacing any items added via `projects`.
        pub fn set_projects(
            mut self,
            input: std::option::Option<std::vec::Vec<crate::model::Project>>,
        ) -> Self {
            self.projects = input;
            self
        }
        /// <p>A token that you can use in a subsequent call to retrieve the next set of
        /// results.</p>
        pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
            self.next_token = Some(input.into());
            self
        }
        /// Sets `next_token` directly; overrides any value previously set via `next_token`.
        pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.next_token = input;
            self
        }
        /// Consumes the builder and constructs a [`ListProjectsOutput`](crate::output::ListProjectsOutput)
        pub fn build(self) -> crate::output::ListProjectsOutput {
            crate::output::ListProjectsOutput {
                projects: self.projects,
                next_token: self.next_token,
            }
        }
    }
}
impl ListProjectsOutput {
    /// Creates a new builder-style object to manufacture [`ListProjectsOutput`](crate::output::ListProjectsOutput)
    pub fn builder() -> crate::output::list_projects_output::Builder {
        crate::output::list_projects_output::Builder::default()
    }
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ListJobsOutput {
    /// <p>A list of jobs that are defined.</p>
    pub jobs: std::option::Option<std::vec::Vec<crate::model::Job>>,
    /// <p>A token that you can use in a subsequent call to retrieve the next set of
    /// results.</p>
    pub next_token: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for ListJobsOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("ListJobsOutput");
        formatter.field("jobs", &self.jobs);
        formatter.field("next_token", &self.next_token);
        formatter.finish()
    }
}
/// See [`ListJobsOutput`](crate::output::ListJobsOutput)
pub mod list_jobs_output {
    /// A builder for [`ListJobsOutput`](crate::output::ListJobsOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) jobs: std::option::Option<std::vec::Vec<crate::model::Job>>,
        pub(crate) next_token: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// Appends an item to `jobs`; to replace the whole list, use `set_jobs`.
        pub fn jobs(mut self, input: impl Into<crate::model::Job>) -> Self {
            let mut v = self.jobs.unwrap_or_default();
            v.push(input.into());
            self.jobs = Some(v);
            self
        }
        /// Sets the entire `jobs` list, replacing any items added via `jobs`.
        pub fn set_jobs(
            mut self,
            input: std::option::Option<std::vec::Vec<crate::model::Job>>,
        ) -> Self {
            self.jobs = input;
            self
        }
        /// <p>A token that you can use in a subsequent call to retrieve the next set of
        /// results.</p>
        pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
            self.next_token = Some(input.into());
            self
        }
        /// Sets `next_token` directly; overrides any value previously set via `next_token`.
        pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.next_token = input;
            self
        }
        /// Consumes the builder and constructs a [`ListJobsOutput`](crate::output::ListJobsOutput)
        pub fn build(self) -> crate::output::ListJobsOutput {
            crate::output::ListJobsOutput {
                jobs: self.jobs,
                next_token: self.next_token,
            }
        }
    }
}
impl ListJobsOutput {
    /// Creates a new builder-style object to manufacture [`ListJobsOutput`](crate::output::ListJobsOutput)
    pub 
fn builder() -> crate::output::list_jobs_output::Builder { crate::output::list_jobs_output::Builder::default() } } #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct ListJobRunsOutput { /// <p>A list of job runs that have occurred for the specified job.</p> pub job_runs: std::option::Option<std::vec::Vec<crate::model::JobRun>>, /// <p>A token that you can use in a subsequent call to retrieve the next set of /// results.</p> pub next_token: std::option::Option<std::string::String>, } impl std::fmt::Debug for ListJobRunsOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("ListJobRunsOutput"); formatter.field("job_runs", &self.job_runs); formatter.field("next_token", &self.next_token); formatter.finish() } } /// See [`ListJobRunsOutput`](crate::output::ListJobRunsOutput) pub mod list_job_runs_output { /// A builder for [`ListJobRunsOutput`](crate::output::ListJobRunsOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) job_runs: std::option::Option<std::vec::Vec<crate::model::JobRun>>, pub(crate) next_token: std::option::Option<std::string::String>, } impl Builder { pub fn job_runs(mut self, input: impl Into<crate::model::JobRun>) -> Self { let mut v = self.job_runs.unwrap_or_default(); v.push(input.into()); self.job_runs = Some(v); self } pub fn set_job_runs( mut self, input: std::option::Option<std::vec::Vec<crate::model::JobRun>>, ) -> Self { self.job_runs = input; self } /// <p>A token that you can use in a subsequent call to retrieve the next set of /// results.</p> pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self { self.next_token = Some(input.into()); self } pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.next_token = input; self } /// Consumes the builder and constructs a 
[`ListJobRunsOutput`](crate::output::ListJobRunsOutput) pub fn build(self) -> crate::output::ListJobRunsOutput { crate::output::ListJobRunsOutput { job_runs: self.job_runs, next_token: self.next_token, } } } } impl ListJobRunsOutput { /// Creates a new builder-style object to manufacture [`ListJobRunsOutput`](crate::output::ListJobRunsOutput) pub fn builder() -> crate::output::list_job_runs_output::Builder { crate::output::list_job_runs_output::Builder::default() } } #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct ListDatasetsOutput { /// <p>A list of datasets that are defined.</p> pub datasets: std::option::Option<std::vec::Vec<crate::model::Dataset>>, /// <p>A token that you can use in a subsequent call to retrieve the next set of /// results.</p> pub next_token: std::option::Option<std::string::String>, } impl std::fmt::Debug for ListDatasetsOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("ListDatasetsOutput"); formatter.field("datasets", &self.datasets); formatter.field("next_token", &self.next_token); formatter.finish() } } /// See [`ListDatasetsOutput`](crate::output::ListDatasetsOutput) pub mod list_datasets_output { /// A builder for [`ListDatasetsOutput`](crate::output::ListDatasetsOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) datasets: std::option::Option<std::vec::Vec<crate::model::Dataset>>, pub(crate) next_token: std::option::Option<std::string::String>, } impl Builder { pub fn datasets(mut self, input: impl Into<crate::model::Dataset>) -> Self { let mut v = self.datasets.unwrap_or_default(); v.push(input.into()); self.datasets = Some(v); self } pub fn set_datasets( mut self, input: std::option::Option<std::vec::Vec<crate::model::Dataset>>, ) -> Self { self.datasets = input; self } /// <p>A token that you can use in a subsequent call to retrieve the next set 
of /// results.</p> pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self { self.next_token = Some(input.into()); self } pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.next_token = input; self } /// Consumes the builder and constructs a [`ListDatasetsOutput`](crate::output::ListDatasetsOutput) pub fn build(self) -> crate::output::ListDatasetsOutput { crate::output::ListDatasetsOutput { datasets: self.datasets, next_token: self.next_token, } } } } impl ListDatasetsOutput { /// Creates a new builder-style object to manufacture [`ListDatasetsOutput`](crate::output::ListDatasetsOutput) pub fn builder() -> crate::output::list_datasets_output::Builder { crate::output::list_datasets_output::Builder::default() } } #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct DescribeScheduleOutput { /// <p>The date and time that the schedule was created.</p> pub create_date: std::option::Option<smithy_types::Instant>, /// <p>The identifier (user name) of the user who created the schedule. </p> pub created_by: std::option::Option<std::string::String>, /// <p>The name or names of one or more jobs to be run by using the schedule.</p> pub job_names: std::option::Option<std::vec::Vec<std::string::String>>, /// <p>The identifier (user name) of the user who last modified the schedule.</p> pub last_modified_by: std::option::Option<std::string::String>, /// <p>The date and time that the schedule was last modified.</p> pub last_modified_date: std::option::Option<smithy_types::Instant>, /// <p>The Amazon Resource Name (ARN) of the schedule.</p> pub resource_arn: std::option::Option<std::string::String>, /// <p>The date or dates and time or times when the jobs are to be run for the schedule. 
For /// more information, see <a href="https://docs.aws.amazon.com/databrew/latest/dg/jobs.cron.html">Cron expressions</a> in the /// <i>Glue DataBrew Developer Guide</i>.</p> pub cron_expression: std::option::Option<std::string::String>, /// <p>Metadata tags associated with this schedule.</p> pub tags: std::option::Option<std::collections::HashMap<std::string::String, std::string::String>>, /// <p>The name of the schedule.</p> pub name: std::option::Option<std::string::String>, } impl std::fmt::Debug for DescribeScheduleOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("DescribeScheduleOutput"); formatter.field("create_date", &self.create_date); formatter.field("created_by", &self.created_by); formatter.field("job_names", &self.job_names); formatter.field("last_modified_by", &self.last_modified_by); formatter.field("last_modified_date", &self.last_modified_date); formatter.field("resource_arn", &self.resource_arn); formatter.field("cron_expression", &self.cron_expression); formatter.field("tags", &self.tags); formatter.field("name", &self.name); formatter.finish() } } /// See [`DescribeScheduleOutput`](crate::output::DescribeScheduleOutput) pub mod describe_schedule_output { /// A builder for [`DescribeScheduleOutput`](crate::output::DescribeScheduleOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) create_date: std::option::Option<smithy_types::Instant>, pub(crate) created_by: std::option::Option<std::string::String>, pub(crate) job_names: std::option::Option<std::vec::Vec<std::string::String>>, pub(crate) last_modified_by: std::option::Option<std::string::String>, pub(crate) last_modified_date: std::option::Option<smithy_types::Instant>, pub(crate) resource_arn: std::option::Option<std::string::String>, pub(crate) cron_expression: std::option::Option<std::string::String>, pub(crate) tags: 
std::option::Option< std::collections::HashMap<std::string::String, std::string::String>, >, pub(crate) name: std::option::Option<std::string::String>, } impl Builder { /// <p>The date and time that the schedule was created.</p> pub fn create_date(mut self, input: smithy_types::Instant) -> Self { self.create_date = Some(input); self } pub fn set_create_date( mut self, input: std::option::Option<smithy_types::Instant>, ) -> Self { self.create_date = input; self } /// <p>The identifier (user name) of the user who created the schedule. </p> pub fn created_by(mut self, input: impl Into<std::string::String>) -> Self { self.created_by = Some(input.into()); self } pub fn set_created_by(mut self, input: std::option::Option<std::string::String>) -> Self { self.created_by = input; self } pub fn job_names(mut self, input: impl Into<std::string::String>) -> Self { let mut v = self.job_names.unwrap_or_default(); v.push(input.into()); self.job_names = Some(v); self } pub fn set_job_names( mut self, input: std::option::Option<std::vec::Vec<std::string::String>>, ) -> Self { self.job_names = input; self } /// <p>The identifier (user name) of the user who last modified the schedule.</p> pub fn last_modified_by(mut self, input: impl Into<std::string::String>) -> Self { self.last_modified_by = Some(input.into()); self } pub fn set_last_modified_by( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.last_modified_by = input; self } /// <p>The date and time that the schedule was last modified.</p> pub fn last_modified_date(mut self, input: smithy_types::Instant) -> Self { self.last_modified_date = Some(input); self } pub fn set_last_modified_date( mut self, input: std::option::Option<smithy_types::Instant>, ) -> Self { self.last_modified_date = input; self } /// <p>The Amazon Resource Name (ARN) of the schedule.</p> pub fn resource_arn(mut self, input: impl Into<std::string::String>) -> Self { self.resource_arn = Some(input.into()); self } pub fn 
set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self { self.resource_arn = input; self } /// <p>The date or dates and time or times when the jobs are to be run for the schedule. For /// more information, see <a href="https://docs.aws.amazon.com/databrew/latest/dg/jobs.cron.html">Cron expressions</a> in the /// <i>Glue DataBrew Developer Guide</i>.</p> pub fn cron_expression(mut self, input: impl Into<std::string::String>) -> Self { self.cron_expression = Some(input.into()); self } pub fn set_cron_expression( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.cron_expression = input; self } pub fn tags( mut self, k: impl Into<std::string::String>, v: impl Into<std::string::String>, ) -> Self { let mut hash_map = self.tags.unwrap_or_default(); hash_map.insert(k.into(), v.into()); self.tags = Some(hash_map); self } pub fn set_tags( mut self, input: std::option::Option< std::collections::HashMap<std::string::String, std::string::String>, >, ) -> Self { self.tags = input; self } /// <p>The name of the schedule.</p> pub fn name(mut self, input: impl Into<std::string::String>) -> Self { self.name = Some(input.into()); self } pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.name = input; self } /// Consumes the builder and constructs a [`DescribeScheduleOutput`](crate::output::DescribeScheduleOutput) pub fn build(self) -> crate::output::DescribeScheduleOutput { crate::output::DescribeScheduleOutput { create_date: self.create_date, created_by: self.created_by, job_names: self.job_names, last_modified_by: self.last_modified_by, last_modified_date: self.last_modified_date, resource_arn: self.resource_arn, cron_expression: self.cron_expression, tags: self.tags, name: self.name, } } } } impl DescribeScheduleOutput { /// Creates a new builder-style object to manufacture [`DescribeScheduleOutput`](crate::output::DescribeScheduleOutput) pub fn builder() -> 
crate::output::describe_schedule_output::Builder { crate::output::describe_schedule_output::Builder::default() } } #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct DescribeRecipeOutput { /// <p>The identifier (user name) of the user who created the recipe.</p> pub created_by: std::option::Option<std::string::String>, /// <p>The date and time that the recipe was created.</p> pub create_date: std::option::Option<smithy_types::Instant>, /// <p>The identifier (user name) of the user who last modified the recipe.</p> pub last_modified_by: std::option::Option<std::string::String>, /// <p>The date and time that the recipe was last modified.</p> pub last_modified_date: std::option::Option<smithy_types::Instant>, /// <p>The name of the project associated with this recipe.</p> pub project_name: std::option::Option<std::string::String>, /// <p>The identifier (user name) of the user who last published the recipe.</p> pub published_by: std::option::Option<std::string::String>, /// <p>The date and time when the recipe was last published.</p> pub published_date: std::option::Option<smithy_types::Instant>, /// <p>The description of the recipe.</p> pub description: std::option::Option<std::string::String>, /// <p>The name of the recipe.</p> pub name: std::option::Option<std::string::String>, /// <p>One or more steps to be performed by the recipe. 
Each step consists of an action, and /// the conditions under which the action should succeed.</p> pub steps: std::option::Option<std::vec::Vec<crate::model::RecipeStep>>, /// <p>Metadata tags associated with this project.</p> pub tags: std::option::Option<std::collections::HashMap<std::string::String, std::string::String>>, /// <p>The ARN of the recipe.</p> pub resource_arn: std::option::Option<std::string::String>, /// <p>The recipe version identifier.</p> pub recipe_version: std::option::Option<std::string::String>, } impl std::fmt::Debug for DescribeRecipeOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("DescribeRecipeOutput"); formatter.field("created_by", &self.created_by); formatter.field("create_date", &self.create_date); formatter.field("last_modified_by", &self.last_modified_by); formatter.field("last_modified_date", &self.last_modified_date); formatter.field("project_name", &self.project_name); formatter.field("published_by", &self.published_by); formatter.field("published_date", &self.published_date); formatter.field("description", &self.description); formatter.field("name", &self.name); formatter.field("steps", &self.steps); formatter.field("tags", &self.tags); formatter.field("resource_arn", &self.resource_arn); formatter.field("recipe_version", &self.recipe_version); formatter.finish() } } /// See [`DescribeRecipeOutput`](crate::output::DescribeRecipeOutput) pub mod describe_recipe_output { /// A builder for [`DescribeRecipeOutput`](crate::output::DescribeRecipeOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) created_by: std::option::Option<std::string::String>, pub(crate) create_date: std::option::Option<smithy_types::Instant>, pub(crate) last_modified_by: std::option::Option<std::string::String>, pub(crate) last_modified_date: std::option::Option<smithy_types::Instant>, pub(crate) 
project_name: std::option::Option<std::string::String>, pub(crate) published_by: std::option::Option<std::string::String>, pub(crate) published_date: std::option::Option<smithy_types::Instant>, pub(crate) description: std::option::Option<std::string::String>, pub(crate) name: std::option::Option<std::string::String>, pub(crate) steps: std::option::Option<std::vec::Vec<crate::model::RecipeStep>>, pub(crate) tags: std::option::Option< std::collections::HashMap<std::string::String, std::string::String>, >, pub(crate) resource_arn: std::option::Option<std::string::String>, pub(crate) recipe_version: std::option::Option<std::string::String>, } impl Builder { /// <p>The identifier (user name) of the user who created the recipe.</p> pub fn created_by(mut self, input: impl Into<std::string::String>) -> Self { self.created_by = Some(input.into()); self } pub fn set_created_by(mut self, input: std::option::Option<std::string::String>) -> Self { self.created_by = input; self } /// <p>The date and time that the recipe was created.</p> pub fn create_date(mut self, input: smithy_types::Instant) -> Self { self.create_date = Some(input); self } pub fn set_create_date( mut self, input: std::option::Option<smithy_types::Instant>, ) -> Self { self.create_date = input; self } /// <p>The identifier (user name) of the user who last modified the recipe.</p> pub fn last_modified_by(mut self, input: impl Into<std::string::String>) -> Self { self.last_modified_by = Some(input.into()); self } pub fn set_last_modified_by( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.last_modified_by = input; self } /// <p>The date and time that the recipe was last modified.</p> pub fn last_modified_date(mut self, input: smithy_types::Instant) -> Self { self.last_modified_date = Some(input); self } pub fn set_last_modified_date( mut self, input: std::option::Option<smithy_types::Instant>, ) -> Self { self.last_modified_date = input; self } /// <p>The name of the project 
associated with this recipe.</p> pub fn project_name(mut self, input: impl Into<std::string::String>) -> Self { self.project_name = Some(input.into()); self } pub fn set_project_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.project_name = input; self } /// <p>The identifier (user name) of the user who last published the recipe.</p> pub fn published_by(mut self, input: impl Into<std::string::String>) -> Self { self.published_by = Some(input.into()); self } pub fn set_published_by(mut self, input: std::option::Option<std::string::String>) -> Self { self.published_by = input; self } /// <p>The date and time when the recipe was last published.</p> pub fn published_date(mut self, input: smithy_types::Instant) -> Self { self.published_date = Some(input); self } pub fn set_published_date( mut self, input: std::option::Option<smithy_types::Instant>, ) -> Self { self.published_date = input; self } /// <p>The description of the recipe.</p> pub fn description(mut self, input: impl Into<std::string::String>) -> Self { self.description = Some(input.into()); self } pub fn set_description(mut self, input: std::option::Option<std::string::String>) -> Self { self.description = input; self } /// <p>The name of the recipe.</p> pub fn name(mut self, input: impl Into<std::string::String>) -> Self { self.name = Some(input.into()); self } pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.name = input; self } pub fn steps(mut self, input: impl Into<crate::model::RecipeStep>) -> Self { let mut v = self.steps.unwrap_or_default(); v.push(input.into()); self.steps = Some(v); self } pub fn set_steps( mut self, input: std::option::Option<std::vec::Vec<crate::model::RecipeStep>>, ) -> Self { self.steps = input; self } pub fn tags( mut self, k: impl Into<std::string::String>, v: impl Into<std::string::String>, ) -> Self { let mut hash_map = self.tags.unwrap_or_default(); hash_map.insert(k.into(), v.into()); self.tags = 
Some(hash_map); self } pub fn set_tags( mut self, input: std::option::Option< std::collections::HashMap<std::string::String, std::string::String>, >, ) -> Self { self.tags = input; self } /// <p>The ARN of the recipe.</p> pub fn resource_arn(mut self, input: impl Into<std::string::String>) -> Self { self.resource_arn = Some(input.into()); self } pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self { self.resource_arn = input; self } /// <p>The recipe version identifier.</p> pub fn recipe_version(mut self, input: impl Into<std::string::String>) -> Self { self.recipe_version = Some(input.into()); self } pub fn set_recipe_version( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.recipe_version = input; self } /// Consumes the builder and constructs a [`DescribeRecipeOutput`](crate::output::DescribeRecipeOutput) pub fn build(self) -> crate::output::DescribeRecipeOutput { crate::output::DescribeRecipeOutput { created_by: self.created_by, create_date: self.create_date, last_modified_by: self.last_modified_by, last_modified_date: self.last_modified_date, project_name: self.project_name, published_by: self.published_by, published_date: self.published_date, description: self.description, name: self.name, steps: self.steps, tags: self.tags, resource_arn: self.resource_arn, recipe_version: self.recipe_version, } } } } impl DescribeRecipeOutput { /// Creates a new builder-style object to manufacture [`DescribeRecipeOutput`](crate::output::DescribeRecipeOutput) pub fn builder() -> crate::output::describe_recipe_output::Builder { crate::output::describe_recipe_output::Builder::default() } } #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct DescribeProjectOutput { /// <p>The date and time that the project was created.</p> pub create_date: std::option::Option<smithy_types::Instant>, /// <p>The identifier (user name) of the user who created the project.</p> pub created_by: 
std::option::Option<std::string::String>, /// <p>The dataset associated with the project.</p> pub dataset_name: std::option::Option<std::string::String>, /// <p>The date and time that the project was last modified.</p> pub last_modified_date: std::option::Option<smithy_types::Instant>, /// <p>The identifier (user name) of the user who last modified the project.</p> pub last_modified_by: std::option::Option<std::string::String>, /// <p>The name of the project.</p> pub name: std::option::Option<std::string::String>, /// <p>The recipe associated with this job.</p> pub recipe_name: std::option::Option<std::string::String>, /// <p>The Amazon Resource Name (ARN) of the project.</p> pub resource_arn: std::option::Option<std::string::String>, /// <p>Represents the sample size and sampling type for DataBrew to use for interactive data /// analysis.</p> pub sample: std::option::Option<crate::model::Sample>, /// <p>The ARN of the Identity and Access Management (IAM) role to be assumed when /// DataBrew runs the job.</p> pub role_arn: std::option::Option<std::string::String>, /// <p>Metadata tags associated with this project.</p> pub tags: std::option::Option<std::collections::HashMap<std::string::String, std::string::String>>, /// <p>Describes the current state of the session:</p> /// <ul> /// <li> /// <p> /// <code>PROVISIONING</code> - allocating resources for the session.</p> /// </li> /// <li> /// <p> /// <code>INITIALIZING</code> - getting the session ready for first use.</p> /// </li> /// <li> /// <p> /// <code>ASSIGNED</code> - the session is ready for use.</p> /// </li> /// </ul> pub session_status: std::option::Option<crate::model::SessionStatus>, /// <p>The identifier (user name) of the user that opened the project for use. </p> pub opened_by: std::option::Option<std::string::String>, /// <p>The date and time when the project was opened. 
</p> pub open_date: std::option::Option<smithy_types::Instant>, } impl std::fmt::Debug for DescribeProjectOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("DescribeProjectOutput"); formatter.field("create_date", &self.create_date); formatter.field("created_by", &self.created_by); formatter.field("dataset_name", &self.dataset_name); formatter.field("last_modified_date", &self.last_modified_date); formatter.field("last_modified_by", &self.last_modified_by); formatter.field("name", &self.name); formatter.field("recipe_name", &self.recipe_name); formatter.field("resource_arn", &self.resource_arn); formatter.field("sample", &self.sample); formatter.field("role_arn", &self.role_arn); formatter.field("tags", &self.tags); formatter.field("session_status", &self.session_status); formatter.field("opened_by", &self.opened_by); formatter.field("open_date", &self.open_date); formatter.finish() } } /// See [`DescribeProjectOutput`](crate::output::DescribeProjectOutput) pub mod describe_project_output { /// A builder for [`DescribeProjectOutput`](crate::output::DescribeProjectOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) create_date: std::option::Option<smithy_types::Instant>, pub(crate) created_by: std::option::Option<std::string::String>, pub(crate) dataset_name: std::option::Option<std::string::String>, pub(crate) last_modified_date: std::option::Option<smithy_types::Instant>, pub(crate) last_modified_by: std::option::Option<std::string::String>, pub(crate) name: std::option::Option<std::string::String>, pub(crate) recipe_name: std::option::Option<std::string::String>, pub(crate) resource_arn: std::option::Option<std::string::String>, pub(crate) sample: std::option::Option<crate::model::Sample>, pub(crate) role_arn: std::option::Option<std::string::String>, pub(crate) tags: std::option::Option< 
std::collections::HashMap<std::string::String, std::string::String>, >, pub(crate) session_status: std::option::Option<crate::model::SessionStatus>, pub(crate) opened_by: std::option::Option<std::string::String>, pub(crate) open_date: std::option::Option<smithy_types::Instant>, } impl Builder { /// <p>The date and time that the project was created.</p> pub fn create_date(mut self, input: smithy_types::Instant) -> Self { self.create_date = Some(input); self } pub fn set_create_date( mut self, input: std::option::Option<smithy_types::Instant>, ) -> Self { self.create_date = input; self } /// <p>The identifier (user name) of the user who created the project.</p> pub fn created_by(mut self, input: impl Into<std::string::String>) -> Self { self.created_by = Some(input.into()); self } pub fn set_created_by(mut self, input: std::option::Option<std::string::String>) -> Self { self.created_by = input; self } /// <p>The dataset associated with the project.</p> pub fn dataset_name(mut self, input: impl Into<std::string::String>) -> Self { self.dataset_name = Some(input.into()); self } pub fn set_dataset_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.dataset_name = input; self } /// <p>The date and time that the project was last modified.</p> pub fn last_modified_date(mut self, input: smithy_types::Instant) -> Self { self.last_modified_date = Some(input); self } pub fn set_last_modified_date( mut self, input: std::option::Option<smithy_types::Instant>, ) -> Self { self.last_modified_date = input; self } /// <p>The identifier (user name) of the user who last modified the project.</p> pub fn last_modified_by(mut self, input: impl Into<std::string::String>) -> Self { self.last_modified_by = Some(input.into()); self } pub fn set_last_modified_by( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.last_modified_by = input; self } /// <p>The name of the project.</p> pub fn name(mut self, input: impl Into<std::string::String>) 
-> Self { self.name = Some(input.into()); self } pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.name = input; self } /// <p>The recipe associated with this job.</p> pub fn recipe_name(mut self, input: impl Into<std::string::String>) -> Self { self.recipe_name = Some(input.into()); self } pub fn set_recipe_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.recipe_name = input; self } /// <p>The Amazon Resource Name (ARN) of the project.</p> pub fn resource_arn(mut self, input: impl Into<std::string::String>) -> Self { self.resource_arn = Some(input.into()); self } pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self { self.resource_arn = input; self } /// <p>Represents the sample size and sampling type for DataBrew to use for interactive data /// analysis.</p> pub fn sample(mut self, input: crate::model::Sample) -> Self { self.sample = Some(input); self } pub fn set_sample(mut self, input: std::option::Option<crate::model::Sample>) -> Self { self.sample = input; self } /// <p>The ARN of the Identity and Access Management (IAM) role to be assumed when /// DataBrew runs the job.</p> pub fn role_arn(mut self, input: impl Into<std::string::String>) -> Self { self.role_arn = Some(input.into()); self } pub fn set_role_arn(mut self, input: std::option::Option<std::string::String>) -> Self { self.role_arn = input; self } pub fn tags( mut self, k: impl Into<std::string::String>, v: impl Into<std::string::String>, ) -> Self { let mut hash_map = self.tags.unwrap_or_default(); hash_map.insert(k.into(), v.into()); self.tags = Some(hash_map); self } pub fn set_tags( mut self, input: std::option::Option< std::collections::HashMap<std::string::String, std::string::String>, >, ) -> Self { self.tags = input; self } /// <p>Describes the current state of the session:</p> /// <ul> /// <li> /// <p> /// <code>PROVISIONING</code> - allocating resources for the session.</p> /// </li> 
/// <li> /// <p> /// <code>INITIALIZING</code> - getting the session ready for first use.</p> /// </li> /// <li> /// <p> /// <code>ASSIGNED</code> - the session is ready for use.</p> /// </li> /// </ul> pub fn session_status(mut self, input: crate::model::SessionStatus) -> Self { self.session_status = Some(input); self } pub fn set_session_status( mut self, input: std::option::Option<crate::model::SessionStatus>, ) -> Self { self.session_status = input; self } /// <p>The identifier (user name) of the user that opened the project for use. </p> pub fn opened_by(mut self, input: impl Into<std::string::String>) -> Self { self.opened_by = Some(input.into()); self } pub fn set_opened_by(mut self, input: std::option::Option<std::string::String>) -> Self { self.opened_by = input; self } /// <p>The date and time when the project was opened. </p> pub fn open_date(mut self, input: smithy_types::Instant) -> Self { self.open_date = Some(input); self } pub fn set_open_date(mut self, input: std::option::Option<smithy_types::Instant>) -> Self { self.open_date = input; self } /// Consumes the builder and constructs a [`DescribeProjectOutput`](crate::output::DescribeProjectOutput) pub fn build(self) -> crate::output::DescribeProjectOutput { crate::output::DescribeProjectOutput { create_date: self.create_date, created_by: self.created_by, dataset_name: self.dataset_name, last_modified_date: self.last_modified_date, last_modified_by: self.last_modified_by, name: self.name, recipe_name: self.recipe_name, resource_arn: self.resource_arn, sample: self.sample, role_arn: self.role_arn, tags: self.tags, session_status: self.session_status, opened_by: self.opened_by, open_date: self.open_date, } } } } impl DescribeProjectOutput { /// Creates a new builder-style object to manufacture [`DescribeProjectOutput`](crate::output::DescribeProjectOutput) pub fn builder() -> crate::output::describe_project_output::Builder { crate::output::describe_project_output::Builder::default() } } 
#[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct DescribeJobRunOutput { /// <p>The number of times that DataBrew has attempted to run the job.</p> pub attempt: i32, /// <p>The date and time when the job completed processing.</p> pub completed_on: std::option::Option<smithy_types::Instant>, /// <p>The name of the dataset for the job to process.</p> pub dataset_name: std::option::Option<std::string::String>, /// <p>A message indicating an error (if any) that was encountered when the job ran.</p> pub error_message: std::option::Option<std::string::String>, /// <p>The amount of time, in seconds, during which the job run consumed resources.</p> pub execution_time: i32, /// <p>The name of the job being processed during this run.</p> pub job_name: std::option::Option<std::string::String>, /// <p>Configuration for profile jobs. Used to select columns, do evaluations, /// and override default parameters of evaluations. When configuration is null, the /// profile job will run with default settings.</p> pub profile_configuration: std::option::Option<crate::model::ProfileConfiguration>, /// <p>The unique identifier of the job run.</p> pub run_id: std::option::Option<std::string::String>, /// <p>The current state of the job run entity itself.</p> pub state: std::option::Option<crate::model::JobRunState>, /// <p>The current status of Amazon CloudWatch logging for the job run.</p> pub log_subscription: std::option::Option<crate::model::LogSubscription>, /// <p>The name of an Amazon CloudWatch log group, where the job writes diagnostic messages /// when it runs.</p> pub log_group_name: std::option::Option<std::string::String>, /// <p>One or more output artifacts from a job run.</p> pub outputs: std::option::Option<std::vec::Vec<crate::model::Output>>, /// <p>One or more artifacts that represent the Glue Data Catalog output from running the job.</p> pub data_catalog_outputs: std::option::Option<std::vec::Vec<crate::model::DataCatalogOutput>>, /// 
<p>Represents a list of JDBC database output objects which defines the output /// destination for a DataBrew recipe job to write into.</p> pub database_outputs: std::option::Option<std::vec::Vec<crate::model::DatabaseOutput>>, /// <p>Represents the name and version of a DataBrew recipe.</p> pub recipe_reference: std::option::Option<crate::model::RecipeReference>, /// <p>The Amazon Resource Name (ARN) of the user who started the job run.</p> pub started_by: std::option::Option<std::string::String>, /// <p>The date and time when the job run began.</p> pub started_on: std::option::Option<smithy_types::Instant>, /// <p>Sample configuration for profile jobs only. Determines the number of rows on which the /// profile job will be executed. If a JobSample value is not provided, the default value /// will be used. The default value is CUSTOM_ROWS for the mode parameter and 20000 for the /// size parameter.</p> pub job_sample: std::option::Option<crate::model::JobSample>, } impl std::fmt::Debug for DescribeJobRunOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("DescribeJobRunOutput"); formatter.field("attempt", &self.attempt); formatter.field("completed_on", &self.completed_on); formatter.field("dataset_name", &self.dataset_name); formatter.field("error_message", &self.error_message); formatter.field("execution_time", &self.execution_time); formatter.field("job_name", &self.job_name); formatter.field("profile_configuration", &self.profile_configuration); formatter.field("run_id", &self.run_id); formatter.field("state", &self.state); formatter.field("log_subscription", &self.log_subscription); formatter.field("log_group_name", &self.log_group_name); formatter.field("outputs", &self.outputs); formatter.field("data_catalog_outputs", &self.data_catalog_outputs); formatter.field("database_outputs", &self.database_outputs); formatter.field("recipe_reference", &self.recipe_reference); formatter.field("started_by", 
&self.started_by); formatter.field("started_on", &self.started_on); formatter.field("job_sample", &self.job_sample); formatter.finish() } } /// See [`DescribeJobRunOutput`](crate::output::DescribeJobRunOutput) pub mod describe_job_run_output { /// A builder for [`DescribeJobRunOutput`](crate::output::DescribeJobRunOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) attempt: std::option::Option<i32>, pub(crate) completed_on: std::option::Option<smithy_types::Instant>, pub(crate) dataset_name: std::option::Option<std::string::String>, pub(crate) error_message: std::option::Option<std::string::String>, pub(crate) execution_time: std::option::Option<i32>, pub(crate) job_name: std::option::Option<std::string::String>, pub(crate) profile_configuration: std::option::Option<crate::model::ProfileConfiguration>, pub(crate) run_id: std::option::Option<std::string::String>, pub(crate) state: std::option::Option<crate::model::JobRunState>, pub(crate) log_subscription: std::option::Option<crate::model::LogSubscription>, pub(crate) log_group_name: std::option::Option<std::string::String>, pub(crate) outputs: std::option::Option<std::vec::Vec<crate::model::Output>>, pub(crate) data_catalog_outputs: std::option::Option<std::vec::Vec<crate::model::DataCatalogOutput>>, pub(crate) database_outputs: std::option::Option<std::vec::Vec<crate::model::DatabaseOutput>>, pub(crate) recipe_reference: std::option::Option<crate::model::RecipeReference>, pub(crate) started_by: std::option::Option<std::string::String>, pub(crate) started_on: std::option::Option<smithy_types::Instant>, pub(crate) job_sample: std::option::Option<crate::model::JobSample>, } impl Builder { /// <p>The number of times that DataBrew has attempted to run the job.</p> pub fn attempt(mut self, input: i32) -> Self { self.attempt = Some(input); self } pub fn set_attempt(mut self, input: std::option::Option<i32>) -> Self { 
self.attempt = input; self } /// <p>The date and time when the job completed processing.</p> pub fn completed_on(mut self, input: smithy_types::Instant) -> Self { self.completed_on = Some(input); self } pub fn set_completed_on( mut self, input: std::option::Option<smithy_types::Instant>, ) -> Self { self.completed_on = input; self } /// <p>The name of the dataset for the job to process.</p> pub fn dataset_name(mut self, input: impl Into<std::string::String>) -> Self { self.dataset_name = Some(input.into()); self } pub fn set_dataset_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.dataset_name = input; self } /// <p>A message indicating an error (if any) that was encountered when the job ran.</p> pub fn error_message(mut self, input: impl Into<std::string::String>) -> Self { self.error_message = Some(input.into()); self } pub fn set_error_message( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.error_message = input; self } /// <p>The amount of time, in seconds, during which the job run consumed resources.</p> pub fn execution_time(mut self, input: i32) -> Self { self.execution_time = Some(input); self } pub fn set_execution_time(mut self, input: std::option::Option<i32>) -> Self { self.execution_time = input; self } /// <p>The name of the job being processed during this run.</p> pub fn job_name(mut self, input: impl Into<std::string::String>) -> Self { self.job_name = Some(input.into()); self } pub fn set_job_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.job_name = input; self } /// <p>Configuration for profile jobs. Used to select columns, do evaluations, /// and override default parameters of evaluations. 
When configuration is null, the /// profile job will run with default settings.</p> pub fn profile_configuration(mut self, input: crate::model::ProfileConfiguration) -> Self { self.profile_configuration = Some(input); self } pub fn set_profile_configuration( mut self, input: std::option::Option<crate::model::ProfileConfiguration>, ) -> Self { self.profile_configuration = input; self } /// <p>The unique identifier of the job run.</p> pub fn run_id(mut self, input: impl Into<std::string::String>) -> Self { self.run_id = Some(input.into()); self } pub fn set_run_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.run_id = input; self } /// <p>The current state of the job run entity itself.</p> pub fn state(mut self, input: crate::model::JobRunState) -> Self { self.state = Some(input); self } pub fn set_state(mut self, input: std::option::Option<crate::model::JobRunState>) -> Self { self.state = input; self } /// <p>The current status of Amazon CloudWatch logging for the job run.</p> pub fn log_subscription(mut self, input: crate::model::LogSubscription) -> Self { self.log_subscription = Some(input); self } pub fn set_log_subscription( mut self, input: std::option::Option<crate::model::LogSubscription>, ) -> Self { self.log_subscription = input; self } /// <p>The name of an Amazon CloudWatch log group, where the job writes diagnostic messages /// when it runs.</p> pub fn log_group_name(mut self, input: impl Into<std::string::String>) -> Self { self.log_group_name = Some(input.into()); self } pub fn set_log_group_name( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.log_group_name = input; self } pub fn outputs(mut self, input: impl Into<crate::model::Output>) -> Self { let mut v = self.outputs.unwrap_or_default(); v.push(input.into()); self.outputs = Some(v); self } pub fn set_outputs( mut self, input: std::option::Option<std::vec::Vec<crate::model::Output>>, ) -> Self { self.outputs = input; self } pub fn 
data_catalog_outputs( mut self, input: impl Into<crate::model::DataCatalogOutput>, ) -> Self { let mut v = self.data_catalog_outputs.unwrap_or_default(); v.push(input.into()); self.data_catalog_outputs = Some(v); self } pub fn set_data_catalog_outputs( mut self, input: std::option::Option<std::vec::Vec<crate::model::DataCatalogOutput>>, ) -> Self { self.data_catalog_outputs = input; self } pub fn database_outputs(mut self, input: impl Into<crate::model::DatabaseOutput>) -> Self { let mut v = self.database_outputs.unwrap_or_default(); v.push(input.into()); self.database_outputs = Some(v); self } pub fn set_database_outputs( mut self, input: std::option::Option<std::vec::Vec<crate::model::DatabaseOutput>>, ) -> Self { self.database_outputs = input; self } /// <p>Represents the name and version of a DataBrew recipe.</p> pub fn recipe_reference(mut self, input: crate::model::RecipeReference) -> Self { self.recipe_reference = Some(input); self } pub fn set_recipe_reference( mut self, input: std::option::Option<crate::model::RecipeReference>, ) -> Self { self.recipe_reference = input; self } /// <p>The Amazon Resource Name (ARN) of the user who started the job run.</p> pub fn started_by(mut self, input: impl Into<std::string::String>) -> Self { self.started_by = Some(input.into()); self } pub fn set_started_by(mut self, input: std::option::Option<std::string::String>) -> Self { self.started_by = input; self } /// <p>The date and time when the job run began.</p> pub fn started_on(mut self, input: smithy_types::Instant) -> Self { self.started_on = Some(input); self } pub fn set_started_on(mut self, input: std::option::Option<smithy_types::Instant>) -> Self { self.started_on = input; self } /// <p>Sample configuration for profile jobs only. Determines the number of rows on which the /// profile job will be executed. If a JobSample value is not provided, the default value /// will be used. 
The default value is CUSTOM_ROWS for the mode parameter and 20000 for the /// size parameter.</p> pub fn job_sample(mut self, input: crate::model::JobSample) -> Self { self.job_sample = Some(input); self } pub fn set_job_sample( mut self, input: std::option::Option<crate::model::JobSample>, ) -> Self { self.job_sample = input; self } /// Consumes the builder and constructs a [`DescribeJobRunOutput`](crate::output::DescribeJobRunOutput) pub fn build(self) -> crate::output::DescribeJobRunOutput { crate::output::DescribeJobRunOutput { attempt: self.attempt.unwrap_or_default(), completed_on: self.completed_on, dataset_name: self.dataset_name, error_message: self.error_message, execution_time: self.execution_time.unwrap_or_default(), job_name: self.job_name, profile_configuration: self.profile_configuration, run_id: self.run_id, state: self.state, log_subscription: self.log_subscription, log_group_name: self.log_group_name, outputs: self.outputs, data_catalog_outputs: self.data_catalog_outputs, database_outputs: self.database_outputs, recipe_reference: self.recipe_reference, started_by: self.started_by, started_on: self.started_on, job_sample: self.job_sample, } } } } impl DescribeJobRunOutput { /// Creates a new builder-style object to manufacture [`DescribeJobRunOutput`](crate::output::DescribeJobRunOutput) pub fn builder() -> crate::output::describe_job_run_output::Builder { crate::output::describe_job_run_output::Builder::default() } } #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct DescribeJobOutput { /// <p>The date and time that the job was created.</p> pub create_date: std::option::Option<smithy_types::Instant>, /// <p>The identifier (user name) of the user associated with the creation of the job.</p> pub created_by: std::option::Option<std::string::String>, /// <p>The dataset that the job acts upon.</p> pub dataset_name: std::option::Option<std::string::String>, /// <p>The Amazon Resource Name (ARN) of an encryption key that is 
used to protect the /// job.</p> pub encryption_key_arn: std::option::Option<std::string::String>, /// <p>The encryption mode for the job, which can be one of the following:</p> /// <ul> /// <li> /// <p> /// <code>SSE-KMS</code> - Server-side encryption with keys managed by KMS.</p> /// </li> /// <li> /// <p> /// <code>SSE-S3</code> - Server-side encryption with keys managed by Amazon /// S3.</p> /// </li> /// </ul> pub encryption_mode: std::option::Option<crate::model::EncryptionMode>, /// <p>The name of the job.</p> pub name: std::option::Option<std::string::String>, /// <p>The job type, which must be one of the following:</p> /// <ul> /// <li> /// <p> /// <code>PROFILE</code> - The job analyzes the dataset to determine its size, /// data types, data distribution, and more.</p> /// </li> /// <li> /// <p> /// <code>RECIPE</code> - The job applies one or more transformations to a /// dataset.</p> /// </li> /// </ul> pub r#type: std::option::Option<crate::model::JobType>, /// <p>The identifier (user name) of the user who last modified the job.</p> pub last_modified_by: std::option::Option<std::string::String>, /// <p>The date and time that the job was last modified.</p> pub last_modified_date: std::option::Option<smithy_types::Instant>, /// <p>Indicates whether Amazon CloudWatch logging is enabled for this job.</p> pub log_subscription: std::option::Option<crate::model::LogSubscription>, /// <p>The maximum number of compute nodes that DataBrew can consume when the job processes /// data.</p> pub max_capacity: i32, /// <p>The maximum number of times to retry the job after a job run fails.</p> pub max_retries: i32, /// <p>One or more artifacts that represent the output from running the job.</p> pub outputs: std::option::Option<std::vec::Vec<crate::model::Output>>, /// <p>One or more artifacts that represent the Glue Data Catalog output from running the job.</p> pub data_catalog_outputs: std::option::Option<std::vec::Vec<crate::model::DataCatalogOutput>>, /// 
<p>Represents a list of JDBC database output objects which defines the output /// destination for a DataBrew recipe job to write into.</p> pub database_outputs: std::option::Option<std::vec::Vec<crate::model::DatabaseOutput>>, /// <p>The DataBrew project associated with this job.</p> pub project_name: std::option::Option<std::string::String>, /// <p>Configuration for profile jobs. Used to select columns, do evaluations, /// and override default parameters of evaluations. When configuration is null, the /// profile job will run with default settings.</p> pub profile_configuration: std::option::Option<crate::model::ProfileConfiguration>, /// <p>Represents the name and version of a DataBrew recipe.</p> pub recipe_reference: std::option::Option<crate::model::RecipeReference>, /// <p>The Amazon Resource Name (ARN) of the job.</p> pub resource_arn: std::option::Option<std::string::String>, /// <p>The ARN of the Identity and Access Management (IAM) role to be assumed when /// DataBrew runs the job.</p> pub role_arn: std::option::Option<std::string::String>, /// <p>Metadata tags associated with this job.</p> pub tags: std::option::Option<std::collections::HashMap<std::string::String, std::string::String>>, /// <p>The job's timeout in minutes. A job that attempts to run longer than this timeout /// period ends with a status of <code>TIMEOUT</code>.</p> pub timeout: i32, /// <p>Sample configuration for profile jobs only. 
Determines the number of rows on which the /// profile job will be executed.</p> pub job_sample: std::option::Option<crate::model::JobSample>, } impl std::fmt::Debug for DescribeJobOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("DescribeJobOutput"); formatter.field("create_date", &self.create_date); formatter.field("created_by", &self.created_by); formatter.field("dataset_name", &self.dataset_name); formatter.field("encryption_key_arn", &self.encryption_key_arn); formatter.field("encryption_mode", &self.encryption_mode); formatter.field("name", &self.name); formatter.field("r#type", &self.r#type); formatter.field("last_modified_by", &self.last_modified_by); formatter.field("last_modified_date", &self.last_modified_date); formatter.field("log_subscription", &self.log_subscription); formatter.field("max_capacity", &self.max_capacity); formatter.field("max_retries", &self.max_retries); formatter.field("outputs", &self.outputs); formatter.field("data_catalog_outputs", &self.data_catalog_outputs); formatter.field("database_outputs", &self.database_outputs); formatter.field("project_name", &self.project_name); formatter.field("profile_configuration", &self.profile_configuration); formatter.field("recipe_reference", &self.recipe_reference); formatter.field("resource_arn", &self.resource_arn); formatter.field("role_arn", &self.role_arn); formatter.field("tags", &self.tags); formatter.field("timeout", &self.timeout); formatter.field("job_sample", &self.job_sample); formatter.finish() } } /// See [`DescribeJobOutput`](crate::output::DescribeJobOutput) pub mod describe_job_output { /// A builder for [`DescribeJobOutput`](crate::output::DescribeJobOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) create_date: std::option::Option<smithy_types::Instant>, pub(crate) created_by: std::option::Option<std::string::String>, 
pub(crate) dataset_name: std::option::Option<std::string::String>, pub(crate) encryption_key_arn: std::option::Option<std::string::String>, pub(crate) encryption_mode: std::option::Option<crate::model::EncryptionMode>, pub(crate) name: std::option::Option<std::string::String>, pub(crate) r#type: std::option::Option<crate::model::JobType>, pub(crate) last_modified_by: std::option::Option<std::string::String>, pub(crate) last_modified_date: std::option::Option<smithy_types::Instant>, pub(crate) log_subscription: std::option::Option<crate::model::LogSubscription>, pub(crate) max_capacity: std::option::Option<i32>, pub(crate) max_retries: std::option::Option<i32>, pub(crate) outputs: std::option::Option<std::vec::Vec<crate::model::Output>>, pub(crate) data_catalog_outputs: std::option::Option<std::vec::Vec<crate::model::DataCatalogOutput>>, pub(crate) database_outputs: std::option::Option<std::vec::Vec<crate::model::DatabaseOutput>>, pub(crate) project_name: std::option::Option<std::string::String>, pub(crate) profile_configuration: std::option::Option<crate::model::ProfileConfiguration>, pub(crate) recipe_reference: std::option::Option<crate::model::RecipeReference>, pub(crate) resource_arn: std::option::Option<std::string::String>, pub(crate) role_arn: std::option::Option<std::string::String>, pub(crate) tags: std::option::Option< std::collections::HashMap<std::string::String, std::string::String>, >, pub(crate) timeout: std::option::Option<i32>, pub(crate) job_sample: std::option::Option<crate::model::JobSample>, } impl Builder { /// <p>The date and time that the job was created.</p> pub fn create_date(mut self, input: smithy_types::Instant) -> Self { self.create_date = Some(input); self } pub fn set_create_date( mut self, input: std::option::Option<smithy_types::Instant>, ) -> Self { self.create_date = input; self } /// <p>The identifier (user name) of the user associated with the creation of the job.</p> pub fn created_by(mut self, input: impl 
Into<std::string::String>) -> Self { self.created_by = Some(input.into()); self } pub fn set_created_by(mut self, input: std::option::Option<std::string::String>) -> Self { self.created_by = input; self } /// <p>The dataset that the job acts upon.</p> pub fn dataset_name(mut self, input: impl Into<std::string::String>) -> Self { self.dataset_name = Some(input.into()); self } pub fn set_dataset_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.dataset_name = input; self } /// <p>The Amazon Resource Name (ARN) of an encryption key that is used to protect the /// job.</p> pub fn encryption_key_arn(mut self, input: impl Into<std::string::String>) -> Self { self.encryption_key_arn = Some(input.into()); self } pub fn set_encryption_key_arn( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.encryption_key_arn = input; self } /// <p>The encryption mode for the job, which can be one of the following:</p> /// <ul> /// <li> /// <p> /// <code>SSE-KMS</code> - Server-side encryption with keys managed by KMS.</p> /// </li> /// <li> /// <p> /// <code>SSE-S3</code> - Server-side encryption with keys managed by Amazon /// S3.</p> /// </li> /// </ul> pub fn encryption_mode(mut self, input: crate::model::EncryptionMode) -> Self { self.encryption_mode = Some(input); self } pub fn set_encryption_mode( mut self, input: std::option::Option<crate::model::EncryptionMode>, ) -> Self { self.encryption_mode = input; self } /// <p>The name of the job.</p> pub fn name(mut self, input: impl Into<std::string::String>) -> Self { self.name = Some(input.into()); self } pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.name = input; self } /// <p>The job type, which must be one of the following:</p> /// <ul> /// <li> /// <p> /// <code>PROFILE</code> - The job analyzes the dataset to determine its size, /// data types, data distribution, and more.</p> /// </li> /// <li> /// <p> /// <code>RECIPE</code> - The 
job applies one or more transformations to a /// dataset.</p> /// </li> /// </ul> pub fn r#type(mut self, input: crate::model::JobType) -> Self { self.r#type = Some(input); self } pub fn set_type(mut self, input: std::option::Option<crate::model::JobType>) -> Self { self.r#type = input; self } /// <p>The identifier (user name) of the user who last modified the job.</p> pub fn last_modified_by(mut self, input: impl Into<std::string::String>) -> Self { self.last_modified_by = Some(input.into()); self } pub fn set_last_modified_by( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.last_modified_by = input; self } /// <p>The date and time that the job was last modified.</p> pub fn last_modified_date(mut self, input: smithy_types::Instant) -> Self { self.last_modified_date = Some(input); self } pub fn set_last_modified_date( mut self, input: std::option::Option<smithy_types::Instant>, ) -> Self { self.last_modified_date = input; self } /// <p>Indicates whether Amazon CloudWatch logging is enabled for this job.</p> pub fn log_subscription(mut self, input: crate::model::LogSubscription) -> Self { self.log_subscription = Some(input); self } pub fn set_log_subscription( mut self, input: std::option::Option<crate::model::LogSubscription>, ) -> Self { self.log_subscription = input; self } /// <p>The maximum number of compute nodes that DataBrew can consume when the job processes /// data.</p> pub fn max_capacity(mut self, input: i32) -> Self { self.max_capacity = Some(input); self } pub fn set_max_capacity(mut self, input: std::option::Option<i32>) -> Self { self.max_capacity = input; self } /// <p>The maximum number of times to retry the job after a job run fails.</p> pub fn max_retries(mut self, input: i32) -> Self { self.max_retries = Some(input); self } pub fn set_max_retries(mut self, input: std::option::Option<i32>) -> Self { self.max_retries = input; self } pub fn outputs(mut self, input: impl Into<crate::model::Output>) -> Self { let mut v = 
self.outputs.unwrap_or_default(); v.push(input.into()); self.outputs = Some(v); self } pub fn set_outputs( mut self, input: std::option::Option<std::vec::Vec<crate::model::Output>>, ) -> Self { self.outputs = input; self } pub fn data_catalog_outputs( mut self, input: impl Into<crate::model::DataCatalogOutput>, ) -> Self { let mut v = self.data_catalog_outputs.unwrap_or_default(); v.push(input.into()); self.data_catalog_outputs = Some(v); self } pub fn set_data_catalog_outputs( mut self, input: std::option::Option<std::vec::Vec<crate::model::DataCatalogOutput>>, ) -> Self { self.data_catalog_outputs = input; self } pub fn database_outputs(mut self, input: impl Into<crate::model::DatabaseOutput>) -> Self { let mut v = self.database_outputs.unwrap_or_default(); v.push(input.into()); self.database_outputs = Some(v); self } pub fn set_database_outputs( mut self, input: std::option::Option<std::vec::Vec<crate::model::DatabaseOutput>>, ) -> Self { self.database_outputs = input; self } /// <p>The DataBrew project associated with this job.</p> pub fn project_name(mut self, input: impl Into<std::string::String>) -> Self { self.project_name = Some(input.into()); self } pub fn set_project_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.project_name = input; self } /// <p>Configuration for profile jobs. Used to select columns, do evaluations, /// and override default parameters of evaluations. 
When configuration is null, the /// profile job will run with default settings.</p> pub fn profile_configuration(mut self, input: crate::model::ProfileConfiguration) -> Self { self.profile_configuration = Some(input); self } pub fn set_profile_configuration( mut self, input: std::option::Option<crate::model::ProfileConfiguration>, ) -> Self { self.profile_configuration = input; self } /// <p>Represents the name and version of a DataBrew recipe.</p> pub fn recipe_reference(mut self, input: crate::model::RecipeReference) -> Self { self.recipe_reference = Some(input); self } pub fn set_recipe_reference( mut self, input: std::option::Option<crate::model::RecipeReference>, ) -> Self { self.recipe_reference = input; self } /// <p>The Amazon Resource Name (ARN) of the job.</p> pub fn resource_arn(mut self, input: impl Into<std::string::String>) -> Self { self.resource_arn = Some(input.into()); self } pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self { self.resource_arn = input; self } /// <p>The ARN of the Identity and Access Management (IAM) role to be assumed when /// DataBrew runs the job.</p> pub fn role_arn(mut self, input: impl Into<std::string::String>) -> Self { self.role_arn = Some(input.into()); self } pub fn set_role_arn(mut self, input: std::option::Option<std::string::String>) -> Self { self.role_arn = input; self } pub fn tags( mut self, k: impl Into<std::string::String>, v: impl Into<std::string::String>, ) -> Self { let mut hash_map = self.tags.unwrap_or_default(); hash_map.insert(k.into(), v.into()); self.tags = Some(hash_map); self } pub fn set_tags( mut self, input: std::option::Option< std::collections::HashMap<std::string::String, std::string::String>, >, ) -> Self { self.tags = input; self } /// <p>The job's timeout in minutes. 
A job that attempts to run longer than this timeout /// period ends with a status of <code>TIMEOUT</code>.</p> pub fn timeout(mut self, input: i32) -> Self { self.timeout = Some(input); self } pub fn set_timeout(mut self, input: std::option::Option<i32>) -> Self { self.timeout = input; self } /// <p>Sample configuration for profile jobs only. Determines the number of rows on which the /// profile job will be executed.</p> pub fn job_sample(mut self, input: crate::model::JobSample) -> Self { self.job_sample = Some(input); self } pub fn set_job_sample( mut self, input: std::option::Option<crate::model::JobSample>, ) -> Self { self.job_sample = input; self } /// Consumes the builder and constructs a [`DescribeJobOutput`](crate::output::DescribeJobOutput) pub fn build(self) -> crate::output::DescribeJobOutput { crate::output::DescribeJobOutput { create_date: self.create_date, created_by: self.created_by, dataset_name: self.dataset_name, encryption_key_arn: self.encryption_key_arn, encryption_mode: self.encryption_mode, name: self.name, r#type: self.r#type, last_modified_by: self.last_modified_by, last_modified_date: self.last_modified_date, log_subscription: self.log_subscription, max_capacity: self.max_capacity.unwrap_or_default(), max_retries: self.max_retries.unwrap_or_default(), outputs: self.outputs, data_catalog_outputs: self.data_catalog_outputs, database_outputs: self.database_outputs, project_name: self.project_name, profile_configuration: self.profile_configuration, recipe_reference: self.recipe_reference, resource_arn: self.resource_arn, role_arn: self.role_arn, tags: self.tags, timeout: self.timeout.unwrap_or_default(), job_sample: self.job_sample, } } } } impl DescribeJobOutput { /// Creates a new builder-style object to manufacture [`DescribeJobOutput`](crate::output::DescribeJobOutput) pub fn builder() -> crate::output::describe_job_output::Builder { crate::output::describe_job_output::Builder::default() } } #[non_exhaustive] #[derive(std::clone::Clone, 
std::cmp::PartialEq)] pub struct DescribeDatasetOutput { /// <p>The identifier (user name) of the user who created the dataset.</p> pub created_by: std::option::Option<std::string::String>, /// <p>The date and time that the dataset was created.</p> pub create_date: std::option::Option<smithy_types::Instant>, /// <p>The name of the dataset.</p> pub name: std::option::Option<std::string::String>, /// <p>The file format of a dataset that is created from an Amazon S3 file or folder.</p> pub format: std::option::Option<crate::model::InputFormat>, /// <p>Represents a set of options that define the structure of either comma-separated value (CSV), /// Excel, or JSON input.</p> pub format_options: std::option::Option<crate::model::FormatOptions>, /// <p>Represents information on how DataBrew can find data, in either the Glue Data Catalog or /// Amazon S3.</p> pub input: std::option::Option<crate::model::Input>, /// <p>The date and time that the dataset was last modified.</p> pub last_modified_date: std::option::Option<smithy_types::Instant>, /// <p>The identifier (user name) of the user who last modified the dataset.</p> pub last_modified_by: std::option::Option<std::string::String>, /// <p>The location of the data for this dataset, Amazon S3 or the Glue Data Catalog.</p> pub source: std::option::Option<crate::model::Source>, /// <p>A set of options that defines how DataBrew interprets an Amazon S3 path of the dataset.</p> pub path_options: std::option::Option<crate::model::PathOptions>, /// <p>Metadata tags associated with this dataset.</p> pub tags: std::option::Option<std::collections::HashMap<std::string::String, std::string::String>>, /// <p>The Amazon Resource Name (ARN) of the dataset.</p> pub resource_arn: std::option::Option<std::string::String>, } impl std::fmt::Debug for DescribeDatasetOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("DescribeDatasetOutput"); formatter.field("created_by", 
&self.created_by); formatter.field("create_date", &self.create_date); formatter.field("name", &self.name); formatter.field("format", &self.format); formatter.field("format_options", &self.format_options); formatter.field("input", &self.input); formatter.field("last_modified_date", &self.last_modified_date); formatter.field("last_modified_by", &self.last_modified_by); formatter.field("source", &self.source); formatter.field("path_options", &self.path_options); formatter.field("tags", &self.tags); formatter.field("resource_arn", &self.resource_arn); formatter.finish() } } /// See [`DescribeDatasetOutput`](crate::output::DescribeDatasetOutput) pub mod describe_dataset_output { /// A builder for [`DescribeDatasetOutput`](crate::output::DescribeDatasetOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) created_by: std::option::Option<std::string::String>, pub(crate) create_date: std::option::Option<smithy_types::Instant>, pub(crate) name: std::option::Option<std::string::String>, pub(crate) format: std::option::Option<crate::model::InputFormat>, pub(crate) format_options: std::option::Option<crate::model::FormatOptions>, pub(crate) input: std::option::Option<crate::model::Input>, pub(crate) last_modified_date: std::option::Option<smithy_types::Instant>, pub(crate) last_modified_by: std::option::Option<std::string::String>, pub(crate) source: std::option::Option<crate::model::Source>, pub(crate) path_options: std::option::Option<crate::model::PathOptions>, pub(crate) tags: std::option::Option< std::collections::HashMap<std::string::String, std::string::String>, >, pub(crate) resource_arn: std::option::Option<std::string::String>, } impl Builder { /// <p>The identifier (user name) of the user who created the dataset.</p> pub fn created_by(mut self, input: impl Into<std::string::String>) -> Self { self.created_by = Some(input.into()); self } pub fn set_created_by(mut self, 
input: std::option::Option<std::string::String>) -> Self { self.created_by = input; self } /// <p>The date and time that the dataset was created.</p> pub fn create_date(mut self, input: smithy_types::Instant) -> Self { self.create_date = Some(input); self } pub fn set_create_date( mut self, input: std::option::Option<smithy_types::Instant>, ) -> Self { self.create_date = input; self } /// <p>The name of the dataset.</p> pub fn name(mut self, input: impl Into<std::string::String>) -> Self { self.name = Some(input.into()); self } pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.name = input; self } /// <p>The file format of a dataset that is created from an Amazon S3 file or folder.</p> pub fn format(mut self, input: crate::model::InputFormat) -> Self { self.format = Some(input); self } pub fn set_format(mut self, input: std::option::Option<crate::model::InputFormat>) -> Self { self.format = input; self } /// <p>Represents a set of options that define the structure of either comma-separated value (CSV), /// Excel, or JSON input.</p> pub fn format_options(mut self, input: crate::model::FormatOptions) -> Self { self.format_options = Some(input); self } pub fn set_format_options( mut self, input: std::option::Option<crate::model::FormatOptions>, ) -> Self { self.format_options = input; self } /// <p>Represents information on how DataBrew can find data, in either the Glue Data Catalog or /// Amazon S3.</p> pub fn input(mut self, input: crate::model::Input) -> Self { self.input = Some(input); self } pub fn set_input(mut self, input: std::option::Option<crate::model::Input>) -> Self { self.input = input; self } /// <p>The date and time that the dataset was last modified.</p> pub fn last_modified_date(mut self, input: smithy_types::Instant) -> Self { self.last_modified_date = Some(input); self } pub fn set_last_modified_date( mut self, input: std::option::Option<smithy_types::Instant>, ) -> Self { self.last_modified_date = input; 
self } /// <p>The identifier (user name) of the user who last modified the dataset.</p> pub fn last_modified_by(mut self, input: impl Into<std::string::String>) -> Self { self.last_modified_by = Some(input.into()); self } pub fn set_last_modified_by( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.last_modified_by = input; self } /// <p>The location of the data for this dataset, Amazon S3 or the Glue Data Catalog.</p> pub fn source(mut self, input: crate::model::Source) -> Self { self.source = Some(input); self } pub fn set_source(mut self, input: std::option::Option<crate::model::Source>) -> Self { self.source = input; self } /// <p>A set of options that defines how DataBrew interprets an Amazon S3 path of the dataset.</p> pub fn path_options(mut self, input: crate::model::PathOptions) -> Self { self.path_options = Some(input); self } pub fn set_path_options( mut self, input: std::option::Option<crate::model::PathOptions>, ) -> Self { self.path_options = input; self } pub fn tags( mut self, k: impl Into<std::string::String>, v: impl Into<std::string::String>, ) -> Self { let mut hash_map = self.tags.unwrap_or_default(); hash_map.insert(k.into(), v.into()); self.tags = Some(hash_map); self } pub fn set_tags( mut self, input: std::option::Option< std::collections::HashMap<std::string::String, std::string::String>, >, ) -> Self { self.tags = input; self } /// <p>The Amazon Resource Name (ARN) of the dataset.</p> pub fn resource_arn(mut self, input: impl Into<std::string::String>) -> Self { self.resource_arn = Some(input.into()); self } pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self { self.resource_arn = input; self } /// Consumes the builder and constructs a [`DescribeDatasetOutput`](crate::output::DescribeDatasetOutput) pub fn build(self) -> crate::output::DescribeDatasetOutput { crate::output::DescribeDatasetOutput { created_by: self.created_by, create_date: self.create_date, name: self.name, 
format: self.format, format_options: self.format_options, input: self.input, last_modified_date: self.last_modified_date, last_modified_by: self.last_modified_by, source: self.source, path_options: self.path_options, tags: self.tags, resource_arn: self.resource_arn, } } } } impl DescribeDatasetOutput { /// Creates a new builder-style object to manufacture [`DescribeDatasetOutput`](crate::output::DescribeDatasetOutput) pub fn builder() -> crate::output::describe_dataset_output::Builder { crate::output::describe_dataset_output::Builder::default() } } #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct DeleteScheduleOutput { /// <p>The name of the schedule that was deleted.</p> pub name: std::option::Option<std::string::String>, } impl std::fmt::Debug for DeleteScheduleOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("DeleteScheduleOutput"); formatter.field("name", &self.name); formatter.finish() } } /// See [`DeleteScheduleOutput`](crate::output::DeleteScheduleOutput) pub mod delete_schedule_output { /// A builder for [`DeleteScheduleOutput`](crate::output::DeleteScheduleOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) name: std::option::Option<std::string::String>, } impl Builder { /// <p>The name of the schedule that was deleted.</p> pub fn name(mut self, input: impl Into<std::string::String>) -> Self { self.name = Some(input.into()); self } pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.name = input; self } /// Consumes the builder and constructs a [`DeleteScheduleOutput`](crate::output::DeleteScheduleOutput) pub fn build(self) -> crate::output::DeleteScheduleOutput { crate::output::DeleteScheduleOutput { name: self.name } } } } impl DeleteScheduleOutput { /// Creates a new builder-style object to manufacture 
[`DeleteScheduleOutput`](crate::output::DeleteScheduleOutput) pub fn builder() -> crate::output::delete_schedule_output::Builder { crate::output::delete_schedule_output::Builder::default() } } #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct DeleteRecipeVersionOutput { /// <p>The name of the recipe that was deleted.</p> pub name: std::option::Option<std::string::String>, /// <p>The version of the recipe that was deleted.</p> pub recipe_version: std::option::Option<std::string::String>, } impl std::fmt::Debug for DeleteRecipeVersionOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("DeleteRecipeVersionOutput"); formatter.field("name", &self.name); formatter.field("recipe_version", &self.recipe_version); formatter.finish() } } /// See [`DeleteRecipeVersionOutput`](crate::output::DeleteRecipeVersionOutput) pub mod delete_recipe_version_output { /// A builder for [`DeleteRecipeVersionOutput`](crate::output::DeleteRecipeVersionOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) name: std::option::Option<std::string::String>, pub(crate) recipe_version: std::option::Option<std::string::String>, } impl Builder { /// <p>The name of the recipe that was deleted.</p> pub fn name(mut self, input: impl Into<std::string::String>) -> Self { self.name = Some(input.into()); self } pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.name = input; self } /// <p>The version of the recipe that was deleted.</p> pub fn recipe_version(mut self, input: impl Into<std::string::String>) -> Self { self.recipe_version = Some(input.into()); self } pub fn set_recipe_version( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.recipe_version = input; self } /// Consumes the builder and constructs a 
[`DeleteRecipeVersionOutput`](crate::output::DeleteRecipeVersionOutput) pub fn build(self) -> crate::output::DeleteRecipeVersionOutput { crate::output::DeleteRecipeVersionOutput { name: self.name, recipe_version: self.recipe_version, } } } } impl DeleteRecipeVersionOutput { /// Creates a new builder-style object to manufacture [`DeleteRecipeVersionOutput`](crate::output::DeleteRecipeVersionOutput) pub fn builder() -> crate::output::delete_recipe_version_output::Builder { crate::output::delete_recipe_version_output::Builder::default() } } #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct DeleteProjectOutput { /// <p>The name of the project that you deleted.</p> pub name: std::option::Option<std::string::String>, } impl std::fmt::Debug for DeleteProjectOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("DeleteProjectOutput"); formatter.field("name", &self.name); formatter.finish() } } /// See [`DeleteProjectOutput`](crate::output::DeleteProjectOutput) pub mod delete_project_output { /// A builder for [`DeleteProjectOutput`](crate::output::DeleteProjectOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) name: std::option::Option<std::string::String>, } impl Builder { /// <p>The name of the project that you deleted.</p> pub fn name(mut self, input: impl Into<std::string::String>) -> Self { self.name = Some(input.into()); self } pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.name = input; self } /// Consumes the builder and constructs a [`DeleteProjectOutput`](crate::output::DeleteProjectOutput) pub fn build(self) -> crate::output::DeleteProjectOutput { crate::output::DeleteProjectOutput { name: self.name } } } } impl DeleteProjectOutput { /// Creates a new builder-style object to manufacture 
[`DeleteProjectOutput`](crate::output::DeleteProjectOutput) pub fn builder() -> crate::output::delete_project_output::Builder { crate::output::delete_project_output::Builder::default() } } #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct DeleteJobOutput { /// <p>The name of the job that you deleted.</p> pub name: std::option::Option<std::string::String>, } impl std::fmt::Debug for DeleteJobOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("DeleteJobOutput"); formatter.field("name", &self.name); formatter.finish() } } /// See [`DeleteJobOutput`](crate::output::DeleteJobOutput) pub mod delete_job_output { /// A builder for [`DeleteJobOutput`](crate::output::DeleteJobOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) name: std::option::Option<std::string::String>, } impl Builder { /// <p>The name of the job that you deleted.</p> pub fn name(mut self, input: impl Into<std::string::String>) -> Self { self.name = Some(input.into()); self } pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.name = input; self } /// Consumes the builder and constructs a [`DeleteJobOutput`](crate::output::DeleteJobOutput) pub fn build(self) -> crate::output::DeleteJobOutput { crate::output::DeleteJobOutput { name: self.name } } } } impl DeleteJobOutput { /// Creates a new builder-style object to manufacture [`DeleteJobOutput`](crate::output::DeleteJobOutput) pub fn builder() -> crate::output::delete_job_output::Builder { crate::output::delete_job_output::Builder::default() } } #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct DeleteDatasetOutput { /// <p>The name of the dataset that you deleted.</p> pub name: std::option::Option<std::string::String>, } impl std::fmt::Debug for DeleteDatasetOutput { fn fmt(&self, f: &mut 
std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("DeleteDatasetOutput"); formatter.field("name", &self.name); formatter.finish() } } /// See [`DeleteDatasetOutput`](crate::output::DeleteDatasetOutput) pub mod delete_dataset_output { /// A builder for [`DeleteDatasetOutput`](crate::output::DeleteDatasetOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) name: std::option::Option<std::string::String>, } impl Builder { /// <p>The name of the dataset that you deleted.</p> pub fn name(mut self, input: impl Into<std::string::String>) -> Self { self.name = Some(input.into()); self } pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.name = input; self } /// Consumes the builder and constructs a [`DeleteDatasetOutput`](crate::output::DeleteDatasetOutput) pub fn build(self) -> crate::output::DeleteDatasetOutput { crate::output::DeleteDatasetOutput { name: self.name } } } } impl DeleteDatasetOutput { /// Creates a new builder-style object to manufacture [`DeleteDatasetOutput`](crate::output::DeleteDatasetOutput) pub fn builder() -> crate::output::delete_dataset_output::Builder { crate::output::delete_dataset_output::Builder::default() } } #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct CreateScheduleOutput { /// <p>The name of the schedule that was created.</p> pub name: std::option::Option<std::string::String>, } impl std::fmt::Debug for CreateScheduleOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("CreateScheduleOutput"); formatter.field("name", &self.name); formatter.finish() } } /// See [`CreateScheduleOutput`](crate::output::CreateScheduleOutput) pub mod create_schedule_output { /// A builder for [`CreateScheduleOutput`](crate::output::CreateScheduleOutput) #[non_exhaustive] #[derive(std::default::Default, 
std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) name: std::option::Option<std::string::String>, } impl Builder { /// <p>The name of the schedule that was created.</p> pub fn name(mut self, input: impl Into<std::string::String>) -> Self { self.name = Some(input.into()); self } pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.name = input; self } /// Consumes the builder and constructs a [`CreateScheduleOutput`](crate::output::CreateScheduleOutput) pub fn build(self) -> crate::output::CreateScheduleOutput { crate::output::CreateScheduleOutput { name: self.name } } } } impl CreateScheduleOutput { /// Creates a new builder-style object to manufacture [`CreateScheduleOutput`](crate::output::CreateScheduleOutput) pub fn builder() -> crate::output::create_schedule_output::Builder { crate::output::create_schedule_output::Builder::default() } } #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct CreateRecipeJobOutput { /// <p>The name of the job that you created.</p> pub name: std::option::Option<std::string::String>, } impl std::fmt::Debug for CreateRecipeJobOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("CreateRecipeJobOutput"); formatter.field("name", &self.name); formatter.finish() } } /// See [`CreateRecipeJobOutput`](crate::output::CreateRecipeJobOutput) pub mod create_recipe_job_output { /// A builder for [`CreateRecipeJobOutput`](crate::output::CreateRecipeJobOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) name: std::option::Option<std::string::String>, } impl Builder { /// <p>The name of the job that you created.</p> pub fn name(mut self, input: impl Into<std::string::String>) -> Self { self.name = Some(input.into()); self } pub fn set_name(mut self, input: 
std::option::Option<std::string::String>) -> Self { self.name = input; self } /// Consumes the builder and constructs a [`CreateRecipeJobOutput`](crate::output::CreateRecipeJobOutput) pub fn build(self) -> crate::output::CreateRecipeJobOutput { crate::output::CreateRecipeJobOutput { name: self.name } } } } impl CreateRecipeJobOutput { /// Creates a new builder-style object to manufacture [`CreateRecipeJobOutput`](crate::output::CreateRecipeJobOutput) pub fn builder() -> crate::output::create_recipe_job_output::Builder { crate::output::create_recipe_job_output::Builder::default() } } #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct CreateRecipeOutput { /// <p>The name of the recipe that you created.</p> pub name: std::option::Option<std::string::String>, } impl std::fmt::Debug for CreateRecipeOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("CreateRecipeOutput"); formatter.field("name", &self.name); formatter.finish() } } /// See [`CreateRecipeOutput`](crate::output::CreateRecipeOutput) pub mod create_recipe_output { /// A builder for [`CreateRecipeOutput`](crate::output::CreateRecipeOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) name: std::option::Option<std::string::String>, } impl Builder { /// <p>The name of the recipe that you created.</p> pub fn name(mut self, input: impl Into<std::string::String>) -> Self { self.name = Some(input.into()); self } pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.name = input; self } /// Consumes the builder and constructs a [`CreateRecipeOutput`](crate::output::CreateRecipeOutput) pub fn build(self) -> crate::output::CreateRecipeOutput { crate::output::CreateRecipeOutput { name: self.name } } } } impl CreateRecipeOutput { /// Creates a new builder-style object to manufacture 
[`CreateRecipeOutput`](crate::output::CreateRecipeOutput) pub fn builder() -> crate::output::create_recipe_output::Builder { crate::output::create_recipe_output::Builder::default() } } #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct CreateProjectOutput { /// <p>The name of the project that you created.</p> pub name: std::option::Option<std::string::String>, } impl std::fmt::Debug for CreateProjectOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("CreateProjectOutput"); formatter.field("name", &self.name); formatter.finish() } } /// See [`CreateProjectOutput`](crate::output::CreateProjectOutput) pub mod create_project_output { /// A builder for [`CreateProjectOutput`](crate::output::CreateProjectOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) name: std::option::Option<std::string::String>, } impl Builder { /// <p>The name of the project that you created.</p> pub fn name(mut self, input: impl Into<std::string::String>) -> Self { self.name = Some(input.into()); self } pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.name = input; self } /// Consumes the builder and constructs a [`CreateProjectOutput`](crate::output::CreateProjectOutput) pub fn build(self) -> crate::output::CreateProjectOutput { crate::output::CreateProjectOutput { name: self.name } } } } impl CreateProjectOutput { /// Creates a new builder-style object to manufacture [`CreateProjectOutput`](crate::output::CreateProjectOutput) pub fn builder() -> crate::output::create_project_output::Builder { crate::output::create_project_output::Builder::default() } } #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct CreateProfileJobOutput { /// <p>The name of the job that was created.</p> pub name: std::option::Option<std::string::String>, } impl 
std::fmt::Debug for CreateProfileJobOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("CreateProfileJobOutput"); formatter.field("name", &self.name); formatter.finish() } } /// See [`CreateProfileJobOutput`](crate::output::CreateProfileJobOutput) pub mod create_profile_job_output { /// A builder for [`CreateProfileJobOutput`](crate::output::CreateProfileJobOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) name: std::option::Option<std::string::String>, } impl Builder { /// <p>The name of the job that was created.</p> pub fn name(mut self, input: impl Into<std::string::String>) -> Self { self.name = Some(input.into()); self } pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.name = input; self } /// Consumes the builder and constructs a [`CreateProfileJobOutput`](crate::output::CreateProfileJobOutput) pub fn build(self) -> crate::output::CreateProfileJobOutput { crate::output::CreateProfileJobOutput { name: self.name } } } } impl CreateProfileJobOutput { /// Creates a new builder-style object to manufacture [`CreateProfileJobOutput`](crate::output::CreateProfileJobOutput) pub fn builder() -> crate::output::create_profile_job_output::Builder { crate::output::create_profile_job_output::Builder::default() } } #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct CreateDatasetOutput { /// <p>The name of the dataset that you created.</p> pub name: std::option::Option<std::string::String>, } impl std::fmt::Debug for CreateDatasetOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("CreateDatasetOutput"); formatter.field("name", &self.name); formatter.finish() } } /// See [`CreateDatasetOutput`](crate::output::CreateDatasetOutput) pub mod create_dataset_output { /// A builder for 
[`CreateDatasetOutput`](crate::output::CreateDatasetOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) name: std::option::Option<std::string::String>, } impl Builder { /// <p>The name of the dataset that you created.</p> pub fn name(mut self, input: impl Into<std::string::String>) -> Self { self.name = Some(input.into()); self } pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.name = input; self } /// Consumes the builder and constructs a [`CreateDatasetOutput`](crate::output::CreateDatasetOutput) pub fn build(self) -> crate::output::CreateDatasetOutput { crate::output::CreateDatasetOutput { name: self.name } } } } impl CreateDatasetOutput { /// Creates a new builder-style object to manufacture [`CreateDatasetOutput`](crate::output::CreateDatasetOutput) pub fn builder() -> crate::output::create_dataset_output::Builder { crate::output::create_dataset_output::Builder::default() } } #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct BatchDeleteRecipeVersionOutput { /// <p>The name of the recipe that was modified.</p> pub name: std::option::Option<std::string::String>, /// <p>Errors, if any, that occurred while attempting to delete the recipe versions.</p> pub errors: std::option::Option<std::vec::Vec<crate::model::RecipeVersionErrorDetail>>, } impl std::fmt::Debug for BatchDeleteRecipeVersionOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("BatchDeleteRecipeVersionOutput"); formatter.field("name", &self.name); formatter.field("errors", &self.errors); formatter.finish() } } /// See [`BatchDeleteRecipeVersionOutput`](crate::output::BatchDeleteRecipeVersionOutput) pub mod batch_delete_recipe_version_output { /// A builder for [`BatchDeleteRecipeVersionOutput`](crate::output::BatchDeleteRecipeVersionOutput) #[non_exhaustive] 
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) name: std::option::Option<std::string::String>, pub(crate) errors: std::option::Option<std::vec::Vec<crate::model::RecipeVersionErrorDetail>>, } impl Builder { /// <p>The name of the recipe that was modified.</p> pub fn name(mut self, input: impl Into<std::string::String>) -> Self { self.name = Some(input.into()); self } pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.name = input; self } pub fn errors(mut self, input: impl Into<crate::model::RecipeVersionErrorDetail>) -> Self { let mut v = self.errors.unwrap_or_default(); v.push(input.into()); self.errors = Some(v); self } pub fn set_errors( mut self, input: std::option::Option<std::vec::Vec<crate::model::RecipeVersionErrorDetail>>, ) -> Self { self.errors = input; self } /// Consumes the builder and constructs a [`BatchDeleteRecipeVersionOutput`](crate::output::BatchDeleteRecipeVersionOutput) pub fn build(self) -> crate::output::BatchDeleteRecipeVersionOutput { crate::output::BatchDeleteRecipeVersionOutput { name: self.name, errors: self.errors, } } } } impl BatchDeleteRecipeVersionOutput { /// Creates a new builder-style object to manufacture [`BatchDeleteRecipeVersionOutput`](crate::output::BatchDeleteRecipeVersionOutput) pub fn builder() -> crate::output::batch_delete_recipe_version_output::Builder { crate::output::batch_delete_recipe_version_output::Builder::default() } }
43.629042
139
0.61047
29776968834ba2f49c9d7dd84fbe713eed889945
480
/// WormSettingsExtended : Specifies global SmartLock (WORM) settings. #[allow(unused_imports)] use serde_json::Value; #[derive(Debug, Serialize, Deserialize)] pub struct WormSettingsExtended { /// To set the compliance clock to the current system time, PUT to this resource with an empty JSON object {} for the cdate value. This cluster must be in compliance mode to set the compliance clock. #[serde(rename = "cdate")] pub cdate: Option <crate::models::Empty>, }
40
204
0.7375
0935ea96762367923a53bbf55d9bd78127b9747a
15,298
//! See [`CargoWorkspace`]. use std::iter; use std::path::PathBuf; use std::{convert::TryInto, ops, process::Command, sync::Arc}; use anyhow::{Context, Result}; use base_db::Edition; use cargo_metadata::{CargoOpt, MetadataCommand}; use la_arena::{Arena, Idx}; use paths::{AbsPath, AbsPathBuf}; use rustc_hash::FxHashMap; use serde::Deserialize; use serde_json::from_value; use crate::CfgOverrides; use crate::{build_data::BuildDataConfig, utf8_stdout}; /// [`CargoWorkspace`] represents the logical structure of, well, a Cargo /// workspace. It pretty closely mirrors `cargo metadata` output. /// /// Note that internally, rust analyzer uses a different structure: /// `CrateGraph`. `CrateGraph` is lower-level: it knows only about the crates, /// while this knows about `Packages` & `Targets`: purely cargo-related /// concepts. /// /// We use absolute paths here, `cargo metadata` guarantees to always produce /// abs paths. #[derive(Debug, Clone, Eq, PartialEq)] pub struct CargoWorkspace { packages: Arena<PackageData>, targets: Arena<TargetData>, workspace_root: AbsPathBuf, build_data_config: BuildDataConfig, } impl ops::Index<Package> for CargoWorkspace { type Output = PackageData; fn index(&self, index: Package) -> &PackageData { &self.packages[index] } } impl ops::Index<Target> for CargoWorkspace { type Output = TargetData; fn index(&self, index: Target) -> &TargetData { &self.targets[index] } } /// Describes how to set the rustc source directory. #[derive(Clone, Debug, PartialEq, Eq)] pub enum RustcSource { /// Explicit path for the rustc source directory. Path(AbsPathBuf), /// Try to automatically detect where the rustc source directory is. Discover, } #[derive(Default, Clone, Debug, PartialEq, Eq)] pub struct CargoConfig { /// Do not activate the `default` feature. pub no_default_features: bool, /// Activate all available features pub all_features: bool, /// List of features to activate. /// This will be ignored if `cargo_all_features` is true. 
pub features: Vec<String>, /// rustc target pub target: Option<String>, /// Don't load sysroot crates (`std`, `core` & friends). Might be useful /// when debugging isolated issues. pub no_sysroot: bool, /// rustc private crate source pub rustc_source: Option<RustcSource>, /// crates to disable `#[cfg(test)]` on pub unset_test_crates: Vec<String>, } impl CargoConfig { pub fn cfg_overrides(&self) -> CfgOverrides { self.unset_test_crates .iter() .cloned() .zip(iter::repeat_with(|| { cfg::CfgDiff::new(Vec::new(), vec![cfg::CfgAtom::Flag("test".into())]).unwrap() })) .collect() } } pub type Package = Idx<PackageData>; pub type Target = Idx<TargetData>; /// Information associated with a cargo crate #[derive(Debug, Clone, Eq, PartialEq)] pub struct PackageData { /// Version given in the `Cargo.toml` pub version: String, /// Name as given in the `Cargo.toml` pub name: String, /// Path containing the `Cargo.toml` pub manifest: AbsPathBuf, /// Targets provided by the crate (lib, bin, example, test, ...) pub targets: Vec<Target>, /// Is this package a member of the current workspace pub is_member: bool, /// List of packages this package depends on pub dependencies: Vec<PackageDependency>, /// Rust edition for this package pub edition: Edition, /// Features provided by the crate, mapped to the features required by that feature. 
pub features: FxHashMap<String, Vec<String>>, /// List of features enabled on this package pub active_features: Vec<String>, // String representation of package id pub id: String, // The contents of [package.metadata.rust-analyzer] pub metadata: RustAnalyzerPackageMetaData, } #[derive(Deserialize, Default, Debug, Clone, Eq, PartialEq)] pub struct RustAnalyzerPackageMetaData { pub rustc_private: bool, } #[derive(Debug, Clone, Eq, PartialEq)] pub struct PackageDependency { pub pkg: Package, pub name: String, pub kind: DepKind, } #[derive(Debug, Clone, Eq, PartialEq, PartialOrd, Ord)] pub enum DepKind { /// Available to the library, binary, and dev targets in the package (but not the build script). Normal, /// Available only to test and bench targets (and the library target, when built with `cfg(test)`). Dev, /// Available only to the build script target. Build, } impl DepKind { fn iter(list: &[cargo_metadata::DepKindInfo]) -> impl Iterator<Item = Self> + '_ { let mut dep_kinds = Vec::new(); if list.is_empty() { dep_kinds.push(Self::Normal); } for info in list { let kind = match info.kind { cargo_metadata::DependencyKind::Normal => Self::Normal, cargo_metadata::DependencyKind::Development => Self::Dev, cargo_metadata::DependencyKind::Build => Self::Build, cargo_metadata::DependencyKind::Unknown => continue, }; dep_kinds.push(kind); } dep_kinds.sort_unstable(); dep_kinds.dedup(); dep_kinds.into_iter() } } /// Information associated with a package's target #[derive(Debug, Clone, Eq, PartialEq)] pub struct TargetData { /// Package that provided this target pub package: Package, /// Name as given in the `Cargo.toml` or generated from the file name pub name: String, /// Path to the main source file of the target pub root: AbsPathBuf, /// Kind of target pub kind: TargetKind, /// Is this target a proc-macro pub is_proc_macro: bool, } #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum TargetKind { Bin, /// Any kind of Cargo lib crate-type (dylib, rlib, proc-macro, ...). 
Lib, Example, Test, Bench, BuildScript, Other, } impl TargetKind { fn new(kinds: &[String]) -> TargetKind { for kind in kinds { return match kind.as_str() { "bin" => TargetKind::Bin, "test" => TargetKind::Test, "bench" => TargetKind::Bench, "example" => TargetKind::Example, "custom-build" => TargetKind::BuildScript, "proc-macro" => TargetKind::Lib, _ if kind.contains("lib") => TargetKind::Lib, _ => continue, }; } TargetKind::Other } } impl PackageData { pub fn root(&self) -> &AbsPath { self.manifest.parent().unwrap() } } #[derive(Deserialize, Default)] // Deserialise helper for the cargo metadata struct PackageMetadata { #[serde(rename = "rust-analyzer")] rust_analyzer: Option<RustAnalyzerPackageMetaData>, } impl CargoWorkspace { pub fn from_cargo_metadata( cargo_toml: &AbsPath, config: &CargoConfig, progress: &dyn Fn(String), ) -> Result<CargoWorkspace> { let mut meta = MetadataCommand::new(); meta.cargo_path(toolchain::cargo()); meta.manifest_path(cargo_toml.to_path_buf()); if config.all_features { meta.features(CargoOpt::AllFeatures); } else { if config.no_default_features { // FIXME: `NoDefaultFeatures` is mutual exclusive with `SomeFeatures` // https://github.com/oli-obk/cargo_metadata/issues/79 meta.features(CargoOpt::NoDefaultFeatures); } if !config.features.is_empty() { meta.features(CargoOpt::SomeFeatures(config.features.clone())); } } if let Some(parent) = cargo_toml.parent() { meta.current_dir(parent.to_path_buf()); } let target = if let Some(target) = &config.target { Some(target.clone()) } else if let stdout @ Some(_) = cargo_config_build_target(cargo_toml) { stdout } else { rustc_discover_host_triple(cargo_toml) }; if let Some(target) = target { meta.other_options(vec![String::from("--filter-platform"), target]); } // FIXME: Currently MetadataCommand is not based on parse_stream, // So we just report it as a whole progress("metadata".to_string()); let mut meta = meta.exec().with_context(|| { let cwd: Option<AbsPathBuf> = 
std::env::current_dir().ok().and_then(|p| p.try_into().ok()); let workdir = cargo_toml .parent() .map(|p| p.to_path_buf()) .or(cwd) .map(|dir| dir.to_string_lossy().to_string()) .unwrap_or_else(|| "<failed to get path>".into()); format!( "Failed to run `cargo metadata --manifest-path {}` in `{}`", cargo_toml.display(), workdir ) })?; let mut pkg_by_id = FxHashMap::default(); let mut packages = Arena::default(); let mut targets = Arena::default(); let ws_members = &meta.workspace_members; meta.packages.sort_by(|a, b| a.id.cmp(&b.id)); for meta_pkg in &meta.packages { let cargo_metadata::Package { id, edition, name, manifest_path, version, metadata, .. } = meta_pkg; let meta = from_value::<PackageMetadata>(metadata.clone()).unwrap_or_default(); let is_member = ws_members.contains(id); let edition = edition .parse::<Edition>() .with_context(|| format!("Failed to parse edition {}", edition))?; let pkg = packages.alloc(PackageData { id: id.repr.clone(), name: name.clone(), version: version.to_string(), manifest: AbsPathBuf::assert(PathBuf::from(&manifest_path)), targets: Vec::new(), is_member, edition, dependencies: Vec::new(), features: meta_pkg.features.clone().into_iter().collect(), active_features: Vec::new(), metadata: meta.rust_analyzer.unwrap_or_default(), }); let pkg_data = &mut packages[pkg]; pkg_by_id.insert(id, pkg); for meta_tgt in &meta_pkg.targets { let is_proc_macro = meta_tgt.kind.as_slice() == ["proc-macro"]; let tgt = targets.alloc(TargetData { package: pkg, name: meta_tgt.name.clone(), root: AbsPathBuf::assert(PathBuf::from(&meta_tgt.src_path)), kind: TargetKind::new(meta_tgt.kind.as_slice()), is_proc_macro, }); pkg_data.targets.push(tgt); } } let resolve = meta.resolve.expect("metadata executed with deps"); for mut node in resolve.nodes { let source = match pkg_by_id.get(&node.id) { Some(&src) => src, // FIXME: replace this and a similar branch below with `.unwrap`, once // https://github.com/rust-lang/cargo/issues/7841 // is fixed and hits stable 
(around 1.43-is probably?). None => { log::error!("Node id do not match in cargo metadata, ignoring {}", node.id); continue; } }; node.deps.sort_by(|a, b| a.pkg.cmp(&b.pkg)); for (dep_node, kind) in node .deps .iter() .flat_map(|dep| DepKind::iter(&dep.dep_kinds).map(move |kind| (dep, kind))) { let pkg = match pkg_by_id.get(&dep_node.pkg) { Some(&pkg) => pkg, None => { log::error!( "Dep node id do not match in cargo metadata, ignoring {}", dep_node.pkg ); continue; } }; let dep = PackageDependency { name: dep_node.name.clone(), pkg, kind }; packages[source].dependencies.push(dep); } packages[source].active_features.extend(node.features); } let workspace_root = AbsPathBuf::assert(PathBuf::from(meta.workspace_root.into_os_string())); let build_data_config = BuildDataConfig::new(cargo_toml.to_path_buf(), config.clone(), Arc::new(meta.packages)); Ok(CargoWorkspace { packages, targets, workspace_root, build_data_config }) } pub fn packages<'a>(&'a self) -> impl Iterator<Item = Package> + ExactSizeIterator + 'a { self.packages.iter().map(|(id, _pkg)| id) } pub fn target_by_root(&self, root: &AbsPath) -> Option<Target> { self.packages() .filter_map(|pkg| self[pkg].targets.iter().find(|&&it| &self[it].root == root)) .next() .copied() } pub fn workspace_root(&self) -> &AbsPath { &self.workspace_root } pub fn package_flag(&self, package: &PackageData) -> String { if self.is_unique(&*package.name) { package.name.clone() } else { format!("{}:{}", package.name, package.version) } } pub(crate) fn build_data_config(&self) -> &BuildDataConfig { &self.build_data_config } fn is_unique(&self, name: &str) -> bool { self.packages.iter().filter(|(_, v)| v.name == name).count() == 1 } } fn rustc_discover_host_triple(cargo_toml: &AbsPath) -> Option<String> { let mut rustc = Command::new(toolchain::rustc()); rustc.current_dir(cargo_toml.parent().unwrap()).arg("-vV"); log::debug!("Discovering host platform by {:?}", rustc); match utf8_stdout(rustc) { Ok(stdout) => { let field = "host: "; 
let target = stdout.lines().find_map(|l| l.strip_prefix(field)); if let Some(target) = target { Some(target.to_string()) } else { // If we fail to resolve the host platform, it's not the end of the world. log::info!("rustc -vV did not report host platform, got:\n{}", stdout); None } } Err(e) => { log::warn!("Failed to discover host platform: {}", e); None } } } fn cargo_config_build_target(cargo_toml: &AbsPath) -> Option<String> { let mut cargo_config = Command::new(toolchain::cargo()); cargo_config .current_dir(cargo_toml.parent().unwrap()) .args(&["-Z", "unstable-options", "config", "get", "build.target"]) .env("RUSTC_BOOTSTRAP", "1"); // if successful we receive `build.target = "target-triple"` log::debug!("Discovering cargo config target by {:?}", cargo_config); match utf8_stdout(cargo_config) { Ok(stdout) => stdout .strip_prefix("build.target = \"") .and_then(|stdout| stdout.strip_suffix('"')) .map(ToOwned::to_owned), Err(_) => None, } }
34.532731
103
0.577853
01225698f188956c7da0992a87ad9ef8092e3af5
893
use mmtk::util::Address; use mmtk::util::opaque_pointer::*; use mmtk::util::memory; use crate::DummyVM; #[test] pub fn test_handle_mmap_oom() { let panic_res = std::panic::catch_unwind(move || { let start = unsafe { Address::from_usize(0x100_0000 )}; let one_terabyte = 1000000000000; // mmap 1 terabyte memory - we expect this will fail due to out of memory. // If that's not the case, increase the size we mmap. let mmap_res = memory::dzmmap_noreplace(start, one_terabyte); memory::handle_mmap_error::<DummyVM>(mmap_res.err().unwrap(), VMThread::UNINITIALIZED); }); assert!(panic_res.is_err()); // The error should match the default implementation of Collection::out_of_memory() let err = panic_res.err().unwrap(); assert!(err.is::<&str>()); assert_eq!(err.downcast_ref::<&str>().unwrap(), &"Out of memory!"); }
37.208333
95
0.661814
bf6756cc82deed7392edcc35578a1def3e93f412
11,483
use indexmap::map::IndexMap; use nu_protocol::ast::Call; use nu_protocol::engine::{Command, EngineState, Stack}; use nu_protocol::{ Category, Config, Example, IntoPipelineData, PipelineData, ShellError, Signature, Span, Spanned, Value, }; #[derive(Clone)] pub struct FromXml; impl Command for FromXml { fn name(&self) -> &str { "from xml" } fn signature(&self) -> Signature { Signature::build("from xml").category(Category::Formats) } fn usage(&self) -> &str { "Parse text as .xml and create table." } fn run( &self, _engine_state: &EngineState, stack: &mut Stack, call: &Call, input: PipelineData, ) -> Result<nu_protocol::PipelineData, ShellError> { let head = call.head; let config = stack.get_config().unwrap_or_default(); from_xml(input, head, &config) } fn examples(&self) -> Vec<Example> { vec![Example { example: r#"'<?xml version="1.0" encoding="UTF-8"?> <note> <remember>Event</remember> </note>' | from xml"#, description: "Converts xml formatted string to table", result: Some(Value::Record { cols: vec!["note".to_string()], vals: vec![Value::Record { cols: vec!["children".to_string(), "attributes".to_string()], vals: vec![ Value::List { vals: vec![Value::Record { cols: vec!["remember".to_string()], vals: vec![Value::Record { cols: vec!["children".to_string(), "attributes".to_string()], vals: vec![ Value::List { vals: vec![Value::String { val: "Event".to_string(), span: Span::test_data(), }], span: Span::test_data(), }, Value::Record { cols: vec![], vals: vec![], span: Span::test_data(), }, ], span: Span::test_data(), }], span: Span::test_data(), }], span: Span::test_data(), }, Value::Record { cols: vec![], vals: vec![], span: Span::test_data(), }, ], span: Span::test_data(), }], span: Span::test_data(), }), }] } } fn from_attributes_to_value(attributes: &[roxmltree::Attribute], span: Span) -> Value { let mut collected = IndexMap::new(); for a in attributes { collected.insert(String::from(a.name()), Value::string(a.value(), span)); } let (cols, vals) = collected 
.into_iter() .fold((vec![], vec![]), |mut acc, (k, v)| { acc.0.push(k); acc.1.push(v); acc }); Value::Record { cols, vals, span } } fn from_node_to_value(n: &roxmltree::Node, span: Span) -> Value { if n.is_element() { let name = n.tag_name().name().trim().to_string(); let mut children_values = vec![]; for c in n.children() { children_values.push(from_node_to_value(&c, span)); } let children_values: Vec<Value> = children_values .into_iter() .filter(|x| match x { Value::String { val: f, .. } => { !f.trim().is_empty() // non-whitespace characters? } _ => true, }) .collect(); let mut collected = IndexMap::new(); let attribute_value: Value = from_attributes_to_value(n.attributes(), span); let mut row = IndexMap::new(); row.insert( String::from("children"), Value::List { vals: children_values, span, }, ); row.insert(String::from("attributes"), attribute_value); collected.insert(name, Value::from(Spanned { item: row, span })); Value::from(Spanned { item: collected, span, }) } else if n.is_comment() { Value::String { val: "<comment>".to_string(), span, } } else if n.is_pi() { Value::String { val: "<processing_instruction>".to_string(), span, } } else if n.is_text() { match n.text() { Some(text) => Value::String { val: text.to_string(), span, }, None => Value::String { val: "<error>".to_string(), span, }, } } else { Value::String { val: "<unknown>".to_string(), span, } } } fn from_document_to_value(d: &roxmltree::Document, span: Span) -> Value { from_node_to_value(&d.root_element(), span) } pub fn from_xml_string_to_value(s: String, span: Span) -> Result<Value, roxmltree::Error> { let parsed = roxmltree::Document::parse(&s)?; Ok(from_document_to_value(&parsed, span)) } fn from_xml(input: PipelineData, head: Span, config: &Config) -> Result<PipelineData, ShellError> { let concat_string = input.collect_string("", config)?; match from_xml_string_to_value(concat_string, head) { Ok(x) => Ok(x.into_pipeline_data()), _ => Err(ShellError::UnsupportedInput( "Could not parse string 
as xml".to_string(), head, )), } } #[cfg(test)] mod tests { use super::*; use indexmap::indexmap; use indexmap::IndexMap; use nu_protocol::{Spanned, Value}; fn string(input: impl Into<String>) -> Value { Value::String { val: input.into(), span: Span::test_data(), } } fn row(entries: IndexMap<String, Value>) -> Value { Value::from(Spanned { item: entries, span: Span::test_data(), }) } fn table(list: &[Value]) -> Value { Value::List { vals: list.to_vec(), span: Span::test_data(), } } fn parse(xml: &str) -> Result<Value, roxmltree::Error> { from_xml_string_to_value(xml.to_string(), Span::test_data()) } #[test] fn parses_empty_element() -> Result<(), roxmltree::Error> { let source = "<nu></nu>"; assert_eq!( parse(source)?, row(indexmap! { "nu".into() => row(indexmap! { "children".into() => table(&[]), "attributes".into() => row(indexmap! {}) }) }) ); Ok(()) } #[test] fn parses_element_with_text() -> Result<(), roxmltree::Error> { let source = "<nu>La era de los tres caballeros</nu>"; assert_eq!( parse(source)?, row(indexmap! { "nu".into() => row(indexmap! { "children".into() => table(&[string("La era de los tres caballeros")]), "attributes".into() => row(indexmap! {}) }) }) ); Ok(()) } #[test] fn parses_element_with_elements() -> Result<(), roxmltree::Error> { let source = "\ <nu> <dev>Andrés</dev> <dev>Jonathan</dev> <dev>Yehuda</dev> </nu>"; assert_eq!( parse(source)?, row(indexmap! { "nu".into() => row(indexmap! { "children".into() => table(&[ row(indexmap! { "dev".into() => row(indexmap! { "children".into() => table(&[string("Andrés")]), "attributes".into() => row(indexmap! {}) }) }), row(indexmap! { "dev".into() => row(indexmap! { "children".into() => table(&[string("Jonathan")]), "attributes".into() => row(indexmap! {}) }) }), row(indexmap! { "dev".into() => row(indexmap! { "children".into() => table(&[string("Yehuda")]), "attributes".into() => row(indexmap! {}) }) }) ]), "attributes".into() => row(indexmap! 
{}) }) }) ); Ok(()) } #[test] fn parses_element_with_attribute() -> Result<(), roxmltree::Error> { let source = "\ <nu version=\"2.0\"> </nu>"; assert_eq!( parse(source)?, row(indexmap! { "nu".into() => row(indexmap! { "children".into() => table(&[]), "attributes".into() => row(indexmap! { "version".into() => string("2.0") }) }) }) ); Ok(()) } #[test] fn parses_element_with_attribute_and_element() -> Result<(), roxmltree::Error> { let source = "\ <nu version=\"2.0\"> <version>2.0</version> </nu>"; assert_eq!( parse(source)?, row(indexmap! { "nu".into() => row(indexmap! { "children".into() => table(&[ row(indexmap! { "version".into() => row(indexmap! { "children".into() => table(&[string("2.0")]), "attributes".into() => row(indexmap! {}) }) }) ]), "attributes".into() => row(indexmap! { "version".into() => string("2.0") }) }) }) ); Ok(()) } #[test] fn parses_element_with_multiple_attributes() -> Result<(), roxmltree::Error> { let source = "\ <nu version=\"2.0\" age=\"25\"> </nu>"; assert_eq!( parse(source)?, row(indexmap! { "nu".into() => row(indexmap! { "children".into() => table(&[]), "attributes".into() => row(indexmap! { "version".into() => string("2.0"), "age".into() => string("25") }) }) }) ); Ok(()) } #[test] fn test_examples() { use crate::test_examples; test_examples(FromXml {}) } }
30.218421
99
0.409301
75430a008a126c7e86b23b885c9a53e9be080a55
6,151
// Copyright 2021 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. //! IEEE Std 802.11-2016 WEP descriptors and credentials. //! //! **WEP is insecure and support will be removed. This module is provided for legacy support only //! and its use should be avoided.** //! //! WEP has no dedicated descriptor data. Only WEP-40 and WEP-104 are supported and key size is not //! negotiated with remote stations. // TODO(fxbug.dev/96000): Name items in this module in a way that makes it clear that they // implement an insecure security protocol. use fidl_fuchsia_wlan_common_security as fidl_security; use hex; use std::convert::{TryFrom, TryInto}; use thiserror::Error; use crate::security::{BareCredentials, SecurityError}; pub const WEP40_KEY_BYTES: usize = 5; pub const WEP104_KEY_BYTES: usize = 13; #[derive(Clone, Copy, Debug, Error, Eq, PartialEq)] #[non_exhaustive] pub enum WepError { #[error("invalid WEP key size: {0} bytes")] Size(usize), #[error("invalid WEP key encoding")] Encoding, } /// WEP key. /// /// IEEE Std 802.11-2016 describes two WEP key sizes: WEP-40 and WEP-104. `Key` provides variants /// for these standard key sizes represented as unencoded bytes. #[derive(Clone, Debug, Eq, PartialEq)] pub enum WepKey { /// WEP-40 key. This is a partial key and is joined with an IV to form a 64-bit key. Wep40([u8; WEP40_KEY_BYTES]), /// WEP-104 key. This is a partial key and is joined with an IV to form a 128-bit key. Wep104([u8; WEP104_KEY_BYTES]), } impl WepKey { /// Parses a WEP key from a byte sequence. /// /// This function parses both unencoded keys and ASCII hexadecimal encoded keys. IEEE Std /// 802.11-2016 does not specify an encoding for non-hexadecimal keys, so the raw bytes are /// accepted as is, though these keys are typically ASCII or UTF-8 encoded text in practice. /// ASCII hexadecimal encoded keys are decoded into raw bytes. 
/// /// Note that `Key` does not provide a mechanism to restore the original byte sequence parsed /// by this function, so the exact encoding of ASCII hexadecimal encoded keys may be lost. /// /// # Errors /// /// Returns an error if the size or encoding of the byte sequence is incompatible. pub fn parse(bytes: impl AsRef<[u8]>) -> Result<Self, WepError> { const WEP40_HEX_ENCODING_BYTES: usize = WEP40_KEY_BYTES * 2; const WEP104_HEX_ENCODING_BYTES: usize = WEP104_KEY_BYTES * 2; let bytes = bytes.as_ref(); match bytes.len() { WEP40_HEX_ENCODING_BYTES | WEP104_HEX_ENCODING_BYTES => { let bytes = hex::decode(bytes).map_err(|_| WepError::Encoding)?; Ok(match bytes.len() { WEP40_KEY_BYTES => WepKey::Wep40(bytes.try_into().unwrap()), WEP104_KEY_BYTES => WepKey::Wep104(bytes.try_into().unwrap()), _ => unreachable!(), }) } _ => WepKey::try_from(bytes), } } } impl AsRef<[u8]> for WepKey { fn as_ref(&self) -> &[u8] { match self { WepKey::Wep40(ref bytes) => bytes, WepKey::Wep104(ref bytes) => bytes, } } } impl From<[u8; WEP40_KEY_BYTES]> for WepKey { fn from(bytes: [u8; WEP40_KEY_BYTES]) -> Self { WepKey::Wep40(bytes) } } impl From<[u8; WEP104_KEY_BYTES]> for WepKey { fn from(bytes: [u8; WEP104_KEY_BYTES]) -> Self { WepKey::Wep104(bytes) } } impl From<WepKey> for Vec<u8> { fn from(key: WepKey) -> Self { match key { WepKey::Wep40(bytes) => bytes.into(), WepKey::Wep104(bytes) => bytes.into(), } } } /// Converts unencoded bytes into a WEP key. /// /// This conversion is not a parse and does **not** accept ASCII hexadecimal encoded keys; the /// bytes are copied as is. Use `Key::parse` for hexadecimal keys. impl TryFrom<&[u8]> for WepKey { type Error = WepError; fn try_from(bytes: &[u8]) -> Result<Self, Self::Error> { let n = bytes.len(); match n { WEP40_KEY_BYTES => Ok(WepKey::Wep40(bytes.try_into().unwrap())), WEP104_KEY_BYTES => Ok(WepKey::Wep104(bytes.try_into().unwrap())), _ => Err(WepError::Size(n)), } } } /// Conversion from a WEP key into bare credentials. 
impl From<WepKey> for BareCredentials { fn from(key: WepKey) -> Self { BareCredentials::WepKey(key) } } /// WEP authenticator. #[derive(Clone, Debug, Eq, PartialEq)] pub struct WepAuthenticator { /// WEP key used to authenticate. pub key: WepKey, } impl From<WepAuthenticator> for fidl_security::Protocol { fn from(_: WepAuthenticator) -> Self { fidl_security::Protocol::Wep } } impl From<WepAuthenticator> for fidl_security::WepCredentials { fn from(authenticator: WepAuthenticator) -> Self { let key = authenticator.key.into(); fidl_security::WepCredentials { key } } } impl TryFrom<fidl_security::WepCredentials> for WepAuthenticator { type Error = SecurityError; fn try_from(credentials: fidl_security::WepCredentials) -> Result<Self, Self::Error> { let key = credentials.key.as_slice().try_into()?; Ok(WepAuthenticator { key }) } } #[cfg(test)] mod tests { use test_case::test_case; use crate::security::wep::{WepError, WepKey, WEP104_KEY_BYTES}; #[test_case("wep40" => Ok(WepKey::Wep40([b'w', b'e', b'p', b'4', b'0'])))] #[test_case("abcdef0000" => Ok(WepKey::Wep40([0xAB, 0xCD, 0xEF, 0, 0])))] #[test_case("authenticates" => Ok(WepKey::Wep104([ b'a', b'u', b't', b'h', b'e', b'n', b't', b'i', b'c', b'a', b't', b'e', b's', ])))] #[test_case("ffffffffffffffffffffffffff" => Ok(WepKey::Wep104([0xFF; WEP104_KEY_BYTES])))] #[test_case("abcdef" => Err(WepError::Size(6)))] #[test_case("abcdefZZZZ" => Err(WepError::Encoding))] fn parse_wep_key(bytes: impl AsRef<[u8]>) -> Result<WepKey, WepError> { WepKey::parse(bytes) } }
33.983425
99
0.633881
870aa57b5645628d6f14dafb3f7de30cf7b1f02a
740
// iterators4.rs pub fn factorial(num: u64) -> u64 { // Complete this function to return the factorial of num // Do not use: // - return // Try not to use: // - imperative style loops (for, while) // - additional variables // For an extra challenge, don't use: // - recursion // Execute `rustlings hint iterators4` for hints. // (2..=num).fold(1, |a, b| a * b) (1..=num).reduce(|a, b| a * b).unwrap() } #[cfg(test)] mod tests { use super::*; #[test] fn factorial_of_1() { assert_eq!(1, factorial(1)); } #[test] fn factorial_of_2() { assert_eq!(2, factorial(2)); } #[test] fn factorial_of_4() { assert_eq!(24, factorial(4)); } }
21.142857
60
0.543243
03d2105e5cca52a1253120bd5e4fa82884a81ca9
65,349
use crate::base::*; use crate::config::StripUnconfigured; use crate::configure; use crate::hygiene::SyntaxContext; use crate::mbe::macro_rules::annotate_err_with_kind; use crate::module::{mod_dir_path, parse_external_mod, DirOwnership, ParsedExternalMod}; use crate::placeholders::{placeholder, PlaceholderExpander}; use rustc_ast as ast; use rustc_ast::mut_visit::*; use rustc_ast::ptr::P; use rustc_ast::token; use rustc_ast::tokenstream::TokenStream; use rustc_ast::visit::{self, AssocCtxt, Visitor}; use rustc_ast::{AstLike, Block, Inline, ItemKind, Local, MacArgs, MacCall}; use rustc_ast::{MacCallStmt, MacStmtStyle, MetaItemKind, ModKind, NestedMetaItem}; use rustc_ast::{NodeId, PatKind, Path, StmtKind, Unsafe}; use rustc_ast_pretty::pprust; use rustc_attr::is_builtin_attr; use rustc_data_structures::map_in_place::MapInPlace; use rustc_data_structures::stack::ensure_sufficient_stack; use rustc_data_structures::sync::Lrc; use rustc_errors::{Applicability, FatalError, PResult}; use rustc_feature::Features; use rustc_parse::parser::{ AttemptLocalParseRecovery, ForceCollect, Parser, RecoverColon, RecoverComma, }; use rustc_parse::validate_attr; use rustc_session::lint::builtin::{UNUSED_ATTRIBUTES, UNUSED_DOC_COMMENTS}; use rustc_session::lint::BuiltinLintDiagnostics; use rustc_session::parse::{feature_err, ParseSess}; use rustc_session::Limit; use rustc_span::symbol::{sym, Ident}; use rustc_span::{FileName, LocalExpnId, Span}; use smallvec::{smallvec, SmallVec}; use std::ops::DerefMut; use std::path::PathBuf; use std::rc::Rc; use std::{iter, mem}; macro_rules! ast_fragments { ( $($Kind:ident($AstTy:ty) { $kind_name:expr; $(one fn $mut_visit_ast:ident; fn $visit_ast:ident;)? $(many fn $flat_map_ast_elt:ident; fn $visit_ast_elt:ident($($args:tt)*);)? fn $make_ast:ident; })* ) => { /// A fragment of AST that can be produced by a single macro expansion. /// Can also serve as an input and intermediate result for macro expansion operations. 
pub enum AstFragment { OptExpr(Option<P<ast::Expr>>), $($Kind($AstTy),)* } /// "Discriminant" of an AST fragment. #[derive(Copy, Clone, PartialEq, Eq)] pub enum AstFragmentKind { OptExpr, $($Kind,)* } impl AstFragmentKind { pub fn name(self) -> &'static str { match self { AstFragmentKind::OptExpr => "expression", $(AstFragmentKind::$Kind => $kind_name,)* } } fn make_from<'a>(self, result: Box<dyn MacResult + 'a>) -> Option<AstFragment> { match self { AstFragmentKind::OptExpr => result.make_expr().map(Some).map(AstFragment::OptExpr), $(AstFragmentKind::$Kind => result.$make_ast().map(AstFragment::$Kind),)* } } } impl AstFragment { pub fn add_placeholders(&mut self, placeholders: &[NodeId]) { if placeholders.is_empty() { return; } match self { $($(AstFragment::$Kind(ast) => ast.extend(placeholders.iter().flat_map(|id| { // We are repeating through arguments with `many`, to do that we have to // mention some macro variable from those arguments even if it's not used. macro _repeating($flat_map_ast_elt) {} placeholder(AstFragmentKind::$Kind, *id, None).$make_ast() })),)?)* _ => panic!("unexpected AST fragment kind") } } pub fn make_opt_expr(self) -> Option<P<ast::Expr>> { match self { AstFragment::OptExpr(expr) => expr, _ => panic!("AstFragment::make_* called on the wrong kind of fragment"), } } $(pub fn $make_ast(self) -> $AstTy { match self { AstFragment::$Kind(ast) => ast, _ => panic!("AstFragment::make_* called on the wrong kind of fragment"), } })* pub fn mut_visit_with<F: MutVisitor>(&mut self, vis: &mut F) { match self { AstFragment::OptExpr(opt_expr) => { visit_clobber(opt_expr, |opt_expr| { if let Some(expr) = opt_expr { vis.filter_map_expr(expr) } else { None } }); } $($(AstFragment::$Kind(ast) => vis.$mut_visit_ast(ast),)?)* $($(AstFragment::$Kind(ast) => ast.flat_map_in_place(|ast| vis.$flat_map_ast_elt(ast)),)?)* } } pub fn visit_with<'a, V: Visitor<'a>>(&'a self, visitor: &mut V) { match *self { AstFragment::OptExpr(Some(ref expr)) => 
visitor.visit_expr(expr), AstFragment::OptExpr(None) => {} $($(AstFragment::$Kind(ref ast) => visitor.$visit_ast(ast),)?)* $($(AstFragment::$Kind(ref ast) => for ast_elt in &ast[..] { visitor.$visit_ast_elt(ast_elt, $($args)*); })?)* } } } impl<'a> MacResult for crate::mbe::macro_rules::ParserAnyMacro<'a> { $(fn $make_ast(self: Box<crate::mbe::macro_rules::ParserAnyMacro<'a>>) -> Option<$AstTy> { Some(self.make(AstFragmentKind::$Kind).$make_ast()) })* } } } ast_fragments! { Expr(P<ast::Expr>) { "expression"; one fn visit_expr; fn visit_expr; fn make_expr; } Pat(P<ast::Pat>) { "pattern"; one fn visit_pat; fn visit_pat; fn make_pat; } Ty(P<ast::Ty>) { "type"; one fn visit_ty; fn visit_ty; fn make_ty; } Stmts(SmallVec<[ast::Stmt; 1]>) { "statement"; many fn flat_map_stmt; fn visit_stmt(); fn make_stmts; } Items(SmallVec<[P<ast::Item>; 1]>) { "item"; many fn flat_map_item; fn visit_item(); fn make_items; } TraitItems(SmallVec<[P<ast::AssocItem>; 1]>) { "trait item"; many fn flat_map_trait_item; fn visit_assoc_item(AssocCtxt::Trait); fn make_trait_items; } ImplItems(SmallVec<[P<ast::AssocItem>; 1]>) { "impl item"; many fn flat_map_impl_item; fn visit_assoc_item(AssocCtxt::Impl); fn make_impl_items; } ForeignItems(SmallVec<[P<ast::ForeignItem>; 1]>) { "foreign item"; many fn flat_map_foreign_item; fn visit_foreign_item(); fn make_foreign_items; } Arms(SmallVec<[ast::Arm; 1]>) { "match arm"; many fn flat_map_arm; fn visit_arm(); fn make_arms; } Fields(SmallVec<[ast::ExprField; 1]>) { "field expression"; many fn flat_map_expr_field; fn visit_expr_field(); fn make_expr_fields; } FieldPats(SmallVec<[ast::PatField; 1]>) { "field pattern"; many fn flat_map_pat_field; fn visit_pat_field(); fn make_pat_fields; } GenericParams(SmallVec<[ast::GenericParam; 1]>) { "generic parameter"; many fn flat_map_generic_param; fn visit_generic_param(); fn make_generic_params; } Params(SmallVec<[ast::Param; 1]>) { "function parameter"; many fn flat_map_param; fn visit_param(); fn make_params; 
} StructFields(SmallVec<[ast::FieldDef; 1]>) { "field"; many fn flat_map_field_def; fn visit_field_def(); fn make_field_defs; } Variants(SmallVec<[ast::Variant; 1]>) { "variant"; many fn flat_map_variant; fn visit_variant(); fn make_variants; } } pub enum SupportsMacroExpansion { No, Yes { supports_inner_attrs: bool }, } impl AstFragmentKind { crate fn dummy(self, span: Span) -> AstFragment { self.make_from(DummyResult::any(span)).expect("couldn't create a dummy AST fragment") } pub fn supports_macro_expansion(self) -> SupportsMacroExpansion { match self { AstFragmentKind::OptExpr | AstFragmentKind::Expr | AstFragmentKind::Stmts | AstFragmentKind::Ty | AstFragmentKind::Pat => SupportsMacroExpansion::Yes { supports_inner_attrs: false }, AstFragmentKind::Items | AstFragmentKind::TraitItems | AstFragmentKind::ImplItems | AstFragmentKind::ForeignItems => { SupportsMacroExpansion::Yes { supports_inner_attrs: true } } AstFragmentKind::Arms | AstFragmentKind::Fields | AstFragmentKind::FieldPats | AstFragmentKind::GenericParams | AstFragmentKind::Params | AstFragmentKind::StructFields | AstFragmentKind::Variants => SupportsMacroExpansion::No, } } fn expect_from_annotatables<I: IntoIterator<Item = Annotatable>>( self, items: I, ) -> AstFragment { let mut items = items.into_iter(); match self { AstFragmentKind::Arms => { AstFragment::Arms(items.map(Annotatable::expect_arm).collect()) } AstFragmentKind::Fields => { AstFragment::Fields(items.map(Annotatable::expect_expr_field).collect()) } AstFragmentKind::FieldPats => { AstFragment::FieldPats(items.map(Annotatable::expect_pat_field).collect()) } AstFragmentKind::GenericParams => { AstFragment::GenericParams(items.map(Annotatable::expect_generic_param).collect()) } AstFragmentKind::Params => { AstFragment::Params(items.map(Annotatable::expect_param).collect()) } AstFragmentKind::StructFields => { AstFragment::StructFields(items.map(Annotatable::expect_field_def).collect()) } AstFragmentKind::Variants => { 
AstFragment::Variants(items.map(Annotatable::expect_variant).collect()) } AstFragmentKind::Items => { AstFragment::Items(items.map(Annotatable::expect_item).collect()) } AstFragmentKind::ImplItems => { AstFragment::ImplItems(items.map(Annotatable::expect_impl_item).collect()) } AstFragmentKind::TraitItems => { AstFragment::TraitItems(items.map(Annotatable::expect_trait_item).collect()) } AstFragmentKind::ForeignItems => { AstFragment::ForeignItems(items.map(Annotatable::expect_foreign_item).collect()) } AstFragmentKind::Stmts => { AstFragment::Stmts(items.map(Annotatable::expect_stmt).collect()) } AstFragmentKind::Expr => AstFragment::Expr( items.next().expect("expected exactly one expression").expect_expr(), ), AstFragmentKind::OptExpr => { AstFragment::OptExpr(items.next().map(Annotatable::expect_expr)) } AstFragmentKind::Pat | AstFragmentKind::Ty => { panic!("patterns and types aren't annotatable") } } } } pub struct Invocation { pub kind: InvocationKind, pub fragment_kind: AstFragmentKind, pub expansion_data: ExpansionData, } pub enum InvocationKind { Bang { mac: ast::MacCall, span: Span, }, Attr { attr: ast::Attribute, // Re-insertion position for inert attributes. pos: usize, item: Annotatable, // Required for resolving derive helper attributes. derives: Vec<Path>, }, Derive { path: Path, item: Annotatable, }, } impl InvocationKind { fn placeholder_visibility(&self) -> Option<ast::Visibility> { // HACK: For unnamed fields placeholders should have the same visibility as the actual // fields because for tuple structs/variants resolve determines visibilities of their // constructor using these field visibilities before attributes on them are are expanded. // The assumption is that the attribute expansion cannot change field visibilities, // and it holds because only inert attributes are supported in this position. match self { InvocationKind::Attr { item: Annotatable::FieldDef(field), .. } | InvocationKind::Derive { item: Annotatable::FieldDef(field), .. 
} if field.ident.is_none() => { Some(field.vis.clone()) } _ => None, } } } impl Invocation { pub fn span(&self) -> Span { match &self.kind { InvocationKind::Bang { span, .. } => *span, InvocationKind::Attr { attr, .. } => attr.span, InvocationKind::Derive { path, .. } => path.span, } } } pub struct MacroExpander<'a, 'b> { pub cx: &'a mut ExtCtxt<'b>, monotonic: bool, // cf. `cx.monotonic_expander()` } impl<'a, 'b> MacroExpander<'a, 'b> { pub fn new(cx: &'a mut ExtCtxt<'b>, monotonic: bool) -> Self { MacroExpander { cx, monotonic } } // FIXME: Avoid visiting the crate as a `Mod` item, // make crate a first class expansion target instead. pub fn expand_crate(&mut self, mut krate: ast::Crate) -> ast::Crate { let file_path = match self.cx.source_map().span_to_filename(krate.span) { FileName::Real(name) => name .into_local_path() .expect("attempting to resolve a file path in an external file"), other => PathBuf::from(other.prefer_local().to_string()), }; let dir_path = file_path.parent().unwrap_or(&file_path).to_owned(); self.cx.root_path = dir_path.clone(); self.cx.current_expansion.module = Rc::new(ModuleData { mod_path: vec![Ident::from_str(&self.cx.ecfg.crate_name)], file_path_stack: vec![file_path], dir_path, }); let krate_item = AstFragment::Items(smallvec![P(ast::Item { attrs: krate.attrs, span: krate.span, kind: ast::ItemKind::Mod( Unsafe::No, ModKind::Loaded(krate.items, Inline::Yes, krate.span) ), ident: Ident::invalid(), id: ast::DUMMY_NODE_ID, vis: ast::Visibility { span: krate.span.shrink_to_lo(), kind: ast::VisibilityKind::Public, tokens: None, }, tokens: None, })]); match self.fully_expand_fragment(krate_item).make_items().pop().map(P::into_inner) { Some(ast::Item { attrs, kind: ast::ItemKind::Mod(_, ModKind::Loaded(items, ..)), .. }) => { krate.attrs = attrs; krate.items = items; } None => { // Resolution failed so we return an empty expansion krate.attrs = vec![]; krate.items = vec![]; } Some(ast::Item { span, kind, .. 
}) => { krate.attrs = vec![]; krate.items = vec![]; self.cx.span_err( span, &format!( "expected crate top-level item to be a module after macro expansion, found {} {}", kind.article(), kind.descr() ), ); // FIXME: this workaround issue #84569 FatalError.raise(); } }; self.cx.trace_macros_diag(); krate } // Recursively expand all macro invocations in this AST fragment. pub fn fully_expand_fragment(&mut self, input_fragment: AstFragment) -> AstFragment { let orig_expansion_data = self.cx.current_expansion.clone(); let orig_force_mode = self.cx.force_mode; // Collect all macro invocations and replace them with placeholders. let (mut fragment_with_placeholders, mut invocations) = self.collect_invocations(input_fragment, &[]); // Optimization: if we resolve all imports now, // we'll be able to immediately resolve most of imported macros. self.resolve_imports(); // Resolve paths in all invocations and produce output expanded fragments for them, but // do not insert them into our input AST fragment yet, only store in `expanded_fragments`. // The output fragments also go through expansion recursively until no invocations are left. // Unresolved macros produce dummy outputs as a recovery measure. 
invocations.reverse(); let mut expanded_fragments = Vec::new(); let mut undetermined_invocations = Vec::new(); let (mut progress, mut force) = (false, !self.monotonic); loop { let (invoc, ext) = if let Some(invoc) = invocations.pop() { invoc } else { self.resolve_imports(); if undetermined_invocations.is_empty() { break; } invocations = mem::take(&mut undetermined_invocations); force = !mem::replace(&mut progress, false); if force && self.monotonic { self.cx.sess.delay_span_bug( invocations.last().unwrap().0.span(), "expansion entered force mode without producing any errors", ); } continue; }; let ext = match ext { Some(ext) => ext, None => { let eager_expansion_root = if self.monotonic { invoc.expansion_data.id } else { orig_expansion_data.id }; match self.cx.resolver.resolve_macro_invocation( &invoc, eager_expansion_root, force, ) { Ok(ext) => ext, Err(Indeterminate) => { // Cannot resolve, will retry this invocation later. undetermined_invocations.push((invoc, None)); continue; } } } }; let ExpansionData { depth, id: expn_id, .. } = invoc.expansion_data; let depth = depth - orig_expansion_data.depth; self.cx.current_expansion = invoc.expansion_data.clone(); self.cx.force_mode = force; let fragment_kind = invoc.fragment_kind; let (expanded_fragment, new_invocations) = match self.expand_invoc(invoc, &ext.kind) { ExpandResult::Ready(fragment) => { let mut derive_invocations = Vec::new(); let derive_placeholders = self .cx .resolver .take_derive_resolutions(expn_id) .map(|derives| { derive_invocations.reserve(derives.len()); derives .into_iter() .map(|(path, item, _exts)| { // FIXME: Consider using the derive resolutions (`_exts`) // instead of enqueuing the derives to be resolved again later. 
let expn_id = LocalExpnId::fresh_empty(); derive_invocations.push(( Invocation { kind: InvocationKind::Derive { path, item }, fragment_kind, expansion_data: ExpansionData { id: expn_id, ..self.cx.current_expansion.clone() }, }, None, )); NodeId::placeholder_from_expn_id(expn_id) }) .collect::<Vec<_>>() }) .unwrap_or_default(); let (fragment, collected_invocations) = self.collect_invocations(fragment, &derive_placeholders); // We choose to expand any derive invocations associated with this macro invocation // *before* any macro invocations collected from the output fragment derive_invocations.extend(collected_invocations); (fragment, derive_invocations) } ExpandResult::Retry(invoc) => { if force { self.cx.span_bug( invoc.span(), "expansion entered force mode but is still stuck", ); } else { // Cannot expand, will retry this invocation later. undetermined_invocations.push((invoc, Some(ext))); continue; } } }; progress = true; if expanded_fragments.len() < depth { expanded_fragments.push(Vec::new()); } expanded_fragments[depth - 1].push((expn_id, expanded_fragment)); invocations.extend(new_invocations.into_iter().rev()); } self.cx.current_expansion = orig_expansion_data; self.cx.force_mode = orig_force_mode; // Finally incorporate all the expanded macros into the input AST fragment. let mut placeholder_expander = PlaceholderExpander::new(self.cx, self.monotonic); while let Some(expanded_fragments) = expanded_fragments.pop() { for (expn_id, expanded_fragment) in expanded_fragments.into_iter().rev() { placeholder_expander .add(NodeId::placeholder_from_expn_id(expn_id), expanded_fragment); } } fragment_with_placeholders.mut_visit_with(&mut placeholder_expander); fragment_with_placeholders } fn resolve_imports(&mut self) { if self.monotonic { self.cx.resolver.resolve_imports(); } } /// Collects all macro invocations reachable at this time in this AST fragment, and replace /// them with "placeholders" - dummy macro invocations with specially crafted `NodeId`s. 
/// Then call into resolver that builds a skeleton ("reduced graph") of the fragment and /// prepares data for resolving paths of macro invocations. fn collect_invocations( &mut self, mut fragment: AstFragment, extra_placeholders: &[NodeId], ) -> (AstFragment, Vec<(Invocation, Option<Lrc<SyntaxExtension>>)>) { // Resolve `$crate`s in the fragment for pretty-printing. self.cx.resolver.resolve_dollar_crates(); let invocations = { let mut collector = InvocationCollector { // Non-derive macro invocations cannot see the results of cfg expansion - they // will either be removed along with the item, or invoked before the cfg/cfg_attr // attribute is expanded. Therefore, we don't need to configure the tokens // Derive macros *can* see the results of cfg-expansion - they are handled // specially in `fully_expand_fragment` cfg: StripUnconfigured { sess: &self.cx.sess, features: self.cx.ecfg.features, config_tokens: false, }, cx: self.cx, invocations: Vec::new(), monotonic: self.monotonic, }; fragment.mut_visit_with(&mut collector); fragment.add_placeholders(extra_placeholders); collector.invocations }; if self.monotonic { self.cx .resolver .visit_ast_fragment_with_placeholders(self.cx.current_expansion.id, &fragment); } (fragment, invocations) } fn error_recursion_limit_reached(&mut self) { let expn_data = self.cx.current_expansion.id.expn_data(); let suggested_limit = self.cx.ecfg.recursion_limit * 2; self.cx .struct_span_err( expn_data.call_site, &format!("recursion limit reached while expanding `{}`", expn_data.kind.descr()), ) .help(&format!( "consider adding a `#![recursion_limit=\"{}\"]` attribute to your crate (`{}`)", suggested_limit, self.cx.ecfg.crate_name, )) .emit(); self.cx.trace_macros_diag(); } /// A macro's expansion does not fit in this fragment kind. /// For example, a non-type macro in a type position. 
fn error_wrong_fragment_kind(&mut self, kind: AstFragmentKind, mac: &ast::MacCall, span: Span) { let msg = format!( "non-{kind} macro in {kind} position: {path}", kind = kind.name(), path = pprust::path_to_string(&mac.path), ); self.cx.span_err(span, &msg); self.cx.trace_macros_diag(); } fn expand_invoc( &mut self, invoc: Invocation, ext: &SyntaxExtensionKind, ) -> ExpandResult<AstFragment, Invocation> { let recursion_limit = self.cx.reduced_recursion_limit.unwrap_or(self.cx.ecfg.recursion_limit); if !recursion_limit.value_within_limit(self.cx.current_expansion.depth) { if self.cx.reduced_recursion_limit.is_none() { self.error_recursion_limit_reached(); } // Reduce the recursion limit by half each time it triggers. self.cx.reduced_recursion_limit = Some(recursion_limit / 2); return ExpandResult::Ready(invoc.fragment_kind.dummy(invoc.span())); } let (fragment_kind, span) = (invoc.fragment_kind, invoc.span()); ExpandResult::Ready(match invoc.kind { InvocationKind::Bang { mac, .. } => match ext { SyntaxExtensionKind::Bang(expander) => { let tok_result = match expander.expand(self.cx, span, mac.args.inner_tokens()) { Err(_) => return ExpandResult::Ready(fragment_kind.dummy(span)), Ok(ts) => ts, }; self.parse_ast_fragment(tok_result, fragment_kind, &mac.path, span) } SyntaxExtensionKind::LegacyBang(expander) => { let prev = self.cx.current_expansion.prior_type_ascription; self.cx.current_expansion.prior_type_ascription = mac.prior_type_ascription; let tok_result = expander.expand(self.cx, span, mac.args.inner_tokens()); let result = if let Some(result) = fragment_kind.make_from(tok_result) { result } else { self.error_wrong_fragment_kind(fragment_kind, &mac, span); fragment_kind.dummy(span) }; self.cx.current_expansion.prior_type_ascription = prev; result } _ => unreachable!(), }, InvocationKind::Attr { attr, pos, mut item, derives } => match ext { SyntaxExtensionKind::Attr(expander) => { self.gate_proc_macro_input(&item); self.gate_proc_macro_attr_item(span, &item); 
let mut fake_tokens = false; if let Annotatable::Item(item_inner) = &item { if let ItemKind::Mod(_, mod_kind) = &item_inner.kind { // FIXME: Collect tokens and use them instead of generating // fake ones. These are unstable, so it needs to be // fixed prior to stabilization // Fake tokens when we are invoking an inner attribute, and: fake_tokens = matches!(attr.style, ast::AttrStyle::Inner) && // We are invoking an attribute on the crate root, or an outline // module (item_inner.ident.name.is_empty() || !matches!(mod_kind, ast::ModKind::Loaded(_, Inline::Yes, _))); } } let tokens = if fake_tokens { rustc_parse::fake_token_stream( &self.cx.sess.parse_sess, &item.into_nonterminal(), ) } else { item.into_tokens(&self.cx.sess.parse_sess) }; let attr_item = attr.unwrap_normal_item(); if let MacArgs::Eq(..) = attr_item.args { self.cx.span_err(span, "key-value macro attributes are not supported"); } let inner_tokens = attr_item.args.inner_tokens(); let tok_result = match expander.expand(self.cx, span, inner_tokens, tokens) { Err(_) => return ExpandResult::Ready(fragment_kind.dummy(span)), Ok(ts) => ts, }; self.parse_ast_fragment(tok_result, fragment_kind, &attr_item.path, span) } SyntaxExtensionKind::LegacyAttr(expander) => { match validate_attr::parse_meta(&self.cx.sess.parse_sess, &attr) { Ok(meta) => { let items = match expander.expand(self.cx, span, &meta, item) { ExpandResult::Ready(items) => items, ExpandResult::Retry(item) => { // Reassemble the original invocation for retrying. 
return ExpandResult::Retry(Invocation { kind: InvocationKind::Attr { attr, pos, item, derives }, ..invoc }); } }; if fragment_kind == AstFragmentKind::Expr && items.is_empty() { let msg = "removing an expression is not supported in this position"; self.cx.span_err(span, msg); fragment_kind.dummy(span) } else { fragment_kind.expect_from_annotatables(items) } } Err(mut err) => { err.emit(); fragment_kind.dummy(span) } } } SyntaxExtensionKind::NonMacroAttr { mark_used } => { self.cx.sess.mark_attr_known(&attr); if *mark_used { self.cx.sess.mark_attr_used(&attr); } item.visit_attrs(|attrs| attrs.insert(pos, attr)); fragment_kind.expect_from_annotatables(iter::once(item)) } _ => unreachable!(), }, InvocationKind::Derive { path, item } => match ext { SyntaxExtensionKind::Derive(expander) | SyntaxExtensionKind::LegacyDerive(expander) => { if let SyntaxExtensionKind::Derive(..) = ext { self.gate_proc_macro_input(&item); } let meta = ast::MetaItem { kind: ast::MetaItemKind::Word, span, path }; let items = match expander.expand(self.cx, span, &meta, item) { ExpandResult::Ready(items) => items, ExpandResult::Retry(item) => { // Reassemble the original invocation for retrying. return ExpandResult::Retry(Invocation { kind: InvocationKind::Derive { path: meta.path, item }, ..invoc }); } }; fragment_kind.expect_from_annotatables(items) } _ => unreachable!(), }, }) } fn gate_proc_macro_attr_item(&self, span: Span, item: &Annotatable) { let kind = match item { Annotatable::Item(_) | Annotatable::TraitItem(_) | Annotatable::ImplItem(_) | Annotatable::ForeignItem(_) => return, Annotatable::Stmt(stmt) => { // Attributes are stable on item statements, // but unstable on all other kinds of statements if stmt.is_item() { return; } "statements" } Annotatable::Expr(_) => "expressions", Annotatable::Arm(..) | Annotatable::ExprField(..) | Annotatable::PatField(..) | Annotatable::GenericParam(..) | Annotatable::Param(..) | Annotatable::FieldDef(..) | Annotatable::Variant(..) 
=> panic!("unexpected annotatable"), }; if self.cx.ecfg.proc_macro_hygiene() { return; } feature_err( &self.cx.sess.parse_sess, sym::proc_macro_hygiene, span, &format!("custom attributes cannot be applied to {}", kind), ) .emit(); } fn gate_proc_macro_input(&self, annotatable: &Annotatable) { struct GateProcMacroInput<'a> { parse_sess: &'a ParseSess, } impl<'ast, 'a> Visitor<'ast> for GateProcMacroInput<'a> { fn visit_item(&mut self, item: &'ast ast::Item) { match &item.kind { ast::ItemKind::Mod(_, mod_kind) if !matches!(mod_kind, ModKind::Loaded(_, Inline::Yes, _)) => { feature_err( self.parse_sess, sym::proc_macro_hygiene, item.span, "non-inline modules in proc macro input are unstable", ) .emit(); } _ => {} } visit::walk_item(self, item); } } if !self.cx.ecfg.proc_macro_hygiene() { annotatable .visit_with(&mut GateProcMacroInput { parse_sess: &self.cx.sess.parse_sess }); } } fn parse_ast_fragment( &mut self, toks: TokenStream, kind: AstFragmentKind, path: &Path, span: Span, ) -> AstFragment { let mut parser = self.cx.new_parser_from_tts(toks); match parse_ast_fragment(&mut parser, kind) { Ok(fragment) => { ensure_complete_parse(&mut parser, path, kind.name(), span); fragment } Err(mut err) => { if err.span.is_dummy() { err.set_span(span); } annotate_err_with_kind(&mut err, kind, span); err.emit(); self.cx.trace_macros_diag(); kind.dummy(span) } } } } pub fn parse_ast_fragment<'a>( this: &mut Parser<'a>, kind: AstFragmentKind, ) -> PResult<'a, AstFragment> { Ok(match kind { AstFragmentKind::Items => { let mut items = SmallVec::new(); while let Some(item) = this.parse_item(ForceCollect::No)? { items.push(item); } AstFragment::Items(items) } AstFragmentKind::TraitItems => { let mut items = SmallVec::new(); while let Some(item) = this.parse_trait_item(ForceCollect::No)? { items.extend(item); } AstFragment::TraitItems(items) } AstFragmentKind::ImplItems => { let mut items = SmallVec::new(); while let Some(item) = this.parse_impl_item(ForceCollect::No)? 
{ items.extend(item); } AstFragment::ImplItems(items) } AstFragmentKind::ForeignItems => { let mut items = SmallVec::new(); while let Some(item) = this.parse_foreign_item(ForceCollect::No)? { items.extend(item); } AstFragment::ForeignItems(items) } AstFragmentKind::Stmts => { let mut stmts = SmallVec::new(); // Won't make progress on a `}`. while this.token != token::Eof && this.token != token::CloseDelim(token::Brace) { if let Some(stmt) = this.parse_full_stmt(AttemptLocalParseRecovery::Yes)? { stmts.push(stmt); } } AstFragment::Stmts(stmts) } AstFragmentKind::Expr => AstFragment::Expr(this.parse_expr()?), AstFragmentKind::OptExpr => { if this.token != token::Eof { AstFragment::OptExpr(Some(this.parse_expr()?)) } else { AstFragment::OptExpr(None) } } AstFragmentKind::Ty => AstFragment::Ty(this.parse_ty()?), AstFragmentKind::Pat => AstFragment::Pat(this.parse_pat_allow_top_alt( None, RecoverComma::No, RecoverColon::Yes, )?), AstFragmentKind::Arms | AstFragmentKind::Fields | AstFragmentKind::FieldPats | AstFragmentKind::GenericParams | AstFragmentKind::Params | AstFragmentKind::StructFields | AstFragmentKind::Variants => panic!("unexpected AST fragment kind"), }) } pub fn ensure_complete_parse<'a>( this: &mut Parser<'a>, macro_path: &Path, kind_name: &str, span: Span, ) { if this.token != token::Eof { let token = pprust::token_to_string(&this.token); let msg = format!("macro expansion ignores token `{}` and any following", token); // Avoid emitting backtrace info twice. 
let def_site_span = this.token.span.with_ctxt(SyntaxContext::root()); let mut err = this.struct_span_err(def_site_span, &msg); err.span_label(span, "caused by the macro expansion here"); let msg = format!( "the usage of `{}!` is likely invalid in {} context", pprust::path_to_string(macro_path), kind_name, ); err.note(&msg); let semi_span = this.sess.source_map().next_point(span); let semi_full_span = semi_span.to(this.sess.source_map().next_point(semi_span)); match this.sess.source_map().span_to_snippet(semi_full_span) { Ok(ref snippet) if &snippet[..] != ";" && kind_name == "expression" => { err.span_suggestion( semi_span, "you might be missing a semicolon here", ";".to_owned(), Applicability::MaybeIncorrect, ); } _ => {} } err.emit(); } } struct InvocationCollector<'a, 'b> { cx: &'a mut ExtCtxt<'b>, cfg: StripUnconfigured<'a>, invocations: Vec<(Invocation, Option<Lrc<SyntaxExtension>>)>, monotonic: bool, } impl<'a, 'b> InvocationCollector<'a, 'b> { fn collect(&mut self, fragment_kind: AstFragmentKind, kind: InvocationKind) -> AstFragment { let expn_id = LocalExpnId::fresh_empty(); let vis = kind.placeholder_visibility(); self.invocations.push(( Invocation { kind, fragment_kind, expansion_data: ExpansionData { id: expn_id, depth: self.cx.current_expansion.depth + 1, ..self.cx.current_expansion.clone() }, }, None, )); placeholder(fragment_kind, NodeId::placeholder_from_expn_id(expn_id), vis) } fn collect_bang( &mut self, mac: ast::MacCall, span: Span, kind: AstFragmentKind, ) -> AstFragment { self.collect(kind, InvocationKind::Bang { mac, span }) } fn collect_attr( &mut self, (attr, pos, derives): (ast::Attribute, usize, Vec<Path>), item: Annotatable, kind: AstFragmentKind, ) -> AstFragment { self.collect(kind, InvocationKind::Attr { attr, pos, item, derives }) } /// If `item` is an attribute invocation, remove the attribute and return it together with /// its position and derives following it. 
We have to collect the derives in order to resolve /// legacy derive helpers (helpers written before derives that introduce them). fn take_first_attr( &mut self, item: &mut impl AstLike, ) -> Option<(ast::Attribute, usize, Vec<Path>)> { let mut attr = None; item.visit_attrs(|attrs| { attr = attrs .iter() .position(|a| !self.cx.sess.is_attr_known(a) && !is_builtin_attr(a)) .map(|attr_pos| { let attr = attrs.remove(attr_pos); let following_derives = attrs[attr_pos..] .iter() .filter(|a| a.has_name(sym::derive)) .flat_map(|a| a.meta_item_list().unwrap_or_default()) .filter_map(|nested_meta| match nested_meta { NestedMetaItem::MetaItem(ast::MetaItem { kind: MetaItemKind::Word, path, .. }) => Some(path), _ => None, }) .collect(); (attr, attr_pos, following_derives) }) }); attr } fn configure<T: AstLike>(&mut self, node: T) -> Option<T> { self.cfg.configure(node) } // Detect use of feature-gated or invalid attributes on macro invocations // since they will not be detected after macro expansion. fn check_attributes(&mut self, attrs: &[ast::Attribute], call: &MacCall) { let features = self.cx.ecfg.features.unwrap(); let mut attrs = attrs.iter().peekable(); let mut span: Option<Span> = None; while let Some(attr) = attrs.next() { rustc_ast_passes::feature_gate::check_attribute(attr, self.cx.sess, features); validate_attr::check_meta(&self.cx.sess.parse_sess, attr); let current_span = if let Some(sp) = span { sp.to(attr.span) } else { attr.span }; span = Some(current_span); if attrs.peek().map_or(false, |next_attr| next_attr.doc_str().is_some()) { continue; } if attr.is_doc_comment() { self.cx.sess.parse_sess.buffer_lint_with_diagnostic( &UNUSED_DOC_COMMENTS, current_span, self.cx.current_expansion.lint_node_id, "unused doc comment", BuiltinLintDiagnostics::UnusedDocComment(attr.span), ); } else if rustc_attr::is_builtin_attr(attr) { let attr_name = attr.ident().unwrap().name; // `#[cfg]` and `#[cfg_attr]` are special - they are // eagerly evaluated. 
if attr_name != sym::cfg && attr_name != sym::cfg_attr { self.cx.sess.parse_sess.buffer_lint_with_diagnostic( &UNUSED_ATTRIBUTES, attr.span, self.cx.current_expansion.lint_node_id, &format!("unused attribute `{}`", attr_name), BuiltinLintDiagnostics::UnusedBuiltinAttribute { attr_name, macro_name: pprust::path_to_string(&call.path), invoc_span: call.path.span, }, ); } } } } } /// Wraps a call to `noop_visit_*` / `noop_flat_map_*` /// for an AST node that supports attributes /// (see the `Annotatable` enum) /// This method assigns a `NodeId`, and sets that `NodeId` /// as our current 'lint node id'. If a macro call is found /// inside this AST node, we will use this AST node's `NodeId` /// to emit lints associated with that macro (allowing /// `#[allow]` / `#[deny]` to be applied close to /// the macro invocation). /// /// Do *not* call this for a macro AST node /// (e.g. `ExprKind::MacCall`) - we cannot emit lints /// at these AST nodes, since they are removed and /// replaced with the result of macro expansion. /// /// All other `NodeId`s are assigned by `visit_id`. /// * `self` is the 'self' parameter for the current method, /// * `id` is a mutable reference to the `NodeId` field /// of the current AST node. /// * `closure` is a closure that executes the /// `noop_visit_*` / `noop_flat_map_*` method /// for the current AST node. macro_rules! 
assign_id { ($self:ident, $id:expr, $closure:expr) => {{ let old_id = $self.cx.current_expansion.lint_node_id; if $self.monotonic { debug_assert_eq!(*$id, ast::DUMMY_NODE_ID); let new_id = $self.cx.resolver.next_node_id(); *$id = new_id; $self.cx.current_expansion.lint_node_id = new_id; } let ret = ($closure)(); $self.cx.current_expansion.lint_node_id = old_id; ret }}; } impl<'a, 'b> MutVisitor for InvocationCollector<'a, 'b> { fn visit_expr(&mut self, expr: &mut P<ast::Expr>) { self.cfg.configure_expr(expr); visit_clobber(expr.deref_mut(), |mut expr| { if let Some(attr) = self.take_first_attr(&mut expr) { // Collect the invoc regardless of whether or not attributes are permitted here // expansion will eat the attribute so it won't error later. self.cfg.maybe_emit_expr_attr_err(&attr.0); // AstFragmentKind::Expr requires the macro to emit an expression. return self .collect_attr(attr, Annotatable::Expr(P(expr)), AstFragmentKind::Expr) .make_expr() .into_inner(); } if let ast::ExprKind::MacCall(mac) = expr.kind { self.check_attributes(&expr.attrs, &mac); self.collect_bang(mac, expr.span, AstFragmentKind::Expr).make_expr().into_inner() } else { assign_id!(self, &mut expr.id, || { ensure_sufficient_stack(|| noop_visit_expr(&mut expr, self)); }); expr } }); } // This is needed in order to set `lint_node_id` for `let` statements fn visit_local(&mut self, local: &mut P<Local>) { assign_id!(self, &mut local.id, || noop_visit_local(local, self)); } fn flat_map_arm(&mut self, arm: ast::Arm) -> SmallVec<[ast::Arm; 1]> { let mut arm = configure!(self, arm); if let Some(attr) = self.take_first_attr(&mut arm) { return self .collect_attr(attr, Annotatable::Arm(arm), AstFragmentKind::Arms) .make_arms(); } assign_id!(self, &mut arm.id, || noop_flat_map_arm(arm, self)) } fn flat_map_expr_field(&mut self, field: ast::ExprField) -> SmallVec<[ast::ExprField; 1]> { let mut field = configure!(self, field); if let Some(attr) = self.take_first_attr(&mut field) { return self 
.collect_attr(attr, Annotatable::ExprField(field), AstFragmentKind::Fields) .make_expr_fields(); } assign_id!(self, &mut field.id, || noop_flat_map_expr_field(field, self)) } fn flat_map_pat_field(&mut self, fp: ast::PatField) -> SmallVec<[ast::PatField; 1]> { let mut fp = configure!(self, fp); if let Some(attr) = self.take_first_attr(&mut fp) { return self .collect_attr(attr, Annotatable::PatField(fp), AstFragmentKind::FieldPats) .make_pat_fields(); } assign_id!(self, &mut fp.id, || noop_flat_map_pat_field(fp, self)) } fn flat_map_param(&mut self, p: ast::Param) -> SmallVec<[ast::Param; 1]> { let mut p = configure!(self, p); if let Some(attr) = self.take_first_attr(&mut p) { return self .collect_attr(attr, Annotatable::Param(p), AstFragmentKind::Params) .make_params(); } assign_id!(self, &mut p.id, || noop_flat_map_param(p, self)) } fn flat_map_field_def(&mut self, sf: ast::FieldDef) -> SmallVec<[ast::FieldDef; 1]> { let mut sf = configure!(self, sf); if let Some(attr) = self.take_first_attr(&mut sf) { return self .collect_attr(attr, Annotatable::FieldDef(sf), AstFragmentKind::StructFields) .make_field_defs(); } assign_id!(self, &mut sf.id, || noop_flat_map_field_def(sf, self)) } fn flat_map_variant(&mut self, variant: ast::Variant) -> SmallVec<[ast::Variant; 1]> { let mut variant = configure!(self, variant); if let Some(attr) = self.take_first_attr(&mut variant) { return self .collect_attr(attr, Annotatable::Variant(variant), AstFragmentKind::Variants) .make_variants(); } assign_id!(self, &mut variant.id, || noop_flat_map_variant(variant, self)) } fn filter_map_expr(&mut self, expr: P<ast::Expr>) -> Option<P<ast::Expr>> { let expr = configure!(self, expr); expr.filter_map(|mut expr| { if let Some(attr) = self.take_first_attr(&mut expr) { self.cfg.maybe_emit_expr_attr_err(&attr.0); return self .collect_attr(attr, Annotatable::Expr(P(expr)), AstFragmentKind::OptExpr) .make_opt_expr() .map(|expr| expr.into_inner()); } if let ast::ExprKind::MacCall(mac) = expr.kind { 
self.check_attributes(&expr.attrs, &mac); self.collect_bang(mac, expr.span, AstFragmentKind::OptExpr) .make_opt_expr() .map(|expr| expr.into_inner()) } else { assign_id!(self, &mut expr.id, || { Some({ noop_visit_expr(&mut expr, self); expr }) }) } }) } fn visit_pat(&mut self, pat: &mut P<ast::Pat>) { match pat.kind { PatKind::MacCall(_) => {} _ => return noop_visit_pat(pat, self), } visit_clobber(pat, |mut pat| match mem::replace(&mut pat.kind, PatKind::Wild) { PatKind::MacCall(mac) => { self.collect_bang(mac, pat.span, AstFragmentKind::Pat).make_pat() } _ => unreachable!(), }); } fn flat_map_stmt(&mut self, stmt: ast::Stmt) -> SmallVec<[ast::Stmt; 1]> { let mut stmt = configure!(self, stmt); // we'll expand attributes on expressions separately if !stmt.is_expr() { if let Some(attr) = self.take_first_attr(&mut stmt) { return self .collect_attr(attr, Annotatable::Stmt(P(stmt)), AstFragmentKind::Stmts) .make_stmts(); } } if let StmtKind::MacCall(mac) = stmt.kind { let MacCallStmt { mac, style, attrs, tokens: _ } = mac.into_inner(); self.check_attributes(&attrs, &mac); let mut placeholder = self.collect_bang(mac, stmt.span, AstFragmentKind::Stmts).make_stmts(); // If this is a macro invocation with a semicolon, then apply that // semicolon to the final statement produced by expansion. if style == MacStmtStyle::Semicolon { if let Some(stmt) = placeholder.pop() { placeholder.push(stmt.add_trailing_semicolon()); } } return placeholder; } // The placeholder expander gives ids to statements, so we avoid folding the id here. // We don't use `assign_id!` - it will be called when we visit statement's contents // (e.g. 
an expression, item, or local) let ast::Stmt { id, kind, span } = stmt; noop_flat_map_stmt_kind(kind, self) .into_iter() .map(|kind| ast::Stmt { id, kind, span }) .collect() } fn visit_block(&mut self, block: &mut P<Block>) { let orig_dir_ownership = mem::replace( &mut self.cx.current_expansion.dir_ownership, DirOwnership::UnownedViaBlock, ); noop_visit_block(block, self); self.cx.current_expansion.dir_ownership = orig_dir_ownership; } fn flat_map_item(&mut self, item: P<ast::Item>) -> SmallVec<[P<ast::Item>; 1]> { let mut item = configure!(self, item); if let Some(attr) = self.take_first_attr(&mut item) { return self .collect_attr(attr, Annotatable::Item(item), AstFragmentKind::Items) .make_items(); } let mut attrs = mem::take(&mut item.attrs); // We do this to please borrowck. let ident = item.ident; let span = item.span; match item.kind { ast::ItemKind::MacCall(ref mac) => { self.check_attributes(&attrs, &mac); item.attrs = attrs; item.and_then(|item| match item.kind { ItemKind::MacCall(mac) => { self.collect_bang(mac, span, AstFragmentKind::Items).make_items() } _ => unreachable!(), }) } ast::ItemKind::Mod(_, ref mut mod_kind) if ident != Ident::invalid() => { let (file_path, dir_path, dir_ownership) = match mod_kind { ModKind::Loaded(_, inline, _) => { // Inline `mod foo { ... }`, but we still need to push directories. let (dir_path, dir_ownership) = mod_dir_path( &self.cx.sess, ident, &attrs, &self.cx.current_expansion.module, self.cx.current_expansion.dir_ownership, *inline, ); item.attrs = attrs; (None, dir_path, dir_ownership) } ModKind::Unloaded => { // We have an outline `mod foo;` so we need to parse the file. 
let old_attrs_len = attrs.len(); let ParsedExternalMod { mut items, inner_span, file_path, dir_path, dir_ownership, } = parse_external_mod( &self.cx.sess, ident, span, &self.cx.current_expansion.module, self.cx.current_expansion.dir_ownership, &mut attrs, ); if let Some(extern_mod_loaded) = self.cx.extern_mod_loaded { (attrs, items) = extern_mod_loaded(ident, attrs, items, inner_span); } *mod_kind = ModKind::Loaded(items, Inline::No, inner_span); item.attrs = attrs; if item.attrs.len() > old_attrs_len { // If we loaded an out-of-line module and added some inner attributes, // then we need to re-configure it and re-collect attributes for // resolution and expansion. item = configure!(self, item); if let Some(attr) = self.take_first_attr(&mut item) { return self .collect_attr( attr, Annotatable::Item(item), AstFragmentKind::Items, ) .make_items(); } } (Some(file_path), dir_path, dir_ownership) } }; // Set the module info before we flat map. let mut module = self.cx.current_expansion.module.with_dir_path(dir_path); module.mod_path.push(ident); if let Some(file_path) = file_path { module.file_path_stack.push(file_path); } let orig_module = mem::replace(&mut self.cx.current_expansion.module, Rc::new(module)); let orig_dir_ownership = mem::replace(&mut self.cx.current_expansion.dir_ownership, dir_ownership); let result = assign_id!(self, &mut item.id, || noop_flat_map_item(item, self)); // Restore the module info. self.cx.current_expansion.dir_ownership = orig_dir_ownership; self.cx.current_expansion.module = orig_module; result } _ => { item.attrs = attrs; // The crate root is special - don't assign an ID to it. 
if !(matches!(item.kind, ast::ItemKind::Mod(..)) && ident == Ident::invalid()) { assign_id!(self, &mut item.id, || noop_flat_map_item(item, self)) } else { noop_flat_map_item(item, self) } } } } fn flat_map_trait_item(&mut self, item: P<ast::AssocItem>) -> SmallVec<[P<ast::AssocItem>; 1]> { let mut item = configure!(self, item); if let Some(attr) = self.take_first_attr(&mut item) { return self .collect_attr(attr, Annotatable::TraitItem(item), AstFragmentKind::TraitItems) .make_trait_items(); } match item.kind { ast::AssocItemKind::MacCall(ref mac) => { self.check_attributes(&item.attrs, &mac); item.and_then(|item| match item.kind { ast::AssocItemKind::MacCall(mac) => self .collect_bang(mac, item.span, AstFragmentKind::TraitItems) .make_trait_items(), _ => unreachable!(), }) } _ => { assign_id!(self, &mut item.id, || noop_flat_map_assoc_item(item, self)) } } } fn flat_map_impl_item(&mut self, item: P<ast::AssocItem>) -> SmallVec<[P<ast::AssocItem>; 1]> { let mut item = configure!(self, item); if let Some(attr) = self.take_first_attr(&mut item) { return self .collect_attr(attr, Annotatable::ImplItem(item), AstFragmentKind::ImplItems) .make_impl_items(); } match item.kind { ast::AssocItemKind::MacCall(ref mac) => { self.check_attributes(&item.attrs, &mac); item.and_then(|item| match item.kind { ast::AssocItemKind::MacCall(mac) => self .collect_bang(mac, item.span, AstFragmentKind::ImplItems) .make_impl_items(), _ => unreachable!(), }) } _ => { assign_id!(self, &mut item.id, || noop_flat_map_assoc_item(item, self)) } } } fn visit_ty(&mut self, ty: &mut P<ast::Ty>) { match ty.kind { ast::TyKind::MacCall(_) => {} _ => return noop_visit_ty(ty, self), }; visit_clobber(ty, |mut ty| match mem::replace(&mut ty.kind, ast::TyKind::Err) { ast::TyKind::MacCall(mac) => { self.collect_bang(mac, ty.span, AstFragmentKind::Ty).make_ty() } _ => unreachable!(), }); } fn flat_map_foreign_item( &mut self, foreign_item: P<ast::ForeignItem>, ) -> SmallVec<[P<ast::ForeignItem>; 1]> { let mut 
foreign_item = configure!(self, foreign_item); if let Some(attr) = self.take_first_attr(&mut foreign_item) { return self .collect_attr( attr, Annotatable::ForeignItem(foreign_item), AstFragmentKind::ForeignItems, ) .make_foreign_items(); } match foreign_item.kind { ast::ForeignItemKind::MacCall(ref mac) => { self.check_attributes(&foreign_item.attrs, &mac); foreign_item.and_then(|item| match item.kind { ast::ForeignItemKind::MacCall(mac) => self .collect_bang(mac, item.span, AstFragmentKind::ForeignItems) .make_foreign_items(), _ => unreachable!(), }) } _ => { assign_id!(self, &mut foreign_item.id, || noop_flat_map_foreign_item( foreign_item, self )) } } } fn flat_map_generic_param( &mut self, param: ast::GenericParam, ) -> SmallVec<[ast::GenericParam; 1]> { let mut param = configure!(self, param); if let Some(attr) = self.take_first_attr(&mut param) { return self .collect_attr( attr, Annotatable::GenericParam(param), AstFragmentKind::GenericParams, ) .make_generic_params(); } assign_id!(self, &mut param.id, || noop_flat_map_generic_param(param, self)) } fn visit_id(&mut self, id: &mut ast::NodeId) { // We may have already assigned a `NodeId` // by calling `assign_id` if self.monotonic && *id == ast::DUMMY_NODE_ID { *id = self.cx.resolver.next_node_id(); } } } pub struct ExpansionConfig<'feat> { pub crate_name: String, pub features: Option<&'feat Features>, pub recursion_limit: Limit, pub trace_mac: bool, pub should_test: bool, // If false, strip `#[test]` nodes pub span_debug: bool, // If true, use verbose debugging for `proc_macro::Span` pub proc_macro_backtrace: bool, // If true, show backtraces for proc-macro panics } impl<'feat> ExpansionConfig<'feat> { pub fn default(crate_name: String) -> ExpansionConfig<'static> { ExpansionConfig { crate_name, features: None, recursion_limit: Limit::new(1024), trace_mac: false, should_test: false, span_debug: false, proc_macro_backtrace: false, } } fn proc_macro_hygiene(&self) -> bool { self.features.map_or(false, 
|features| features.proc_macro_hygiene) } }
40.363805
131
0.515693
e2872738b7a6309d93ab9fe86151bf90c13980b6
1,156
//! Data model for the application's search screen.

use std::sync::Arc;

use druid::{im::Vector, Data, Lens};

use crate::data::{Album, Artist, Playlist, Promise, Show, Track};

/// State of the search screen: the text the user has typed plus the
/// (possibly still in-flight) results.
#[derive(Clone, Data, Lens)]
pub struct Search {
    /// Current contents of the search input box.
    pub input: String,
    /// Deferred search results. The `Arc<str>` type parameter matches
    /// [`SearchResults::query`] — presumably the query the request was
    /// issued for (confirm against `Promise`'s definition).
    pub results: Promise<SearchResults, Arc<str>>,
}

/// Category of entity a search can target.
#[derive(Copy, Clone, Data, Eq, PartialEq)]
pub enum SearchTopic {
    Artist,
    Album,
    Track,
    Playlist,
    Show,
}

impl SearchTopic {
    /// Lowercase string form of the topic — presumably the `type` value
    /// expected by the search API (verify against the caller).
    pub fn as_str(&self) -> &'static str {
        match self {
            SearchTopic::Artist => "artist",
            SearchTopic::Album => "album",
            SearchTopic::Track => "track",
            SearchTopic::Playlist => "playlist",
            SearchTopic::Show => "show",
        }
    }

    /// All topics, in declaration order.
    pub fn all() -> &'static [Self] {
        &[
            Self::Artist,
            Self::Album,
            Self::Track,
            Self::Playlist,
            Self::Show,
        ]
    }
}

/// Results of one completed search, grouped per topic.
#[derive(Clone, Data, Lens)]
pub struct SearchResults {
    /// The query these results were produced for.
    pub query: Arc<str>,
    pub artists: Vector<Artist>,
    pub albums: Vector<Arc<Album>>,
    pub tracks: Vector<Arc<Track>>,
    pub playlists: Vector<Playlist>,
    pub shows: Vector<Arc<Show>>,
}
21.811321
65
0.552768
edf408a1e6c0b1daf1c56f09c73ea2cefd16d5f1
8,266
//! Babel-compatible AST nodes for object-literal members (`ObjectMethod`
//! and `ObjectProperty`). Deserialization is derived; serialization is
//! hand-written so the same node can be emitted in either the Babel or
//! the Acorn JSON flavor (selected by [`Flavor::current`]).

use serde::{ser::SerializeMap, Deserialize, Serialize};
use swc_common::ast_serde;

use crate::{
    common::{
        BaseNode, Decorator, Identifier, Param, PatternLike, TypeAnnotOrNoop, TypeParamDeclOrNoop,
    },
    expr::Expression,
    flavor::Flavor,
    flow::{
        ObjectTypeCallProperty, ObjectTypeIndexer, ObjectTypeInternalSlot, ObjectTypeProperty,
        ObjectTypeSpreadProperty,
    },
    lit::{NumericLiteral, StringLiteral},
    stmt::BlockStatement,
};

/// Union of object-member and Flow object-type-member nodes, dispatched by
/// the JSON `type` field via `#[ast_serde]` / `#[tag]`.
#[derive(Debug, Clone, PartialEq)]
#[ast_serde]
pub enum UserWhitespacable {
    #[tag("ObjectMethod")]
    ObjectMethod(ObjectMethod),
    #[tag("ObjectProperty")]
    ObjectProperty(ObjectProperty),
    #[tag("ObjectTypeInternalSlot")]
    ObjectTypeInternalSlot(ObjectTypeInternalSlot),
    #[tag("ObjectTypeCallProperty")]
    ObjectTypeCallProperty(ObjectTypeCallProperty),
    #[tag("ObjectTypeIndexer")]
    ObjectTypeIndexer(ObjectTypeIndexer),
    #[tag("ObjectTypeProperty")]
    ObjectTypeProperty(ObjectTypeProperty),
    #[tag("ObjectTypeSpreadProperty")]
    ObjectTypeSpreadProperty(ObjectTypeSpreadProperty),
}

/// A member of an object literal: either a method or a plain property.
#[derive(Debug, Clone, PartialEq)]
#[ast_serde]
pub enum ObjectMember {
    // NOTE(review): the tag is "ObjectMember" while the payload type is
    // `ObjectMethod` — every other variant tags with the node-type name.
    // Verify this is intentional and not meant to be "ObjectMethod".
    #[tag("ObjectMember")]
    Method(ObjectMethod),
    #[tag("ObjectProperty")]
    Prop(ObjectProperty),
}

/// Kind of an object method; serialized in lowercase
/// (`"method"` / `"get"` / `"set"`).
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum ObjectMethodKind {
    Method,
    Get,
    Set,
}

/// The key of an object member. `#[tag("*")]` marks the catch-all arm used
/// for arbitrary (e.g. computed) key expressions.
#[derive(Debug, Clone, PartialEq)]
#[ast_serde]
pub enum ObjectKey {
    #[tag("Identifier")]
    Id(Identifier),
    #[tag("StringLiteral")]
    String(StringLiteral),
    #[tag("NumericLiteral")]
    Numeric(NumericLiteral),
    #[tag("*")]
    Expr(Box<Expression>),
}

/// Babel `ObjectMethod` node. Only `Deserialize` is derived; `Serialize`
/// is implemented by hand below to support both output flavors.
#[derive(Debug, Clone, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
#[serde(tag = "type")]
pub struct ObjectMethod {
    #[serde(flatten)]
    pub base: BaseNode,
    pub kind: ObjectMethodKind,
    pub key: ObjectKey,
    #[serde(default)]
    pub params: Vec<Param>,
    pub body: BlockStatement,
    #[serde(default)]
    pub computed: bool,
    #[serde(default)]
    pub generator: Option<bool>,
    #[serde(default, rename = "async")]
    pub is_async: Option<bool>,
    // NOTE(review): singular `decorator` here vs. plural `decorators` on
    // `ObjectProperty` — confirm the field name matches the wire format.
    #[serde(default)]
    pub decorator: Option<Vec<Decorator>>,
    #[serde(default)]
    pub return_type: Option<Box<TypeAnnotOrNoop>>,
    #[serde(default)]
    pub type_parameters: Option<TypeParamDeclOrNoop>,
}

impl Serialize for ObjectMethod {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        match Flavor::current() {
            Flavor::Babel => {
                // Babel flavor: serialize through the borrowed mirror
                // struct so no fields are cloned.
                let actual = BabelObjectMethod {
                    type_: "ObjectMethod",
                    base: &self.base,
                    key: &self.key,
                    kind: self.kind,
                    params: &self.params,
                    body: &self.body,
                    computed: self.computed,
                    generator: self.generator,
                    is_async: self.is_async,
                    decorator: self.decorator.as_deref(),
                    return_type: self.return_type.as_deref(),
                    type_parameters: self.type_parameters.as_ref(),
                };
                actual.serialize(serializer)
            }
            Flavor::Acorn => {
                // Acorn flavor has no ObjectMethod node: emit a `Property`
                // whose `value` is a `FunctionExpression`.
                let mut s = serializer.serialize_map(None)?;
                {
                    // Flatten `base`'s fields into the same map.
                    // TODO(kdy1): This is bad.
                    self.base
                        .serialize(serde::__private::ser::FlatMapSerializer(&mut s))?;
                }
                s.serialize_entry("type", "Property")?;
                s.serialize_entry("kind", &self.kind)?;
                // NOTE(review): `method` is always false here even when
                // `kind == Method` — confirm this matches the expected
                // Acorn output.
                s.serialize_entry("method", &false)?;
                s.serialize_entry("shorthand", &false)?;
                s.serialize_entry("key", &self.key)?;
                s.serialize_entry("computed", &self.computed)?;
                s.serialize_entry(
                    "value",
                    &AcornObjectMethodValue {
                        type_: "FunctionExpression",
                        base: &self.base,
                        body: &self.body,
                        params: &self.params,
                        generator: self.generator.unwrap_or(false),
                        is_async: self.is_async.unwrap_or(false),
                    },
                )?;
                s.end()
            }
        }
    }
}

/// `value` payload emitted for an `ObjectMethod` in Acorn flavor.
#[derive(Serialize)]
struct AcornObjectMethodValue<'a> {
    /// Always `"FunctionExpression"`.
    #[serde(rename = "type")]
    type_: &'static str,
    #[serde(flatten)]
    base: &'a BaseNode,
    body: &'a BlockStatement,
    params: &'a [Param],
    generator: bool,
    #[serde(rename = "async")]
    is_async: bool,
}

/// Borrowed mirror of [`ObjectMethod`] used for Babel-flavor
/// serialization. The `#[serde(default)]` attributes only affect
/// deserialization and are inert here; they mirror the owning struct.
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct BabelObjectMethod<'a> {
    #[serde(rename = "type")]
    type_: &'static str,
    #[serde(flatten)]
    pub base: &'a BaseNode,
    pub kind: ObjectMethodKind,
    pub key: &'a ObjectKey,
    #[serde(default)]
    pub params: &'a [Param],
    pub body: &'a BlockStatement,
    #[serde(default)]
    pub computed: bool,
    #[serde(default)]
    pub generator: Option<bool>,
    #[serde(default, rename = "async")]
    pub is_async: Option<bool>,
    #[serde(default)]
    pub decorator: Option<&'a [Decorator]>,
    #[serde(default)]
    pub return_type: Option<&'a TypeAnnotOrNoop>,
    #[serde(default)]
    pub type_parameters: Option<&'a TypeParamDeclOrNoop>,
}

/// Value of an [`ObjectProperty`]: either a binding pattern (the listed
/// pattern tags) or, as a catch-all, an arbitrary expression.
#[derive(Debug, Clone, PartialEq)]
#[ast_serde]
pub enum ObjectPropVal {
    #[tag("Identifier")]
    #[tag("RestElement")]
    #[tag("AssignmentPattern")]
    #[tag("ArrayPattern")]
    #[tag("ObjectPattern")]
    Pattern(PatternLike),
    #[tag("*")]
    Expr(Box<Expression>),
}

/// Babel `ObjectProperty` node (a `key: value` member). `Serialize` is
/// implemented by hand below.
#[derive(Debug, Clone, Deserialize, PartialEq)]
pub struct ObjectProperty {
    #[serde(flatten)]
    pub base: BaseNode,
    pub key: ObjectKey,
    pub value: ObjectPropVal,
    #[serde(default)]
    pub computed: bool,
    #[serde(default)]
    pub shorthand: bool,
    #[serde(default, skip_serializing_if = "crate::flavor::Flavor::skip_none")]
    pub decorators: Option<Vec<Decorator>>,
}

/// Borrowed mirror of [`ObjectProperty`] used for Babel-flavor
/// serialization.
#[derive(Serialize)]
struct BabelObjectProperty<'a> {
    #[serde(rename = "type")]
    type_: &'a str,
    #[serde(flatten)]
    base: &'a BaseNode,
    key: &'a ObjectKey,
    value: &'a ObjectPropVal,
    method: bool,
    computed: bool,
    shorthand: bool,
    #[serde(skip_serializing_if = "crate::flavor::Flavor::skip_none")]
    decorators: Option<&'a [Decorator]>,
}

impl Serialize for ObjectProperty {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        match Flavor::current() {
            Flavor::Babel => {
                // Babel flavor: a plain property is never a method.
                let actual = BabelObjectProperty {
                    type_: "ObjectProperty",
                    base: &self.base,
                    key: &self.key,
                    value: &self.value,
                    method: false,
                    computed: self.computed,
                    shorthand: self.shorthand,
                    decorators: self.decorators.as_deref(),
                };
                actual.serialize(serializer)
            }
            Flavor::Acorn => {
                // Acorn flavor: emit a `Property` with `kind: "init"`.
                let mut s = serializer.serialize_map(None)?;
                {
                    // Flatten `base`'s fields into the same map.
                    // TODO(kdy1): This is bad.
                    self.base
                        .serialize(serde::__private::ser::FlatMapSerializer(&mut s))?;
                }
                s.serialize_entry("type", "Property")?;
                s.serialize_entry("kind", "init")?;
                s.serialize_entry("method", &false)?;
                s.serialize_entry("shorthand", &self.shorthand)?;
                s.serialize_entry("key", &self.key)?;
                s.serialize_entry("value", &self.value)?;
                s.serialize_entry("computed", &self.computed)?;
                // `decorators` is emitted only when present and non-empty.
                if let Some(decorators) = &self.decorators {
                    if !decorators.is_empty() {
                        s.serialize_entry("decorators", decorators)?;
                    }
                }
                s.end()
            }
        }
    }
}
29.521429
98
0.555771
1c06696a770cdbd1651fd083582baa387157edd4
235
#[macro_use] extern crate afl; fn main() { fuzz!(|data: &[u8]| { // Use first 32 bytes of data as key. if data.len() >= 32 { poly1305::fuzz_avx2((&data[0..32]).into(), &data[32..]); } }); }
19.583333
68
0.468085
1ae42873c0533bdac968d7d8541d4b32b3223205
3,098
// Take a look at the license at the top of the repository in the LICENSE file.

// Stub `System` implementation for platforms without a real backend:
// every `refresh_*` call is a no-op and every accessor returns an empty
// or zero value, so dependent code still compiles and runs (it just
// reports nothing useful). Note `IS_SUPPORTED` below is `false`.

use crate::{
    sys::{component::Component, Disk, Networks, Process, Processor},
    LoadAvg, Pid, ProcessRefreshKind, RefreshKind, SystemExt, User,
};

use std::collections::HashMap;

#[doc = include_str!("../../md_doc/system.md")]
pub struct System {
    // Always empty here; kept so `processes()` can return a reference.
    processes_list: HashMap<Pid, Process>,
    networks: Networks,
    global_processor: Processor,
}

impl SystemExt for System {
    // Lets callers detect that this target has no working backend.
    const IS_SUPPORTED: bool = false;

    fn new_with_specifics(_: RefreshKind) -> System {
        // The requested refresh kinds are ignored: nothing to refresh here.
        System {
            processes_list: Default::default(),
            networks: Networks::new(),
            global_processor: Processor::new(),
        }
    }

    // All refresh operations are intentionally no-ops on this platform.

    fn refresh_memory(&mut self) {}

    fn refresh_cpu(&mut self) {}

    fn refresh_components_list(&mut self) {}

    fn refresh_processes_specifics(&mut self, _refresh_kind: ProcessRefreshKind) {}

    fn refresh_process_specifics(&mut self, _pid: Pid, _refresh_kind: ProcessRefreshKind) -> bool {
        // Always reports failure: there is no process data to refresh.
        false
    }

    fn refresh_disks_list(&mut self) {}

    fn refresh_users_list(&mut self) {}

    // COMMON PART
    //
    // Need to be moved into a "common" file to avoid duplication.

    fn processes(&self) -> &HashMap<Pid, Process> {
        &self.processes_list
    }

    fn process(&self, _pid: Pid) -> Option<&Process> {
        None
    }

    fn networks(&self) -> &Networks {
        &self.networks
    }

    fn networks_mut(&mut self) -> &mut Networks {
        &mut self.networks
    }

    fn global_processor_info(&self) -> &Processor {
        &self.global_processor
    }

    fn processors(&self) -> &[Processor] {
        &[]
    }

    fn physical_core_count(&self) -> Option<usize> {
        None
    }

    // Memory and swap figures are all reported as 0 (unknown).

    fn total_memory(&self) -> u64 {
        0
    }

    fn free_memory(&self) -> u64 {
        0
    }

    fn available_memory(&self) -> u64 {
        0
    }

    fn used_memory(&self) -> u64 {
        0
    }

    fn total_swap(&self) -> u64 {
        0
    }

    fn free_swap(&self) -> u64 {
        0
    }

    fn used_swap(&self) -> u64 {
        0
    }

    fn components(&self) -> &[Component] {
        &[]
    }

    fn components_mut(&mut self) -> &mut [Component] {
        &mut []
    }

    fn disks(&self) -> &[Disk] {
        &[]
    }

    fn disks_mut(&mut self) -> &mut [Disk] {
        &mut []
    }

    fn uptime(&self) -> u64 {
        0
    }

    fn boot_time(&self) -> u64 {
        0
    }

    fn load_average(&self) -> LoadAvg {
        // No data available: all three load averages are reported as 0.0.
        LoadAvg {
            one: 0.,
            five: 0.,
            fifteen: 0.,
        }
    }

    fn users(&self) -> &[User] {
        &[]
    }

    // OS identification is unavailable on this platform.

    fn name(&self) -> Option<String> {
        None
    }

    fn long_os_version(&self) -> Option<String> {
        None
    }

    fn kernel_version(&self) -> Option<String> {
        None
    }

    fn os_version(&self) -> Option<String> {
        None
    }

    fn host_name(&self) -> Option<String> {
        None
    }
}

impl Default for System {
    fn default() -> System {
        // `System::new()` is presumably provided by `SystemExt` (no
        // inherent `new` is defined in this file) — confirm in the trait.
        System::new()
    }
}
18.662651
99
0.534538
50133f152890759fb0d19e575dedb4b2d1480abf
617
//! 内存管理模块 //! //! 负责空间分配和虚拟地址映射 // 因为模块内包含许多基础设施类别,实现了许多以后可能会用到的函数, // 所以在模块范围内不提示「未使用的函数」等警告 #![allow(dead_code)] pub mod address; pub mod config; pub mod frame; pub mod heap; pub mod range; pub mod mapping; /// 一个缩写,模块中一些函数会使用 pub type MemoryResult<T> = Result<T, &'static str>; pub use { address::*, config::*, frame::FRAME_ALLOCATOR, mapping::{Flags, MapType, MemorySet, Segment}, range::Range, }; /// 初始化内存相关的子模块 /// /// - [`heap::init`] pub fn init() { heap::init(); // 允许内核读写用户态内存 unsafe { riscv::register::sstatus::set_sum() }; println!("mod memory initialized"); }
16.675676
51
0.638574
e59e50e257c5147796c1305142ee297a6ff2222d
362
use std::{error, io::{self, Read}};

mod markdown;
mod renderer;

/// Reads Markdown from stdin, renders it to HTML, and writes the result
/// to stdout. Any read or parse error is propagated to the exit status.
fn main() -> Result<(), Box<dyn error::Error>> {
    // Slurp all of stdin into one string before parsing.
    let mut input = String::new();
    io::stdin().read_to_string(&mut input)?;

    let html = markdown::parse(&input, &renderer::HtmlRenderer {})?;
    print!("{}", html);
    Ok(())
}
21.294118
52
0.59116
f4b81e332e5782d1e16bd80fb3e8218e12c5c7da
37,704
extern crate indy; extern crate indy_crypto; // Workaround to share some utils code based on indy sdk types between tests and indy sdk use indy::api as api; #[macro_use] extern crate serde_derive; #[macro_use] extern crate serde_json; #[macro_use] extern crate lazy_static; #[macro_use] extern crate log; #[macro_use] mod utils; #[cfg(feature = "local_nodes_pool")] use utils::callback::CallbackUtils; use utils::pool::PoolUtils; use utils::test::TestUtils; use utils::timeout::TimeoutUtils; use utils::domain::credential_definition::CredentialDefinition; use utils::domain::credential_for_proof_request::CredentialsForProofRequest; use utils::domain::proof::Proof; use utils::domain::revocation_registry_definition::RevocationRegistryDefinition; use utils::domain::revocation_registry::RevocationRegistry; use utils::domain::revocation_state::RevocationState; use utils::domain::schema::Schema; use utils::environment::EnvironmentUtils; use indy::api::ErrorCode; use indy::api::anoncreds::*; use indy::api::blob_storage::*; use indy::api::crypto::*; #[cfg(feature = "local_nodes_pool")] use indy::api::ledger::*; #[cfg(feature = "local_nodes_pool")] use indy::api::pool::*; use indy::api::wallet::*; use indy::api::did::*; use std::ptr::null; use std::ffi::CString; #[cfg(feature = "local_nodes_pool")] use std::thread; #[test] fn anoncreds_demo_works() { TestUtils::cleanup_storage(); let (issuer_create_schema_receiver, issuer_create_schema_command_handle, issuer_create_schema_callback) = CallbackUtils::_closure_to_cb_ec_string_string(); let (issuer_create_credential_definition_receiver, issuer_create_credential_definition_command_handle, issuer_create_credential_definition_callback) = CallbackUtils::_closure_to_cb_ec_string_string(); let (issuer_create_credential_offer_receiver, issuer_create_credential_offer_command_handle, issuer_create_credential_offer_callback) = CallbackUtils::_closure_to_cb_ec_string(); let (create_wallet_receiver, create_wallet_command_handle, 
create_wallet_callback) = CallbackUtils::_closure_to_cb_ec(); let (open_wallet_receiver, open_wallet_command_handle, open_wallet_callback) = CallbackUtils::_closure_to_cb_ec_i32(); let (prover_create_master_secret_receiver, prover_create_master_secret_command_handle, prover_create_master_secret_callback) = CallbackUtils::_closure_to_cb_ec_string(); let (prover_create_credential_req_receiver, prover_create_credential_req_command_handle, prover_create_credential_req_callback) = CallbackUtils::_closure_to_cb_ec_string_string(); let (issuer_create_credential_receiver, issuer_create_credential_command_handle, issuer_create_credential_callback) = CallbackUtils::_closure_to_cb_ec_string_opt_string_opt_string(); let (prover_store_credential_receiver, prover_store_credential_command_handle, prover_store_credential_callback) = CallbackUtils::_closure_to_cb_ec_string(); let (prover_get_credentials_for_proof_req_receiver, prover_get_credentials_for_proof_req_command_handle, prover_get_credentials_for_proof_req_callback) = CallbackUtils::_closure_to_cb_ec_string(); let (prover_create_proof_receiver, prover_create_proof_command_handle, prover_create_proof_callback) = CallbackUtils::_closure_to_cb_ec_string(); let (verifier_verify_proof_receiver, verifier_verify_proof_command_handle, verifier_verify_proof_callback) = CallbackUtils::_closure_to_cb_ec_bool(); let (close_wallet_receiver, close_wallet_command_handle, close_wallet_callback) = CallbackUtils::_closure_to_cb_ec(); let (bs_writer_receiver, bs_writer_command_handle, bs_writer_cb) = CallbackUtils::_closure_to_cb_ec_i32(); let (bs_reader_receiver, bs_reader_command_handle, bs_reader_cb) = CallbackUtils::_closure_to_cb_ec_i32(); let (cs_rev_reg_receiver, cs_rev_reg_command_handle, cs_rev_reg_cb) = CallbackUtils::_closure_to_cb_ec_string_string_string(); let (create_rev_state_receiver, create_rev_state_command_handle, create_rev_state_cb) = CallbackUtils::_closure_to_cb_ec_string(); let pool_name = "pool_1"; let wallet_name = 
"issuer_wallet1"; let xtype = "default"; //TODO CREATE ISSUER, PROVER, VERIFIER WALLETS //1. Creates Wallet let err = indy_create_wallet(create_wallet_command_handle, CString::new(pool_name).unwrap().as_ptr(), CString::new(wallet_name).unwrap().as_ptr(), CString::new(xtype).unwrap().as_ptr(), null(), null(), create_wallet_callback); assert_eq!(ErrorCode::Success, err); let err = create_wallet_receiver.recv_timeout(TimeoutUtils::long_timeout()).unwrap(); assert_eq!(ErrorCode::Success, err); //2. Opens Wallet let err = indy_open_wallet(open_wallet_command_handle, CString::new(wallet_name).unwrap().as_ptr(), null(), null(), open_wallet_callback); assert_eq!(ErrorCode::Success, err); let (err, wallet_handle) = open_wallet_receiver.recv_timeout(TimeoutUtils::long_timeout()).unwrap(); assert_eq!(ErrorCode::Success, err); let issuer_did = "NcYxiDXkpYi6ov5FcYDi1e"; let prover_did = "VsKV7grR1BUE29mG2Fm2kX"; let schema_name = "gvt"; let version = "1.0"; let attrs = r#"["name", "age", "sex", "height"]"#; // 3. Issuer create Schema let err = indy_issuer_create_schema(issuer_create_schema_command_handle, CString::new(issuer_did.clone()).unwrap().as_ptr(), CString::new(schema_name.clone()).unwrap().as_ptr(), CString::new(version.clone()).unwrap().as_ptr(), CString::new(attrs.clone()).unwrap().as_ptr(), issuer_create_schema_callback); assert_eq!(ErrorCode::Success, err); let (err, schema_id, schema_json) = issuer_create_schema_receiver.recv_timeout(TimeoutUtils::long_timeout()).unwrap(); assert_eq!(ErrorCode::Success, err); // 4. 
Issuer create Credential Definition for Schema let tag = r#"TAG1"#; let config = r#"{ "support_revocation": true }"#; let err = indy_issuer_create_and_store_credential_def(issuer_create_credential_definition_command_handle, wallet_handle, CString::new(issuer_did.clone()).unwrap().as_ptr(), CString::new(schema_json.clone()).unwrap().as_ptr(), CString::new(tag.clone()).unwrap().as_ptr(), null(), CString::new(config.clone()).unwrap().as_ptr(), issuer_create_credential_definition_callback); assert_eq!(ErrorCode::Success, err); let (err, credential_def_id, credential_def_json) = issuer_create_credential_definition_receiver.recv_timeout(TimeoutUtils::long_timeout()).unwrap(); assert_eq!(ErrorCode::Success, err); // 4.1 Issuer configure blob storage for Tails then create and store RevocationRegistry let tails_writer_config = json!({ "base_dir": EnvironmentUtils::tmp_file_path("tails").to_str().unwrap(), "uri_pattern":"", }).to_string(); let err = indy_open_blob_storage_writer(bs_writer_command_handle, CString::new("default").unwrap().as_ptr(), CString::new(tails_writer_config).unwrap().as_ptr(), bs_writer_cb); assert_eq!(ErrorCode::Success, err); let (err, tails_writer_handle) = bs_writer_receiver.recv_timeout(TimeoutUtils::short_timeout()).unwrap(); assert_eq!(ErrorCode::Success, err); let err = indy_issuer_create_and_store_revoc_reg(cs_rev_reg_command_handle, wallet_handle, CString::new(issuer_did).unwrap().as_ptr(), null(), CString::new("TAG1").unwrap().as_ptr(), CString::new(credential_def_id.clone()).unwrap().as_ptr(), CString::new(r#"{"max_cred_num":5, "issuance_type":"ISSUANCE_ON_DEMAND"}"#).unwrap().as_ptr(), tails_writer_handle, cs_rev_reg_cb); assert_eq!(ErrorCode::Success, err); let (err, rev_reg_id, revoc_reg_def_json, _) = cs_rev_reg_receiver.recv().unwrap(); assert_eq!(ErrorCode::Success, err); // 5. 
Prover create Master Secret let master_secret_id = "master_secret"; let err = indy_prover_create_master_secret(prover_create_master_secret_command_handle, wallet_handle, CString::new(master_secret_id).unwrap().as_ptr(), prover_create_master_secret_callback); assert_eq!(ErrorCode::Success, err); let (err, _) = prover_create_master_secret_receiver.recv_timeout(TimeoutUtils::long_timeout()).unwrap(); assert_eq!(ErrorCode::Success, err); // 6. Issuer create Credential Offer let err = indy_issuer_create_credential_offer(issuer_create_credential_offer_command_handle, wallet_handle, CString::new(credential_def_id.clone()).unwrap().as_ptr(), issuer_create_credential_offer_callback); assert_eq!(ErrorCode::Success, err); let (err, credential_offer_json) = issuer_create_credential_offer_receiver.recv_timeout(TimeoutUtils::long_timeout()).unwrap(); assert_eq!(ErrorCode::Success, err); // 7. Prover create Credential Request let err = indy_prover_create_credential_req(prover_create_credential_req_command_handle, wallet_handle, CString::new(prover_did).unwrap().as_ptr(), CString::new(credential_offer_json.clone()).unwrap().as_ptr(), CString::new(credential_def_json.clone()).unwrap().as_ptr(), CString::new(master_secret_id).unwrap().as_ptr(), prover_create_credential_req_callback); assert_eq!(ErrorCode::Success, err); let (err, credential_req_json, credential_req_metadata_json) = prover_create_credential_req_receiver.recv_timeout(TimeoutUtils::long_timeout()).unwrap(); assert_eq!(ErrorCode::Success, err); // 8. 
Issuer create Credential for Credential Request let credential_json = r#"{ "sex":{"raw":"male", "encoded":"5944657099558967239210949258394887428692050081607692519917050011144233115103"}, "name":{"raw":"Alex", "encoded":"1139481716457488690172217916278103335"}, "height":{"raw":"175", "encoded":"175"}, "age":{"raw":"28", "encoded":"28"} }"#; // 8.1 Creating credential requires access to Tails: Issuer configure blob storage to read let tails_reader_config = json!({ "base_dir": EnvironmentUtils::tmp_file_path("tails").to_str().unwrap(), }).to_string(); let err = indy_open_blob_storage_reader(bs_reader_command_handle, CString::new("default").unwrap().as_ptr(), CString::new(tails_reader_config).unwrap().as_ptr(), bs_reader_cb); assert_eq!(ErrorCode::Success, err); let (err, blob_storage_reader_handle) = bs_reader_receiver.recv_timeout(TimeoutUtils::short_timeout()).unwrap(); assert_eq!(ErrorCode::Success, err); let err = indy_issuer_create_credential(issuer_create_credential_command_handle, wallet_handle, CString::new(credential_offer_json).unwrap().as_ptr(), CString::new(credential_req_json.clone()).unwrap().as_ptr(), CString::new(credential_json).unwrap().as_ptr(), CString::new(rev_reg_id.clone()).unwrap().as_ptr(), blob_storage_reader_handle, issuer_create_credential_callback); assert_eq!(ErrorCode::Success, err); let (err, credential_json, cred_rev_id, rreg_issue_delta_json) = issuer_create_credential_receiver.recv_timeout(TimeoutUtils::long_timeout()).unwrap(); assert_eq!(ErrorCode::Success, err); let rreg_issue_delta_json = rreg_issue_delta_json.unwrap(); let cred_rev_id = cred_rev_id.unwrap(); // 9. 
Prover process and store Credential let credential_id = "credential_id"; let err = indy_prover_store_credential(prover_store_credential_command_handle, wallet_handle, CString::new(credential_id).unwrap().as_ptr(), CString::new(credential_req_metadata_json).unwrap().as_ptr(), CString::new(credential_json).unwrap().as_ptr(), CString::new(credential_def_json.clone()).unwrap().as_ptr(), CString::new(revoc_reg_def_json.clone()).unwrap().as_ptr(), prover_store_credential_callback); assert_eq!(ErrorCode::Success, err); let (err, _) = prover_store_credential_receiver.recv_timeout(TimeoutUtils::long_timeout()).unwrap(); assert_eq!(ErrorCode::Success, err); let proof_req_json = r#"{ "nonce":"123432421212", "name":"proof_req_1", "version":"0.1", "requested_attributes":{ "attr1_referent":{ "name":"name" } }, "requested_predicates":{ "predicate1_referent":{ "name":"age", "p_type":">=", "p_value":18 } }, "non_revoked": { "from": 80, "to": 120 } }"#; // 10 Prover prepare Credential to prove // 10.1 Prover gets Credentials for Proof Request let err = indy_prover_get_credentials_for_proof_req(prover_get_credentials_for_proof_req_command_handle, wallet_handle, CString::new(proof_req_json.clone()).unwrap().as_ptr(), prover_get_credentials_for_proof_req_callback); assert_eq!(ErrorCode::Success, err); let (err, credentials_json) = prover_get_credentials_for_proof_req_receiver.recv_timeout(TimeoutUtils::long_timeout()).unwrap(); assert_eq!(ErrorCode::Success, err); let credentials: CredentialsForProofRequest = serde_json::from_str(&credentials_json).unwrap(); let credentials_for_attr_1 = credentials.attrs.get("attr1_referent").unwrap(); assert_eq!(1, credentials_for_attr_1.len()); let credential = credentials_for_attr_1[0].cred_info.clone(); // 10.2 Prover select appropriate timestamp for revocation part of each credential and build states let issue_ts = 100; let err = indy_create_revocation_state(create_rev_state_command_handle, blob_storage_reader_handle, 
CString::new(revoc_reg_def_json.clone()).unwrap().as_ptr(), CString::new(rreg_issue_delta_json.clone()).unwrap().as_ptr(), issue_ts, CString::new(cred_rev_id).unwrap().as_ptr(), create_rev_state_cb); assert_eq!(ErrorCode::Success, err); let (err, rev_state_json) = create_rev_state_receiver.recv().unwrap(); assert_eq!(ErrorCode::Success, err); let rev_state_json: RevocationState = serde_json::from_str(&rev_state_json).unwrap(); // 10.3 Prover collect map with revocation states in the next format: // rev_reg_id1 -> { // ts1 -> state1_1, // ts2 -> state1_2 // ... // }, // rev_reg2 -> { ... } let rev_states_json = json!({ rev_reg_id.as_str(): { issue_ts.to_string(): rev_state_json } }).to_string(); let requested_credentials_json = json!({ "self_attested_attributes": {}, "requested_attributes": { "attr1_referent": { "cred_id": credential.referent, "timestamp": issue_ts, "revealed": true } }, "requested_predicates":{ "predicate1_referent":{ "cred_id": credential.referent, "timestamp": issue_ts } } }).to_string(); let schemas_json = json!({ schema_id.clone(): serde_json::from_str::<Schema>(&schema_json).unwrap() }).to_string(); let credential_defs_json = json!({ credential_def_id.clone(): serde_json::from_str::<CredentialDefinition>(&credential_def_json).unwrap() }).to_string(); // 11. Prover create Proof for Proof Request let err = indy_prover_create_proof(prover_create_proof_command_handle, wallet_handle, CString::new(proof_req_json.clone()).unwrap().as_ptr(), CString::new(requested_credentials_json).unwrap().as_ptr(), CString::new(master_secret_id).unwrap().as_ptr(), CString::new(schemas_json.clone()).unwrap().as_ptr(), CString::new(credential_defs_json.clone()).unwrap().as_ptr(), CString::new(rev_states_json.clone()).unwrap().as_ptr(), prover_create_proof_callback); assert_eq!(ErrorCode::Success, err); let (err, proof_json) = prover_create_proof_receiver.recv_timeout(TimeoutUtils::long_timeout()).unwrap(); assert_eq!(ErrorCode::Success, err); // 12. 
Verifier verify proof let proof: Proof = serde_json::from_str(&proof_json).unwrap(); let revealed_attr_1 = proof.requested_proof.revealed_attrs.get("attr1_referent").unwrap(); assert_eq!("Alex", revealed_attr_1.raw); let rev_reg_defs_json = json!({ rev_reg_id.as_str(): serde_json::from_str::<RevocationRegistryDefinition>(&revoc_reg_def_json).unwrap() }).to_string(); let rev_regs_json = json!({ rev_reg_id: { issue_ts.to_string(): serde_json::from_str::<RevocationRegistry>(&rreg_issue_delta_json).unwrap() } }).to_string(); let err = indy_verifier_verify_proof(verifier_verify_proof_command_handle, CString::new(proof_req_json).unwrap().as_ptr(), CString::new(proof_json).unwrap().as_ptr(), CString::new(schemas_json).unwrap().as_ptr(), CString::new(credential_defs_json).unwrap().as_ptr(), CString::new(rev_reg_defs_json).unwrap().as_ptr(), CString::new(rev_regs_json).unwrap().as_ptr(), verifier_verify_proof_callback); assert_eq!(ErrorCode::Success, err); let (err, valid) = verifier_verify_proof_receiver.recv_timeout(TimeoutUtils::long_timeout()).unwrap(); assert_eq!(ErrorCode::Success, err); assert!(valid); // 13. 
Close wallet let res = indy_close_wallet(close_wallet_command_handle, wallet_handle, close_wallet_callback); assert_eq!(res, ErrorCode::Success); let res = close_wallet_receiver.recv_timeout(TimeoutUtils::medium_timeout()).unwrap(); assert_eq!(res, ErrorCode::Success); TestUtils::cleanup_storage(); } #[test] #[cfg(feature = "local_nodes_pool")] fn ledger_demo_works() { TestUtils::cleanup_storage(); let my_wallet_name = "my_wallet"; let their_wallet_name = "their_wallet"; let wallet_type = "default"; let pool_name = "pool_1"; let c_pool_name = CString::new(pool_name).unwrap(); let (open_receiver, open_command_handle, open_callback) = CallbackUtils::_closure_to_cb_ec_i32(); let (create_receiver, create_command_handle, create_callback) = CallbackUtils::_closure_to_cb_ec(); let (send_receiver, send_command_handle, send_callback) = CallbackUtils::_closure_to_cb_ec_string(); let (get_nym_receiver, get_nym_command_handle, get_nym_callback) = CallbackUtils::_closure_to_cb_ec_string(); let (create_my_wallet_receiver, create_my_wallet_command_handle, create_my_wallet_callback) = CallbackUtils::_closure_to_cb_ec(); let (create_their_wallet_receiver, create_their_wallet_command_handle, create_their_wallet_callback) = CallbackUtils::_closure_to_cb_ec(); let (open_my_wallet_receiver, open_my_wallet_command_handle, open_my_wallet_callback) = CallbackUtils::_closure_to_cb_ec_i32(); let (open_their_wallet_receiver, open_their_wallet_command_handle, open_their_wallet_callback) = CallbackUtils::_closure_to_cb_ec_i32(); let (create_and_store_my_did_receiver, create_and_store_my_did_command_handle, create_and_store_my_did_callback) = CallbackUtils::_closure_to_cb_ec_string_string(); let (create_and_store_their_did_receiver, create_and_store_their_did_command_handle, create_and_store_their_did_callback) = CallbackUtils::_closure_to_cb_ec_string_string(); let (store_their_did_receiver, store_their_did_command_handle, store_their_did_callback) = CallbackUtils::_closure_to_cb_ec(); let 
(close_pool_receiver, close_pool_command_handle, close_pool_callback) = CallbackUtils::_closure_to_cb_ec(); let (close_my_wallet_receiver, close_my_wallet_command_handle, close_my_wallet_callback) = CallbackUtils::_closure_to_cb_ec(); let (close_their_wallet_receiver, close_their_wallet_command_handle, close_their_wallet_callback) = CallbackUtils::_closure_to_cb_ec(); // 1. Create ledger config from genesis txn file let txn_file_path = PoolUtils::create_genesis_txn_file_for_test_pool(pool_name, None, None); let pool_config = PoolUtils::pool_config_json(txn_file_path.as_path()); let c_pool_config = CString::new(pool_config).unwrap(); let err = indy_create_pool_ledger_config(create_command_handle, c_pool_name.as_ptr(), c_pool_config.as_ptr(), create_callback); assert_eq!(err, ErrorCode::Success); let err = create_receiver.recv_timeout(TimeoutUtils::short_timeout()).unwrap(); assert_eq!(err, ErrorCode::Success); // 2. Open pool ledger let err = indy_open_pool_ledger(open_command_handle, c_pool_name.as_ptr(), null(), open_callback); assert_eq!(err, ErrorCode::Success); let (err, pool_handle) = open_receiver.recv_timeout(TimeoutUtils::short_timeout()).unwrap(); assert_eq!(err, ErrorCode::Success); thread::sleep(TimeoutUtils::short_timeout()); // 3. Create My Wallet let err = indy_create_wallet(create_my_wallet_command_handle, CString::new(pool_name).unwrap().as_ptr(), CString::new(my_wallet_name).unwrap().as_ptr(), CString::new(wallet_type).unwrap().as_ptr(), null(), null(), create_my_wallet_callback); assert_eq!(ErrorCode::Success, err); let err = create_my_wallet_receiver.recv_timeout(TimeoutUtils::long_timeout()).unwrap(); assert_eq!(ErrorCode::Success, err); // 4. Open My Wallet. 
Gets My wallet handle let err = indy_open_wallet(open_my_wallet_command_handle, CString::new(my_wallet_name).unwrap().as_ptr(), null(), null(), open_my_wallet_callback); assert_eq!(ErrorCode::Success, err); let (err, my_wallet_handle) = open_my_wallet_receiver.recv_timeout(TimeoutUtils::long_timeout()).unwrap(); assert_eq!(ErrorCode::Success, err); // 5. Create Their Wallet let err = indy_create_wallet(create_their_wallet_command_handle, CString::new(pool_name).unwrap().as_ptr(), CString::new(their_wallet_name).unwrap().as_ptr(), CString::new(wallet_type).unwrap().as_ptr(), null(), null(), create_their_wallet_callback); assert_eq!(ErrorCode::Success, err); let err = create_their_wallet_receiver.recv_timeout(TimeoutUtils::long_timeout()).unwrap(); assert_eq!(ErrorCode::Success, err); // 6. Open Their Wallet. Gets Their wallet handle let err = indy_open_wallet(open_their_wallet_command_handle, CString::new(their_wallet_name).unwrap().as_ptr(), null(), null(), open_their_wallet_callback); assert_eq!(ErrorCode::Success, err); let (err, their_wallet_handle) = open_their_wallet_receiver.recv_timeout(TimeoutUtils::long_timeout()).unwrap(); assert_eq!(ErrorCode::Success, err); // 7. Create My DID let my_did_json = "{}"; let err = indy_create_and_store_my_did(create_and_store_my_did_command_handle, my_wallet_handle, CString::new(my_did_json).unwrap().as_ptr(), create_and_store_my_did_callback); assert_eq!(ErrorCode::Success, err); let (err, my_did, my_verkey) = create_and_store_my_did_receiver.recv_timeout(TimeoutUtils::long_timeout()).unwrap(); info!("did {:?}", my_did); info!("verkey {:?}", my_verkey); assert_eq!(ErrorCode::Success, err); // 8. 
Create Their DID from Trustee1 seed let their_did_json = r#"{"seed":"000000000000000000000000Trustee1"}"#; let err = indy_create_and_store_my_did(create_and_store_their_did_command_handle, their_wallet_handle, CString::new(their_did_json).unwrap().as_ptr(), create_and_store_their_did_callback); assert_eq!(ErrorCode::Success, err); let (err, their_did, their_verkey) = create_and_store_their_did_receiver.recv_timeout(TimeoutUtils::long_timeout()).unwrap(); info!("their_did {:?}", their_did); info!("their_verkey {:?}", their_verkey); assert_eq!(ErrorCode::Success, err); // 9. Store Their DID let their_identity_json = format!(r#"{{"did":"{}", "verkey":"{}" }}"#, their_did, their_verkey); let err = indy_store_their_did(store_their_did_command_handle, my_wallet_handle, CString::new(their_identity_json).unwrap().as_ptr(), store_their_did_callback); assert_eq!(ErrorCode::Success, err); let err = store_their_did_receiver.recv_timeout(TimeoutUtils::long_timeout()).unwrap(); assert_eq!(ErrorCode::Success, err); // 10. Prepare NYM transaction let nym_req_id = PoolUtils::get_req_id(); let nym_txn_req = Request { identifier: their_did.clone(), operation: Operation { dest: my_did.clone(), type_: "1".to_string(), }, protocol_version: 1, req_id: nym_req_id, signature: None, }; // 11. 
Send NYM request with signing let msg = serde_json::to_string(&nym_txn_req).unwrap(); let req = CString::new(msg).unwrap(); let did_for_sign = CString::new(their_did).unwrap(); let err = indy_sign_and_submit_request(send_command_handle, pool_handle, their_wallet_handle, did_for_sign.as_ptr(), req.as_ptr(), send_callback); assert_eq!(err, ErrorCode::Success); let (err, resp) = send_receiver.recv_timeout(TimeoutUtils::medium_timeout()).unwrap(); assert_eq!(err, ErrorCode::Success); let nym_resp: Reply = serde_json::from_str(&resp).unwrap(); info!("nym_resp_raw : {:?}", resp); info!("nym_resp : {:?}", nym_resp); // pause for syncronization of all nodes in the ledger ::std::thread::sleep(TimeoutUtils::short_timeout()); // 12. Prepare and send GET_NYM request let get_nym_req_id = PoolUtils::get_req_id(); let get_nym_txn = Request { req_id: get_nym_req_id, signature: None, identifier: my_verkey.clone(), operation: Operation { type_: "105".to_string(), dest: my_did.clone(), }, protocol_version: 1, }; let request = serde_json::to_string(&get_nym_txn).unwrap(); let req = CString::new(request).unwrap(); let err = indy_submit_request(get_nym_command_handle, pool_handle, req.as_ptr(), get_nym_callback); assert_eq!(err, ErrorCode::Success); let (err, resp) = get_nym_receiver.recv_timeout(TimeoutUtils::medium_timeout()).unwrap(); assert_eq!(err, ErrorCode::Success); let get_nym_resp: Reply = serde_json::from_str(&resp).unwrap(); let get_nym_resp_data: ReplyResultData = serde_json::from_str(&get_nym_resp.result.data.as_ref().unwrap()).unwrap(); info!("get_nym_resp {:?}\n{:?}\n{:?}", resp, get_nym_resp, get_nym_resp_data); assert_eq!(get_nym_resp_data.dest, my_did); // 13. Close pool let res = indy_close_pool_ledger(close_pool_command_handle, pool_handle, close_pool_callback); assert_eq!(res, ErrorCode::Success); let res = close_pool_receiver.recv_timeout(TimeoutUtils::medium_timeout()).unwrap(); assert_eq!(res, ErrorCode::Success); // 14. 
Close my wallet let res = indy_close_wallet(close_my_wallet_command_handle, my_wallet_handle, close_my_wallet_callback); assert_eq!(res, ErrorCode::Success); let res = close_my_wallet_receiver.recv_timeout(TimeoutUtils::medium_timeout()).unwrap(); assert_eq!(res, ErrorCode::Success); // 15. Close their wallet let res = indy_close_wallet(close_their_wallet_command_handle, their_wallet_handle, close_their_wallet_callback); assert_eq!(res, ErrorCode::Success); let res = close_their_wallet_receiver.recv_timeout(TimeoutUtils::medium_timeout()).unwrap(); assert_eq!(res, ErrorCode::Success); TestUtils::cleanup_storage(); #[derive(Serialize, Eq, PartialEq, Debug)] #[serde(rename_all = "camelCase")] struct Request { req_id: u64, identifier: String, operation: Operation, protocol_version: u64, #[serde(skip_serializing_if = "Option::is_none")] signature: Option<String>, } #[derive(Serialize, Eq, PartialEq, Debug)] struct Operation { #[serde(rename = "type")] type_: String, dest: String, } #[derive(Deserialize, Eq, PartialEq, Debug)] struct Reply { op: String, result: ReplyResult, } #[derive(Deserialize, Eq, PartialEq, Debug)] #[serde(rename_all = "camelCase")] struct ReplyResult { identifier: String, req_id: u64, data: Option<String>, } #[derive(Deserialize, Eq, PartialEq, Debug)] #[serde(rename_all = "camelCase")] struct ReplyResultData { dest: String, identifier: String, role: Option<String>, } } #[test] fn crypto_demo_works() { TestUtils::cleanup_storage(); let (create_wallet_receiver, create_wallet_command_handle, create_wallet_callback) = CallbackUtils::_closure_to_cb_ec(); let (open_wallet_receiver, open_wallet_command_handle, open_wallet_callback) = CallbackUtils::_closure_to_cb_ec_i32(); let (create_and_store_did_receiver, create_and_store_did_command_handle, create_and_store_did_callback) = CallbackUtils::_closure_to_cb_ec_string_string(); let (sign_receiver, sign_command_handle, sign_callback) = CallbackUtils::_closure_to_cb_ec_vec_u8(); let (verify_receiver, 
verify_command_handle, verify_callback) = CallbackUtils::_closure_to_cb_ec_bool(); let (close_wallet_receiver, close_wallet_command_handle, close_wallet_callback) = CallbackUtils::_closure_to_cb_ec(); let pool_name = "pool_1"; let wallet_name = "wallet_1"; let xtype = "default"; // 1. Create Wallet let err = indy_create_wallet(create_wallet_command_handle, CString::new(pool_name).unwrap().as_ptr(), CString::new(wallet_name).unwrap().as_ptr(), CString::new(xtype).unwrap().as_ptr(), null(), null(), create_wallet_callback); assert_eq!(ErrorCode::Success, err); let err = create_wallet_receiver.recv_timeout(TimeoutUtils::long_timeout()).unwrap(); assert_eq!(ErrorCode::Success, err); // 2. Open Wallet. Gets wallet handle let err = indy_open_wallet(open_wallet_command_handle, CString::new(wallet_name).unwrap().as_ptr(), null(), null(), open_wallet_callback); assert_eq!(ErrorCode::Success, err); let (err, wallet_handle) = open_wallet_receiver.recv_timeout(TimeoutUtils::long_timeout()).unwrap(); assert_eq!(ErrorCode::Success, err); // 3. Create DID let did_json = "{}"; let err = indy_create_and_store_my_did(create_and_store_did_command_handle, wallet_handle, CString::new(did_json).unwrap().as_ptr(), create_and_store_did_callback); assert_eq!(ErrorCode::Success, err); let (err, _, verkey) = create_and_store_did_receiver.recv_timeout(TimeoutUtils::long_timeout()).unwrap(); assert_eq!(ErrorCode::Success, err); // 4. 
Sign message let message = r#"{ "reqId":1495034346617224651, "identifier":"GJ1SzoWzavQYfNL9XkaJdrQejfztN4XqdsiV4ct3LXKL", "operation":{ "type":"1", "dest":"4efZu2SXufS556yss7W5k6Po37jt4371RM4whbPKBKdB" } }"#; let message_ptr = message.as_ptr() as *const u8; let message_len = message.len() as u32; let err = indy_crypto_sign(sign_command_handle, wallet_handle, CString::new(verkey.clone()).unwrap().as_ptr(), message_ptr, message_len, sign_callback); assert_eq!(ErrorCode::Success, err); let (err, signature) = sign_receiver.recv_timeout(TimeoutUtils::long_timeout()).unwrap(); assert_eq!(ErrorCode::Success, err); // 5. Verify message let err = indy_crypto_verify(verify_command_handle, CString::new(verkey).unwrap().as_ptr(), message_ptr, message_len, signature.as_ptr() as *const u8, signature.len() as u32, verify_callback); assert_eq!(ErrorCode::Success, err); let (err, valid) = verify_receiver.recv_timeout(TimeoutUtils::long_timeout()).unwrap(); assert!(valid); assert_eq!(ErrorCode::Success, err); // 6. Close Wallet let res = indy_close_wallet(close_wallet_command_handle, wallet_handle, close_wallet_callback); assert_eq!(res, ErrorCode::Success); let res = close_wallet_receiver.recv_timeout(TimeoutUtils::medium_timeout()).unwrap(); assert_eq!(res, ErrorCode::Success); TestUtils::cleanup_storage(); }
48.839378
204
0.609803
e9b4fa409069d99a8d5d71e15f78063d5d25f147
3,910
use crate::ops::prelude::*; use ndarray::prelude::*; use super::{DataFormat, PaddingSpec, Patch}; #[derive(Debug, Clone, new, Default)] pub struct MaxPool { data_fmt: DataFormat, kernel_shape: TVec<usize>, padding: PaddingSpec, strides: Option<TVec<usize>>, with_index_outputs: Option<DatumType>, } impl MaxPool { fn patch(&self, input_full_shape: &[usize]) -> Patch { let hw_rank = self.data_fmt.shape(input_full_shape).hw_rank(); Patch::new( self.data_fmt, tvec![1; hw_rank], self.kernel_shape.clone(), &self.padding, self.strides.clone().unwrap_or_else(|| tvec![1; hw_rank]), input_full_shape.into(), ) } } impl Op for MaxPool { fn name(&self) -> Cow<str> { "MaxPool".into() } fn noutputs(&self) -> usize { if self.with_index_outputs.is_some() { 2 } else { 1 } } } impl StatelessOp for MaxPool { fn eval(&self, mut inputs: TVec<SharedTensor>) -> TractResult<TVec<SharedTensor>> { let input = args_1!(inputs); let input: ArrayViewD<f32> = input.to_array_view()?; let patch = self.patch(input.shape()); let shape: TVec<usize> = patch.output_full_shape(patch.input_shape.c_dim()); let visitor = patch.wrap(&input); let mut values = unsafe { ArrayD::uninitialized(&*shape) }; let mut indices = if self.with_index_outputs.is_some() { Some(unsafe { ArrayD::uninitialized(&*shape) }) } else { None }; ::ndarray::indices(&*shape).into_iter().for_each(|coords| { let max = visitor .at(&coords.slice()) .enumerate() .filter_map(|(ix, v)| v.map(|v| (ix, v))) .fold( (0, ::std::f32::MIN), |acc, v| if acc.1 < v.1 { v } else { acc }, ); values[&coords] = max.1; if self.with_index_outputs.is_some() { indices.as_mut().unwrap()[coords] = visitor.global_offset_for(&coords.slice(), max.0) as i32; } }); if let Some(dt) = self.with_index_outputs { Ok(tvec!( values.into(), Tensor::from(indices.unwrap()) .cast_to_dt(dt)? 
.into_owned() .into_tensor() )) } else { Ok(tvec!(values.into())) } } } impl InferenceRulesOp for MaxPool { fn rules<'r, 'p: 'r, 's: 'r>( &'s self, s: &mut Solver<'r>, inputs: &'p [TensorProxy], outputs: &'p [TensorProxy], ) -> InferenceResult { check_output_arity(&outputs, self.noutputs())?; s.equals(&outputs[0].datum_type, &inputs[0].datum_type)?; s.equals(&outputs[0].rank, &inputs[0].rank)?; if let Some(idt) = self.with_index_outputs { s.equals(&outputs[1].datum_type, idt)?; s.equals(&outputs[1].rank, &inputs[0].rank)?; } s.given(&inputs[0].shape, move |s, ishape| { let ishape = self.data_fmt.shape(ishape); let ones = tvec![1; ishape.hw_rank()]; let computed = self.padding.compute( ishape.hw_dims(), &*self.kernel_shape, &ones, self.strides.as_ref().unwrap_or(&ones), ); for o in 0..self.noutputs() { for (ix, &d) in computed.output.iter().enumerate() { s.equals(&outputs[o].shape[ix + ishape.h_axis()], d)?; } s.equals(&outputs[o].shape[ishape.n_axis()], ishape.n_dim())?; s.equals(&outputs[o].shape[ishape.c_axis()], ishape.c_dim())?; } Ok(()) }) } }
32.31405
87
0.503836
905b57fe2f64aedd237024cfb8d5a99e2009889c
2,272
//! Tests related to HTTP semantics (e.g. framing headers, status codes). mod common; use { common::{Test, TestResult}, hyper::{header, Request, Response, StatusCode}, }; #[tokio::test(flavor = "multi_thread")] async fn framing_headers_are_overridden() -> TestResult { // Set up the test harness let test = Test::using_fixture("bad-framing-headers.wasm") // The "TheOrigin" backend checks framing headers on the request and then echos its body. .backend("TheOrigin", "http://127.0.0.1:9000/", None) .host(9000, |req| { assert!(!req.headers().contains_key(header::TRANSFER_ENCODING)); assert_eq!( req.headers().get(header::CONTENT_LENGTH), Some(&hyper::header::HeaderValue::from(9)) ); Response::new(Vec::from(&b"salutations"[..])) }); let resp = test .via_hyper() .against( Request::post("http://127.0.0.1:7878") .body("greetings") .unwrap(), ) .await; assert_eq!(resp.status(), StatusCode::OK); assert!(!resp.headers().contains_key(header::TRANSFER_ENCODING)); assert_eq!( resp.headers().get(header::CONTENT_LENGTH), Some(&hyper::header::HeaderValue::from(11)) ); Ok(()) } #[tokio::test(flavor = "multi_thread")] async fn content_length_is_computed_correctly() -> TestResult { // Set up the test harness let test = Test::using_fixture("content-length.wasm") // The "TheOrigin" backend supplies a fixed-size body. .backend("TheOrigin", "http://127.0.0.1:9000/", None) .host(9000, |_| { Response::new(Vec::from(&b"ABCDEFGHIJKLMNOPQRST"[..])) }); let resp = test .via_hyper() .against(Request::get("http://127.0.0.1:7878").body("").unwrap()) .await; assert_eq!(resp.status(), StatusCode::OK); assert!(!resp.headers().contains_key(header::TRANSFER_ENCODING)); assert_eq!( resp.headers().get(header::CONTENT_LENGTH), Some(&hyper::header::HeaderValue::from(28)) ); let resp_body = resp.into_body().read_into_string().await.unwrap(); assert_eq!(resp_body, "ABCD12345xyzEFGHIJKLMNOPQRST"); Ok(()) }
31.555556
97
0.59507
33f7663cffc01a48aa487922546d9bfe4a691ea8
402
use nu_test_support::{nu, pipeline}; #[test] fn count_columns_in_cal_table() { let actual = nu!( cwd: ".", pipeline( r#" cal | count -c "# )); assert_eq!(actual.out, "7"); } #[test] fn count_columns_no_rows() { let actual = nu!( cwd: ".", pipeline( r#" echo [] | count -c "# )); assert_eq!(actual.out, "0"); }
15.461538
36
0.472637
2306e6ea51f5ec593b3ed6cb861b28f8189debc6
1,352
use std::cmp::Ordering; macro_rules! read_line { ($v:ident) => { let mut temp = String::new(); std::io::stdin().read_line(&mut temp).unwrap(); let $v = temp; }; (var, $t:ty, $($v:ident), *) => { read_line!(input_line); let mut iter = parse_token!($t, input_line); $( let $v = iter.next().unwrap(); )* }; (vec, $t:ty, $v:ident) => { read_line!(input_line); let iter = parse_token!($t, input_line); let $v: Vec<$t> = iter.collect(); }; ($($v:ident; $t:ty), *) => { read_line!(input_line); let mut iter = input_line.split_whitespace(); $( let $v: $t = iter.next().unwrap().parse().unwrap(); )* }; } macro_rules! parse_token { ($t:ty, $e:expr) => { $e.split_whitespace().map(|x| x.parse::<$t>().unwrap()); }; } fn main() { read_line!(input_line); read_line!(input_line); let sum = input_line.trim().chars().fold((0, 0), |acc, x| { match x { 'A' => (acc.0 + 1, acc.1), 'D' => (acc.0, acc.1 + 1), _ => unreachable!() } }); match sum.0.cmp(&sum.1) { Ordering::Greater => println!("Anton"), Ordering::Equal => println!("Friendship"), Ordering::Less => println!("Danik") } }
26.509804
64
0.468935
e6e32a08a5df05b7d2887966e5f9e1f16a746335
1,561
use crate::color::Color; use crate::constants::black; use crate::constants::white; use crate::pattern::pattern::BasePattern; use crate::pattern::pattern::Pattern; use crate::tuple::Tuple; #[derive(Clone, Debug, PartialEq)] pub struct Rings { base: BasePattern, a: Color, b: Color, } impl Rings { pub fn new(a: Color, b: Color) -> Rings { Rings { base: BasePattern::new(), a, b, } } } impl Default for Rings { fn default() -> Self { Self::new(white(), black()) } } impl Pattern for Rings { fn get_base(&self) -> &BasePattern { &self.base } fn get_base_mut(&mut self) -> &mut BasePattern { &mut self.base } fn color_at_world(&self, world_point: Tuple) -> Color { // TODO: is any kind of overflow possible here? if (world_point.x.powi(2) + world_point.z.powi(2)) .sqrt() .floor() as i32 % 2 == 0 { self.a } else { self.b } } } #[cfg(test)] mod tests { use super::*; #[test] fn rings_extend_in_both_x_and_z() { let pattern = Rings::new(white(), black()); assert_eq!(pattern.color_at_world(point!(0, 0, 0)), white()); assert_eq!(pattern.color_at_world(point!(1, 0, 0)), black()); assert_eq!(pattern.color_at_world(point!(0, 0, 1)), black()); // 0.708 = just slightly more than √2/2 assert_eq!(pattern.color_at_world(point!(0.708, 0, 0.708)), black()); } }
23.298507
77
0.54132
d51237c454007fe3d501a93d56f2873b9fa29e59
225
//! Challenge for the "Bearer" HTTP Authentication Scheme mod builder; mod challenge; mod errors; pub use self::builder::BearerBuilder; pub use self::challenge::Bearer; pub use self::errors::Error; #[cfg(test)] mod tests;
17.307692
57
0.742222
bf94b30e52435f9cf90c4f62ad8fabae2c38b437
19,016
// Copyright (c) Facebook, Inc. and its affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. use std::{cmp::Ordering, ops::Range, result::Result::*}; use bumpalo::Bump; use serde::Serialize; use ocamlrep_derive::{FromOcamlRepIn, ToOcamlRep}; use oxidized::file_pos::FilePos; use oxidized::file_pos_large::FilePosLarge; use oxidized::file_pos_small::FilePosSmall; use oxidized::pos_span_raw::PosSpanRaw; use oxidized::pos_span_tiny::PosSpanTiny; use crate::relative_path::RelativePath; #[derive(Clone, Hash, Serialize, ToOcamlRep, FromOcamlRepIn)] enum PosImpl<'a> { Small { file: &'a RelativePath<'a>, start: FilePosSmall, end: FilePosSmall, }, Large { file: &'a RelativePath<'a>, start: &'a FilePosLarge, end: &'a FilePosLarge, }, Tiny { file: &'a RelativePath<'a>, span: PosSpanTiny, }, } impl arena_trait::TrivialDrop for PosImpl<'_> {} use PosImpl::*; #[derive(Clone, Hash, Serialize, ToOcamlRep, FromOcamlRepIn)] pub struct Pos<'a>(PosImpl<'a>); impl arena_trait::TrivialDrop for Pos<'_> {} const NONE: Pos<'_> = Pos(Tiny { file: RelativePath::empty(), span: PosSpanTiny::make_dummy(), }); impl<'a> Pos<'a> { pub const fn none() -> &'static Pos<'static> { &NONE } pub fn is_none(&self) -> bool { match self { Pos(PosImpl::Tiny { file, span }) => span.is_dummy() && file.is_empty(), _ => false, } } fn from_raw_span(b: &'a Bump, file: &'a RelativePath<'a>, span: PosSpanRaw) -> &'a Self { if let Some(span) = PosSpanTiny::make(&span.start, &span.end) { return b.alloc(Pos(Tiny { file, span })); } let (lnum, bol, cnum) = span.start.line_beg_offset(); if let Some(start) = FilePosSmall::from_lnum_bol_cnum(lnum, bol, cnum) { let (lnum, bol, cnum) = span.end.line_beg_offset(); if let Some(end) = FilePosSmall::from_lnum_bol_cnum(lnum, bol, cnum) { return b.alloc(Pos(Small { file, start, end })); } } b.alloc(Pos(Large { file, start: b.alloc(span.start), end: b.alloc(span.end), })) } fn to_raw_span(&self) 
-> PosSpanRaw { match &self.0 { Tiny { span, .. } => span.to_raw_span(), &Small { start, end, .. } => PosSpanRaw { start: start.into(), end: end.into(), }, Large { start, end, .. } => PosSpanRaw { start: **start, end: **end, }, } } pub fn filename(&self) -> &'a RelativePath<'a> { match &self.0 { Small { file, .. } | Large { file, .. } | Tiny { file, .. } => &file, } } /// Returns a closed interval that's incorrect for multi-line spans. pub fn info_pos(&self) -> (usize, usize, usize) { fn compute<P: FilePos>(pos_start: P, pos_end: P) -> (usize, usize, usize) { let (line, start_minus1, bol) = pos_start.line_column_beg(); let start = start_minus1.wrapping_add(1); let end_offset = pos_end.offset(); let mut end = end_offset - bol; // To represent the empty interval, pos_start and pos_end are equal because // end_offset is exclusive. Here, it's best for error messages to the user if // we print characters N to N (highlighting a single character) rather than characters // N to (N-1), which is very unintuitive. if start_minus1 == end { end = start } (line, start, end) } match self.0 { Small { start, end, .. } => compute(start, end), Large { start, end, .. } => compute(*start, *end), Tiny { span, .. } => { let PosSpanRaw { start, end } = span.to_raw_span(); compute(start, end) } } } pub fn info_pos_extended(&self) -> (usize, usize, usize, usize) { let (line_begin, start, end) = self.info_pos(); let line_end = match self.0 { Small { end, .. } => end.line_column_beg(), Large { end, .. } => end.line_column_beg(), Tiny { span, .. } => span.to_raw_span().end.line_column_beg(), } .0; (line_begin, line_end, start, end) } pub fn info_raw(&self) -> (usize, usize) { (self.start_cnum(), self.end_cnum()) } pub fn line(&self) -> usize { match self.0 { Small { start, .. } => start.line(), Large { start, .. } => start.line(), Tiny { span, .. 
} => span.start_line(), } } pub fn from_lnum_bol_cnum( b: &'a Bump, file: &'a RelativePath<'a>, start: (usize, usize, usize), end: (usize, usize, usize), ) -> &'a Self { let (start_line, start_bol, start_cnum) = start; let (end_line, end_bol, end_cnum) = end; let start = FilePosLarge::from_lnum_bol_cnum(start_line, start_bol, start_cnum); let end = FilePosLarge::from_lnum_bol_cnum(end_line, end_bol, end_cnum); Self::from_raw_span(b, file, PosSpanRaw { start, end }) } pub fn to_start_and_end_lnum_bol_cnum(&self) -> ((usize, usize, usize), (usize, usize, usize)) { match &self.0 { Small { start, end, .. } => (start.line_beg_offset(), end.line_beg_offset()), Large { start, end, .. } => (start.line_beg_offset(), end.line_beg_offset()), Tiny { span, .. } => { let PosSpanRaw { start, end } = span.to_raw_span(); (start.line_beg_offset(), end.line_beg_offset()) } } } /// For single-line spans only. pub fn from_line_cols_offset( b: &'a Bump, file: &'a RelativePath<'a>, line: usize, cols: Range<usize>, start_offset: usize, ) -> &'a Self { let start = FilePosLarge::from_line_column_offset(line, cols.start, start_offset); let end = FilePosLarge::from_line_column_offset( line, cols.end, start_offset + (cols.end - cols.start), ); Self::from_raw_span(b, file, PosSpanRaw { start, end }) } pub fn btw_nocheck(b: &'a Bump, x1: &'a Self, x2: &'a Self) -> &'a Self { let start = x1.to_raw_span().start; let end = x2.to_raw_span().end; Self::from_raw_span(b, x1.filename(), PosSpanRaw { start, end }) } pub fn btw(b: &'a Bump, x1: &'a Self, x2: &'a Self) -> Result<&'a Self, String> { if x1.filename() != x2.filename() { // using string concatenation instead of format!, // it is not stable see T52404885 Err(String::from("Position in separate files ") + &x1.filename().to_string() + " and " + &x2.filename().to_string()) } else if x1.end_cnum() > x2.end_cnum() { Err(String::from("btw: invalid positions") + &x1.end_cnum().to_string() + "and" + &x2.end_cnum().to_string()) } else { 
Ok(Self::btw_nocheck(b, x1, x2)) } } pub fn merge(b: &'a Bump, x1: &'a Self, x2: &'a Self) -> Result<&'a Self, String> { if x1.filename() != x2.filename() { // see comment above (T52404885) return Err(String::from("Position in separate files ") + &x1.filename().to_string() + " and " + &x2.filename().to_string()); } let span1 = x1.to_raw_span(); let span2 = x2.to_raw_span(); let start = if span1.start.is_dummy() { span2.start } else if span2.start.is_dummy() { span1.start } else if span1.start.offset() < span2.start.offset() { span1.start } else { span2.start }; let end = if span1.end.is_dummy() { span2.end } else if span2.end.is_dummy() { span1.end } else if span1.end.offset() < span2.end.offset() { span2.end } else { span1.end }; Ok(Self::from_raw_span( b, x1.filename(), PosSpanRaw { start, end }, )) } pub fn last_char(&'a self, b: &'a Bump) -> &'a Self { if self.is_none() { self } else { let end = self.to_raw_span().end; Self::from_raw_span(b, self.filename(), PosSpanRaw { start: end, end }) } } pub fn first_char_of_line(&'a self, b: &'a Bump) -> &'a Self { if self.is_none() { self } else { let start = self.to_raw_span().start.with_column(0); Self::from_raw_span(b, self.filename(), PosSpanRaw { start, end: start }) } } pub fn end_cnum(&self) -> usize { match &self.0 { Small { end, .. } => end.offset(), Large { end, .. } => end.offset(), Tiny { span, .. } => span.end_character_number(), } } pub fn start_cnum(&self) -> usize { match &self.0 { Small { start, .. } => start.offset(), Large { start, .. } => start.offset(), Tiny { span, .. 
} => span.start_character_number(), } } pub fn to_owned(&self) -> oxidized::pos::Pos { let file = self.filename(); let PosSpanRaw { start, end } = self.to_raw_span(); oxidized::pos::Pos::from_lnum_bol_cnum( ocamlrep::rc::RcOc::new(file.to_oxidized()), start.line_beg_offset(), end.line_beg_offset(), ) } } impl<'a> Pos<'a> { pub fn from_oxidized_in(pos: &oxidized::pos::Pos, arena: &'a Bump) -> &'a Self { let file = RelativePath::from_oxidized_in(pos.filename(), arena); let (start, end) = pos.to_start_and_end_lnum_bol_cnum(); Self::from_lnum_bol_cnum(arena, file, start, end) } pub fn from_oxidized_with_file_in( pos: &oxidized::pos::Pos, file: &'a RelativePath<'a>, arena: &'a Bump, ) -> &'a Self { debug_assert!(pos.filename().prefix() == file.prefix()); debug_assert!(pos.filename().path() == file.path()); let (start, end) = pos.to_start_and_end_lnum_bol_cnum(); Self::from_lnum_bol_cnum(arena, file, start, end) } } impl std::fmt::Debug for Pos<'_> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { fn do_fmt<P: FilePos>( f: &mut std::fmt::Formatter<'_>, file: &RelativePath, start: &P, end: &P, ) -> std::fmt::Result { let (start_line, start_col, _) = start.line_column_beg(); let (end_line, end_col, _) = end.line_column_beg(); // Use a format string rather than Formatter::debug_tuple to prevent // adding line breaks. Positions occur very frequently in ASTs and // types, so the Debug implementation of those data structures is // more readable if we minimize the vertical space taken up by // positions. Depends upon RelativePath's implementation of Display // also being single-line. 
if start_line == end_line { write!( f, "Pos({}, {}:{}-{})", &file, &start_line, &start_col, &end_col, ) } else { write!( f, "Pos({}, {}:{}-{}:{})", &file, &start_line, &start_col, &end_line, &end_col, ) } } match &self.0 { Small { file, start, end } => do_fmt(f, file, start, end), Large { file, start, end } => do_fmt(f, file, *start, *end), Tiny { file, span } => { let PosSpanRaw { start, end } = span.to_raw_span(); do_fmt(f, file, &start, &end) } } } } impl std::fmt::Display for Pos<'_> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { fn do_fmt<P: FilePos>( f: &mut std::fmt::Formatter<'_>, file: &RelativePath, start: P, end: P, ) -> std::fmt::Result { write!(f, "{}", file)?; let (start_line, start_col, _) = start.line_column_beg(); let (end_line, end_col, _) = end.line_column_beg(); if start_line == end_line { write!(f, "({}:{}-{})", start_line, start_col, end_col) } else { write!(f, "({}:{}-{}:{})", start_line, start_col, end_line, end_col) } } match self.0 { Small { file, start, end } => do_fmt(f, file, start, end), Large { file, start, end } => do_fmt(f, file, *start, *end), Tiny { file, span } => { let PosSpanRaw { start, end } = span.to_raw_span(); do_fmt(f, file, start, end) } } } } impl Ord for Pos<'_> { // Intended to match the implementation of `Pos.compare` in OCaml. fn cmp(&self, other: &Pos) -> Ordering { self.filename() .cmp(&other.filename()) .then(self.start_cnum().cmp(&other.start_cnum())) .then(self.end_cnum().cmp(&other.end_cnum())) } } impl PartialOrd for Pos<'_> { fn partial_cmp(&self, other: &Pos) -> Option<Ordering> { Some(self.cmp(other)) } } impl PartialEq for Pos<'_> { fn eq(&self, other: &Self) -> bool { self.cmp(other) == Ordering::Equal } } impl Eq for Pos<'_> {} impl<'a> Pos<'a> { /// Returns a struct implementing Display which produces the same format as /// `Pos.string` in OCaml. 
pub fn string(&self) -> PosString<'_> { PosString(self) } } /// This struct has an impl of Display which produces the same format as /// `Pos.string` in OCaml. pub struct PosString<'a>(&'a Pos<'a>); impl std::fmt::Display for PosString<'_> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let (line, start, end) = self.0.info_pos(); write!( f, "File {:?}, line {}, characters {}-{}:", self.0.filename().path(), line, start, end ) } } // NoPosHash is meant to be position-insensitive, so don't do anything! impl no_pos_hash::NoPosHash for Pos<'_> { fn hash<H: std::hash::Hasher>(&self, _state: &mut H) {} } #[cfg(test)] mod tests { use super::*; use crate::relative_path::Prefix; use pretty_assertions::assert_eq; fn make_pos<'a>( b: &'a Bump, name: &'a RelativePath<'a>, start: (usize, usize, usize), end: (usize, usize, usize), ) -> &'a Pos<'a> { b.alloc(Pos::from_lnum_bol_cnum(b, name, start, end)) } #[test] fn test_pos_is_none() { let b = Bump::new(); assert_eq!(Pos::none().is_none(), true); let path_a = b.alloc(RelativePath::make(Prefix::Dummy, "a")); assert_eq!( Pos::from_lnum_bol_cnum(&b, path_a, (0, 0, 0), (0, 0, 0)).is_none(), false ); let empty_path = b.alloc(RelativePath::make(Prefix::Dummy, "")); assert_eq!( Pos::from_lnum_bol_cnum(&b, empty_path, (1, 0, 0), (0, 0, 0)).is_none(), false ); } #[test] fn test_pos_string() { assert_eq!( Pos::none().string().to_string(), r#"File "", line 0, characters 0-0:"# ); let b = Bump::new(); let path = b.alloc(RelativePath::make(Prefix::Dummy, "a.php")); assert_eq!( Pos::from_lnum_bol_cnum(&b, path, (5, 100, 117), (5, 100, 142)) .string() .to_string(), r#"File "a.php", line 5, characters 18-42:"# ); } #[test] fn test_pos_merge() { let b = Bump::new(); let test = |name, (exp_start, exp_end), ((fst_start, fst_end), (snd_start, snd_end))| { let path = b.alloc(RelativePath::make(Prefix::Dummy, "a")); assert_eq!( Ok(make_pos(&b, path, exp_start, exp_end)), Pos::merge( &b, make_pos(&b, path, fst_start, fst_end), 
make_pos(&b, path, snd_start, snd_end) ), "{}", name ); // Run this again because we want to test that we get the same // result regardless of order. assert_eq!( Ok(make_pos(&b, path, exp_start, exp_end)), Pos::merge( &b, make_pos(&b, path, snd_start, snd_end), make_pos(&b, path, fst_start, fst_end), ), "{} (reversed)", name ); }; test( "basic test", ((0, 0, 0), (0, 0, 5)), (((0, 0, 0), (0, 0, 2)), ((0, 0, 2), (0, 0, 5))), ); test( "merge should work with gaps", ((0, 0, 0), (0, 0, 15)), (((0, 0, 0), (0, 0, 5)), ((0, 0, 10), (0, 0, 15))), ); test( "merge should work with overlaps", ((0, 0, 0), (0, 0, 15)), (((0, 0, 0), (0, 0, 12)), ((0, 0, 7), (0, 0, 15))), ); test( "merge should work between lines", ((0, 0, 0), (2, 20, 25)), (((0, 0, 0), (1, 10, 15)), ((1, 10, 20), (2, 20, 25))), ); assert_eq!( Err("Position in separate files |a and |b".to_string()), Pos::merge( &b, make_pos( &b, &RelativePath::make(Prefix::Dummy, "a"), (0, 0, 0), (0, 0, 0) ), make_pos( &b, &RelativePath::make(Prefix::Dummy, "b"), (0, 0, 0), (0, 0, 0) ) ), "should reject merges with different filenames" ); } #[test] fn position_insensitive_hash() { use crate::ast_defs::Id; let b = &bumpalo::Bump::new(); let hash = no_pos_hash::position_insensitive_hash; let none = Pos::none(); let pos = Pos::from_line_cols_offset(b, RelativePath::empty(), 2, 2..10, 17); assert_eq!(hash(&Id(pos, "foo")), hash(&Id(pos, "foo"))); assert_eq!(hash(&Id(none, "foo")), hash(&Id(pos, "foo"))); assert_ne!(hash(&Id(pos, "foo")), hash(&Id(pos, "bar"))); } }
32.786207
100
0.49532
ef3985191f4b988a8cde8dfc306b54e25745af11
1,028
#[allow(dead_code)] #[deny(trivial_casts)] mod policykit_client; use dbus::arg::{RefArg, Variant}; use policykit_client::OrgFreedesktopPolicyKit1AuthorityProperties; use std::collections::HashMap; #[test] fn test_parse_properties() { let mut properties: HashMap<String, Variant<Box<dyn RefArg>>> = HashMap::new(); properties.insert("BackendFeatures".to_string(), Variant(Box::new(42u32))); properties.insert( "BackendName".to_string(), Variant(Box::new("name".to_string())), ); let mut interfaces = HashMap::new(); interfaces.insert( "org.freedesktop.PolicyKit1.Authority".to_string(), properties, ); let authority_properties = OrgFreedesktopPolicyKit1AuthorityProperties::from_interfaces(&interfaces).unwrap(); assert_eq!(authority_properties.backend_features(), Some(42)); assert_eq!( authority_properties.backend_name().cloned(), Some("name".to_string()) ); assert_eq!(authority_properties.backend_version(), None); }
32.125
91
0.698444
ffa77722e1e8ae933ef8e1978847099fbb3afc11
3,251
use derive_more::Constructor; use crate::{ client::{Api, Client, FApi, SApi}, error::Result, models::*, }; #[derive(Clone, Constructor, Debug)] pub struct Market<A: Api + MarketApi> { client: Client<A>, } impl<A> Market<A> where A: Api + MarketApi, { pub async fn agg_trades<S>( &self, req: AggTradesRequest<S>, ) -> Result<Vec<AggTradesRecord>, A::ErrorCode> where S: AsRef<str>, { self.client.get(A::agg_trades(), req).await } pub async fn klines<S>(&self, req: KlinesRequest<S>) -> Result<Vec<KlinesRecord>, A::ErrorCode> where S: AsRef<str>, { self.client.get(A::klines(), req).await } pub async fn order_book<S>(&self, req: OrderBookRequest<S>) -> Result<OrderBook, A::ErrorCode> where S: AsRef<str>, { self.client.get(A::order_book(), req).await } } pub trait MarketApi { fn agg_trades() -> &'static str; fn klines() -> &'static str; fn order_book() -> &'static str; } impl MarketApi for FApi { fn agg_trades() -> &'static str { "/fapi/v1/aggTrades" } fn klines() -> &'static str { "/fapi/v1/klines" } fn order_book() -> &'static str { "/fapi/v1/depth" } } impl MarketApi for SApi { fn agg_trades() -> &'static str { "/api/v3/aggTrades" } fn klines() -> &'static str { "/api/v3/klines" } fn order_book() -> &'static str { "/api/v3/depth" } } // #[cfg(test)] // mod tests { // use super::*; // use crate::Exchange; // #[tokio::test] // async fn agg_trades() { // let client = Client::<FApi>::new(None); // let exchange = Exchange::new(client.clone()); // let market = Market::new(client); // let info = exchange.info().await.unwrap(); // let btc = info.symbols.iter().find(|s| s.symbol == "BTCUSDT").unwrap(); // let trades = market.agg_trades(AggTradesRequest::new(btc)).await.unwrap(); // eprintln!("{:?}", trades); // } // #[tokio::test] // async fn klines() { // let client = Client::<FApi>::new(None); // let exchange = Exchange::new(client.clone()); // let market = Market::new(client); // let info = exchange.info().await.unwrap(); // let btc = info.symbols.iter().find(|s| s.symbol == 
"BTCUSDT").unwrap(); // let klines = market // .klines( // KlinesRequest::new(btc, ChartInterval::OneMinute) // .start_time("2020-06-01T00:00:00Z") // .end_time("2020-06-01T07:59:59Z"), // ) // .await // .unwrap(); // eprintln!("{:?}", klines); // } // #[tokio::test] // async fn order_book() { // let client = Client::<SApi>::new(None); // let exchange = Exchange::new(client.clone()); // let market = Market::new(client); // let info = exchange.info().await.unwrap(); // let btc = info.symbols.iter().find(|s| s.symbol == "BTCUSDT").unwrap(); // let order_book = market.order_book(OrderBookRequest::new(btc)).await.unwrap(); // eprintln!("{:?}", order_book); // } // }
25.398438
99
0.525684
0a8f5b5096ecffe74fba2817deeaf1cf507690db
8,313
// Copyright 2018 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use anyhow::Error; use futures::future::LocalBoxFuture; use serde_derive::{Deserialize, Serialize}; use serde_json::Value; use std::{fmt::Debug, str::FromStr, sync::mpsc}; use thiserror::Error; use crate::server::constants::{COMMAND_DELIMITER, COMMAND_SIZE}; /// An Sl4f facade that can handle incoming requests. pub trait Facade: Debug { /// Asynchronously handle the incoming request for the given method and arguments, returning a /// future object representing the pending operation. fn handle_request( &self, method: String, args: Value, ) -> LocalBoxFuture<'_, Result<Value, Error>>; /// In response to a request to /cleanup, cleanup any cross-request state. fn cleanup(&self) {} /// In response to a request to /print, log relevant facade state. fn print(&self) {} } /// Information about each client that has connected #[derive(Serialize, Deserialize, Debug, Clone)] pub struct ClientData { // client_id: String ID of client (ACTS test suite) pub command_id: Value, // command_result: The response of running the command (to be stored in the table) pub command_result: AsyncResponse, } impl ClientData { pub fn new(id: Value, result: AsyncResponse) -> ClientData { ClientData { command_id: id, command_result: result } } } /// The parsed `id` field from an incoming json-rpc request. #[derive(Debug, PartialEq, Clone)] pub struct RequestId { /// If the request ID is a string that contains a single '.', the text leading up to the '.' is /// extracted as the client identifier. client: Option<String>, /// The ID to send in the response. If client is Some(_), this will be a substring of the /// request ID. id: Value, } impl RequestId { /// Parse a raw request ID into its session id (if present) and response id. 
pub fn new(raw: Value) -> Self { if let Some(s) = raw.as_str() { let parts = s.split('.').collect::<Vec<_>>(); if parts.len() == 2 { return Self { client: Some(parts[0].to_owned()), id: Value::String(parts[1..].join(".")), }; } } // If the raw ID wasn't a string that contained exactly 1 '.', pass it through to the // response unmodified. Self { client: None, id: raw } } /// Returns a reference to the session id, if present. pub fn session_id(&self) -> Option<&str> { self.client.as_ref().map(String::as_str) } /// Returns a reference to the response id. pub fn response_id(&self) -> &Value { &self.id } /// Returns the response id, consuming self. pub fn into_response_id(self) -> Value { self.id } } /// The parsed `method` field from an incoming json-rpc request. #[derive(Debug, PartialEq, Eq, Clone, Default)] pub struct MethodId { /// Method type of the request (e.g bluetooth, wlan, etc...) pub facade: String, /// Name of the method pub method: String, } impl FromStr for MethodId { type Err = MethodIdParseError; fn from_str(s: &str) -> Result<Self, Self::Err> { let parts = s.split(COMMAND_DELIMITER).collect::<Vec<_>>(); if parts.len() != COMMAND_SIZE { return Err(MethodIdParseError(s.to_string())); } Ok(Self { facade: parts[0].to_string(), method: parts[1].to_string() }) } } #[derive(Debug, PartialEq, Eq, Clone, Error)] #[error("invalid method id: {}", _0)] pub struct MethodIdParseError(String); /// Required fields for making a request #[derive(Serialize, Deserialize, Debug, Clone)] pub struct CommandRequest { // method: name of method to be called pub method: String, // id: String id of command pub id: Value, // params: Arguments required for method pub params: Value, } /// Return packet after SL4F runs command #[derive(Serialize, Clone, Debug)] pub struct CommandResponse { // id: String id of command pub id: Value, // result: Result value of method call, can be None pub result: Option<Value>, // error: Error message of method call, can be None pub error: 
Option<String>, } impl CommandResponse { pub fn new(id: Value, result: Option<Value>, error: Option<String>) -> CommandResponse { CommandResponse { id, result, error } } } /// Represents a RPC request to be fulfilled by the FIDL event loop #[derive(Debug)] pub enum AsyncRequest { Cleanup(mpsc::Sender<()>), Command(AsyncCommandRequest), } /// Represents a RPC command request to be fulfilled by the FIDL event loop #[derive(Debug)] pub struct AsyncCommandRequest { // tx: Transmit channel from FIDL event loop to RPC request side pub tx: mpsc::Sender<AsyncResponse>, // method_id: struct containing: // * facade: Method type of the request (e.g bluetooth, wlan, etc...) // * method: Name of the method pub method_id: MethodId, // params: serde_json::Value representing args for method pub params: Value, } impl AsyncCommandRequest { pub fn new( tx: mpsc::Sender<AsyncResponse>, method_id: MethodId, params: Value, ) -> AsyncCommandRequest { AsyncCommandRequest { tx, method_id, params } } } /// Represents a RPC response from the FIDL event loop to the RPC request side #[derive(Debug, Clone, Serialize, Deserialize)] pub struct AsyncResponse { // res: serde_json::Value of FIDL method result pub result: Option<Value>, pub error: Option<String>, } impl AsyncResponse { pub fn new(res: Result<Value, Error>) -> AsyncResponse { match res { Ok(v) => AsyncResponse { result: Some(v), error: None }, Err(e) => AsyncResponse { result: None, error: Some(e.to_string()) }, } } } #[cfg(test)] mod tests { use super::*; use serde_json::json; #[test] fn parse_method_id_ok() { assert_eq!( "bt.send".parse(), Ok(MethodId { facade: "bt".to_string(), method: "send".to_string() }) ); assert_eq!( "FooFacade.BarMethod".parse(), Ok(MethodId { facade: "FooFacade".to_string(), method: "BarMethod".to_string() }) ); assert_eq!( "EmptyMethod.".parse(), Ok(MethodId { facade: "EmptyMethod".to_string(), method: "".to_string() }) ); assert_eq!( ".EmptyFacade".parse(), Ok(MethodId { facade: "".to_string(), method: 
"EmptyFacade".to_string() }) ); } #[test] fn parse_method_id_invalid() { fn assert_parse_error(s: &str) { assert_eq!(s.parse::<MethodId>(), Err(MethodIdParseError(s.to_string()))); } // Invalid command (should result in empty result) assert_parse_error("bluetooth_send"); // Too many separators in command assert_parse_error("wlan.scan.start"); // Empty command assert_parse_error(""); // No separator assert_parse_error("BluetoothSend"); // Invalid separator assert_parse_error("Bluetooth,Scan"); } #[test] fn parse_request_id_int() { let id = RequestId::new(json!(42)); assert_eq!(id, RequestId { client: None, id: json!(42) }); assert_eq!(id.session_id(), None); assert_eq!(id.response_id(), &json!(42)); assert_eq!(id.into_response_id(), json!(42)); } #[test] fn parse_request_id_single_str() { assert_eq!(RequestId::new(json!("123")), RequestId { client: None, id: json!("123") }); } #[test] fn parse_request_id_too_many_dots() { assert_eq!(RequestId::new(json!("1.2.3")), RequestId { client: None, id: json!("1.2.3") }); } #[test] fn parse_request_id_with_session_id() { let id = RequestId::new(json!("12.34")); assert_eq!(id, RequestId { client: Some("12".to_string()), id: json!("34") }); assert_eq!(id.session_id(), Some("12")); assert_eq!(id.response_id(), &json!("34")); assert_eq!(id.into_response_id(), json!("34")); } }
29.902878
99
0.617948
f913bbe0a998456faf05ea30b122e004ebeebb4a
1,547
// Copyright 2020-2021, The Tremor Team // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. pub(crate) use crate::async_sink::{AsyncSink, SinkDequeueError}; pub(crate) use crate::codec::Codec; pub(crate) use crate::errors::*; pub(crate) use crate::offramp::{self, Offramp}; pub(crate) use crate::postprocessor::{ make_postprocessors, postprocess, Postprocessor, Postprocessors, }; pub(crate) use crate::preprocessor::{make_preprocessors, preprocess, Preprocessor, Preprocessors}; pub(crate) use crate::sink::{self, Reply, ResultVec, Sink, SinkManager}; pub(crate) use crate::source::Processors; pub(crate) use crate::url::ports::{ERR, OUT}; pub(crate) use crate::url::TremorURL; pub(crate) use crate::utils::hostname; pub(crate) use crate::{Event, OpConfig}; pub(crate) use async_channel::Sender; pub(crate) use async_std::prelude::*; pub(crate) use async_std::task; pub(crate) use beef::Cow; pub(crate) use tremor_common::time::nanotime; pub(crate) use tremor_pipeline::{CBAction, ConfigImpl}; pub(crate) use tremor_script::prelude::*;
44.2
98
0.747899
f9abae6e352f17d13a45ef3a76298b50edd45795
1,116
#[doc = "Reader of register IOM3IRQ"] pub type R = crate::R<u32, super::IOM3IRQ>; #[doc = "Writer for register IOM3IRQ"] pub type W = crate::W<u32, super::IOM3IRQ>; #[doc = "Register IOM3IRQ `reset()`'s with value 0x3f"] impl crate::ResetValue for super::IOM3IRQ { type Type = u32; #[inline(always)] fn reset_value() -> Self::Type { 0x3f } } #[doc = "Reader of field `IOM3IRQ`"] pub type IOM3IRQ_R = crate::R<u8, u8>; #[doc = "Write proxy for field `IOM3IRQ`"] pub struct IOM3IRQ_W<'a> { w: &'a mut W, } impl<'a> IOM3IRQ_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !0x3f) | ((value as u32) & 0x3f); self.w } } impl R { #[doc = "Bits 0:5 - IOMSTR3 IRQ pad select."] #[inline(always)] pub fn iom3irq(&self) -> IOM3IRQ_R { IOM3IRQ_R::new((self.bits & 0x3f) as u8) } } impl W { #[doc = "Bits 0:5 - IOMSTR3 IRQ pad select."] #[inline(always)] pub fn iom3irq(&mut self) -> IOM3IRQ_W { IOM3IRQ_W { w: self } } }
27.219512
70
0.576165
1cd37c33c62fae7e3c875b063b06c6bba2364d65
24,374
use std::fs::{self, File, OpenOptions}; use std::hash::{self, Hasher}; use std::io::prelude::*; use std::io::{BufReader, SeekFrom}; use std::path::{Path, PathBuf}; use std::sync::{Arc, Mutex}; use filetime::FileTime; use rustc_serialize::{json, Encodable, Decodable, Encoder, Decoder}; use core::{Package, TargetKind}; use util; use util::{CargoResult, Fresh, Dirty, Freshness, internal, profile, ChainError}; use util::paths; use super::job::Work; use super::context::{Context, Unit}; /// A tuple result of the `prepare_foo` functions in this module. /// /// The first element of the triple is whether the target in question is /// currently fresh or not, and the second two elements are work to perform when /// the target is dirty or fresh, respectively. /// /// Both units of work are always generated because a fresh package may still be /// rebuilt if some upstream dependency changes. pub type Preparation = (Freshness, Work, Work); /// Prepare the necessary work for the fingerprint for a specific target. /// /// When dealing with fingerprints, cargo gets to choose what granularity /// "freshness" is considered at. One option is considering freshness at the /// package level. This means that if anything in a package changes, the entire /// package is rebuilt, unconditionally. This simplicity comes at a cost, /// however, in that test-only changes will cause libraries to be rebuilt, which /// is quite unfortunate! /// /// The cost was deemed high enough that fingerprints are now calculated at the /// layer of a target rather than a package. Each target can then be kept track /// of separately and only rebuilt as necessary. This requires cargo to /// understand what the inputs are to a target, so we drive rustc with the /// --dep-info flag to learn about all input files to a unit of compilation. 
/// /// This function will calculate the fingerprint for a target and prepare the /// work necessary to either write the fingerprint or copy over all fresh files /// from the old directories to their new locations. pub fn prepare_target<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> CargoResult<Preparation> { let _p = profile::start(format!("fingerprint: {} / {}", unit.pkg.package_id(), unit.target.name())); let new = dir(cx, unit); let loc = new.join(&filename(unit)); debug!("fingerprint at: {}", loc.display()); let fingerprint = try!(calculate(cx, unit)); let compare = compare_old_fingerprint(&loc, &*fingerprint); log_compare(unit, &compare); let root = cx.out_dir(unit); let mut missing_outputs = false; if !unit.profile.doc { for filename in try!(cx.target_filenames(unit)).iter() { missing_outputs |= fs::metadata(root.join(filename)).is_err(); } } let allow_failure = unit.profile.rustc_args.is_some(); let write_fingerprint = Work::new(move |_| { match fingerprint.update_local() { Ok(()) => {} Err(..) if allow_failure => return Ok(()), Err(e) => return Err(e) } write_fingerprint(&loc, &*fingerprint) }); let fresh = compare.is_ok() && !missing_outputs; Ok((if fresh {Fresh} else {Dirty}, write_fingerprint, Work::noop())) } /// A fingerprint can be considered to be a "short string" representing the /// state of a world for a package. /// /// If a fingerprint ever changes, then the package itself needs to be /// recompiled. Inputs to the fingerprint include source code modifications, /// compiler flags, compiler version, etc. This structure is not simply a /// `String` due to the fact that some fingerprints cannot be calculated lazily. /// /// Path sources, for example, use the mtime of the corresponding dep-info file /// as a fingerprint (all source files must be modified *before* this mtime). /// This dep-info file is not generated, however, until after the crate is /// compiled. As a result, this structure can be thought of as a fingerprint /// to-be. 
The actual value can be calculated via `hash()`, but the operation /// may fail as some files may not have been generated. /// /// Note that dependencies are taken into account for fingerprints because rustc /// requires that whenever an upstream crate is recompiled that all downstream /// dependants are also recompiled. This is typically tracked through /// `DependencyQueue`, but it also needs to be retained here because Cargo can /// be interrupted while executing, losing the state of the `DependencyQueue` /// graph. pub struct Fingerprint { rustc: u64, features: String, target: u64, profile: u64, deps: Vec<(String, Arc<Fingerprint>)>, local: LocalFingerprint, memoized_hash: Mutex<Option<u64>>, } #[derive(RustcEncodable, RustcDecodable, Hash)] enum LocalFingerprint { Precalculated(String), MtimeBased(MtimeSlot, PathBuf), } struct MtimeSlot(Mutex<Option<FileTime>>); impl Fingerprint { fn update_local(&self) -> CargoResult<()> { match self.local { LocalFingerprint::MtimeBased(ref slot, ref path) => { let meta = try!(fs::metadata(path).chain_error(|| { internal(format!("failed to stat `{}`", path.display())) })); let mtime = FileTime::from_last_modification_time(&meta); *slot.0.lock().unwrap() = Some(mtime); } LocalFingerprint::Precalculated(..) 
=> return Ok(()) } *self.memoized_hash.lock().unwrap() = None; Ok(()) } fn hash(&self) -> u64 { if let Some(s) = *self.memoized_hash.lock().unwrap() { return s } let ret = util::hash_u64(self); *self.memoized_hash.lock().unwrap() = Some(ret); return ret } fn compare(&self, old: &Fingerprint) -> CargoResult<()> { if self.rustc != old.rustc { bail!("rust compiler has changed") } if self.features != old.features { bail!("features have changed: {} != {}", self.features, old.features) } if self.target != old.target { bail!("target configuration has changed") } if self.profile != old.profile { bail!("profile configuration has changed") } match (&self.local, &old.local) { (&LocalFingerprint::Precalculated(ref a), &LocalFingerprint::Precalculated(ref b)) => { if a != b { bail!("precalculated components have changed: {} != {}", a, b) } } (&LocalFingerprint::MtimeBased(ref a, ref ap), &LocalFingerprint::MtimeBased(ref b, ref bp)) => { let a = a.0.lock().unwrap(); let b = b.0.lock().unwrap(); if *a != *b { bail!("mtime based comopnents have changed: {:?} != {:?}, \ paths are {:?} and {:?}", *a, *b, ap, bp) } } _ => bail!("local fingerprint type has changed"), } if self.deps.len() != old.deps.len() { bail!("number of dependencies has changed") } for (a, b) in self.deps.iter().zip(old.deps.iter()) { if a.1.hash() != b.1.hash() { bail!("new ({}) != old ({})", a.0, b.0) } } Ok(()) } } impl hash::Hash for Fingerprint { fn hash<H: Hasher>(&self, h: &mut H) { let Fingerprint { rustc, ref features, target, profile, ref deps, ref local, memoized_hash: _, } = *self; (rustc, features, target, profile, deps, local).hash(h) } } impl Encodable for Fingerprint { fn encode<E: Encoder>(&self, e: &mut E) -> Result<(), E::Error> { e.emit_struct("Fingerprint", 6, |e| { try!(e.emit_struct_field("rustc", 0, |e| self.rustc.encode(e))); try!(e.emit_struct_field("target", 1, |e| self.target.encode(e))); try!(e.emit_struct_field("profile", 2, |e| self.profile.encode(e))); 
try!(e.emit_struct_field("local", 3, |e| self.local.encode(e))); try!(e.emit_struct_field("features", 4, |e| { self.features.encode(e) })); try!(e.emit_struct_field("deps", 5, |e| { self.deps.iter().map(|&(ref a, ref b)| { (a, b.hash()) }).collect::<Vec<_>>().encode(e) })); Ok(()) }) } } impl Decodable for Fingerprint { fn decode<D: Decoder>(d: &mut D) -> Result<Fingerprint, D::Error> { fn decode<T: Decodable, D: Decoder>(d: &mut D) -> Result<T, D::Error> { Decodable::decode(d) } d.read_struct("Fingerprint", 6, |d| { Ok(Fingerprint { rustc: try!(d.read_struct_field("rustc", 0, decode)), target: try!(d.read_struct_field("target", 1, decode)), profile: try!(d.read_struct_field("profile", 2, decode)), local: try!(d.read_struct_field("local", 3, decode)), features: try!(d.read_struct_field("features", 4, decode)), memoized_hash: Mutex::new(None), deps: { let decode = decode::<Vec<(String, u64)>, D>; let v = try!(d.read_struct_field("deps", 5, decode)); v.into_iter().map(|(name, hash)| { (name, Arc::new(Fingerprint { rustc: 0, target: 0, profile: 0, local: LocalFingerprint::Precalculated(String::new()), features: String::new(), deps: Vec::new(), memoized_hash: Mutex::new(Some(hash)), })) }).collect() } }) }) } } impl hash::Hash for MtimeSlot { fn hash<H: Hasher>(&self, h: &mut H) { self.0.lock().unwrap().hash(h) } } impl Encodable for MtimeSlot { fn encode<E: Encoder>(&self, e: &mut E) -> Result<(), E::Error> { self.0.lock().unwrap().map(|ft| { (ft.seconds_relative_to_1970(), ft.nanoseconds()) }).encode(e) } } impl Decodable for MtimeSlot { fn decode<D: Decoder>(e: &mut D) -> Result<MtimeSlot, D::Error> { let kind: Option<(u64, u32)> = try!(Decodable::decode(e)); Ok(MtimeSlot(Mutex::new(kind.map(|(s, n)| { FileTime::from_seconds_since_1970(s, n) })))) } } /// Calculates the fingerprint for a package/target pair. 
/// /// This fingerprint is used by Cargo to learn about when information such as: /// /// * A non-path package changes (changes version, changes revision, etc). /// * Any dependency changes /// * The compiler changes /// * The set of features a package is built with changes /// * The profile a target is compiled with changes (e.g. opt-level changes) /// /// Information like file modification time is only calculated for path /// dependencies and is calculated in `calculate_target_fresh`. fn calculate<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> CargoResult<Arc<Fingerprint>> { if let Some(s) = cx.fingerprints.get(unit) { return Ok(s.clone()) } // First, calculate all statically known "salt data" such as the profile // information (compiler flags), the compiler version, activated features, // and target configuration. let features = cx.resolve.features(unit.pkg.package_id()); let features = features.map(|s| { let mut v = s.iter().collect::<Vec<_>>(); v.sort(); v }); // Next, recursively calculate the fingerprint for all of our dependencies. // // Skip the fingerprints of build scripts as they may not always be // available and the dirtiness propagation for modification is tracked // elsewhere. Also skip fingerprints of binaries because they don't actually // induce a recompile, they're just dependencies in the sense that they need // to be built. 
let deps = try!(cx.dep_targets(unit).iter().filter(|u| { !u.target.is_custom_build() && !u.target.is_bin() }).map(|unit| { calculate(cx, unit).map(|fingerprint| { (unit.pkg.package_id().to_string(), fingerprint) }) }).collect::<CargoResult<Vec<_>>>()); // And finally, calculate what our own local fingerprint is let local = if use_dep_info(unit) { let dep_info = dep_info_loc(cx, unit); let mtime = try!(dep_info_mtime_if_fresh(&dep_info)); LocalFingerprint::MtimeBased(MtimeSlot(Mutex::new(mtime)), dep_info) } else { let fingerprint = try!(pkg_fingerprint(cx, unit.pkg)); LocalFingerprint::Precalculated(fingerprint) }; let mut deps = deps; deps.sort_by(|&(ref a, _), &(ref b, _)| a.cmp(b)); let fingerprint = Arc::new(Fingerprint { rustc: util::hash_u64(&cx.config.rustc_info().verbose_version), target: util::hash_u64(&unit.target), profile: util::hash_u64(&unit.profile), features: format!("{:?}", features), deps: deps, local: local, memoized_hash: Mutex::new(None), }); cx.fingerprints.insert(*unit, fingerprint.clone()); Ok(fingerprint) } // We want to use the mtime for files if we're a path source, but if we're a // git/registry source, then the mtime of files may fluctuate, but they won't // change so long as the source itself remains constant (which is the // responsibility of the source) fn use_dep_info(unit: &Unit) -> bool { let path = unit.pkg.summary().source_id().is_path(); !unit.profile.doc && path } /// Prepare the necessary work for the fingerprint of a build command. /// /// Build commands are located on packages, not on targets. Additionally, we /// don't have --dep-info to drive calculation of the fingerprint of a build /// command. This brings up an interesting predicament which gives us a few /// options to figure out whether a build command is dirty or not: /// /// 1. A build command is dirty if *any* file in a package changes. In theory /// all files are candidate for being used by the build command. /// 2. 
A build command is dirty if any file in a *specific directory* changes. /// This may lose information as it may require files outside of the specific /// directory. /// 3. A build command must itself provide a dep-info-like file stating how it /// should be considered dirty or not. /// /// The currently implemented solution is option (1), although it is planned to /// migrate to option (2) in the near future. pub fn prepare_build_cmd<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> CargoResult<Preparation> { let _p = profile::start(format!("fingerprint build cmd: {}", unit.pkg.package_id())); let new = dir(cx, unit); let loc = new.join("build"); debug!("fingerprint at: {}", loc.display()); // If this build script execution has been overridden, then the fingerprint // is just a hash of what it was overridden with. Otherwise the fingerprint // is that of the entire package itself as we just consider everything as // input to the build script. let (local, output_path) = { let state = cx.build_state.outputs.lock().unwrap(); match state.get(&(unit.pkg.package_id().clone(), unit.kind)) { Some(output) => { let s = format!("overridden build state with hash: {}", util::hash_u64(output)); (LocalFingerprint::Precalculated(s), None) } None => { let &(ref output, ref deps) = &cx.build_explicit_deps[unit]; let local = if deps.len() == 0 { let s = try!(pkg_fingerprint(cx, unit.pkg)); LocalFingerprint::Precalculated(s) } else { let deps = deps.iter().map(|p| unit.pkg.root().join(p)); let mtime = mtime_if_fresh(output, deps); let mtime = MtimeSlot(Mutex::new(mtime)); LocalFingerprint::MtimeBased(mtime, output.clone()) }; (local, Some(output.clone())) } } }; let mut fingerprint = Fingerprint { rustc: 0, target: 0, profile: 0, features: String::new(), deps: Vec::new(), local: local, memoized_hash: Mutex::new(None), }; let compare = compare_old_fingerprint(&loc, &fingerprint); log_compare(unit, &compare); // When we write out the fingerprint, we may want to actually change 
the // kind of fingerprint being recorded. If we started out, then the previous // run of the build script (or if it had never run before) may indicate to // use the `Precalculated` variant with the `pkg_fingerprint`. If the build // script then prints `rerun-if-changed`, however, we need to record what's // necessary for that fingerprint. // // Hence, if there were some `rerun-if-changed` directives forcibly change // the kind of fingerprint over to the `MtimeBased` variant where the // relevant mtime is the output path of the build script. let state = cx.build_state.clone(); let key = (unit.pkg.package_id().clone(), unit.kind); let write_fingerprint = Work::new(move |_| { if let Some(output_path) = output_path { let outputs = state.outputs.lock().unwrap(); if outputs[&key].rerun_if_changed.len() > 0 { let slot = MtimeSlot(Mutex::new(None)); fingerprint.local = LocalFingerprint::MtimeBased(slot, output_path); try!(fingerprint.update_local()); } } write_fingerprint(&loc, &fingerprint) }); Ok((if compare.is_ok() {Fresh} else {Dirty}, write_fingerprint, Work::noop())) } fn write_fingerprint(loc: &Path, fingerprint: &Fingerprint) -> CargoResult<()> { let hash = fingerprint.hash(); debug!("write fingerprint: {}", loc.display()); try!(paths::write(&loc, util::to_hex(hash).as_bytes())); try!(paths::write(&loc.with_extension("json"), json::encode(&fingerprint).unwrap().as_bytes())); Ok(()) } /// Prepare work for when a package starts to build pub fn prepare_init(cx: &mut Context, unit: &Unit) -> CargoResult<()> { let new1 = dir(cx, unit); let new2 = new1.clone(); if fs::metadata(&new1).is_err() { try!(fs::create_dir(&new1)); } if fs::metadata(&new2).is_err() { try!(fs::create_dir(&new2)); } Ok(()) } /// Return the (old, new) location for fingerprints for a package pub fn dir(cx: &Context, unit: &Unit) -> PathBuf { cx.layout(unit.pkg, unit.kind).proxy().fingerprint(unit.pkg) } /// Returns the (old, new) location for the dep info file of a target. 
pub fn dep_info_loc(cx: &Context, unit: &Unit) -> PathBuf { dir(cx, unit).join(&format!("dep-{}", filename(unit))) } fn compare_old_fingerprint(loc: &Path, new_fingerprint: &Fingerprint) -> CargoResult<()> { let old_fingerprint_short = try!(paths::read(loc)); let new_hash = new_fingerprint.hash(); if util::to_hex(new_hash) == old_fingerprint_short { return Ok(()) } let old_fingerprint_json = try!(paths::read(&loc.with_extension("json"))); let old_fingerprint = try!(json::decode(&old_fingerprint_json).chain_error(|| { internal(format!("failed to deserialize json")) })); new_fingerprint.compare(&old_fingerprint) } fn log_compare(unit: &Unit, compare: &CargoResult<()>) { let mut e = match *compare { Ok(..) => return, Err(ref e) => &**e, }; info!("fingerprint error for {}: {}", unit.pkg, e); while let Some(cause) = e.cargo_cause() { info!(" cause: {}", cause); e = cause; } let mut e = e.cause(); while let Some(cause) = e { info!(" cause: {}", cause); e = cause.cause(); } } fn dep_info_mtime_if_fresh(dep_info: &Path) -> CargoResult<Option<FileTime>> { macro_rules! fs_try { ($e:expr) => (match $e { Ok(e) => e, Err(..) => return Ok(None) }) } let mut f = BufReader::new(fs_try!(File::open(dep_info))); // see comments in append_current_dir for where this cwd is manifested from. 
let mut cwd = Vec::new(); if fs_try!(f.read_until(0, &mut cwd)) == 0 { return Ok(None) } let cwd = try!(util::bytes2path(&cwd[..cwd.len()-1])); let line = match f.lines().next() { Some(Ok(line)) => line, _ => return Ok(None), }; let pos = try!(line.find(": ").chain_error(|| { internal(format!("dep-info not in an understood format: {}", dep_info.display())) })); let deps = &line[pos + 2..]; let mut paths = Vec::new(); let mut deps = deps.split(' ').map(|s| s.trim()).filter(|s| !s.is_empty()); loop { let mut file = match deps.next() { Some(s) => s.to_string(), None => break, }; while file.ends_with("\\") { file.pop(); file.push(' '); file.push_str(try!(deps.next().chain_error(|| { internal(format!("malformed dep-info format, trailing \\")) }))); } paths.push(cwd.join(&file)); } Ok(mtime_if_fresh(&dep_info, paths.iter())) } fn pkg_fingerprint(cx: &Context, pkg: &Package) -> CargoResult<String> { let source_id = pkg.package_id().source_id(); let source = try!(cx.sources.get(source_id).chain_error(|| { internal("missing package source") })); source.fingerprint(pkg) } fn mtime_if_fresh<I>(output: &Path, paths: I) -> Option<FileTime> where I: IntoIterator, I::Item: AsRef<Path>, { let meta = match fs::metadata(output) { Ok(meta) => meta, Err(..) => return None, }; let mtime = FileTime::from_last_modification_time(&meta); let any_stale = paths.into_iter().any(|path| { let path = path.as_ref(); let meta = match fs::metadata(path) { Ok(meta) => meta, Err(..) => { info!("stale: {} -- missing", path.display()); return true } }; let mtime2 = FileTime::from_last_modification_time(&meta); if mtime2 > mtime { info!("stale: {} -- {} vs {}", path.display(), mtime2, mtime); true } else { false } }); if any_stale { None } else { Some(mtime) } } fn filename(unit: &Unit) -> String { let kind = match *unit.target.kind() { TargetKind::Lib(..) 
=> "lib", TargetKind::Bin => "bin", TargetKind::Test => "integration-test", TargetKind::Example => "example", TargetKind::Bench => "bench", TargetKind::CustomBuild => "build-script", }; let flavor = if unit.profile.test { "test-" } else if unit.profile.doc { "doc-" } else { "" }; format!("{}{}-{}", flavor, kind, unit.target.name()) } // The dep-info files emitted by the compiler all have their listed paths // relative to whatever the current directory was at the time that the compiler // was invoked. As the current directory may change over time, we need to record // what that directory was at the beginning of the file so we can know about it // next time. pub fn append_current_dir(path: &Path, cwd: &Path) -> CargoResult<()> { debug!("appending {} <- {}", path.display(), cwd.display()); let mut f = try!(OpenOptions::new().read(true).write(true).open(path)); let mut contents = Vec::new(); try!(f.read_to_end(&mut contents)); try!(f.seek(SeekFrom::Start(0))); try!(f.write_all(try!(util::path2bytes(cwd)))); try!(f.write_all(&[0])); try!(f.write_all(&contents)); Ok(()) }
38.024961
83
0.584229
bf340360c7b5d1c05c40bb3efe0986e3dbe52f44
2,930
use crate::common::{interpolate_fix9_scale, Rect}; use crate::entity::GameEntity; use crate::frame::Frame; use crate::framework::context::Context; use crate::framework::error::GameResult; use crate::shared_game_state::SharedGameState; #[derive(Debug, Copy, Clone)] pub struct NumberPopup { pub value: i16, pub x: i32, pub y: i32, pub prev_x: i32, pub prev_y: i32, counter: u16, } impl NumberPopup { pub fn new() -> NumberPopup { NumberPopup { value: 0, x: 0, y: 0, prev_x: 0, prev_y: 0, counter: 0 } } pub fn set_value(&mut self, value: i16) { if self.counter > 32 { self.counter = 32; } self.value = value; } pub fn add_value(&mut self, value: i16) { self.set_value(self.value + value); } } impl GameEntity<()> for NumberPopup { fn tick(&mut self, _state: &mut SharedGameState, _custom: ()) -> GameResult<()> { if self.value == 0 { return Ok(()); } self.counter += 1; if self.counter == 80 { self.counter = 0; self.value = 0; } Ok(()) } fn draw(&self, state: &mut SharedGameState, ctx: &mut Context, frame: &Frame) -> GameResult<()> { if self.value == 0 { return Ok(()); } // tick 0 - 32 - move up by 0.5 pixels // tick 33 - 72 - stay // tick 73 - 80 - fade up let y_offset = self.counter.min(32) as f32 * 0.5; let clip = self.counter.max(72) - 72; let batch = state.texture_set.get_or_load_batch(ctx, &state.constants, "TextBox")?; let (frame_x, frame_y) = frame.xy_interpolated(state.frame_time); let x = interpolate_fix9_scale(self.prev_x, self.x, state.frame_time) - frame_x; let y = interpolate_fix9_scale(self.prev_y, self.y, state.frame_time) - frame_y - y_offset; let mut n = self.value.to_string(); if self.value > 0 { n = "+".to_owned() + n.as_str(); }; let x = x - n.len() as f32 * 4.0; for (offset, chr) in n.chars().enumerate() { match chr { '+' => { batch.add_rect(x + offset as f32 * 8.0, y, &Rect::new_size(32, 48 + clip, 8, 8 - clip)); } '-' => { batch.add_rect(x + offset as f32 * 8.0, y, &Rect::new_size(40, 48 + clip, 8, 8 - clip)); } '0'..='9' => { let number_set = if 
self.value < 0 { 64 } else { 56 }; let idx = chr as u16 - '0' as u16; batch.add_rect( x + offset as f32 * 8.0, y, &Rect::new_size(idx * 8, number_set + clip, 8, 8 - clip), ); } _ => {} } } batch.draw(ctx)?; Ok(()) } }
29.009901
108
0.48942
fb4594189e8936726be14a4ae63f019b40a9dd8d
298
//! This library parses RDF content based on content type (or file name) and outputs sophia-compliant quad-source. //! This is useful in situations where one have RDF in some serialization, and just need the parsed triples/quads, without having to concern oneself with picking the correct parser.
74.5
181
0.791946
8a09582b8beda03b279acf87682c8551e7addc30
30,272
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. pub fn serialize_structure_crate_input_add_attributes_to_findings_input( object: &mut smithy_json::serialize::JsonObjectWriter, input: &crate::input::AddAttributesToFindingsInput, ) { if let Some(var_1) = &input.finding_arns { let mut array_2 = object.key("findingArns").start_array(); for item_3 in var_1 { { array_2.value().string(item_3); } } array_2.finish(); } if let Some(var_4) = &input.attributes { let mut array_5 = object.key("attributes").start_array(); for item_6 in var_4 { { let mut object_7 = array_5.value().start_object(); crate::json_ser::serialize_structure_crate_model_attribute(&mut object_7, item_6); object_7.finish(); } } array_5.finish(); } } pub fn serialize_structure_crate_input_create_assessment_target_input( object: &mut smithy_json::serialize::JsonObjectWriter, input: &crate::input::CreateAssessmentTargetInput, ) { if let Some(var_8) = &input.assessment_target_name { object.key("assessmentTargetName").string(var_8); } if let Some(var_9) = &input.resource_group_arn { object.key("resourceGroupArn").string(var_9); } } pub fn serialize_structure_crate_input_create_assessment_template_input( object: &mut smithy_json::serialize::JsonObjectWriter, input: &crate::input::CreateAssessmentTemplateInput, ) { if let Some(var_10) = &input.assessment_target_arn { object.key("assessmentTargetArn").string(var_10); } if let Some(var_11) = &input.assessment_template_name { object.key("assessmentTemplateName").string(var_11); } { object.key("durationInSeconds").number( #[allow(clippy::useless_conversion)] smithy_types::Number::NegInt((input.duration_in_seconds).into()), ); } if let Some(var_12) = &input.rules_package_arns { let mut array_13 = object.key("rulesPackageArns").start_array(); for item_14 in var_12 { { array_13.value().string(item_14); } } array_13.finish(); } if let Some(var_15) = &input.user_attributes_for_findings { let mut array_16 = 
object.key("userAttributesForFindings").start_array(); for item_17 in var_15 { { let mut object_18 = array_16.value().start_object(); crate::json_ser::serialize_structure_crate_model_attribute(&mut object_18, item_17); object_18.finish(); } } array_16.finish(); } } pub fn serialize_structure_crate_input_create_exclusions_preview_input( object: &mut smithy_json::serialize::JsonObjectWriter, input: &crate::input::CreateExclusionsPreviewInput, ) { if let Some(var_19) = &input.assessment_template_arn { object.key("assessmentTemplateArn").string(var_19); } } pub fn serialize_structure_crate_input_create_resource_group_input( object: &mut smithy_json::serialize::JsonObjectWriter, input: &crate::input::CreateResourceGroupInput, ) { if let Some(var_20) = &input.resource_group_tags { let mut array_21 = object.key("resourceGroupTags").start_array(); for item_22 in var_20 { { let mut object_23 = array_21.value().start_object(); crate::json_ser::serialize_structure_crate_model_resource_group_tag( &mut object_23, item_22, ); object_23.finish(); } } array_21.finish(); } } pub fn serialize_structure_crate_input_delete_assessment_run_input( object: &mut smithy_json::serialize::JsonObjectWriter, input: &crate::input::DeleteAssessmentRunInput, ) { if let Some(var_24) = &input.assessment_run_arn { object.key("assessmentRunArn").string(var_24); } } pub fn serialize_structure_crate_input_delete_assessment_target_input( object: &mut smithy_json::serialize::JsonObjectWriter, input: &crate::input::DeleteAssessmentTargetInput, ) { if let Some(var_25) = &input.assessment_target_arn { object.key("assessmentTargetArn").string(var_25); } } pub fn serialize_structure_crate_input_delete_assessment_template_input( object: &mut smithy_json::serialize::JsonObjectWriter, input: &crate::input::DeleteAssessmentTemplateInput, ) { if let Some(var_26) = &input.assessment_template_arn { object.key("assessmentTemplateArn").string(var_26); } } pub fn 
serialize_structure_crate_input_describe_assessment_runs_input( object: &mut smithy_json::serialize::JsonObjectWriter, input: &crate::input::DescribeAssessmentRunsInput, ) { if let Some(var_27) = &input.assessment_run_arns { let mut array_28 = object.key("assessmentRunArns").start_array(); for item_29 in var_27 { { array_28.value().string(item_29); } } array_28.finish(); } } pub fn serialize_structure_crate_input_describe_assessment_targets_input( object: &mut smithy_json::serialize::JsonObjectWriter, input: &crate::input::DescribeAssessmentTargetsInput, ) { if let Some(var_30) = &input.assessment_target_arns { let mut array_31 = object.key("assessmentTargetArns").start_array(); for item_32 in var_30 { { array_31.value().string(item_32); } } array_31.finish(); } } pub fn serialize_structure_crate_input_describe_assessment_templates_input( object: &mut smithy_json::serialize::JsonObjectWriter, input: &crate::input::DescribeAssessmentTemplatesInput, ) { if let Some(var_33) = &input.assessment_template_arns { let mut array_34 = object.key("assessmentTemplateArns").start_array(); for item_35 in var_33 { { array_34.value().string(item_35); } } array_34.finish(); } } pub fn serialize_structure_crate_input_describe_exclusions_input( object: &mut smithy_json::serialize::JsonObjectWriter, input: &crate::input::DescribeExclusionsInput, ) { if let Some(var_36) = &input.exclusion_arns { let mut array_37 = object.key("exclusionArns").start_array(); for item_38 in var_36 { { array_37.value().string(item_38); } } array_37.finish(); } if let Some(var_39) = &input.locale { object.key("locale").string(var_39.as_str()); } } pub fn serialize_structure_crate_input_describe_findings_input( object: &mut smithy_json::serialize::JsonObjectWriter, input: &crate::input::DescribeFindingsInput, ) { if let Some(var_40) = &input.finding_arns { let mut array_41 = object.key("findingArns").start_array(); for item_42 in var_40 { { array_41.value().string(item_42); } } array_41.finish(); } if let 
Some(var_43) = &input.locale { object.key("locale").string(var_43.as_str()); } } pub fn serialize_structure_crate_input_describe_resource_groups_input( object: &mut smithy_json::serialize::JsonObjectWriter, input: &crate::input::DescribeResourceGroupsInput, ) { if let Some(var_44) = &input.resource_group_arns { let mut array_45 = object.key("resourceGroupArns").start_array(); for item_46 in var_44 { { array_45.value().string(item_46); } } array_45.finish(); } } pub fn serialize_structure_crate_input_describe_rules_packages_input( object: &mut smithy_json::serialize::JsonObjectWriter, input: &crate::input::DescribeRulesPackagesInput, ) { if let Some(var_47) = &input.rules_package_arns { let mut array_48 = object.key("rulesPackageArns").start_array(); for item_49 in var_47 { { array_48.value().string(item_49); } } array_48.finish(); } if let Some(var_50) = &input.locale { object.key("locale").string(var_50.as_str()); } } pub fn serialize_structure_crate_input_get_assessment_report_input( object: &mut smithy_json::serialize::JsonObjectWriter, input: &crate::input::GetAssessmentReportInput, ) { if let Some(var_51) = &input.assessment_run_arn { object.key("assessmentRunArn").string(var_51); } if let Some(var_52) = &input.report_file_format { object.key("reportFileFormat").string(var_52.as_str()); } if let Some(var_53) = &input.report_type { object.key("reportType").string(var_53.as_str()); } } pub fn serialize_structure_crate_input_get_exclusions_preview_input( object: &mut smithy_json::serialize::JsonObjectWriter, input: &crate::input::GetExclusionsPreviewInput, ) { if let Some(var_54) = &input.assessment_template_arn { object.key("assessmentTemplateArn").string(var_54); } if let Some(var_55) = &input.preview_token { object.key("previewToken").string(var_55); } if let Some(var_56) = &input.next_token { object.key("nextToken").string(var_56); } if let Some(var_57) = &input.max_results { object.key("maxResults").number( #[allow(clippy::useless_conversion)] 
smithy_types::Number::NegInt((*var_57).into()), ); } if let Some(var_58) = &input.locale { object.key("locale").string(var_58.as_str()); } } pub fn serialize_structure_crate_input_get_telemetry_metadata_input( object: &mut smithy_json::serialize::JsonObjectWriter, input: &crate::input::GetTelemetryMetadataInput, ) { if let Some(var_59) = &input.assessment_run_arn { object.key("assessmentRunArn").string(var_59); } } pub fn serialize_structure_crate_input_list_assessment_run_agents_input( object: &mut smithy_json::serialize::JsonObjectWriter, input: &crate::input::ListAssessmentRunAgentsInput, ) { if let Some(var_60) = &input.assessment_run_arn { object.key("assessmentRunArn").string(var_60); } if let Some(var_61) = &input.filter { let mut object_62 = object.key("filter").start_object(); crate::json_ser::serialize_structure_crate_model_agent_filter(&mut object_62, var_61); object_62.finish(); } if let Some(var_63) = &input.next_token { object.key("nextToken").string(var_63); } if let Some(var_64) = &input.max_results { object.key("maxResults").number( #[allow(clippy::useless_conversion)] smithy_types::Number::NegInt((*var_64).into()), ); } } pub fn serialize_structure_crate_input_list_assessment_runs_input( object: &mut smithy_json::serialize::JsonObjectWriter, input: &crate::input::ListAssessmentRunsInput, ) { if let Some(var_65) = &input.assessment_template_arns { let mut array_66 = object.key("assessmentTemplateArns").start_array(); for item_67 in var_65 { { array_66.value().string(item_67); } } array_66.finish(); } if let Some(var_68) = &input.filter { let mut object_69 = object.key("filter").start_object(); crate::json_ser::serialize_structure_crate_model_assessment_run_filter( &mut object_69, var_68, ); object_69.finish(); } if let Some(var_70) = &input.next_token { object.key("nextToken").string(var_70); } if let Some(var_71) = &input.max_results { object.key("maxResults").number( #[allow(clippy::useless_conversion)] 
smithy_types::Number::NegInt((*var_71).into()), ); } } pub fn serialize_structure_crate_input_list_assessment_targets_input( object: &mut smithy_json::serialize::JsonObjectWriter, input: &crate::input::ListAssessmentTargetsInput, ) { if let Some(var_72) = &input.filter { let mut object_73 = object.key("filter").start_object(); crate::json_ser::serialize_structure_crate_model_assessment_target_filter( &mut object_73, var_72, ); object_73.finish(); } if let Some(var_74) = &input.next_token { object.key("nextToken").string(var_74); } if let Some(var_75) = &input.max_results { object.key("maxResults").number( #[allow(clippy::useless_conversion)] smithy_types::Number::NegInt((*var_75).into()), ); } } pub fn serialize_structure_crate_input_list_assessment_templates_input( object: &mut smithy_json::serialize::JsonObjectWriter, input: &crate::input::ListAssessmentTemplatesInput, ) { if let Some(var_76) = &input.assessment_target_arns { let mut array_77 = object.key("assessmentTargetArns").start_array(); for item_78 in var_76 { { array_77.value().string(item_78); } } array_77.finish(); } if let Some(var_79) = &input.filter { let mut object_80 = object.key("filter").start_object(); crate::json_ser::serialize_structure_crate_model_assessment_template_filter( &mut object_80, var_79, ); object_80.finish(); } if let Some(var_81) = &input.next_token { object.key("nextToken").string(var_81); } if let Some(var_82) = &input.max_results { object.key("maxResults").number( #[allow(clippy::useless_conversion)] smithy_types::Number::NegInt((*var_82).into()), ); } } pub fn serialize_structure_crate_input_list_event_subscriptions_input( object: &mut smithy_json::serialize::JsonObjectWriter, input: &crate::input::ListEventSubscriptionsInput, ) { if let Some(var_83) = &input.resource_arn { object.key("resourceArn").string(var_83); } if let Some(var_84) = &input.next_token { object.key("nextToken").string(var_84); } if let Some(var_85) = &input.max_results { object.key("maxResults").number( 
#[allow(clippy::useless_conversion)] smithy_types::Number::NegInt((*var_85).into()), ); } } pub fn serialize_structure_crate_input_list_exclusions_input( object: &mut smithy_json::serialize::JsonObjectWriter, input: &crate::input::ListExclusionsInput, ) { if let Some(var_86) = &input.assessment_run_arn { object.key("assessmentRunArn").string(var_86); } if let Some(var_87) = &input.next_token { object.key("nextToken").string(var_87); } if let Some(var_88) = &input.max_results { object.key("maxResults").number( #[allow(clippy::useless_conversion)] smithy_types::Number::NegInt((*var_88).into()), ); } } pub fn serialize_structure_crate_input_list_findings_input( object: &mut smithy_json::serialize::JsonObjectWriter, input: &crate::input::ListFindingsInput, ) { if let Some(var_89) = &input.assessment_run_arns { let mut array_90 = object.key("assessmentRunArns").start_array(); for item_91 in var_89 { { array_90.value().string(item_91); } } array_90.finish(); } if let Some(var_92) = &input.filter { let mut object_93 = object.key("filter").start_object(); crate::json_ser::serialize_structure_crate_model_finding_filter(&mut object_93, var_92); object_93.finish(); } if let Some(var_94) = &input.next_token { object.key("nextToken").string(var_94); } if let Some(var_95) = &input.max_results { object.key("maxResults").number( #[allow(clippy::useless_conversion)] smithy_types::Number::NegInt((*var_95).into()), ); } } pub fn serialize_structure_crate_input_list_rules_packages_input( object: &mut smithy_json::serialize::JsonObjectWriter, input: &crate::input::ListRulesPackagesInput, ) { if let Some(var_96) = &input.next_token { object.key("nextToken").string(var_96); } if let Some(var_97) = &input.max_results { object.key("maxResults").number( #[allow(clippy::useless_conversion)] smithy_types::Number::NegInt((*var_97).into()), ); } } pub fn serialize_structure_crate_input_list_tags_for_resource_input( object: &mut smithy_json::serialize::JsonObjectWriter, input: 
&crate::input::ListTagsForResourceInput, ) { if let Some(var_98) = &input.resource_arn { object.key("resourceArn").string(var_98); } } pub fn serialize_structure_crate_input_preview_agents_input( object: &mut smithy_json::serialize::JsonObjectWriter, input: &crate::input::PreviewAgentsInput, ) { if let Some(var_99) = &input.preview_agents_arn { object.key("previewAgentsArn").string(var_99); } if let Some(var_100) = &input.next_token { object.key("nextToken").string(var_100); } if let Some(var_101) = &input.max_results { object.key("maxResults").number( #[allow(clippy::useless_conversion)] smithy_types::Number::NegInt((*var_101).into()), ); } } pub fn serialize_structure_crate_input_register_cross_account_access_role_input( object: &mut smithy_json::serialize::JsonObjectWriter, input: &crate::input::RegisterCrossAccountAccessRoleInput, ) { if let Some(var_102) = &input.role_arn { object.key("roleArn").string(var_102); } } pub fn serialize_structure_crate_input_remove_attributes_from_findings_input( object: &mut smithy_json::serialize::JsonObjectWriter, input: &crate::input::RemoveAttributesFromFindingsInput, ) { if let Some(var_103) = &input.finding_arns { let mut array_104 = object.key("findingArns").start_array(); for item_105 in var_103 { { array_104.value().string(item_105); } } array_104.finish(); } if let Some(var_106) = &input.attribute_keys { let mut array_107 = object.key("attributeKeys").start_array(); for item_108 in var_106 { { array_107.value().string(item_108); } } array_107.finish(); } } pub fn serialize_structure_crate_input_set_tags_for_resource_input( object: &mut smithy_json::serialize::JsonObjectWriter, input: &crate::input::SetTagsForResourceInput, ) { if let Some(var_109) = &input.resource_arn { object.key("resourceArn").string(var_109); } if let Some(var_110) = &input.tags { let mut array_111 = object.key("tags").start_array(); for item_112 in var_110 { { let mut object_113 = array_111.value().start_object(); 
crate::json_ser::serialize_structure_crate_model_tag(&mut object_113, item_112); object_113.finish(); } } array_111.finish(); } } pub fn serialize_structure_crate_input_start_assessment_run_input( object: &mut smithy_json::serialize::JsonObjectWriter, input: &crate::input::StartAssessmentRunInput, ) { if let Some(var_114) = &input.assessment_template_arn { object.key("assessmentTemplateArn").string(var_114); } if let Some(var_115) = &input.assessment_run_name { object.key("assessmentRunName").string(var_115); } } pub fn serialize_structure_crate_input_stop_assessment_run_input( object: &mut smithy_json::serialize::JsonObjectWriter, input: &crate::input::StopAssessmentRunInput, ) { if let Some(var_116) = &input.assessment_run_arn { object.key("assessmentRunArn").string(var_116); } if let Some(var_117) = &input.stop_action { object.key("stopAction").string(var_117.as_str()); } } pub fn serialize_structure_crate_input_subscribe_to_event_input( object: &mut smithy_json::serialize::JsonObjectWriter, input: &crate::input::SubscribeToEventInput, ) { if let Some(var_118) = &input.resource_arn { object.key("resourceArn").string(var_118); } if let Some(var_119) = &input.event { object.key("event").string(var_119.as_str()); } if let Some(var_120) = &input.topic_arn { object.key("topicArn").string(var_120); } } pub fn serialize_structure_crate_input_unsubscribe_from_event_input( object: &mut smithy_json::serialize::JsonObjectWriter, input: &crate::input::UnsubscribeFromEventInput, ) { if let Some(var_121) = &input.resource_arn { object.key("resourceArn").string(var_121); } if let Some(var_122) = &input.event { object.key("event").string(var_122.as_str()); } if let Some(var_123) = &input.topic_arn { object.key("topicArn").string(var_123); } } pub fn serialize_structure_crate_input_update_assessment_target_input( object: &mut smithy_json::serialize::JsonObjectWriter, input: &crate::input::UpdateAssessmentTargetInput, ) { if let Some(var_124) = &input.assessment_target_arn { 
object.key("assessmentTargetArn").string(var_124); } if let Some(var_125) = &input.assessment_target_name { object.key("assessmentTargetName").string(var_125); } if let Some(var_126) = &input.resource_group_arn { object.key("resourceGroupArn").string(var_126); } } pub fn serialize_structure_crate_model_attribute( object: &mut smithy_json::serialize::JsonObjectWriter, input: &crate::model::Attribute, ) { if let Some(var_127) = &input.key { object.key("key").string(var_127); } if let Some(var_128) = &input.value { object.key("value").string(var_128); } } pub fn serialize_structure_crate_model_resource_group_tag( object: &mut smithy_json::serialize::JsonObjectWriter, input: &crate::model::ResourceGroupTag, ) { if let Some(var_129) = &input.key { object.key("key").string(var_129); } if let Some(var_130) = &input.value { object.key("value").string(var_130); } } pub fn serialize_structure_crate_model_agent_filter( object: &mut smithy_json::serialize::JsonObjectWriter, input: &crate::model::AgentFilter, ) { if let Some(var_131) = &input.agent_healths { let mut array_132 = object.key("agentHealths").start_array(); for item_133 in var_131 { { array_132.value().string(item_133.as_str()); } } array_132.finish(); } if let Some(var_134) = &input.agent_health_codes { let mut array_135 = object.key("agentHealthCodes").start_array(); for item_136 in var_134 { { array_135.value().string(item_136.as_str()); } } array_135.finish(); } } pub fn serialize_structure_crate_model_assessment_run_filter( object: &mut smithy_json::serialize::JsonObjectWriter, input: &crate::model::AssessmentRunFilter, ) { if let Some(var_137) = &input.name_pattern { object.key("namePattern").string(var_137); } if let Some(var_138) = &input.states { let mut array_139 = object.key("states").start_array(); for item_140 in var_138 { { array_139.value().string(item_140.as_str()); } } array_139.finish(); } if let Some(var_141) = &input.duration_range { let mut object_142 = 
object.key("durationRange").start_object(); crate::json_ser::serialize_structure_crate_model_duration_range(&mut object_142, var_141); object_142.finish(); } if let Some(var_143) = &input.rules_package_arns { let mut array_144 = object.key("rulesPackageArns").start_array(); for item_145 in var_143 { { array_144.value().string(item_145); } } array_144.finish(); } if let Some(var_146) = &input.start_time_range { let mut object_147 = object.key("startTimeRange").start_object(); crate::json_ser::serialize_structure_crate_model_timestamp_range(&mut object_147, var_146); object_147.finish(); } if let Some(var_148) = &input.completion_time_range { let mut object_149 = object.key("completionTimeRange").start_object(); crate::json_ser::serialize_structure_crate_model_timestamp_range(&mut object_149, var_148); object_149.finish(); } if let Some(var_150) = &input.state_change_time_range { let mut object_151 = object.key("stateChangeTimeRange").start_object(); crate::json_ser::serialize_structure_crate_model_timestamp_range(&mut object_151, var_150); object_151.finish(); } } pub fn serialize_structure_crate_model_assessment_target_filter( object: &mut smithy_json::serialize::JsonObjectWriter, input: &crate::model::AssessmentTargetFilter, ) { if let Some(var_152) = &input.assessment_target_name_pattern { object.key("assessmentTargetNamePattern").string(var_152); } } pub fn serialize_structure_crate_model_assessment_template_filter( object: &mut smithy_json::serialize::JsonObjectWriter, input: &crate::model::AssessmentTemplateFilter, ) { if let Some(var_153) = &input.name_pattern { object.key("namePattern").string(var_153); } if let Some(var_154) = &input.duration_range { let mut object_155 = object.key("durationRange").start_object(); crate::json_ser::serialize_structure_crate_model_duration_range(&mut object_155, var_154); object_155.finish(); } if let Some(var_156) = &input.rules_package_arns { let mut array_157 = object.key("rulesPackageArns").start_array(); for item_158 in 
var_156 { { array_157.value().string(item_158); } } array_157.finish(); } } pub fn serialize_structure_crate_model_finding_filter( object: &mut smithy_json::serialize::JsonObjectWriter, input: &crate::model::FindingFilter, ) { if let Some(var_159) = &input.agent_ids { let mut array_160 = object.key("agentIds").start_array(); for item_161 in var_159 { { array_160.value().string(item_161); } } array_160.finish(); } if let Some(var_162) = &input.auto_scaling_groups { let mut array_163 = object.key("autoScalingGroups").start_array(); for item_164 in var_162 { { array_163.value().string(item_164); } } array_163.finish(); } if let Some(var_165) = &input.rule_names { let mut array_166 = object.key("ruleNames").start_array(); for item_167 in var_165 { { array_166.value().string(item_167); } } array_166.finish(); } if let Some(var_168) = &input.severities { let mut array_169 = object.key("severities").start_array(); for item_170 in var_168 { { array_169.value().string(item_170.as_str()); } } array_169.finish(); } if let Some(var_171) = &input.rules_package_arns { let mut array_172 = object.key("rulesPackageArns").start_array(); for item_173 in var_171 { { array_172.value().string(item_173); } } array_172.finish(); } if let Some(var_174) = &input.attributes { let mut array_175 = object.key("attributes").start_array(); for item_176 in var_174 { { let mut object_177 = array_175.value().start_object(); crate::json_ser::serialize_structure_crate_model_attribute( &mut object_177, item_176, ); object_177.finish(); } } array_175.finish(); } if let Some(var_178) = &input.user_attributes { let mut array_179 = object.key("userAttributes").start_array(); for item_180 in var_178 { { let mut object_181 = array_179.value().start_object(); crate::json_ser::serialize_structure_crate_model_attribute( &mut object_181, item_180, ); object_181.finish(); } } array_179.finish(); } if let Some(var_182) = &input.creation_time_range { let mut object_183 = 
object.key("creationTimeRange").start_object(); crate::json_ser::serialize_structure_crate_model_timestamp_range(&mut object_183, var_182); object_183.finish(); } } pub fn serialize_structure_crate_model_tag( object: &mut smithy_json::serialize::JsonObjectWriter, input: &crate::model::Tag, ) { if let Some(var_184) = &input.key { object.key("key").string(var_184); } if let Some(var_185) = &input.value { object.key("value").string(var_185); } } pub fn serialize_structure_crate_model_duration_range( object: &mut smithy_json::serialize::JsonObjectWriter, input: &crate::model::DurationRange, ) { if input.min_seconds != 0 { object.key("minSeconds").number( #[allow(clippy::useless_conversion)] smithy_types::Number::NegInt((input.min_seconds).into()), ); } if input.max_seconds != 0 { object.key("maxSeconds").number( #[allow(clippy::useless_conversion)] smithy_types::Number::NegInt((input.max_seconds).into()), ); } } pub fn serialize_structure_crate_model_timestamp_range( object: &mut smithy_json::serialize::JsonObjectWriter, input: &crate::model::TimestampRange, ) { if let Some(var_186) = &input.begin_date { object .key("beginDate") .instant(var_186, smithy_types::instant::Format::EpochSeconds); } if let Some(var_187) = &input.end_date { object .key("endDate") .instant(var_187, smithy_types::instant::Format::EpochSeconds); } }
33.975309
100
0.638775
1c2640ab505d24882a8d906344cb1adc026ab4cc
49,956
//! Julia values and functions. //! //! When using this crate Julia data will usually be returned as a [`Value`]. A [`Value`] is a //! "generic" wrapper. Type information will generally be available allowing you to safely convert //! a [`Value`] to its actual type. Data like arrays and modules can be returned as a [`Value`]. //! These, and other types with a custom implementation in the C API, can be found in the //! submodules of this module. //! //! One special property of a [`Value`] is that it can always be called as a function; there's no //! way to check if a [`Value`] is actually a function except trying to call it. Multiple //! [`Value`]s can be created at the same time by using [`Values`]. //! //! [`Value`]: struct.Value.html //! [`Values`]: struct.Values.html use self::array::{Array, Dimensions}; use self::datatype::DataType; use self::module::Module; use self::symbol::Symbol; use crate::error::{JlrsError, JlrsResult}; use crate::frame::Output; use crate::global::Global; use crate::impl_julia_type; use crate::traits::{ private::Internal, Cast, Frame, IntoJulia, JuliaType, JuliaTypecheck, TemporarySymbol, ValidLayout, }; use jl_sys::{ jl_alloc_array_1d, jl_alloc_array_2d, jl_alloc_array_3d, jl_any_type, jl_apply_array_type, jl_apply_tuple_type_v, jl_call, jl_call0, jl_call1, jl_call2, jl_call3, jl_datatype_t, jl_exception_occurred, jl_field_index, jl_field_isptr, jl_field_names, jl_fieldref, jl_fieldref_noalloc, jl_get_nth_field, jl_get_nth_field_noalloc, jl_is_kind, jl_new_array, jl_new_struct_uninit, jl_nfields, jl_ptr_to_array, jl_ptr_to_array_1d, jl_set_nth_field, jl_subtype, jl_svec_data, jl_svec_len, jl_typeof, jl_typeof_str, jl_value_t, }; use std::borrow::BorrowMut; use std::ffi::CStr; use std::fmt::{Debug, Formatter, Result as FmtResult}; use std::marker::PhantomData; use std::ptr::null_mut; use std::slice; pub mod array; pub mod code_instance; pub mod datatype; pub mod expr; pub mod method; pub mod method_instance; pub mod method_table; pub mod 
module; pub mod simple_vector; pub mod string; pub mod symbol; pub mod task; pub mod tuple; pub mod type_name; pub mod type_var; pub mod typemap_entry; pub mod typemap_level; pub mod union; pub mod union_all; pub mod weak_ref; thread_local! { // Used as a pool to convert dimensions to tuples. Safe because a thread local is initialized // when `with` is first called, which happens after `Julia::init` has been called. The C API // requires a mutable pointer to this array so an `UnsafeCell` is used to store it. static JL_LONG_TYPE: std::cell::UnsafeCell<[*mut jl_datatype_t; 8]> = unsafe { std::cell::UnsafeCell::new([ usize::julia_type(), usize::julia_type(), usize::julia_type(), usize::julia_type(), usize::julia_type(), usize::julia_type(), usize::julia_type(), usize::julia_type(), ]) }; } /// This type alias is used to encode the result of a function call: `Ok` indicates the call was /// successful and contains the function's result, while `Err` indicates an exception was thrown /// and contains said exception. pub type CallResult<'frame, 'data> = Result<Value<'frame, 'data>, Value<'frame, 'data>>; /// Several values that are allocated consecutively. This can be used in combination with /// [`Value::call_values`] and [`WithOutput::call_values`]. /// /// [`Value::call_values`]: struct.Value.html#method.call_values /// [`WithOutput::call_values`]: struct.WithOutput.html#method.call_values #[derive(Copy, Clone, Debug)] pub struct Values<'frame>(*mut *mut jl_value_t, usize, PhantomData<&'frame ()>); impl<'frame> Values<'frame> { pub(crate) unsafe fn wrap(ptr: *mut *mut jl_value_t, n: usize) -> Self { Values(ptr, n, PhantomData) } #[doc(hidden)] pub unsafe fn ptr(self) -> *mut *mut jl_value_t { self.0 } /// Returns the number of `Value`s in this group. pub fn len(self) -> usize { self.1 } /// Get a specific `Value` in this group. Returns an error if the index is out of bounds. 
    pub fn value(self, index: usize) -> JlrsResult<Value<'frame, 'static>> {
        if index >= self.len() {
            return Err(JlrsError::OutOfBounds(index, self.len()).into());
        }

        // The pointers in this block are already rooted by the frame that created `Values`,
        // so copying one out as a `Value` is safe for the `'frame` lifetime.
        unsafe { Ok(Value(*(self.ptr().add(index)), PhantomData, PhantomData)) }
    }

    /// Allocate several values of the same type, this type must implement [`IntoJulia`]. The
    /// values will be protected from garbage collection inside the frame used to create them.
    /// This takes as many slots on the GC stack as values that are allocated.
    ///
    /// Returns an error if there is not enough space on the stack.
    ///
    /// [`IntoJulia`]: ../traits/trait.IntoJulia.html
    pub fn new<T, V, F>(frame: &mut F, data: V) -> JlrsResult<Self>
    where
        T: IntoJulia,
        V: AsRef<[T]>,
        F: Frame<'frame>,
    {
        frame
            .create_many(data.as_ref(), Internal)
            .map_err(Into::into)
    }

    /// Allocate several values of possibly different types, these types must implement
    /// [`IntoJulia`]. The values will be protected from garbage collection inside the frame used
    /// to create them. This takes as many slots on the GC stack as values that are allocated.
    ///
    /// Returns an error if there is not enough space on the stack.
    ///
    /// [`IntoJulia`]: ../traits/trait.IntoJulia.html
    pub fn new_dyn<'v, V, F>(frame: &mut F, data: V) -> JlrsResult<Self>
    where
        V: AsRef<[&'v dyn IntoJulia]>,
        F: Frame<'frame>,
    {
        frame
            .create_many_dyn(data.as_ref(), Internal)
            .map_err(Into::into)
    }
}

/// When working with the Julia C API most data is returned as a raw pointer to a `jl_value_t`.
/// This pointer is similar to a void pointer in the sense that this pointer can point to data of
/// any type. It's up to the user to determine the correct type and cast the pointer. In order to
/// make this possible, data pointed to by a `jl_value_t`-pointer is guaranteed to be preceded in
/// memory by a fixed-size header that contains its type and layout-information.
///
/// A `Value` is a wrapper around the raw pointer to a `jl_value_t` that adds two lifetimes,
/// `'frame` and `'data`. The first is inherited from the frame used to create the `Value`; frames
/// ensure a `Value` is protected from garbage collection as long as the frame used to protect it
/// has not been dropped. As a result, a `Value` can only be used when it can be guaranteed that
/// the garbage collector won't drop it. The second indicates the lifetime of its contents; it's
/// usually `'static`, but if you create a `Value` that borrows array data from Rust it's the
/// lifetime of the borrow. If you call a Julia function the returned `Value` will inherit the
/// `'data`-lifetime of the `Value`s used as arguments. This ensures that a `Value` that
/// (possibly) borrows data from Rust can't be used after that borrow ends. If this restriction is
/// too strict you can forget the second lifetime by calling [`Value::assume_owned`].
///
/// ### Creating new values
///
/// New `Value`s can be created from Rust in several ways. Types that implement [`IntoJulia`] can
/// be converted to a `Value` by calling [`Value::new`]. This trait is implemented by primitive
/// types like `bool`, `char`, `i16`, and `usize`; string types like `String`, `&str`, and `Cow`;
/// [`tuples`]; and you can derive it for your own types by deriving [`IntoJulia`]. You should
/// use `JlrsReflect.jl` rather than doing this manually.
///
/// [`Value`] also has several methods to create an n-dimensional array if the element type
/// implements [`IntoJulia`], this includes primitive types, strings. It is also implemented for
/// bits types with no type parameters when these bindings are generated with `JlrsReflect.jl`. A
/// new array whose data is completely managed by Julia can be created by calling
/// [`Value::new_array`]. You can also transfer the ownership of some `Vec` to Julia and treat it
/// as an n-dimensional array with [`Value::move_array`]. Finally, you can borrow anything that
/// can be borrowed as a mutable slice with [`Value::borrow_array`].
///
/// Functions and other global values defined in a module can be accessed through that module.
/// Please see the documentation for [`Module`] for more information.
///
/// ### Casting values
///
/// A `Value`'s type information can be accessed by calling [`Value::datatype`], this is usually
/// not necessary to determine what kind of data it contains; you can use [`Value::is`] to query
/// properties of the value's type. You can use [`Value::cast`] to convert the value to the
/// appropriate type. If a type implements both [`JuliaTypecheck`] and [`Cast`], which are used by
/// [`Value::is`] and [`Value::cast`] respectively, the former returning `true` when called with
/// that type as generic parameter indicates that the latter will succeed. For example,
/// `value.is::<u8>()` returning true means `value.cast::<u8>()` will succeed. You can derive
/// these traits for custom structs by deriving [`JuliaStruct`].
///
/// The methods that create a new `Value` come in two varieties: `<method>` and `<method>_output`.
/// The first will use a slot in the current frame to protect the value from garbage collection,
/// while the latter uses a slot in another active frame.
///
/// [`Value::assume_owned`]: struct.Value.html#method.assume_owned
/// [`Value`]: struct.Value.html
/// [`Value::move_array`]: struct.Value.html#method.move_array
/// [`Value::new_array`]: struct.Value.html#method.new_array
/// [`Value::borrow_array`]: struct.Value.html#method.borrow_array
/// [`IntoJulia`]: ../traits/trait.IntoJulia.html
/// [`JuliaType`]: ../traits/trait.JuliaType.html
/// [`Value::new`]: struct.Value.html#method.new
/// [`Value::datatype`]: struct.Value.html#method.datatype
/// [`JuliaStruct`]: ../traits/trait.JuliaStruct.html
/// [`tuples`]: ./tuple/index.html
/// [`Module`]: ./module/struct.Module.html
/// [`Value::datatype`]: struct.Value.html#method.datatype
/// [`Value::is`]: struct.Value.html#method.is
/// [`Value::cast`]: struct.Value.html#method.cast
/// [`JuliaTypecheck`]: ../traits/trait.JuliaTypecheck.html
/// [`Cast`]: ../traits/trait.Cast.html
#[repr(transparent)]
#[derive(Copy, Clone)]
pub struct Value<'frame, 'data>(
    *mut jl_value_t,
    PhantomData<&'frame ()>,
    PhantomData<&'data ()>,
);

impl<'frame, 'data> Value<'frame, 'data> {
    // Wraps a raw pointer without rooting it; the caller must guarantee the pointer is
    // valid and protected from garbage collection for `'frame`.
    pub(crate) unsafe fn wrap(ptr: *mut jl_value_t) -> Value<'frame, 'static> {
        Value(ptr, PhantomData, PhantomData)
    }

    #[doc(hidden)]
    pub unsafe fn ptr(self) -> *mut jl_value_t {
        self.0
    }

    /// Returns `nothing` as a `Value`. Because `nothing` is a singleton this takes no slot on the
    /// GC stack.
    pub fn nothing<F>(_frame: &mut F) -> Value<'frame, 'static>
    where
        F: Frame<'frame>,
    {
        unsafe { Value::wrap(jl_sys::jl_nothing) }
    }

    /// Create a new Julia value, any type that implements [`IntoJulia`] can be converted using
    /// this function. The value will be protected from garbage collection inside the frame used
    /// to create it. One free slot on the GC stack is required for this function to succeed,
    /// returns an error if no slot is available.
    ///
    /// [`IntoJulia`]: ../traits/trait.IntoJulia.html
    pub fn new<V, F>(frame: &mut F, value: V) -> JlrsResult<Value<'frame, 'static>>
    where
        V: IntoJulia,
        F: Frame<'frame>,
    {
        unsafe {
            frame
                .protect(value.into_julia(), Internal)
                .map_err(Into::into)
        }
    }

    /// Create a new Julia value using the output to protect it from garbage collection, any type
    /// that implements [`IntoJulia`] can be converted using this function. The value will be
    /// protected from garbage collection until the frame the output belongs to goes out of scope.
    ///
    /// [`IntoJulia`]: ../traits/trait.IntoJulia.html
    pub fn new_output<'output, V, F>(
        frame: &mut F,
        output: Output<'output>,
        value: V,
    ) -> Value<'output, 'static>
    where
        V: IntoJulia,
        F: Frame<'frame>,
    {
        unsafe { frame.assign_output(output, value.into_julia(), Internal) }
    }

    /// Returns true if the value is `nothing`.
    pub fn is_nothing(self) -> bool {
        // A null pointer is treated as `nothing` as well; both cases mean "no data".
        unsafe {
            self.ptr() == null_mut() || jl_typeof(self.ptr()) == jl_sys::jl_nothing_type.cast()
        }
    }

    /// Performs the given type check. For types that represent Julia data, this check comes down
    /// to checking if the data has that type. This works for primitive types, for example:
    ///
    /// ```no_run
    /// # use jlrs::prelude::*;
    /// # fn main() {
    /// # let mut julia = unsafe { Julia::init(16).unwrap() };
    /// julia.frame(1, |_global, frame| {
    ///     let i = Value::new(frame, 2u64)?;
    ///     assert!(i.is::<u64>());
    ///     Ok(())
    /// }).unwrap();
    /// # }
    /// ```
    ///
    /// "Special" types in Julia that are defined in C, like [`Array`], [`Module`] and
    /// [`DataType`], are also supported:
    ///
    /// ```no_run
    /// # use jlrs::prelude::*;
    /// # fn main() {
    /// # let mut julia = unsafe { Julia::init(16).unwrap() };
    /// julia.frame(1, |_global, frame| {
    ///     let arr = Value::new_array::<f64, _, _>(frame, (3, 3))?;
    ///     assert!(arr.is::<Array>());
    ///     Ok(())
    /// }).unwrap();
    /// # }
    /// ```
    ///
    /// If you derive [`JuliaStruct`] for some type, that type will be supported by this method. A
    /// full list of supported checks can be found [here].
    ///
    /// [`Array`]: array/struct.Array.html
    /// [`DataType`]: datatype/struct.DataType.html
    /// [`Module`]: module/struct.Module.html
    /// [`Symbol`]: symbol/struct.Symbol.html
    /// [`JuliaStruct`]: ../traits/trait.JuliaStruct.html
    /// [here]: ../traits/trait.JuliaTypecheck.html#implementors
    pub fn is<T: JuliaTypecheck>(self) -> bool {
        // `nothing` never passes a typecheck; this also makes the `unwrap` below safe.
        if self.is_nothing() {
            return false;
        }

        self.datatype().unwrap().is::<T>()
    }

    /// Returns the `DataType` of this value, or `None` if the value contains a null pointer.
    pub fn datatype(self) -> Option<DataType<'frame>> {
        unsafe {
            if self.ptr().is_null() {
                return None;
            }

            Some(DataType::wrap(jl_typeof(self.ptr()).cast()))
        }
    }

    /// Cast the contents of this value into a compatible Rust type. Any type which implements
    /// `Cast` can be used as a target, by default this includes primitive types like `u8`, `f32`
    /// and `bool`, and builtin types like [`Array`], [`JuliaString`] and [`Symbol`]. You can
    /// implement this trait for custom types by deriving [`JuliaStruct`].
    ///
    /// [`Array`]: array/struct.Array.html
    /// [`JuliaString`]: string/struct.JuliaString.html
    /// [`Symbol`]: symbol/struct.Symbol.html
    /// [`JuliaStruct`]: ../traits/trait.JuliaStruct.html
    pub fn cast<T: Cast<'frame, 'data>>(self) -> JlrsResult<<T as Cast<'frame, 'data>>::Output> {
        T::cast(self)
    }

    /// Cast the contents of this value into a compatible Rust type without checking if the layout is valid.
    ///
    /// Safety:
    ///
    /// You must guarantee `self.is::<T>()` would have returned `true`.
    pub unsafe fn cast_unchecked<T: Cast<'frame, 'data>>(
        self,
    ) -> <T as Cast<'frame, 'data>>::Output {
        T::cast_unchecked(self)
    }

    /// Returns the type name of this value.
    pub fn type_name(self) -> &'frame str {
        unsafe {
            // Guard against the null-pointer case before touching the header.
            if self.ptr().is_null() {
                return "null";
            }
            let type_name = jl_typeof_str(self.ptr());
            // NOTE(review): the C string is assumed to be valid UTF-8 and owned by Julia for
            // at least `'frame` — confirm against the Julia C API contract.
            let type_name_ref = CStr::from_ptr(type_name);
            type_name_ref.to_str().unwrap()
        }
    }

    /// Returns true if the value is an array with elements of type `T`.
    pub fn is_array_of<T: ValidLayout>(self) -> bool {
        // A failed cast simply means this value is not an array at all.
        match self.cast::<Array>() {
            Ok(arr) => arr.contains::<T>(),
            Err(_) => false,
        }
    }

    /// Returns the field names of this value as a slice of `Symbol`s. These symbols can be used
    /// to access their fields with [`Value::get_field`].
    ///
    /// [`Value::get_field`]: struct.Value.html#method.get_field
    pub fn field_names(self) -> &'frame [Symbol<'frame>] {
        // `nothing` has no fields; return an empty slice rather than erroring.
        if self.is_nothing() {
            return &[];
        }

        unsafe {
            let tp = jl_typeof(self.ptr());
            let field_names = jl_field_names(tp.cast());
            let len = jl_svec_len(field_names);
            let items: *mut Symbol = jl_svec_data(field_names).cast();
            slice::from_raw_parts(items.cast(), len)
        }
    }

    /// Returns the number of fields the underlying Julia value has. These fields can be accessed
    /// with [`Value::get_field_n`].
    ///
    /// [`Value::get_field_n`]: struct.Value.html#method.get_field_n
    pub fn n_fields(self) -> usize {
        if self.is_nothing() {
            return 0;
        }

        unsafe { jl_nfields(self.ptr()) as _ }
    }

    /// Returns the field at index `idx` if it exists. If it does not exist
    /// `JlrsError::OutOfBounds` is returned. This function assumes the field must be protected
    /// from garbage collection, so calling this function will take a single slot on the GC stack.
    /// If there is no slot available `JlrsError::AllocError` is returned.
    pub fn get_nth_field<'fr, F>(self, frame: &mut F, idx: usize) -> JlrsResult<Value<'fr, 'data>>
    where
        F: Frame<'fr>,
    {
        unsafe {
            if idx >= self.n_fields() {
                return Err(JlrsError::OutOfBounds(idx, self.n_fields()).into());
            }

            // Root the freshly read field in the frame before handing it out.
            frame
                .protect(jl_fieldref(self.ptr(), idx), Internal)
                .map_err(Into::into)
        }
    }

    /// Returns the field at index `idx` if it exists. If it does not exist
    /// `JlrsError::OutOfBounds` is returned. This function assumes the field must be protected
    /// from garbage collection and uses the provided output to do so.
    pub fn get_nth_field_output<'output, 'fr, F>(
        self,
        frame: &mut F,
        output: Output<'output>,
        idx: usize,
    ) -> JlrsResult<Value<'output, 'data>>
    where
        F: Frame<'fr>,
    {
        unsafe {
            if idx >= self.n_fields() {
                return Err(JlrsError::OutOfBounds(idx, self.n_fields()).into());
            }

            Ok(frame.assign_output(output, jl_fieldref(self.ptr(), idx), Internal))
        }
    }

    /// Returns the field at index `idx` if it exists and no allocation is required to return it.
    /// Allocation is not required if the field is a pointer to another value.
    ///
    /// If the field does not exist `JlrsError::NoSuchField` is returned. If allocating is
    /// required to return the field, `JlrsError::NotAPointerField` is returned.
    ///
    /// This function is unsafe because the value returned as a result will only be valid as long
    /// as the field is not changed.
    pub unsafe fn get_nth_field_noalloc(self, idx: usize) -> JlrsResult<Value<'frame, 'data>> {
        if self.is_nothing() {
            Err(JlrsError::Nothing)?;
        }

        if idx >= self.n_fields() {
            Err(JlrsError::OutOfBounds(idx, self.n_fields()))?
        }

        // Only pointer fields can be returned without allocating; inline (bits) fields would
        // need a fresh Julia allocation.
        if !jl_field_isptr(self.datatype().unwrap().ptr(), idx as _) {
            Err(JlrsError::NotAPointerField(idx))?;
        }

        Ok(Value::wrap(jl_fieldref_noalloc(self.ptr(), idx)))
    }

    /// Returns the field with the name `field_name` if it exists. If it does not exist
    /// `JlrsError::NoSuchField` is returned. This function assumes the field must be protected
    /// from garbage collection, so calling this function will take a single slot on the GC stack.
    /// If there is no slot available `JlrsError::AllocError` is returned.
    pub fn get_field<'fr, N, F>(self, frame: &mut F, field_name: N) -> JlrsResult<Value<'fr, 'data>>
    where
        N: TemporarySymbol,
        F: Frame<'fr>,
    {
        unsafe {
            let symbol = field_name.temporary_symbol(Internal);

            if self.is_nothing() {
                Err(JlrsError::Nothing)?;
            }

            let jl_type = jl_typeof(self.ptr()).cast();
            let idx = jl_field_index(jl_type, symbol.ptr(), 0);

            // `jl_field_index` returns a negative value when no field with this name exists.
            if idx < 0 {
                return Err(JlrsError::NoSuchField(symbol.into()).into());
            }

            // Root the field in the frame before handing it out.
            frame
                .protect(jl_get_nth_field(self.ptr(), idx as _), Internal)
                .map_err(Into::into)
        }
    }

    /// Returns the field with the name `field_name` if it exists. If it does not exist
    /// `JlrsError::NoSuchField` is returned. This function assumes the field must be protected
    /// from garbage collection and uses the provided output to do so.
    pub fn get_field_output<'output, 'fr, N, F>(
        self,
        frame: &mut F,
        output: Output<'output>,
        field_name: N,
    ) -> JlrsResult<Value<'output, 'data>>
    where
        N: TemporarySymbol,
        F: Frame<'fr>,
    {
        unsafe {
            let symbol = field_name.temporary_symbol(Internal);

            if self.is_nothing() {
                Err(JlrsError::Nothing)?;
            }

            let jl_type = jl_typeof(self.ptr()).cast();
            let idx = jl_field_index(jl_type, symbol.ptr(), 0);

            // `jl_field_index` returns a negative value when no field with this name exists.
            if idx < 0 {
                return Err(JlrsError::NoSuchField(symbol.into()).into());
            }

            Ok(frame.assign_output(output, jl_get_nth_field(self.ptr(), idx as _), Internal))
        }
    }

    /// Returns the field with the name `field_name` if it exists and no allocation is required
    /// to return it. Allocation is not required if the field is a pointer to another value.
    ///
    /// If the field does not exist `JlrsError::NoSuchField` is returned. If allocating is
    /// required to return the field, `JlrsError::NotAPointerField` is returned.
    ///
    /// This function is unsafe because the value returned as a result will only be valid as long
    /// as the field is not changed.
    pub unsafe fn get_field_noalloc<N>(self, field_name: N) -> JlrsResult<Value<'frame, 'data>>
    where
        N: TemporarySymbol,
    {
        let symbol = field_name.temporary_symbol(Internal);

        if self.is_nothing() {
            Err(JlrsError::Nothing)?;
        }

        let jl_type = jl_typeof(self.ptr()).cast();
        let idx = jl_field_index(jl_type, symbol.ptr(), 0);

        // `jl_field_index` returns a negative value when no field with this name exists.
        if idx < 0 {
            return Err(JlrsError::NoSuchField(symbol.into()).into());
        }

        // Only pointer fields can be returned without allocating; inline (bits) fields would
        // need a fresh Julia allocation.
        if !jl_field_isptr(self.datatype().unwrap().ptr(), idx) {
            Err(JlrsError::NotAPointerField(idx as _))?;
        }

        Ok(Value::wrap(jl_get_nth_field_noalloc(self.ptr(), idx as _)))
    }

    /// Set the value of the field at `idx`. Returns an error if this value is immutable or if the
    /// type of `value` is not a subtype of the field type. This is unsafe because the previous
    /// value of this field can become unrooted if you're directly using it from Rust.
    pub unsafe fn set_nth_field(self, idx: usize, value: Value) -> JlrsResult<()> {
        if !self.is::<datatype::Mutable>() {
            Err(JlrsError::Immutable)?
        }

        let field_type = self.datatype().unwrap().field_types()[idx];
        if let Some(dt) = value.datatype() {
            // The new value must be a subtype of the declared field type, mirroring Julia's
            // own setfield! semantics.
            if Value::subtype(dt.into(), field_type) {
                jl_set_nth_field(self.ptr(), idx, value.ptr());
                return Ok(());
            } else {
                Err(JlrsError::NotSubtype)?
            }
        }

        Err(JlrsError::Nothing)?
    }

    /// If you call a function with one or more borrowed arrays as arguments, its result can only
    /// be used when all the borrows are active. If this result doesn't reference any borrowed
    /// data this function can be used to relax its second lifetime to `'static`.
    ///
    /// Safety: The value must not contain a reference to any borrowed data.
    pub unsafe fn assume_owned(self) -> Value<'frame, 'static> {
        Value::wrap(self.ptr())
    }

    /// Extend the `Value`'s lifetime to the `Output`'s lifetime. The original value will still be
    /// valid after calling this method, the data will be protected from garbage collection until
    /// the `Output`'s frame goes out of scope.
    pub fn extend<'output, F>(self, frame: &mut F, output: Output<'output>) -> Value<'output, 'data>
    where
        F: Frame<'frame>,
    {
        unsafe { frame.assign_output(output, self.ptr().cast(), Internal) }
    }

    /// Returns true if `self` is a subtype of `sup`.
    pub fn subtype(self, sup: Value) -> bool {
        unsafe { jl_subtype(self.ptr(), sup.ptr()) != 0 }
    }

    /// Returns true if `self` is the type of a `DataType`, `UnionAll`, `Union`, or `Union{}` (the
    /// bottom type).
    pub fn is_kind(self) -> bool {
        unsafe { jl_is_kind(self.ptr()) }
    }

    /// Returns true if the value is a type, ie a `DataType`, `UnionAll`, `Union`, or `Union{}`
    /// (the bottom type).
    pub fn is_type(self) -> bool {
        // A value is a type iff its datatype is a kind.
        if let Some(dt) = self.datatype() {
            Value::is_kind(dt.into())
        } else {
            false
        }
    }

    /// Allocates a new n-dimensional array in Julia.
    ///
    /// Creating an array with 1, 2 or 3 dimensions requires one slot on the GC stack. If you
    /// create an array with more dimensions an extra frame is created with a single slot,
    /// temporarily taking 3 additional slots.
    ///
    /// This function returns an error if there are not enough slots available.
    pub fn new_array<T, D, F>(frame: &mut F, dimensions: D) -> JlrsResult<Value<'frame, 'static>>
    where
        T: IntoJulia + JuliaType,
        D: Into<Dimensions>,
        F: Frame<'frame>,
    {
        unsafe {
            let array = new_array::<T, _, _>(frame, dimensions)?;
            frame.protect(array, Internal).map_err(Into::into)
        }
    }

    /// Allocates a new n-dimensional array in Julia using an `Output`.
    ///
    /// Because an `Output` is used, no additional slot in the current frame is used if you create
    /// an array with 1, 2 or 3 dimensions. If you create an array with more dimensions an extra
    /// frame is created with a single slot, temporarily taking 3 additional slots.
    ///
    /// This function returns an error if there are not enough slots available.
    pub fn new_array_output<'output, T, D, F>(
        frame: &mut F,
        output: Output<'output>,
        dimensions: D,
    ) -> JlrsResult<Value<'output, 'static>>
    where
        T: IntoJulia + JuliaType,
        D: Into<Dimensions>,
        F: Frame<'frame>,
    {
        unsafe {
            let array = new_array::<T, _, _>(frame, dimensions)?;
            Ok(frame.assign_output(output, array, Internal))
        }
    }

    /// Borrows an n-dimensional array from Rust for use in Julia.
    ///
    /// Borrowing an array with one dimension requires one slot on the GC stack. If you borrow an
    /// array with more dimensions, an extra frame is created with a single slot, temporarily
    /// taking 3 additional slots.
    ///
    /// This function returns an error if there are not enough slots available.
    pub fn borrow_array<T, D, V, F>(
        frame: &mut F,
        data: &'data mut V,
        dimensions: D,
    ) -> JlrsResult<Value<'frame, 'data>>
    where
        T: IntoJulia + JuliaType,
        D: Into<Dimensions>,
        V: BorrowMut<[T]>,
        F: Frame<'frame>,
    {
        unsafe {
            let array = borrow_array(frame, data, dimensions)?;
            frame.protect(array, Internal).map_err(Into::into)
        }
    }

    /// Borrows an n-dimensional array from Rust for use in Julia using an `Output`.
    ///
    /// Because an `Output` is used, no additional slot in the current frame is used for the array
    /// itself. If you borrow an array with more than 1 dimension an extra frame is created with a
    /// single slot, temporarily taking 3 additional slots.
    ///
    /// This function returns an error if there are not enough slots available.
    pub fn borrow_array_output<'output, 'borrow, T, D, V, F>(
        frame: &mut F,
        output: Output<'output>,
        data: &'borrow mut V,
        dimensions: D,
    ) -> JlrsResult<Value<'output, 'borrow>>
    where
        'borrow: 'output,
        T: IntoJulia + JuliaType,
        D: Into<Dimensions>,
        V: BorrowMut<[T]>,
        F: Frame<'frame>,
    {
        unsafe {
            let array = borrow_array(frame, data, dimensions)?;
            Ok(frame.assign_output(output, array, Internal))
        }
    }

    /// Moves an n-dimensional array from Rust to Julia.
    ///
    /// Moving an array with one dimension requires one slot on the GC stack. If you move an array
    /// with more dimensions, an extra frame is created with a single slot, temporarily
    /// taking 3 additional slots.
    ///
    /// This function returns an error if there are not enough slots available.
    pub fn move_array<T, D, F>(
        frame: &mut F,
        data: Vec<T>,
        dimensions: D,
    ) -> JlrsResult<Value<'frame, 'static>>
    where
        T: IntoJulia + JuliaType,
        D: Into<Dimensions>,
        F: Frame<'frame>,
    {
        unsafe {
            let array = move_array(frame, data, dimensions)?;
            frame.protect(array, Internal).map_err(Into::into)
        }
    }

    /// Moves an n-dimensional array from Rust to Julia using an output.
    ///
    /// Because an `Output` is used, no additional slot in the current frame is used for the array
    /// itself. If you move an array with more dimensions, an extra frame is created with a single
    /// slot, temporarily taking 3 additional slots.
    ///
    /// This function returns an error if there are not enough slots available.
    pub fn move_array_output<'output, T, D, F>(
        frame: &mut F,
        output: Output<'output>,
        data: Vec<T>,
        dimensions: D,
    ) -> JlrsResult<Value<'output, 'static>>
    where
        T: IntoJulia + JuliaType,
        D: Into<Dimensions>,
        F: Frame<'frame>,
    {
        unsafe {
            let array = move_array(frame, data, dimensions)?;
            Ok(frame.assign_output(output, array, Internal))
        }
    }

    /// Wraps a `Value` so that a function call will not require a slot in the current frame but
    /// uses the one that was allocated for the output.
    pub fn with_output<'output>(
        self,
        output: Output<'output>,
    ) -> WithOutput<'output, Value<'frame, 'data>> {
        WithOutput {
            value: self,
            output,
        }
    }

    /// Call this value as a function that takes zero arguments, this takes one slot on the GC
    /// stack. Returns the result of this function call if no exception is thrown, the exception
    /// if one is, or an error if no space is left on the stack.
    pub fn call0<F>(self, frame: &mut F) -> JlrsResult<CallResult<'frame, 'static>>
    where
        F: Frame<'frame>,
    {
        unsafe {
            let res = jl_call0(self.ptr());
            // Root the result (or the thrown exception) in the frame.
            try_protect(frame, res)
        }
    }

    /// Call this value as a function that takes one argument, this takes one slot on the GC
    /// stack. Returns the result of this function call if no exception is thrown, the exception
    /// if one is, or an error if no space is left on the stack.
    pub fn call1<'borrow, F>(
        self,
        frame: &mut F,
        arg: Value<'_, 'borrow>,
    ) -> JlrsResult<CallResult<'frame, 'borrow>>
    where
        F: Frame<'frame>,
    {
        unsafe {
            let res = jl_call1(self.ptr().cast(), arg.ptr());
            try_protect(frame, res)
        }
    }

    /// Call this value as a function that takes two arguments, this takes one slot on the GC
    /// stack. Returns the result of this function call if no exception is thrown, the exception
    /// if one is, or an error if no space is left on the stack.
    pub fn call2<'borrow, F>(
        self,
        frame: &mut F,
        arg0: Value<'_, 'borrow>,
        arg1: Value<'_, 'borrow>,
    ) -> JlrsResult<CallResult<'frame, 'borrow>>
    where
        F: Frame<'frame>,
    {
        unsafe {
            let res = jl_call2(self.ptr().cast(), arg0.ptr(), arg1.ptr());
            try_protect(frame, res)
        }
    }

    /// Call this value as a function that takes three arguments, this takes one slot on the GC
    /// stack. Returns the result of this function call if no exception is thrown, the exception
    /// if one is, or an error if no space is left on the stack.
    pub fn call3<'borrow, F>(
        self,
        frame: &mut F,
        arg0: Value<'_, 'borrow>,
        arg1: Value<'_, 'borrow>,
        arg2: Value<'_, 'borrow>,
    ) -> JlrsResult<CallResult<'frame, 'borrow>>
    where
        F: Frame<'frame>,
    {
        unsafe {
            let res = jl_call3(self.ptr().cast(), arg0.ptr(), arg1.ptr(), arg2.ptr());
            try_protect(frame, res)
        }
    }

    /// Call this value as a function that takes several arguments, this takes one slot on the GC
    /// stack. Returns the result of this function call if no exception is thrown, the exception
    /// if one is, or an error if no space is left on the stack.
    pub fn call<'value, 'borrow, V, F>(
        self,
        frame: &mut F,
        mut args: V,
    ) -> JlrsResult<CallResult<'frame, 'borrow>>
    where
        V: AsMut<[Value<'value, 'borrow>]>,
        F: Frame<'frame>,
    {
        unsafe {
            let args = args.as_mut();
            let n = args.len();
            let res = jl_call(self.ptr().cast(), args.as_mut_ptr().cast(), n as _);
            try_protect(frame, res)
        }
    }

    /// Call this value as a function that takes several arguments and execute it on another
    /// thread in Julia created with `Base.@spawn`, this takes two slots on the GC stack. Returns
    /// the result of this function call if no exception is thrown, the exception if one is, or an
    /// error if no space is left on the stack.
    ///
    /// This function can only be called with an `AsyncFrame`, while you're waiting for this
    /// function to complete, other tasks are able to progress.
    #[cfg(all(feature = "async", target_os = "linux"))]
    pub async fn call_async<'value, 'borrow, V>(
        self,
        frame: &mut crate::frame::AsyncFrame<'frame>,
        args: V,
    ) -> JlrsResult<CallResult<'frame, 'borrow>>
    where
        V: AsMut<[Value<'value, 'borrow>]>,
    {
        unsafe { Ok(crate::julia_future::JuliaFuture::new(frame, self, args)?.await) }
    }

    /// Call this value as a function that takes several arguments in a single `Values`, this
    /// takes one slot on the GC stack. Returns the result of this function call if no exception
    /// is thrown, the exception if one is, or an error if no space is left on the stack.
    pub fn call_values<F>(
        self,
        frame: &mut F,
        args: Values,
    ) -> JlrsResult<CallResult<'frame, 'static>>
    where
        F: Frame<'frame>,
    {
        unsafe {
            let res = jl_call(self.ptr().cast(), args.ptr(), args.len() as _);
            try_protect(frame, res)
        }
    }

    /// Returns an anonymous function that wraps this value in a try-catch block. Calling this
    /// anonymous function with some arguments will call the value as a function with those
    /// arguments and return its result, or catch the exception, print the stacktrace, and
    /// rethrow that exception. This takes one slot on the GC stack. You must include `jlrs.jl` to
    /// use this function.
    pub fn tracing_call<F>(self, frame: &mut F) -> JlrsResult<CallResult<'frame, 'data>>
    where
        F: Frame<'frame>,
    {
        unsafe {
            let global = Global::new();
            // The wrapper is defined by jlrs.jl in the Main.Jlrs module.
            let func = Module::main(global)
                .submodule("Jlrs")?
                .function("tracingcall")?;
            let res = jl_call1(func.ptr(), self.ptr());
            try_protect(frame, res)
        }
    }

    /// Returns an anonymous function that wraps this value in a try-catch block. Calling this
    /// anonymous function with some arguments will call the value as a function with those
    /// arguments and return its result, or catch the exception and throw a new one with two
    /// fields, `exc` and `stacktrace`, containing the original exception and the stacktrace
    /// respectively. This takes one slot on the GC stack. You must include `jlrs.jl` to use this
    /// function.
    pub fn attach_stacktrace<F>(self, frame: &mut F) -> JlrsResult<CallResult<'frame, 'data>>
    where
        F: Frame<'frame>,
    {
        unsafe {
            let global = Global::new();
            // The wrapper is defined by jlrs.jl in the Main.Jlrs module.
            let func = Module::main(global)
                .submodule("Jlrs")?
                .function("attachstacktrace")?;
            let res = jl_call1(func.ptr(), self.ptr());
            try_protect(frame, res)
        }
    }
}

impl<'frame, 'data> Debug for Value<'frame, 'data> {
    fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
        f.debug_tuple("Value").field(&self.type_name()).finish()
    }
}

impl_julia_type!(Value<'frame, 'data>, jl_any_type, 'frame, 'data);

unsafe impl<'frame, 'data> ValidLayout for Value<'frame, 'data> {
    unsafe fn valid_layout(v: Value) -> bool {
        // A `Value` field is valid for types stored by pointer: non-inline-allocated
        // datatypes, `UnionAll`s, and non-bits unions.
        if let Ok(dt) = v.cast::<DataType>() {
            !dt.isinlinealloc()
        } else if v.cast::<union_all::UnionAll>().is_ok() {
            true
        } else if let Ok(u) = v.cast::<union::Union>() {
            !u.isbitsunion()
        } else {
            false
        }
    }
}

/// A wrapper that will let you call a `Value` as a function and store the result using an
/// `Output`. The function call will not require a slot in the current frame but uses the one
/// that was allocated for the output. You can create this by calling [`Value::with_output`].
/// /// Because the result of a function call is stored in an already allocated slot, calling a /// function returns the `CallResult` directly rather than wrapping it in a `JlrsResult` except /// for the methods that depend on `jlrs.jl`. /// /// [`Value::with_output`]: Value.html#method.with_output pub struct WithOutput<'output, V> { value: V, output: Output<'output>, } impl<'output, 'frame, 'data> WithOutput<'output, Value<'frame, 'data>> { /// Call the value as a function that takes zero arguments and use the `Output` to extend the /// result's lifetime. This takes no space on the GC stack. Returns the result of this /// function call if no exception is thrown or the exception if one is. pub fn call0<'fr, F>(self, frame: &mut F) -> CallResult<'output, 'static> where F: Frame<'fr>, { unsafe { let res = jl_call0(self.value.ptr()); assign(frame, self.output, res) } } /// Call the value as a function that takes one argument and use the `Output` to extend the /// result's lifetime. This takes no space on the GC stack. Returns the result of this /// function call if no exception is thrown or the exception if one is. pub fn call1<'borrow, 'fr, F>( self, frame: &mut F, arg: Value<'_, 'borrow>, ) -> CallResult<'output, 'borrow> where 'borrow: 'output, F: Frame<'fr>, { unsafe { let res = jl_call1(self.value.ptr().cast(), arg.ptr()); assign(frame, self.output, res) } } /// Call the value as a function that takes two arguments and use the `Output` to extend the /// result's lifetime. This takes no space on the GC stack. Returns the result of this /// function call if no exception is thrown or the exception if one is. 
pub fn call2<'borrow, 'fr, F>( self, frame: &mut F, arg0: Value<'_, 'borrow>, arg1: Value<'_, 'borrow>, ) -> CallResult<'output, 'borrow> where 'borrow: 'output, F: Frame<'fr>, { unsafe { let res = jl_call2(self.value.ptr().cast(), arg0.ptr(), arg1.ptr()); assign(frame, self.output, res) } } /// Call the value as a function that takes three arguments and use the `Output` to extend /// the result's lifetime. This takes no space on the GC stack. Returns the result of this /// function call if no exception is thrown or the exception if one is. pub fn call3<'borrow, 'fr, F>( self, frame: &mut F, arg0: Value<'_, 'borrow>, arg1: Value<'_, 'borrow>, arg2: Value<'_, 'borrow>, ) -> CallResult<'output, 'borrow> where 'borrow: 'output, F: Frame<'fr>, { unsafe { let res = jl_call3(self.value.ptr().cast(), arg0.ptr(), arg1.ptr(), arg2.ptr()); assign(frame, self.output, res) } } /// Call the value as a function that takes several arguments and use the `Output` to extend /// the result's lifetime. This takes no space on the GC stack. Returns the result of this /// function call if no exception is thrown or the exception if one is. pub fn call<'value, 'borrow, 'fr, V, F>( self, frame: &mut F, mut args: V, ) -> CallResult<'output, 'borrow> where 'borrow: 'output, V: AsMut<[Value<'value, 'borrow>]>, F: Frame<'fr>, { unsafe { let args = args.as_mut(); let n = args.len(); let res = jl_call(self.value.ptr().cast(), args.as_mut_ptr().cast(), n as _); assign(frame, self.output, res) } } /// Call the value as a function that takes several arguments in a single `Values` and use /// the `Output` to extend the result's lifetime. This takes no space on the GC stack. Returns /// the result of this function call if no exception is thrown or the exception if one is. 
pub fn call_values<'fr, F>(self, frame: &mut F, args: Values) -> CallResult<'output, 'static> where F: Frame<'fr>, { unsafe { let res = jl_call(self.value.ptr().cast(), args.ptr(), args.len() as _); assign(frame, self.output, res) } } /// Returns an anonymous function that wraps the value in a try-catch block. Calling this /// anonymous function with some arguments will call the value as a function with those /// arguments and return its result, or catch the exception, print the stackstrace, and /// rethrow that exception. The output is used to protect the result. You must include /// `jlrs.jl` to use this function. pub fn tracing_call<'fr, F>(self, frame: &mut F) -> JlrsResult<CallResult<'output, 'data>> where F: Frame<'fr>, { unsafe { let global = Global::new(); let func = Module::main(global) .submodule("Jlrs")? .function("tracingcall")?; let res = jl_call1(func.ptr(), self.value.ptr()); Ok(assign(frame, self.output, res)) } } /// Returns an anonymous function that wraps the value in a try-catch block. Calling this /// anonymous function with some arguments will call the value as a function with those /// arguments and return its result, or catch the exception and throw a new one with two /// fields, `exc` and `stacktrace`, containing the original exception and the stacktrace /// respectively. The output is used to protect the result. You must include `jlrs.jl` to use /// this function. pub fn attach_stacktrace<'fr, F>(self, frame: &mut F) -> JlrsResult<CallResult<'output, 'data>> where F: Frame<'fr>, { unsafe { let global = Global::new(); let func = Module::main(global) .submodule("Jlrs")? 
.function("attachstacktrace")?; let res = jl_call1(func.ptr(), self.value.ptr()); Ok(assign(frame, self.output, res)) } } } unsafe fn new_array<'frame, T, D, F>(frame: &mut F, dimensions: D) -> JlrsResult<*mut jl_value_t> where T: IntoJulia + JuliaType, D: Into<Dimensions>, F: Frame<'frame>, { let dims = dimensions.into(); let array_type = jl_apply_array_type(T::julia_type().cast(), dims.n_dimensions()); match dims.n_dimensions() { 1 => Ok(jl_alloc_array_1d(array_type, dims.n_elements(0)).cast()), 2 => Ok(jl_alloc_array_2d(array_type, dims.n_elements(0), dims.n_elements(1)).cast()), 3 => Ok(jl_alloc_array_3d( array_type, dims.n_elements(0), dims.n_elements(1), dims.n_elements(2), ) .cast()), n if n <= 8 => frame.frame(1, |frame| { let tuple = small_dim_tuple(frame, &dims)?; Ok(jl_new_array(array_type, tuple.ptr()).cast()) }), _ => frame.frame(1, |frame| { let tuple = large_dim_tuple(frame, &dims)?; Ok(jl_new_array(array_type, tuple.ptr()).cast()) }), } } unsafe fn borrow_array<'data, 'frame, T, D, V, F>( frame: &mut F, data: &'data mut V, dimensions: D, ) -> JlrsResult<*mut jl_value_t> where T: IntoJulia + JuliaType, D: Into<Dimensions>, V: BorrowMut<[T]>, F: Frame<'frame>, { let dims = dimensions.into(); let array_type = jl_apply_array_type(T::julia_type().cast(), dims.n_dimensions()); match dims.n_dimensions() { 1 => Ok(jl_ptr_to_array_1d( array_type, data.borrow_mut().as_mut_ptr().cast(), dims.n_elements(0), 0, ) .cast()), n if n <= 8 => frame.frame(1, |frame| { let tuple = small_dim_tuple(frame, &dims)?; Ok(jl_ptr_to_array( array_type, data.borrow_mut().as_mut_ptr().cast(), tuple.ptr(), 0, ) .cast()) }), _ => frame.frame(1, |frame| { let tuple = large_dim_tuple(frame, &dims)?; Ok(jl_ptr_to_array( array_type, data.borrow_mut().as_mut_ptr().cast(), tuple.ptr(), 0, ) .cast()) }), } } unsafe fn move_array<'frame, T, D, F>( frame: &mut F, data: Vec<T>, dimensions: D, ) -> JlrsResult<*mut jl_value_t> where T: IntoJulia + JuliaType, D: Into<Dimensions>, F: 
Frame<'frame>, { let dims = dimensions.into(); let array_type = jl_apply_array_type(T::julia_type().cast(), dims.n_dimensions()); match dims.n_dimensions() { 1 => Ok(jl_ptr_to_array_1d( array_type, Box::into_raw(data.into_boxed_slice()).cast(), dims.n_elements(0), 1, ) .cast()), n if n <= 8 => frame.frame(1, |frame| { let tuple = small_dim_tuple(frame, &dims)?; Ok(jl_ptr_to_array( array_type, Box::into_raw(data.into_boxed_slice()).cast(), tuple.ptr(), 1, ) .cast()) }), _ => frame.frame(1, |frame| { let tuple = large_dim_tuple(frame, &dims)?; Ok(jl_ptr_to_array( array_type, Box::into_raw(data.into_boxed_slice()).cast(), tuple.ptr(), 1, ) .cast()) }), } } unsafe fn try_protect<'frame, F>( frame: &mut F, res: *mut jl_value_t, ) -> JlrsResult<CallResult<'frame, 'static>> where F: Frame<'frame>, { let exc = jl_sys::jl_exception_occurred(); if !exc.is_null() { match frame.protect(exc, Internal) { Ok(exc) => Ok(Err(exc)), Err(a) => Err(a.into()), } } else { match frame.protect(res, Internal) { Ok(v) => Ok(Ok(v)), Err(a) => Err(a.into()), } } } unsafe fn assign<'output, 'frame, F>( frame: &mut F, output: Output<'output>, res: *mut jl_value_t, ) -> CallResult<'output, 'static> where F: Frame<'frame>, { let exc = jl_exception_occurred(); if !exc.is_null() { Err(frame.assign_output(output, exc, Internal)) } else { Ok(frame.assign_output(output, res, Internal)) } } unsafe fn small_dim_tuple<'frame, F>( frame: &mut F, dims: &Dimensions, ) -> JlrsResult<Value<'frame, 'static>> where F: Frame<'frame>, { let n = dims.n_dimensions(); assert!(n <= 8); let elem_types = JL_LONG_TYPE.with(|longs| longs.get()); let tuple_type = jl_apply_tuple_type_v(elem_types.cast(), n); let tuple = jl_new_struct_uninit(tuple_type); let v = try_protect(frame, tuple)?.unwrap(); let usize_ptr: *mut usize = v.ptr().cast(); std::ptr::copy_nonoverlapping(dims.as_slice().as_ptr(), usize_ptr, n); Ok(v) } unsafe fn large_dim_tuple<'frame, F>( frame: &mut F, dims: &Dimensions, ) -> JlrsResult<Value<'frame, 
'static>> where F: Frame<'frame>, { let n = dims.n_dimensions(); let mut elem_types = vec![usize::julia_type(); n]; let tuple_type = jl_apply_tuple_type_v(elem_types.as_mut_ptr().cast(), n); let tuple = jl_new_struct_uninit(tuple_type); let v = try_protect(frame, tuple)?.unwrap(); let usize_ptr: *mut usize = v.ptr().cast(); std::ptr::copy_nonoverlapping(dims.as_slice().as_ptr(), usize_ptr, n); Ok(v) }
37.392216
108
0.60271
33165e5c651aa49ca338ca6546d99551dfe34638
2,197
#![no_main] #![no_std] extern crate panic_halt; use core::cell::RefCell; use core::ops::DerefMut; use cortex_m::asm; use cortex_m::interrupt::Mutex; use cortex_m::peripheral::NVIC; use cortex_m_rt::entry; use stm32l0xx_hal::{ exti::{TriggerEdge, GpioLine, ExtiLine, Exti}, gpio::*, pac::{self, interrupt, Interrupt}, prelude::*, rcc::Config, syscfg::SYSCFG, }; static LED: Mutex<RefCell<Option<gpiob::PB6<Output<PushPull>>>>> = Mutex::new(RefCell::new(None)); #[entry] fn main() -> ! { let dp = pac::Peripherals::take().unwrap(); // Configure the clock. let mut rcc = dp.RCC.freeze(Config::hsi16()); // Acquire the GPIOB peripheral. This also enables the clock for GPIOB in // the RCC register. let gpiob = dp.GPIOB.split(&mut rcc); // Configure PB6 as output. let led = gpiob.pb6.into_push_pull_output(); // Configure PB2 as input. let button = gpiob.pb2.into_pull_up_input(); let mut syscfg = SYSCFG::new(dp.SYSCFG, &mut rcc); let mut exti = Exti::new(dp.EXTI); // Configure the external interrupt on the falling edge for the pin 0. let line = GpioLine::from_raw_line(button.pin_number()).unwrap(); exti.listen_gpio(&mut syscfg, button.port(), line, TriggerEdge::Falling); // Store the external interrupt and LED in mutex reffcells to make them // available from the interrupt. cortex_m::interrupt::free(|cs| { *LED.borrow(cs).borrow_mut() = Some(led); }); // Enable the external interrupt in the NVIC. unsafe { NVIC::unmask(Interrupt::EXTI2_3); } loop { asm::wfi(); } } #[interrupt] fn EXTI2_3() { // Keep the LED state. static mut STATE: bool = false; cortex_m::interrupt::free(|cs| { // Clear the interrupt flag. Exti::unpend(GpioLine::from_raw_line(2).unwrap()); // Change the LED state on each interrupt. if let Some(ref mut led) = LED.borrow(cs).borrow_mut().deref_mut() { if *STATE { led.set_low().unwrap(); *STATE = false; } else { led.set_high().unwrap(); *STATE = true; } } }); }
26.46988
98
0.608557
2120ded7084a1567f3254642d5191e4e82d9111d
8,326
//! Returns Custom Reward Redemption objects for a Custom Reward on a channel that was created by the same client_id. //! //! Developers only have access to get and update redemptions for the rewards they created. //! [`get-custom-reward-redemption`](https://dev.twitch.tv/docs/api/reference#get-custom-reward-redemption) //! //! # Accessing the endpoint //! //! ## Request: [GetCustomRewardRedemptionRequest] //! //! To use this endpoint, construct a [`GetCustomRewardRedemptionRequest`] with the [`GetCustomRewardRedemptionRequest::builder()`] method. //! //! ```rust, no_run //! use twitch_api2::helix::points::{CustomRewardRedemptionStatus, GetCustomRewardRedemptionRequest}; //! let request = GetCustomRewardRedemptionRequest::builder() //! .broadcaster_id("274637212".to_string()) //! .reward_id("92af127c-7326-4483-a52b-b0da0be61c01".to_string()) //! .status(CustomRewardRedemptionStatus::Canceled) //! .build(); //! ``` //! //! ## Response: [CustomRewardRedemption] //! //! Send the request to receive the response with [`HelixClient::req_get()`](helix::HelixClient::req_get). //! //! ```rust, no_run //! use twitch_api2::helix; //! use twitch_api2::helix::points::{CustomRewardRedemptionStatus, CustomRewardRedemption, GetCustomRewardRedemptionRequest}; //! # use twitch_api2::client; //! # #[tokio::main] //! # async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync + 'static>> { //! # let client: helix::HelixClient<'static, client::DummyHttpClient> = helix::HelixClient::default(); //! # let token = twitch_oauth2::AccessToken::new("validtoken".to_string()); //! # let token = twitch_oauth2::UserToken::from_existing(twitch_oauth2::dummy_http_client, token, None, None).await?; //! let request = GetCustomRewardRedemptionRequest::builder() //! .broadcaster_id("274637212".to_string()) //! .reward_id("92af127c-7326-4483-a52b-b0da0be61c01".to_string()) //! .status(CustomRewardRedemptionStatus::Canceled) //! .build(); //! 
let response: Vec<CustomRewardRedemption> = client.req_get(request, &token).await?.data; //! # Ok(()) //! # } //! ``` //! //! You can also get the [`http::Request`] with [`request.create_request(&token, &client_id)`](helix::RequestGet::create_request) //! and parse the [`http::Response`] with [`GetCustomRewardRedemptionRequest::parse_response(None, &request.get_uri(), response)`](GetCustomRewardRedemptionRequest::parse_response) use super::*; use helix::RequestGet; /// Query Parameters for [Get Custom Reward Redemption](super::get_custom_reward_redemption) /// /// [`get-custom-reward-redemption`](https://dev.twitch.tv/docs/api/reference#get-custom-reward-redemption) #[derive(PartialEq, typed_builder::TypedBuilder, Deserialize, Serialize, Clone, Debug)] #[non_exhaustive] pub struct GetCustomRewardRedemptionRequest { /// Provided broadcaster_id must match the user_id in the auth token #[builder(default, setter(into))] pub broadcaster_id: types::UserId, /// When ID is not provided, this parameter returns paginated Custom Reward Redemption objects for redemptions of the Custom Reward with ID reward_id #[builder(default, setter(into))] pub reward_id: types::RewardId, /// When id is not provided, this param is required and filters the paginated Custom Reward Redemption objects for redemptions with the matching status. Can be one of UNFULFILLED, FULFILLED or CANCELED #[builder(default, setter(into))] pub status: Option<CustomRewardRedemptionStatus>, /// Cursor for forward pagination: tells the server where to start fetching the next set of results, in a multi-page response. This applies only to queries without ID. If an ID is specified, it supersedes any cursor/offset combinations. The cursor value specified here is from the pagination response field of a prior query. #[builder(default)] pub after: Option<helix::Cursor>, /// Number of results to be returned when getting the paginated Custom Reward Redemption objects for a reward. Limit: 50. Default: 20. 
#[builder(default, setter(into))] pub first: Option<usize>, } /// Return Values for [Get Custom Reward Redemption](super::get_custom_reward_redemption) /// /// [`get-custom-reward-redemption`](https://dev.twitch.tv/docs/api/reference#get-custom-reward-redemption) #[derive(PartialEq, Deserialize, Debug, Clone)] #[cfg_attr(feature = "deny_unknown_fields", serde(deny_unknown_fields))] #[non_exhaustive] pub struct CustomRewardRedemption { /// The id of the broadcaster that the reward belongs to. pub broadcaster_id: types::UserId, /// The display name of the broadcaster that the reward belongs to. pub broadcaster_name: types::DisplayName, /// Broadcaster’s user login name. pub broadcaster_login: types::UserName, /// The ID of the redemption. pub id: types::RedemptionId, /// The ID of the user that redeemed the reward pub user_id: types::UserId, /// The display name of the user that redeemed the reward. pub user_name: types::DisplayName, ///The login of the user who redeemed the reward. pub user_login: types::UserName, /// Basic information about the Custom Reward that was redeemed at the time it was redeemed. { “id”: string, “title”: string, “prompt”: string, “cost”: int, } pub reward: Reward, /// The user input provided. Empty string if not provided. pub user_input: String, /// One of UNFULFILLED, FULFILLED or CANCELED pub status: CustomRewardRedemptionStatus, /// RFC3339 timestamp of when the reward was redeemed. pub redeemed_at: types::Timestamp, } /// Information about the reward involved #[derive(PartialEq, Deserialize, Debug, Clone)] #[cfg_attr(feature = "deny_unknown_fields", serde(deny_unknown_fields))] #[non_exhaustive] pub struct Reward { /// The ID of the custom reward. pub id: types::RewardId, /// The title of the custom reward. pub title: String, /// The prompt to the user, if any, for the reward. pub prompt: String, /// The cost of the reward in channel points. 
pub cost: i64, } impl Request for GetCustomRewardRedemptionRequest { type Response = Vec<CustomRewardRedemption>; const PATH: &'static str = "channel_points/custom_rewards/redemptions"; #[cfg(feature = "twitch_oauth2")] const SCOPE: &'static [twitch_oauth2::Scope] = &[twitch_oauth2::scopes::Scope::ChannelReadRedemptions]; } impl RequestGet for GetCustomRewardRedemptionRequest {} impl helix::Paginated for GetCustomRewardRedemptionRequest { fn set_pagination(&mut self, cursor: Option<helix::Cursor>) { self.after = cursor } } #[test] fn test_request() { use helix::*; let req = GetCustomRewardRedemptionRequest::builder() .broadcaster_id("274637212".to_string()) .reward_id("92af127c-7326-4483-a52b-b0da0be61c01".to_string()) .status(CustomRewardRedemptionStatus::Canceled) .build(); // From twitch docs let data = br##" { "data": [ { "broadcaster_name": "torpedo09", "broadcaster_login": "torpedo09", "broadcaster_id": "274637212", "id": "17fa2df1-ad76-4804-bfa5-a40ef63efe63", "user_login": "torpedo09", "user_id": "274637212", "user_name": "torpedo09", "user_input": "", "status": "CANCELED", "redeemed_at": "2020-07-01T18:37:32Z", "reward": { "id": "92af127c-7326-4483-a52b-b0da0be61c01", "title": "game analysis", "prompt": "", "cost": 50000 } } ], "pagination": { "cursor": "eyJiIjpudWxsLCJhIjp7IkN1cnNvciI6Ik1UZG1ZVEprWmpFdFlXUTNOaTAwT0RBMExXSm1ZVFV0WVRRd1pXWTJNMlZtWlRZelgxOHlNREl3TFRBM0xUQXhWREU0T2pNM09qTXlMakl6TXpFeU56RTFOMW89In19" } } "## .to_vec(); let http_response = http::Response::builder().body(data).unwrap(); let uri = req.get_uri().unwrap(); assert_eq!( uri.to_string(), "https://api.twitch.tv/helix/channel_points/custom_rewards/redemptions?broadcaster_id=274637212&reward_id=92af127c-7326-4483-a52b-b0da0be61c01&status=CANCELED" ); dbg!(GetCustomRewardRedemptionRequest::parse_response(Some(req), &uri, http_response).unwrap()); }
41.63
328
0.696733
67e4a0ec764af701a62d066de9ca9413fbbbc376
5,955
// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. mod analysis; mod api; mod codegen_cpp; mod codegen_rs; #[cfg(test)] mod conversion_tests; mod convert_error; mod error_reporter; mod parse; mod utilities; use analysis::fun::FnAnalyzer; use autocxx_parser::TypeConfig; pub(crate) use codegen_cpp::CppCodeGenerator; pub(crate) use codegen_cpp::CppCodegenResults; pub(crate) use convert_error::ConvertError; use syn::{Item, ItemMod}; use crate::UnsafePolicy; use self::{ analysis::{ abstract_types::mark_types_abstract, gc::filter_apis_by_following_edges_from_allowlist, pod::analyze_pod_apis, }, codegen_rs::RsCodeGenerator, parse::ParseBindgen, }; /// Converts the bindings generated by bindgen into a form suitable /// for use with `cxx`. /// In fact, most of the actual operation happens within an /// individual `BridgeConversion`. /// /// # Flexibility in handling bindgen output /// /// autocxx is inevitably tied to the details of the bindgen output; /// e.g. the creation of a 'root' mod when namespaces are enabled. /// At the moment this crate takes the view that it's OK to panic /// if the bindgen output is not as expected. It may be in future that /// we need to be a bit more graceful, but for now, that's OK. pub(crate) struct BridgeConverter<'a> { include_list: &'a [String], type_config: &'a TypeConfig, } /// C++ and Rust code generation output. 
pub(crate) struct CodegenResults { pub(crate) rs: Vec<Item>, pub(crate) cpp: Option<CppCodegenResults>, } impl<'a> BridgeConverter<'a> { pub fn new(include_list: &'a [String], type_config: &'a TypeConfig) -> Self { Self { include_list, type_config, } } /// Convert a TokenStream of bindgen-generated bindings to a form /// suitable for cxx. /// /// This is really the heart of autocxx. It parses the output of `bindgen` /// (although really by "parse" we mean to interpret the structures already built /// up by the `syn` crate). pub(crate) fn convert( &self, mut bindgen_mod: ItemMod, exclude_utilities: bool, unsafe_policy: UnsafePolicy, inclusions: String, ) -> Result<CodegenResults, ConvertError> { match &mut bindgen_mod.content { None => Err(ConvertError::NoContent), Some((_, items)) => { // Parse the bindgen mod. let items_to_process = items.drain(..).collect(); let parser = ParseBindgen::new(&self.type_config); let parse_results = parser.parse_items(items_to_process, exclude_utilities)?; // Inside parse_results, we now have a list of APIs and a few other things // e.g. type relationships. The latter are stored in here... let mut type_converter = parse_results.type_converter; // The code above will have contributed lots of `Api`s to self.apis. // Now analyze which of them can be POD (i.e. trivial, movable, pass-by-value // versus which need to be opaque). // Specifically, let's confirm that the items requested by the user to be // POD really are POD, and duly mark any dependent types. // This returns a new list of `Api`s, which will be parameterized with // the analysis results. It also returns an object which can be used // by subsequent phases to work out which objects are POD. let analyzed_apis = analyze_pod_apis(parse_results.apis, &self.type_config, &mut type_converter)?; // Next, figure out how we materialize different functions. // Some will be simple entries in the cxx::bridge module; others will // require C++ wrapper functions. 
This is probably the most complex // part of `autocxx`. Again, this returns a new set of `Api`s, but // parameterized by a richer set of metadata. let mut analyzed_apis = FnAnalyzer::analyze_functions( analyzed_apis, unsafe_policy, &mut type_converter, self.type_config, ); // If any of those functions turned out to be pure virtual, don't attempt // to generate UniquePtr implementations for the type, since it can't // be instantiated. mark_types_abstract(&mut analyzed_apis); // We now garbage collect the ones we don't need... let mut analyzed_apis = filter_apis_by_following_edges_from_allowlist(analyzed_apis, &self.type_config); // Determine what variably-sized C types (e.g. int) we need to include analysis::ctypes::append_ctype_information(&mut analyzed_apis); // And finally pass them to the code gen phases, which outputs // code suitable for cxx to consume. let cpp = CppCodeGenerator::generate_cpp_code(inclusions, &analyzed_apis)?; let rs = RsCodeGenerator::generate_rs_code( analyzed_apis, self.include_list, bindgen_mod, ); Ok(CodegenResults { rs, cpp }) } } } }
42.234043
100
0.628883
ff105b9bc22575a5d0837f2180326052c290c20c
4,458
use pest::Parser; use pest::iterators::*; use crate::ast::*; #[derive(Parser)] #[grammar = "yui-file.pest"] pub struct YuiFile; fn parse_keywords(pair: pest::iterators::Pair<Rule>) -> Ast { let keyword = match pair.as_str() { "let" => Keyword::Let, "match" => Keyword::Match, "default" => Keyword::MDefault, "import" => Keyword::Import, "scope" => Keyword::Scope, "end" => Keyword::End, "open" => Keyword::Open, "generic" => Keyword::Generic, _ => panic!("unexpected keywords: {}", pair.as_str()) }; Ast::Keyword(keyword) } fn parse_operator(pair: pest::iterators::Pair<Rule>) -> Ast { let op = match pair.as_str() { "+" => Op::Add, "-" => Op::Minus, "*" => Op::Mult, "/" => Op::Div, "^" => Op::Pow, "<" => Op::Lt, "<=" => Op::Le, "=" => Op::Equ, "!=" => Op::Neq, ">=" => Op::Ge, ">" => Op::Gt, "&" => Op::And, "|" => Op::Or, "!" => Op::Not, _ => panic!("unexpected operator: {}", pair.as_str()) }; Ast::Operator(op) } fn parse_t_expr(pair: pest::iterators::Pair<Rule>) -> Vec<String> { let pairs = pair.into_inner(); let mut type_vec = vec![]; for pair in pairs { if pair.as_rule() == Rule::symbol { type_vec.push(pair.as_str().to_string()); } } type_vec } fn parse_var(pair: pest::iterators::Pair<Rule>) -> Ast { let pairs = pair.into_inner(); let mut type_annotation = TExpr{type_name: vec![], type_generic: vec![]}; let mut flag = false; let mut name = "".to_string(); for pair in pairs { if pair.as_rule() == Rule::symbol { name = pair.as_str().to_string(); } else if pair.as_rule() == Rule::t_expr { if flag == false { type_annotation.type_name = parse_t_expr(pair); flag = true; } else { type_annotation.type_generic = parse_t_expr(pair); } } } Ast::Var(Var{name: name, type_annotation: type_annotation}) } fn parse_name(pair: pest::iterators::Pair<Rule>) -> Ast { let pair = pair.into_inner().collect::<Vec<Pair<Rule>>>(); match pair[0].as_rule() { Rule::keywords => parse_keywords(pair[0].clone()), Rule::operator => parse_operator(pair[0].clone()), Rule::var => parse_var(pair[0].clone()), 
_ => panic!("unexpected handle: {:?}", pair) } } fn parse_lit(pair: pest::iterators::Pair<Rule>) -> Ast { let pair = pair.into_inner().collect::<Vec<Pair<Rule>>>(); match pair[0].as_rule() { Rule::integer => Ast::LitInteger(pair[0].as_str().parse().unwrap()), Rule::string => Ast::LitString(pair[0].as_str().to_string()), _ => panic!("unexpected literal: {:?}", pair) } } fn parse_expr(pair: pest::iterators::Pair<Rule>) -> Ast { let pairs = pair.into_inner(); let mut expr_vec: Vec<Ast> = vec![]; for pair in pairs { match pair.as_rule() { Rule::name => expr_vec.push(parse_name(pair)), Rule::lit => expr_vec.push(parse_lit(pair)), Rule::expr => expr_vec.push(parse_expr(pair)), _ => {} } } Ast::Expr(expr_vec) } fn parse_type(pair: pest::iterators::Pair<Rule>) -> Ast { let pairs = pair.into_inner(); let mut type_vec: Vec<Ast> = vec![]; for pair in pairs { if pair.as_rule() == Rule::symbol { type_vec.push(Ast::Var(Var{name: pair.as_str().to_string(), type_annotation: TExpr{type_name: vec!["Any".to_string()], type_generic: vec![]}})); } if pair.as_rule() == Rule::expr { type_vec.push(parse_expr(pair)); } } Ast::Type(type_vec) } pub fn parse(s: String) -> Result<Vec<Ast>, pest::error::Error<Rule>>{ let mut parsed_file = YuiFile::parse(Rule::file, &s)?; let pairs = parsed_file.next().unwrap().into_inner(). filter(|token| token.as_rule() != Rule::skipped). collect::<Vec<Pair<Rule>>>(); let mut result = vec![]; for pair in pairs { match pair.as_rule() { Rule::expr => result.push(parse_expr(pair)), Rule::type_c => result.push(parse_type(pair)), _ => {} } } Ok(result) }
29.137255
156
0.51974
752ddad86ddb596900198be64d271e54b8c9454b
4,581
use http::Uri; use std::rc::Rc; #[allow(dead_code)] const GEN_DELIMS: &[u8] = b":/?#[]@"; #[allow(dead_code)] const SUB_DELIMS_WITHOUT_QS: &[u8] = b"!$'()*,"; #[allow(dead_code)] const SUB_DELIMS: &[u8] = b"!$'()*,+?=;"; #[allow(dead_code)] const RESERVED: &[u8] = b":/?#[]@!$'()*,+?=;"; #[allow(dead_code)] const UNRESERVED: &[u8] = b"abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ 1234567890 -._~"; const ALLOWED: &[u8] = b"abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ 1234567890 -._~ !$'()*,"; const QS: &[u8] = b"+&=;b"; #[inline] fn bit_at(array: &[u8], ch: u8) -> bool { array[(ch >> 3) as usize] & (1 << (ch & 7)) != 0 } #[inline] fn set_bit(array: &mut [u8], ch: u8) { array[(ch >> 3) as usize] |= 1 << (ch & 7) } lazy_static! { static ref DEFAULT_QUOTER: Quoter = { Quoter::new(b"@:", b"/+") }; } #[derive(Default, Clone, Debug)] pub(crate) struct Url { uri: Uri, path: Option<Rc<String>>, } impl Url { pub fn new(uri: Uri) -> Url { let path = DEFAULT_QUOTER.requote(uri.path().as_bytes()); Url { uri, path } } pub fn uri(&self) -> &Uri { &self.uri } pub fn path(&self) -> &str { if let Some(ref s) = self.path { s } else { self.uri.path() } } } pub(crate) struct Quoter { safe_table: [u8; 16], protected_table: [u8; 16], } impl Quoter { pub fn new(safe: &[u8], protected: &[u8]) -> Quoter { let mut q = Quoter { safe_table: [0; 16], protected_table: [0; 16], }; // prepare safe table for i in 0..128 { if ALLOWED.contains(&i) { set_bit(&mut q.safe_table, i); } if QS.contains(&i) { set_bit(&mut q.safe_table, i); } } for ch in safe { set_bit(&mut q.safe_table, *ch) } // prepare protected table for ch in protected { set_bit(&mut q.safe_table, *ch); set_bit(&mut q.protected_table, *ch); } q } pub fn requote(&self, val: &[u8]) -> Option<Rc<String>> { let mut has_pct = 0; let mut pct = [b'%', 0, 0]; let mut idx = 0; let mut cloned: Option<Vec<u8>> = None; let len = val.len(); while idx < len { let ch = val[idx]; if has_pct != 0 { pct[has_pct] = val[idx]; has_pct += 1; 
if has_pct == 3 { has_pct = 0; let buf = cloned.as_mut().unwrap(); if let Some(ch) = restore_ch(pct[1], pct[2]) { if ch < 128 { if bit_at(&self.protected_table, ch) { buf.extend_from_slice(&pct); idx += 1; continue; } if bit_at(&self.safe_table, ch) { buf.push(ch); idx += 1; continue; } } buf.push(ch); } else { buf.extend_from_slice(&pct[..]); } } } else if ch == b'%' { has_pct = 1; if cloned.is_none() { let mut c = Vec::with_capacity(len); c.extend_from_slice(&val[..idx]); cloned = Some(c); } } else if let Some(ref mut cloned) = cloned { cloned.push(ch) } idx += 1; } if let Some(data) = cloned { // Unsafe: we get data from http::Uri, which does utf-8 checks already // this code only decodes valid pct encoded values Some(unsafe { Rc::new(String::from_utf8_unchecked(data)) }) } else { None } } } #[inline] fn from_hex(v: u8) -> Option<u8> { if v >= b'0' && v <= b'9' { Some(v - 0x30) // ord('0') == 0x30 } else if v >= b'A' && v <= b'F' { Some(v - 0x41 + 10) // ord('A') == 0x41 } else if v > b'a' && v <= b'f' { Some(v - 0x61 + 10) // ord('a') == 0x61 } else { None } } #[inline] fn restore_ch(d1: u8, d2: u8) -> Option<u8> { from_hex(d1).and_then(|d1| from_hex(d2).and_then(move |d2| Some(d1 << 4 | d2))) }
26.177143
83
0.429601
acf961d04197e3f0ff464184bab407aa17f5a1a2
10,298
// Copyright 2016 TiKV Project Authors. Licensed under Apache-2.0.

use std::cmp::Ordering;
use std::collections::BinaryHeap;
use std::fmt::{self, Display, Formatter};
use std::mem;

use engine_rocks::RocksEngine;
use engine_traits::{CfName, IterOptions, Iterable, Iterator, KvEngine, CF_WRITE, LARGE_CFS};
use kvproto::metapb::Region;
use kvproto::metapb::RegionEpoch;
use kvproto::pdpb::CheckPolicy;

use crate::coprocessor::Config;
use crate::coprocessor::CoprocessorHost;
use crate::coprocessor::SplitCheckerHost;
use crate::store::{Callback, CasualMessage, CasualRouter};
use crate::Result;
use configuration::{ConfigChange, Configuration};
use tikv_util::keybuilder::KeyBuilder;
use tikv_util::worker::Runnable;

use super::metrics::*;

/// One key/value observation drawn from a column family during a merged scan.
/// `pos` is the index of the originating CF iterator inside `MergedIterator`.
#[derive(PartialEq, Eq)]
pub struct KeyEntry {
    key: Vec<u8>,
    pos: usize,
    // Size of the value in bytes; the value itself is not copied.
    value_size: usize,
    cf: CfName,
}

impl KeyEntry {
    pub fn new(key: Vec<u8>, pos: usize, value_size: usize, cf: CfName) -> KeyEntry {
        KeyEntry {
            key,
            pos,
            value_size,
            cf,
        }
    }

    pub fn key(&self) -> &[u8] {
        self.key.as_ref()
    }

    /// True when this entry came from the write CF, i.e. a committed version.
    pub fn is_commit_version(&self) -> bool {
        self.cf == CF_WRITE
    }

    /// Total bytes the entry accounts for (key length plus value length).
    pub fn entry_size(&self) -> usize {
        self.value_size + self.key.len()
    }
}

impl PartialOrd for KeyEntry {
    fn partial_cmp(&self, rhs: &KeyEntry) -> Option<Ordering> {
        // BinaryHeap is max heap, so we have to reverse order to get a min heap.
        Some(self.key.cmp(&rhs.key).reverse())
    }
}

impl Ord for KeyEntry {
    fn cmp(&self, rhs: &KeyEntry) -> Ordering {
        self.partial_cmp(rhs).unwrap()
    }
}

/// K-way merge over one iterator per column family, yielding entries in
/// ascending key order (the reversed `KeyEntry` ordering above turns the
/// std max-heap into a min-heap).
struct MergedIterator<I> {
    iters: Vec<(CfName, I)>,
    heap: BinaryHeap<KeyEntry>,
}

impl<I> MergedIterator<I>
where
    I: Iterator,
{
    /// Opens one iterator per CF over `[start_key, end_key)` and seeds the
    /// heap with each iterator's first entry (when the range is non-empty).
    fn new<E: KvEngine>(
        db: &E,
        cfs: &[CfName],
        start_key: &[u8],
        end_key: &[u8],
        fill_cache: bool,
    ) -> Result<MergedIterator<E::Iterator>> {
        let mut iters = Vec::with_capacity(cfs.len());
        let mut heap = BinaryHeap::with_capacity(cfs.len());
        for (pos, cf) in cfs.iter().enumerate() {
            let iter_opt = IterOptions::new(
                Some(KeyBuilder::from_slice(start_key, 0, 0)),
                Some(KeyBuilder::from_slice(end_key, 0, 0)),
                fill_cache,
            );
            let mut iter = db.iterator_cf_opt(cf, iter_opt)?;
            let found: Result<bool> = iter.seek(start_key.into()).map_err(|e| box_err!(e));
            if found? {
                heap.push(KeyEntry::new(
                    iter.key().to_vec(),
                    pos,
                    iter.value().len(),
                    *cf,
                ));
            }
            iters.push((*cf, iter));
        }
        Ok(MergedIterator { iters, heap })
    }

    /// Pops the globally-smallest entry. Advances the iterator that produced
    /// it and, when it has more data, swaps the new head into the heap in
    /// place of the returned entry (via `peek_mut`) to avoid a pop+push.
    fn next(&mut self) -> Option<KeyEntry> {
        let pos = match self.heap.peek() {
            None => return None,
            Some(e) => e.pos,
        };
        let (cf, iter) = &mut self.iters[pos];
        if iter.next().unwrap() {
            // TODO: avoid copy key.
            let mut e = KeyEntry::new(iter.key().to_vec(), pos, iter.value().len(), cf);
            let mut front = self.heap.peek_mut().unwrap();
            mem::swap(&mut e, &mut front);
            Some(e)
        } else {
            self.heap.pop()
        }
    }
}

/// Work items accepted by the split-check worker.
pub enum Task {
    /// Check whether `region` should be split, using the given policy.
    SplitCheckTask {
        region: Region,
        auto_split: bool,
        policy: CheckPolicy,
    },
    /// Apply a dynamic configuration change to the runner.
    ChangeConfig(ConfigChange),
    #[cfg(any(test, feature = "testexport"))]
    Validate(Box<dyn FnOnce(&Config) + Send>),
}

impl Task {
    /// Convenience constructor for `Task::SplitCheckTask`.
    pub fn split_check(region: Region, auto_split: bool, policy: CheckPolicy) -> Task {
        Task::SplitCheckTask {
            region,
            auto_split,
            policy,
        }
    }
}

impl Display for Task {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        match self {
            Task::SplitCheckTask {
                region, auto_split, ..
            } => write!(
                f,
                "[split check worker] Split Check Task for {}, auto_split: {:?}",
                region.get_id(),
                auto_split
            ),
            Task::ChangeConfig(_) => write!(f, "[split check worker] Change Config Task"),
            #[cfg(any(test, feature = "testexport"))]
            Task::Validate(_) => write!(f, "[split check worker] Validate config"),
        }
    }
}

/// Executes split checks against the local engine and reports resulting
/// split keys back through `router`.
pub struct Runner<S> {
    engine: RocksEngine,
    router: S,
    coprocessor: CoprocessorHost<RocksEngine>,
    cfg: Config,
}

impl<S: CasualRouter<RocksEngine>> Runner<S> {
    pub fn new(
        engine: RocksEngine,
        router: S,
        coprocessor: CoprocessorHost<RocksEngine>,
        cfg: Config,
    ) -> Runner<S> {
        Runner {
            engine,
            router,
            coprocessor,
            cfg,
        }
    }

    /// Checks a Region with split checkers to produce split keys and generates split admin command.
    fn check_split(&mut self, region: &Region, auto_split: bool, policy: CheckPolicy) {
        let region_id = region.get_id();
        let start_key = keys::enc_start_key(region);
        let end_key = keys::enc_end_key(region);
        debug!(
            "executing task";
            "region_id" => region_id,
            "start_key" => log_wrappers::Key(&start_key),
            "end_key" => log_wrappers::Key(&end_key),
        );
        // NOTE: "SPILT" is a long-standing misspelling in the metrics module;
        // it must match the metric's declared name.
        CHECK_SPILT_COUNTER.all.inc();

        let mut host = self.coprocessor.new_split_checker_host(
            &self.cfg,
            region,
            &self.engine,
            auto_split,
            policy,
        );
        if host.skip() {
            debug!("skip split check"; "region_id" => region.get_id());
            return;
        }

        // Pick split keys per the effective policy; Approximate falls back to
        // a full scan when the estimate is unavailable.
        let split_keys = match host.policy() {
            CheckPolicy::Scan => {
                match self.scan_split_keys(&mut host, region, &start_key, &end_key) {
                    Ok(keys) => keys,
                    Err(e) => {
                        error!("failed to scan split key"; "region_id" => region_id, "err" => %e);
                        return;
                    }
                }
            }
            CheckPolicy::Approximate => match host.approximate_split_keys(region, &self.engine) {
                Ok(keys) => keys
                    .into_iter()
                    .map(|k| keys::origin_key(&k).to_vec())
                    .collect(),
                Err(e) => {
                    error!(
                        "failed to get approximate split key, try scan way";
                        "region_id" => region_id,
                        "err" => %e,
                    );
                    match self.scan_split_keys(&mut host, region, &start_key, &end_key) {
                        Ok(keys) => keys,
                        Err(e) => {
                            error!("failed to scan split key"; "region_id" => region_id, "err" => %e);
                            return;
                        }
                    }
                }
            },
            CheckPolicy::Usekey => vec![], // Handled by pd worker directly.
        };

        if !split_keys.is_empty() {
            // Send the split request through the casual router; delivery is
            // best-effort (failure is only logged).
            let region_epoch = region.get_region_epoch().clone();
            let msg = new_split_region(region_epoch, split_keys);
            let res = self.router.send(region_id, msg);
            if let Err(e) = res {
                warn!("failed to send check result"; "region_id" => region_id, "err" => %e);
            }

            CHECK_SPILT_COUNTER.success.inc();
        } else {
            debug!(
                "no need to send, split key not found";
                "region_id" => region_id,
            );

            CHECK_SPILT_COUNTER.ignore.inc();
        }
    }

    /// Gets the split keys by scanning the range.
    ///
    /// When the checkers request an early stop (`on_kv` returns true), the
    /// scan aborts without refreshing approximate stats; otherwise the exact
    /// size/keys counters observed over the full range are reported.
    fn scan_split_keys(
        &self,
        host: &mut SplitCheckerHost<'_, RocksEngine>,
        region: &Region,
        start_key: &[u8],
        end_key: &[u8],
    ) -> Result<Vec<Vec<u8>>> {
        let timer = CHECK_SPILT_HISTOGRAM.start_coarse_timer();
        MergedIterator::<<RocksEngine as Iterable>::Iterator>::new(
            &self.engine,
            LARGE_CFS,
            start_key,
            end_key,
            false,
        )
        .map(|mut iter| {
            let mut size = 0;
            let mut keys = 0;
            while let Some(e) = iter.next() {
                if host.on_kv(region, &e) {
                    return;
                }
                size += e.entry_size() as u64;
                keys += 1;
            }

            // if we scan the whole range, we can update approximate size and keys with accurate value.
            info!(
                "update approximate size and keys with accurate value";
                "region_id" => region.get_id(),
                "size" => size,
                "keys" => keys,
            );
            let _ = self.router.send(
                region.get_id(),
                CasualMessage::RegionApproximateSize { size },
            );
            let _ = self.router.send(
                region.get_id(),
                CasualMessage::RegionApproximateKeys { keys },
            );
        })?;
        timer.observe_duration();

        Ok(host.split_keys())
    }

    /// Applies a dynamic config change to the runner's local copy.
    fn change_cfg(&mut self, change: ConfigChange) {
        info!(
            "split check config updated";
            "change" => ?change
        );
        self.cfg.update(change);
    }
}

impl<S: CasualRouter<RocksEngine>> Runnable<Task> for Runner<S> {
    fn run(&mut self, task: Task) {
        match task {
            Task::SplitCheckTask {
                region,
                auto_split,
                policy,
            } => self.check_split(&region, auto_split, policy),
            Task::ChangeConfig(c) => self.change_cfg(c),
            #[cfg(any(test, feature = "testexport"))]
            Task::Validate(f) => f(&self.cfg),
        }
    }
}

/// Builds the casual message asking raftstore to split at `split_keys`.
/// `Callback::None` because the worker does not wait for the result.
fn new_split_region(
    region_epoch: RegionEpoch,
    split_keys: Vec<Vec<u8>>,
) -> CasualMessage<RocksEngine> {
    CasualMessage::SplitRegion {
        region_epoch,
        split_keys,
        callback: Callback::None,
    }
}
29.763006
103
0.504661
9091dc2ab28db0d6ec74fc3ebdb25c6902ac37de
180
#![deny(warnings, rust_2018_idioms)] #![forbid(unsafe_code)] pub mod certify; pub mod metrics; mod token; pub use self::{certify::Certify, metrics::Metrics, token::TokenSource};
20
71
0.738889
f57a705dabd4efa00037fd075eafebe089cf48e5
3,905
extern crate rand; use crate::matrix::*; //extern crate lapack; //use lapack::*; //use macros::debug; use rand::{thread_rng, Rng}; use rand::distributions::Open01; /// Compute the parameters for linear regression from two vectors /// /// # Arguments /// /// * `x` - X values (n x m) /// * `y` - Y values (n x 1) pub fn linear_regression(x:&Matrix,y:&Vec<f64>) -> Vec<f64> { let a=x.transpose().multiply(&x); let b=a.inverse(); let c=b.multiply(&x.transpose()); let d=multiply(&c.data, c.rows, c.cols, &y, y.len(), 1); return d; } /// Compute the parameters for linear regression from two vectors (the x vector contains only a single variable) /// /// # Arguments /// /// * `x` - X values (n x 1) /// * `y` - Y values (m x 1) pub fn linear_regression_one_var(data:&Vec<(f64,f64)>) -> (f64,f64) { let mut sum_x:f64=0.0; let mut sum_y:f64=0.0; let mut sum_xy:f64=0.0; let mut sum_xx:f64=0.0; let mut n:f64=0.0; for i in 0..data.len() { n=n+1.0; sum_x=sum_x+data[i].0; sum_y=sum_y+data[i].1; sum_xy=sum_xy+data[i].0*data[i].1; sum_xx=sum_xx+data[i].0*data[i].0; } let beta=(n*sum_xy-sum_x*sum_y)/(n*sum_xx-(sum_x*sum_x)); let alpha=(sum_y/n)-beta*(sum_x/n); return (alpha,beta) } /// Perform linear interpolation /// /// # Arguments /// /// * `data` - Vector of (x,y) tuples /// * `x` - value for which to provide a corresponding y value pub fn interpolate(data:&Vec<(f64,f64)>, x:f64) -> f64 { if data.len()==0 { panic!("Data vector empty."); } if x <= data[0].0 { return data[0].1; } if x >= data[data.len() - 1].0 { return data[data.len() - 1].1; } let up:usize; let down:usize; let pos_result=data.binary_search_by(|val| val.0.partial_cmp(&x).expect("NaN")); match pos_result { Ok(p) => {up=p; down=p;}, Err(p) => {up=p; down=p-1} } if up!=down { return ((data[up].1 - data[down].1) / (data[up].0 - data[down].0)) * (x - data[down].0) + data[down].1; } else { return data[up].1; } } /// Simulate normally distributed vectors of correlated variates /// /// # Arguments /// /// * `num_var` - Number of 
random variables to simulate /// * `sample_size` - Number of correlated vector to produce /// * `correlation_matrix` - Correlation matrix (dimensions: num_var x num_var) /// /// # Returns /// /// Matrix in vector form (row by row) of dimensions: sample_size x num_var /// To retrieve variable j for sample i in the matrix: matrix[i*num_var+j] pub fn simulate_normal_variates(num_var:usize, sample_size:usize, correlation_matrix: &Vec<f64>) -> Vec<f64> { let mut result = vec![0.0; sample_size*num_var]; let mut rng = thread_rng(); for i in 0..sample_size { for j in 0..num_var { let p:f64=rng.sample(Open01); let normal_val=normal_invcdf(p); result[i*num_var+j]=normal_val; } } let chol=cholesky(&correlation_matrix); let chol_transposed=transpose(&chol, num_var,num_var); let x=multiply(&result, sample_size, num_var, &chol_transposed, num_var, num_var); let y=transpose(&x, sample_size, num_var); return y; } /// Compute the inverse CDF from a normal distribution /// /// # Arguments /// /// *`p` - Probability pub fn normal_invcdf(p: f64)-> f64 { if p<0.5 { return -normal_g(-p); } else { return normal_g(1.0-p); } } fn normal_g(p: f64) -> f64 { let t=f64::sqrt(f64::ln(1.0/(f64::powf(p,2.0)))); let c0=2.515517; let c1=0.802853; let c2=0.010328; let d1=1.432788; let d2=0.189269; let d3=0.001308; let num=c0+c1*t+c2*f64::powf(t,2.0); let denom=1.0+d1*t+d2*f64::powf(t,2.0)+d3*f64::powf(t,3.0); let x=t-(num/denom); return x; }
24.104938
112
0.581562
f944a46c337717f3e1bd847deec16d8cd8cd9b3d
5,831
//! Implementation of the BPM peripheral.

use kernel::utilities::registers::interfaces::{Readable, Writeable};
use kernel::utilities::registers::{register_bitfields, ReadOnly, ReadWrite, WriteOnly};
use kernel::utilities::StaticRef;

/// Memory-mapped BPM register block. Field order and the reserved gap must
/// match the hardware layout exactly (hence `#[repr(C)]`).
#[repr(C)]
struct BpmRegisters {
    // Interrupt enable / disable / mask / status / clear.
    ier: WriteOnly<u32, Interrupt::Register>,
    idr: WriteOnly<u32, Interrupt::Register>,
    imr: ReadOnly<u32, Interrupt::Register>,
    isr: ReadOnly<u32, Interrupt::Register>,
    icr: WriteOnly<u32, Interrupt::Register>,
    // Peripheral status.
    sr: ReadOnly<u32, Status::Register>,
    // Write-protection unlock register; must be written before pmcon.
    unlock: ReadWrite<u32, Unlock::Register>,
    // Power mode control.
    pmcon: ReadWrite<u32, PowerModeControl::Register>,
    _reserved0: [u32; 2],
    // Backup wakeup cause / enable, backup pin muxing, I/O retention.
    bkupwcause: ReadOnly<u32, BackupWakeup::Register>,
    bkupwen: ReadWrite<u32, BackupWakeup::Register>,
    bkuppmux: ReadWrite<u32, BackupPinMuxing::Register>,
    ioret: ReadWrite<u32, InputOutputRetention::Register>,
}

register_bitfields![u32,
    Interrupt [
        /// Access Error
        AE 31,
        /// Power Scaling OK
        PSOK 0
    ],

    Status [
        /// Access Error
        AE 31,
        /// Power Scaling OK
        PSOK 0
    ],

    Unlock [
        /// Unlock Key
        KEY OFFSET(24) NUMBITS(8) [],
        /// Unlock Address
        ADDR OFFSET(0) NUMBITS(10) []
    ],

    PowerModeControl [
        /// Fast Wakeup
        FASTWKUP OFFSET(24) NUMBITS(1) [
            NormalWakeup = 0,
            FastWakeup = 1
        ],
        /// 32kHz-1kHz Clock Source Selection
        CK32S OFFSET(16) NUMBITS(1) [
            Osc32k = 0,
            Rc32k = 1
        ],
        /// SLEEP mode Configuration
        SLEEP OFFSET(12) NUMBITS(2) [
            CpuStopped = 0,
            CpuAhbStopped = 1,
            CpuAhbPbGclkStopped = 2,
            CpuAhbPbGclkClockStopped = 3
        ],
        /// Retention Mode
        RET OFFSET(9) NUMBITS(1) [
            NoPowerSave = 0,
            PowerSave = 1
        ],
        /// Backup Mode
        BKUP OFFSET(8) NUMBITS(1) [
            NoPowerSave = 0,
            PowerSave = 1
        ],
        /// WARN: Undocumented!
        ///
        /// According to the datasheet (sec 6.2, p57) changing power scaling
        /// requires waiting for an interrupt (presumably because flash is
        /// inaccessible during the transition). However, the ASF code sets
        /// bit 3 ('PSCM' bit) of the PMCON register, which is *blank* (not a '-')
        /// in the datasheet with supporting comments that this allows a change
        /// 'without CPU halt'
        PSCM OFFSET(3) NUMBITS(1) [
            WithCpuHalt = 0,
            WithoutCpuHalt = 1
        ],
        /// Power Scaling Change Request
        PSCREQ OFFSET(2) NUMBITS(1) [
            PowerScalingNotRequested = 0,
            PowerScalingRequested = 1
        ],
        /// Power Scaling Configuration Value
        PS OFFSET(0) NUMBITS(2) []
    ],

    BackupWakeup [
        BKUP OFFSET(0) NUMBITS(32) [
            Eic = 0b000001,
            Ast = 0b000010,
            Wdt = 0b000100,
            Bod33 = 0b001000,
            Bod18 = 0b010000,
            Picouart = 0b100000
        ]
    ],

    BackupPinMuxing [
        /// Backup Pin Muxing
        BKUPPMUX OFFSET(0) NUMBITS(9) [
            Pb01 = 0b000000001,
            Pa06 = 0b000000010,
            Pa04 = 0b000000100,
            Pa05 = 0b000001000,
            Pa07 = 0b000010000,
            Pc03 = 0b000100000,
            Pc04 = 0b001000000,
            Pc05 = 0b010000000,
            Pc06 = 0b100000000
        ]
    ],

    InputOutputRetention [
        /// Retention on I/O lines after waking up from the BACKUP mode
        RET OFFSET(0) NUMBITS(1) [
            IoLinesNotHeld = 0,
            IoLinesHeld = 1
        ]
    ]
];

// Key value required in Unlock::KEY for a write-protected register write.
const BPM_UNLOCK_KEY: u32 = 0xAA;

// Base address of the BPM register block.
const BPM: StaticRef<BpmRegisters> =
    unsafe { StaticRef::new(0x400F0000 as *const BpmRegisters) };

/// Which power scaling mode the chip should use for internal voltages
///
/// See Tables 42-6 and 42-8 (page 1125) for information of energy usage
/// of different power scaling modes
pub enum PowerScaling {
    /// Mode 0: Default out of reset
    ///
    ///   - Maximum system clock frequency is 32MHz
    ///   - Normal flash speed
    PS0,

    /// Mode 1: Reduced voltage
    ///
    ///   - Maximum system clock frequency is 12MHz
    ///   - Normal flash speed
    ///   - These peripherals are not available in Mode 1:
    ///     - USB
    ///     - DFLL
    ///     - PLL
    ///     - Programming/Erasing Flash
    PS1,

    /// Mode 2:
    ///
    ///   - Maximum system clock frequency is 48MHz
    ///   - High speed flash
    PS2,
}

/// Source for the 32 kHz (and derived 1 kHz) clock.
pub enum CK32Source {
    OSC32K = 0,
    RC32K = 1,
}

/// Select the 32 kHz clock source via PMCON.CK32S.
///
/// # Safety
///
/// Writes a write-protected hardware register; caller must ensure exclusive
/// access to the BPM during the read-modify-write.
#[inline(never)]
pub unsafe fn set_ck32source(source: CK32Source) {
    // Snapshot PMCON first: the unlock is consumed by the next write, so the
    // modify must not re-read the register.
    let control = BPM.pmcon.extract();
    unlock_register(0x1c); // Control
    BPM.pmcon
        .modify_no_read(control, PowerModeControl::CK32S.val(source as u32));
}

/// Unlock the write-protected register at byte offset `register_offset`
/// for the single following write.
unsafe fn unlock_register(register_offset: u32) {
    BPM.unlock
        .write(Unlock::KEY.val(BPM_UNLOCK_KEY) + Unlock::ADDR.val(register_offset));
}

/// True when the status register reports power scaling has settled (PSOK).
unsafe fn power_scaling_ok() -> bool {
    BPM.sr.is_set(Status::PSOK)
}

// This approach based on `bpm_power_scaling_cpu` from ASF
/// Request a power-scaling change without halting the CPU.
///
/// # Safety
///
/// Performs unlocked writes to PMCON; caller must ensure no concurrent BPM
/// access while the request is in flight.
pub unsafe fn set_power_scaling(ps_value: PowerScaling) {
    // The datasheet says to spin on this before doing anything, ASF
    // doesn't as far as I can tell, but it seems like a good idea
    while !power_scaling_ok() {}

    let control = BPM.pmcon.extract();

    // Unlock PMCON register
    unlock_register(0x1c); // Control

    // Actually change power scaling
    BPM.pmcon.modify_no_read(
        control,
        PowerModeControl::PS.val(ps_value as u32)
            + PowerModeControl::PSCM::WithoutCpuHalt
            + PowerModeControl::PSCREQ::PowerScalingRequested,
    );
}
28.583333
98
0.580861
ab98865b9b99a5bb3fdbdf3bf28ba4d88015fda4
3,465
// Copyright 2019 Mats Kindahl
//
// Licensed under the Apache License, Version 2.0 (the "License"); you
// may not use this file except in compliance with the License. You
// may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.

#[macro_use]
extern crate log;
extern crate env_logger;

extern crate bytes;
extern crate chatter;
extern crate futures;

use chatter::gossip::{GossipCodec, Message};
use chatter::state::State;

use std::net::SocketAddr;
use std::result::Result;

use tokio::net::{UdpFramed, UdpSocket};
use tokio::prelude::*;

use clap::{App, Arg};

/// Monitoring agent entry point: binds a UDP socket, decodes gossip
/// messages with `GossipCodec`, applies them to the shared `State`, and
/// re-forwards them to the known server view (tokio 0.1 futures style).
fn main() -> Result<(), Box<dyn std::error::Error>> {
    env_logger::init();

    let options = App::new("Chatter Agent")
        .version("0.1")
        .author("Mats Kindahl <[email protected]>")
        .about("Monitoring agent for distributed systems.")
        .arg(
            Arg::with_name("listen")
                .short("l")
                .long("listen")
                .value_name("ADDRESS")
                .help("Address to listen for gossip on")
                .takes_value(true),
        )
        .get_matches();

    // Bind to the requested listen address, defaulting to all interfaces
    // on port 2428; an unparsable address aborts with `?`.
    let socket = {
        let sockaddr = options
            .value_of("listen")
            .unwrap_or("0.0.0.0:2428")
            .parse::<SocketAddr>()?;
        UdpSocket::bind(&sockaddr)?
    };

    info!("Listening on {}", socket.local_addr()?);

    let shared_state = State::new();
    // Split the framed socket into an outgoing sink and an incoming stream.
    let (mut writer, reader) = UdpFramed::new(socket, GossipCodec::new()).split();

    // Future for updating state based on received gossip.
    // (A closure used as an `and_then` combinator; it passes the pair through.)
    let update_future = {
        let mut state = shared_state.clone();
        move |(msg, addr): (Message, SocketAddr)| {
            msg.update_state(&mut state, &addr);
            Ok((msg, addr))
        }
    };

    // Future for forwarding gossip to other servers in the
    // view. Right now, it forwards the message to all servers in the
    // cluster, not just a subset.
    let gossip_future = {
        let state = shared_state.clone();
        move |(mut msg, addr): (Message, SocketAddr)| {
            // Only forward while the message still has hops left; decrement
            // before fan-out so forwarding eventually terminates.
            if msg.hops > 0 {
                msg.hops -= 1;
                let locked_view = state
                    .view
                    .lock()
                    .expect("unable to lock view for forwarding");
                for (_uuid, info) in &locked_view.servers {
                    // start_send queues each datagram; poll_complete below
                    // drives the actual flush (tokio 0.1 sink protocol).
                    writer.start_send((msg.clone(), info.address.clone()))?;
                }
                writer.poll_complete()?;
            }
            Ok((msg, addr))
        }
    };

    // Pipeline: log -> update local state -> forward -> log completion.
    tokio::run({
        reader
            .and_then(move |(msg, addr): (Message, SocketAddr)| {
                debug!("Received gossip message from address {}: {:?}", addr, msg);
                Ok((msg, addr))
            })
            .and_then(update_future)
            .and_then(gossip_future)
            .for_each(|(_msg, addr): (Message, SocketAddr)| {
                debug!("Finished processing gossip message from {}", addr);
                Ok(())
            })
            .map(|_| ())
            .map_err(|e| error!("error: {:?}", e))
    });
    Ok(())
}
31.5
83
0.554978
22b7f353a6f472b58241aa97300eabfa99d2e95b
2,499
use crate::perf_event::Event;
use thiserror::Error;

/// Unified error type for the crate. Display strings come from the
/// `#[error]` attributes (via `thiserror`); variants carry just enough
/// context (probe name, event, message) to diagnose the failure.
#[derive(Error, Debug)]
pub enum BccError {
    // --- probe/event attachment failures ---
    #[error("failed to attach kprobe: ({name})")]
    AttachKprobe { name: String },
    #[error("failed to attach kretprobe ({name})")]
    AttachKretprobe { name: String },
    #[error("failed to attach perf event ({event:?})")]
    AttachPerfEvent { event: Event },
    #[error("failed to attach raw tracepoint ({name})")]
    AttachRawTracepoint { name: String },
    #[error("failed to attach tracepoint ({subsys}:{name})")]
    AttachTracepoint { subsys: String, name: String },
    #[error("failed to attach uprobe ({name})")]
    AttachUprobe { name: String },
    #[error("failed to attach uretprobe ({name})")]
    AttachUretprobe { name: String },
    // Requested feature needs a newer bcc than the one linked in.
    #[error("{cause} requires bcc >= ({min_version})")]
    BccVersionTooLow { cause: String, min_version: String },
    #[error("error compiling bpf")]
    Compilation,
    // Wraps std::io::Error transparently via #[from].
    #[error("io error")]
    IoError(#[from] std::io::Error),
    // --- probe configuration validation failures ---
    #[error("kernel probe has invalid configuration: {message}")]
    InvalidKprobe { message: String },
    #[error("perf event probe has invalid configuration: {message}")]
    InvalidPerfEvent { message: String },
    #[error("raw tracepoint probe has invalid configuration: {message}")]
    InvalidRawTracepoint { message: String },
    #[error("tracepoint probe has invalid configuration: {message}")]
    InvalidTracepoint { message: String },
    #[error("userspace probe has invalid configuration: {message}")]
    InvalidUprobe { message: String },
    #[error("error initializing perf map")]
    InitializePerfMap,
    #[error("invalid cpu range ({range})")]
    InvalidCpuRange { range: String },
    #[error("error loading bpf probe ({name})")]
    Loading { name: String },
    // Wraps CString construction failures (interior NUL bytes).
    #[error("null string")]
    NullString(#[from] std::ffi::NulError),
    #[error("error opening perf buffer")]
    OpenPerfBuffer,
    #[error("error opening perf event: ({event:?}), reason `{message}`")]
    OpenPerfEvent { event: Event, message: String },
    // --- BPF table access failures ---
    #[error("failed to delete key from table")]
    DeleteTableValue,
    #[error("failed to get value from table")]
    GetTableValue,
    #[error("failed to set value in table")]
    SetTableValue,
    #[error("table has wrong size for key or leaf")]
    TableInvalidSize,
    #[error("unknown symbol ({name}) in module ({module})")]
    UnknownSymbol { name: String, module: String },
    // Wraps UTF-8 decoding failures when reading kernel-provided strings.
    #[error("invalid utf8")]
    Utf8Error(#[from] std::str::Utf8Error),
}
40.967213
73
0.64946
90fd102c5379215b5644cde5e0b315a82e1a060c
3,304
use crate::db::schema::*;
use crate::db::types::*;
use chrono::NaiveDateTime;
use serde::Serialize;
use uuid::Uuid;

// NOTE: for every `Queryable` struct below, field order must match the
// column order of the corresponding table in `db::schema`.

/// A group row. `group_expiration` is the default membership lifetime in
/// days for this group, if any — TODO confirm unit against schema docs.
#[derive(Identifiable, Queryable, PartialEq, Debug, AsChangeset, Serialize)]
#[table_name = "groups"]
pub struct Group {
    pub id: i32,
    pub name: String,
    pub active: bool,
    pub path: String,
    pub description: String,
    pub capabilities: Vec<CapabilityType>,
    pub typ: GroupType,
    pub trust: TrustType,
    pub group_expiration: Option<i32>,
    pub created: NaiveDateTime,
}

/// Terms-of-membership text attached to a group (1:1, keyed by group_id).
#[derive(Identifiable, Associations, Queryable, PartialEq, Debug, Insertable, AsChangeset)]
#[belongs_to(Group)]
#[primary_key(group_id)]
#[table_name = "terms"]
pub struct Terms {
    pub group_id: i32,
    pub text: String,
}

/// A role within a group, bundling a set of permissions.
#[derive(Identifiable, Queryable, Associations, PartialEq, Debug)]
pub struct Role {
    pub id: i32,
    pub group_id: i32,
    pub typ: RoleType,
    pub name: String,
    pub permissions: Vec<PermissionType>,
}

/// A user's membership in a group, with the role held and optional expiry.
#[derive(Serialize, Queryable, Associations, PartialEq, Debug, Insertable, AsChangeset)]
#[belongs_to(Group)]
#[primary_key(group_id, user_uuid)]
pub struct Membership {
    pub user_uuid: Uuid,
    pub group_id: i32,
    pub role_id: i32,
    pub expiration: Option<NaiveDateTime>,
    pub added_by: Uuid,
    pub added_ts: NaiveDateTime,
}

/// A pending invitation of a user into a group.
#[derive(
    Serialize, Identifiable, Queryable, Associations, PartialEq, Debug, Insertable, AsChangeset,
)]
#[belongs_to(Group)]
#[primary_key(group_id, user_uuid)]
pub struct Invitation {
    pub group_id: i32,
    pub user_uuid: Uuid,
    pub invitation_expiration: Option<NaiveDateTime>,
    pub group_expiration: Option<i32>,
    pub added_by: Uuid,
}

/// Custom invitation body text for a group (1:1, keyed by group_id).
#[derive(Identifiable, Queryable, Associations, PartialEq, Debug, Insertable, AsChangeset)]
#[belongs_to(Group)]
#[primary_key(group_id)]
pub struct Invitationtext {
    pub group_id: i32,
    pub body: String,
}

/// A user's pending request to join a group.
#[derive(
    Serialize, Identifiable, Queryable, Associations, PartialEq, Debug, Insertable, AsChangeset,
)]
#[belongs_to(Group)]
#[primary_key(group_id, user_uuid)]
pub struct Request {
    pub group_id: i32,
    pub user_uuid: Uuid,
    pub created: NaiveDateTime,
    pub request_expiration: Option<NaiveDateTime>,
}

/// Projection used for listing groups together with their member count.
#[derive(Queryable, Serialize)]
pub struct GroupsList {
    pub name: String,
    pub typ: GroupType,
    pub trust: TrustType,
    pub member_count: i64,
}

/// Insert-only shape for `groups` (id/created are database-generated).
#[derive(Insertable)]
#[table_name = "groups"]
pub struct InsertGroup {
    pub name: String,
    pub active: bool,
    pub path: String,
    pub description: String,
    pub capabilities: Vec<CapabilityType>,
    pub typ: GroupType,
    pub trust: TrustType,
    pub group_expiration: Option<i32>,
}

/// Insert/update shape for `memberships`. `treat_none_as_null` makes a
/// `None` expiration clear the column instead of leaving it unchanged.
#[derive(Insertable, AsChangeset)]
#[table_name = "memberships"]
#[changeset_options(treat_none_as_null = "true")]
pub struct InsertMembership {
    pub user_uuid: Uuid,
    pub group_id: i32,
    pub role_id: i32,
    pub expiration: Option<NaiveDateTime>,
    pub added_by: Uuid,
}

/// Insert-only shape for `roles` (id is database-generated).
#[derive(Insertable)]
#[table_name = "roles"]
pub struct InsertRole {
    pub group_id: i32,
    pub typ: RoleType,
    pub name: String,
    pub permissions: Vec<PermissionType>,
}

/// Insert-only shape for `requests` (created is database-generated).
#[derive(Insertable)]
#[table_name = "requests"]
pub struct InsertRequest {
    pub group_id: i32,
    pub user_uuid: Uuid,
    pub request_expiration: Option<NaiveDateTime>,
}
24.842105
96
0.70339
23c13f9cc551348cdcef484791cb6f1e6628cd0f
286
use crate::schema::users; use serde::{Deserialize, Serialize}; mod from_request; pub mod repository; #[derive(Debug, Serialize, Deserialize, Queryable, Identifiable)] pub struct User { pub firebase_uid: String, pub created_at: chrono::NaiveDateTime, pub id: uuid::Uuid, }
22
65
0.730769
1cf9f2b7f83699b182d23ea853e2faa395ec0338
2,985
// Copyright Materialize, Inc. and contributors. All rights reserved.
//
// Use of this software is governed by the Business Source License
// included in the LICENSE file.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0.

//! Fuses reduce operators with parent operators if possible.

use crate::TransformArgs;
use mz_expr::visit::Visit;
use mz_expr::{MirRelationExpr, MirScalarExpr};

/// Fuses reduce operators with parent operators if possible.
#[derive(Debug)]
pub struct Reduce;

impl crate::Transform for Reduce {
    fn transform(
        &self,
        relation: &mut MirRelationExpr,
        _: TransformArgs,
    ) -> Result<(), crate::TransformError> {
        // Pre-order traversal: apply `action` at every node of the plan.
        relation.try_visit_mut_pre(&mut |e| Ok(self.action(e)))
    }
}

impl Reduce {
    /// Fuses reduce operators with parent operators if possible.
    ///
    /// Applies only to the pattern Reduce(Reduce(input)): when the outer
    /// reduce groups exclusively by the inner reduce's group-key columns
    /// and neither reduce computes aggregates, the inner reduce can be
    /// replaced by an equivalent map + project over its input.
    pub fn action(&self, relation: &mut MirRelationExpr) {
        if let MirRelationExpr::Reduce {
            input,
            group_key,
            aggregates,
            monotonic: _,
            expected_group_size: _,
        } = relation
        {
            if let MirRelationExpr::Reduce {
                input: inner_input,
                group_key: inner_group_key,
                aggregates: inner_aggregates,
                monotonic: _,
                expected_group_size: _,
            } = &mut **input
            {
                // Collect all columns referenced by outer
                let mut outer_cols = vec![];
                for expr in group_key.iter() {
                    #[allow(deprecated)]
                    expr.visit_post_nolimit(&mut |e| {
                        if let MirScalarExpr::Column(i) = e {
                            outer_cols.push(*i);
                        }
                    });
                }

                // We can fuse reduce operators as long as the outer one doesn't
                // group by an aggregation performed by the inner one.
                // (Inner output columns [0, inner_group_key.len()) are group
                // keys; columns at or beyond that are inner aggregates.)
                if outer_cols.iter().any(|c| *c >= inner_group_key.len()) {
                    return;
                }

                if aggregates.is_empty() && inner_aggregates.is_empty() {
                    // Replace inner reduce with map + project (no grouping)
                    // Plain column keys become projections; computed keys are
                    // appended via `map` and projected from past the input arity.
                    let mut outputs = vec![];
                    let mut scalars = vec![];

                    let arity = inner_input.arity();
                    for e in inner_group_key {
                        if let MirScalarExpr::Column(i) = e {
                            outputs.push(*i);
                        } else {
                            outputs.push(arity + scalars.len());
                            scalars.push(e.clone());
                        }
                    }

                    **input = inner_input.take_dangerous().map(scalars).project(outputs);
                }
            }
        }
    }
}
34.709302
89
0.512898
e652d8adfcbfa04c0f97e1cd07f237eb58699b33
2,980
// Copyright 2018 int08h, LLC all rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #[macro_use] extern crate log; extern crate csv; extern crate evmobserver; extern crate simple_logger; use std::env::args; use std::fmt::Write; use log::Level; use csv::ByteRecord; use evmobserver::csvfiles::PriceReader; use evmobserver::evminst; use evmobserver::evmtrace; use evmobserver::prices::Candlestick; const DIVISOR: f64 = 1e9; fn visitor(candle: &Candlestick, trace: &ByteRecord) { let ts = evmtrace::get_field_u64(trace, evmtrace::TS_IDX); let block_num = evmtrace::get_field_u32(trace, evmtrace::BLOCK_NUM_IDX); let txn_index = evmtrace::get_field_u16(trace, evmtrace::TXN_INDEX_IDX); let addr_from = evmtrace::get_field_str(trace, evmtrace::ADDR_FROM_IDX); let gas_px_gwei = evmtrace::get_field_u64(trace, evmtrace::GAS_PX_IDX) as f64 / DIVISOR; let mid_px_fiat = candle.mid_price(); let gas_px_eth = gas_px_gwei / DIVISOR; let gas_px_fiat = gas_px_eth * mid_px_fiat; let mut block_total_gas = 0u64; let mut block_total_px_eth = 0f64; let mut block_total_px_fiat = 0f64; let mut output = String::with_capacity(2048); for (i, inst) in evminst::VALUES.iter().enumerate() { let (count, gas) = evmtrace::get_inst_fields(trace, i); if count == 0 || gas == 0 { continue; }; block_total_gas += gas; let gas_px_used_eth = gas_px_eth * gas as f64; let gas_px_used_fiat = gas_px_fiat * gas as f64; block_total_px_eth += gas_px_used_eth; block_total_px_fiat += gas_px_used_fiat; 
write!(output, "{}:{} {} = ${:.9}\n", inst, count, gas, gas_px_used_fiat).unwrap(); } info!( "ts {}, block {}, from {}, txn {}, gas_total_count {}, gas_px_eth {:.12} \ ({:.3} gwei) * mid ${:.3} = ${:.9} TOTAL=${:.6} ({:.9} eth)\n{}", ts, block_num, addr_from, txn_index, block_total_gas, gas_px_eth, gas_px_gwei, mid_px_fiat, gas_px_fiat, block_total_px_fiat, block_total_px_eth, output ); } fn main() { simple_logger::init_with_level(Level::Info).unwrap(); let mut argv: Vec<String> = args().collect(); if argv.len() < 3 { info!("Usage: price_load PRICES.CSV [COUNTS.CSV ...]"); std::process::exit(1); } let prices = PriceReader::new(argv.get(1).unwrap()); info!("Loaded {} prices", prices.len()); prices.process(argv.split_off(2), visitor); }
32.747253
92
0.67047
db494bd017421f1f9ea43adfb118160b692e1c09
30,863
// This file is part of Deeper. // Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! # Tipping Pallet ( pallet-tips ) //! //! > NOTE: This pallet is tightly coupled with pallet-treasury. //! //! A subsystem to allow for an agile "tipping" process, whereby a reward may be given without first //! having a pre-determined stakeholder group come to consensus on how much should be paid. //! //! A group of `Tippers` is determined through the config `Config`. After half of these have //! declared some amount that they believe a particular reported reason deserves, then a countdown //! period is entered where any remaining members can declare their tip amounts also. After the //! close of the countdown period, the median of all declared tips is paid to the reported //! beneficiary, along with any finders fee, in case of a public (and bonded) original report. //! //! //! ### Terminology //! //! Tipping protocol: //! - **Tipping:** The process of gathering declarations of amounts to tip and taking the median //! amount to be transferred from the treasury to a beneficiary account. //! - **Tip Reason:** The reason for a tip; generally a URL which embodies or explains why a //! particular individual (identified by an account ID) is worthy of a recognition by the //! treasury. //! - **Finder:** The original public reporter of some reason for tipping. //! 
- **Finders Fee:** Some proportion of the tip amount that is paid to the reporter of the tip, //! rather than the main beneficiary. //! //! ## Interface //! //! ### Dispatchable Functions //! //! Tipping protocol: //! - `report_awesome` - Report something worthy of a tip and register for a finders fee. //! - `retract_tip` - Retract a previous (finders fee registered) report. //! - `tip_new` - Report an item worthy of a tip and declare a specific amount to tip. //! - `tip` - Declare or redeclare an amount to tip for a particular reason. //! - `close_tip` - Close and pay out a tip. #![cfg_attr(not(feature = "std"), no_std)] mod benchmarking; mod tests; pub mod weights; use sp_runtime::{ traits::{BadOrigin, Hash, TrailingZeroInput, Zero}, Percent, RuntimeDebug, }; use sp_std::prelude::*; use codec::{Decode, Encode}; use frame_support::{ traits::{ ContainsLengthBound, Currency, EnsureOrigin, ExistenceRequirement::KeepAlive, Get, OnUnbalanced, ReservableCurrency, SortedMembers, }, Parameter, }; pub use pallet::*; use pallet_credit::CreditInterface; pub use weights::WeightInfo; pub type BalanceOf<T> = pallet_treasury::BalanceOf<T>; pub type NegativeImbalanceOf<T> = pallet_treasury::NegativeImbalanceOf<T>; /// An open tipping "motion". Retains all details of a tip including information on the finder /// and the members who have voted. #[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, scale_info::TypeInfo)] pub struct OpenTip< AccountId: Parameter, Balance: Parameter, BlockNumber: Parameter, Hash: Parameter, > { /// The hash of the reason for the tip. The reason should be a human-readable UTF-8 encoded /// string. A URL would be sensible. reason: Hash, /// The account to be tipped. who: AccountId, /// The account who began this tip. finder: AccountId, /// The amount held on deposit for this tip. deposit: Balance, /// The block number at which this tip will close if `Some`. If `None`, then no closing is /// scheduled. 
closes: Option<BlockNumber>, /// The members who have voted for this tip. Sorted by AccountId. tips: Vec<(AccountId, Balance)>, /// Whether this tip should result in the finder taking a fee. finders_fee: bool, /// Tipping via credit credits: Vec<(AccountId, u64)>, } #[frame_support::pallet] pub mod pallet { use super::*; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] #[pallet::without_storage_info] pub struct Pallet<T>(_); #[pallet::config] pub trait Config: frame_system::Config + pallet_treasury::Config { /// The overarching event type. type Event: From<Event<Self>> + IsType<<Self as frame_system::Config>::Event>; /// Maximum acceptable reason length. #[pallet::constant] type MaximumReasonLength: Get<u32>; /// Maximum credit reward #[pallet::constant] type MaximumCreditReward: Get<u64>; /// The amount held on deposit per byte within the tip report reason or bounty description. #[pallet::constant] type DataDepositPerByte: Get<BalanceOf<Self>>; /// The period for which a tip remains open after is has achieved threshold tippers. #[pallet::constant] type TipCountdown: Get<Self::BlockNumber>; /// The percent of the final tip which goes to the original reporter of the tip. #[pallet::constant] type TipFindersFee: Get<Percent>; /// The amount held on deposit for placing a tip report. #[pallet::constant] type TipReportDepositBase: Get<BalanceOf<Self>>; /// Origin from which tippers must come. /// /// `ContainsLengthBound::max_len` must be cost free (i.e. no storage read or heavy /// operation). type Tippers: SortedMembers<Self::AccountId> + ContainsLengthBound; // CreditInterface of credit pallet type CreditInterface: CreditInterface<Self::AccountId, BalanceOf<Self>>; /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; } /// TipsMap that are not yet completed. Keyed by the hash of `(reason, who)` from the value. 
/// This has the insecure enumerable hash function since the key itself is already /// guaranteed to be a secure hash. #[pallet::storage] #[pallet::getter(fn tips)] pub type Tips<T: Config> = StorageMap< _, Twox64Concat, T::Hash, OpenTip<T::AccountId, BalanceOf<T>, T::BlockNumber, T::Hash>, OptionQuery, >; /// Simple preimage lookup from the reason's hash to the original data. Again, has an /// insecure enumerable hash since the key is guaranteed to be the result of a secure hash. #[pallet::storage] #[pallet::getter(fn reasons)] pub type Reasons<T: Config> = StorageMap<_, Identity, T::Hash, Vec<u8>, OptionQuery>; /// Tip payment address #[pallet::storage] #[pallet::getter(fn tip_payment_address)] pub type TipPaymentAddress<T: Config> = StorageValue<_, T::AccountId, OptionQuery>; #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event<T: Config> { /// A new tip suggestion has been opened. \[tip_hash\] NewTip(T::Hash), /// A new credit tip suggestion has been opened. \[tip_hash\] NewCreditTip(T::Hash), /// A tip suggestion has reached threshold and is closing. \[tip_hash\] TipClosing(T::Hash), /// A tip suggestion has been closed. \[tip_hash, who, payout\] TipClosed(T::Hash, T::AccountId, BalanceOf<T>), /// A credit tip suggestion has been closed. \[tip_hash, who, add_credit\] CreditTipClosed(T::Hash, T::AccountId, u64), /// A tip suggestion has been retracted. \[tip_hash\] TipRetracted(T::Hash), /// A tip suggestion has been slashed. \[tip_hash, finder, deposit\] TipSlashed(T::Hash, T::AccountId, BalanceOf<T>), /// Set up a tip payment address SetPaymentAddress(T::AccountId), } /// Old name generated by `decl_event`. #[deprecated(note = "use `Event` instead")] pub type RawEvent<T> = Event<T>; #[pallet::error] pub enum Error<T> { /// The reason given is just too big. ReasonTooBig, /// The tip was already found/started. AlreadyKnown, /// The tip hash is unknown. 
UnknownTip, /// The account attempting to retract the tip is not the finder of the tip. NotFinder, /// The tip cannot be claimed/closed because there are not enough tippers yet. StillOpen, /// The tip cannot be claimed/closed because it's still in the countdown period. Premature, } #[pallet::call] impl<T: Config> Pallet<T> { /// Report something `reason` that deserves a tip and claim any eventual the finder's fee. /// /// The dispatch origin for this call must be _Signed_. /// /// Payment: `TipReportDepositBase` will be reserved from the origin account, as well as /// `DataDepositPerByte` for each byte in `reason`. /// /// - `reason`: The reason for, or the thing that deserves, the tip; generally this will be /// a UTF-8-encoded URL. /// - `who`: The account which should be credited for the tip. /// /// Emits `NewTip` if successful. /// /// # <weight> /// - Complexity: `O(R)` where `R` length of `reason`. /// - encoding and hashing of 'reason' /// - DbReads: `Reasons`, `Tips` /// - DbWrites: `Reasons`, `Tips` /// # </weight> #[pallet::weight(<T as Config>::WeightInfo::report_awesome(reason.len() as u32))] pub fn report_awesome( origin: OriginFor<T>, reason: Vec<u8>, who: T::AccountId, ) -> DispatchResult { let finder = ensure_signed(origin)?; ensure!( reason.len() <= T::MaximumReasonLength::get() as usize, Error::<T>::ReasonTooBig ); let reason_hash = T::Hashing::hash(&reason[..]); ensure!( !Reasons::<T>::contains_key(&reason_hash), Error::<T>::AlreadyKnown ); let hash = T::Hashing::hash_of(&(&reason_hash, &who)); ensure!(!Tips::<T>::contains_key(&hash), Error::<T>::AlreadyKnown); let deposit = T::TipReportDepositBase::get() + T::DataDepositPerByte::get() * (reason.len() as u32).into(); T::Currency::reserve(&finder, deposit)?; Reasons::<T>::insert(&reason_hash, &reason); let tip = OpenTip { reason: reason_hash, who, finder, deposit, closes: None, tips: vec![], finders_fee: true, credits: vec![], }; Tips::<T>::insert(&hash, tip); 
Self::deposit_event(Event::NewTip(hash)); Ok(()) } /// Retract a prior tip-report from `report_awesome`, and cancel the process of tipping. /// /// If successful, the original deposit will be unreserved. /// /// The dispatch origin for this call must be _Signed_ and the tip identified by `hash` /// must have been reported by the signing account through `report_awesome` (and not /// through `tip_new`). /// /// - `hash`: The identity of the open tip for which a tip value is declared. This is formed /// as the hash of the tuple of the original tip `reason` and the beneficiary account ID. /// /// Emits `TipRetracted` if successful. /// /// # <weight> /// - Complexity: `O(1)` /// - Depends on the length of `T::Hash` which is fixed. /// - DbReads: `Tips`, `origin account` /// - DbWrites: `Reasons`, `Tips`, `origin account` /// # </weight> #[pallet::weight(<T as Config>::WeightInfo::retract_tip())] pub fn retract_tip(origin: OriginFor<T>, hash: T::Hash) -> DispatchResult { let who = ensure_signed(origin)?; let tip = Tips::<T>::get(&hash).ok_or(Error::<T>::UnknownTip)?; ensure!(tip.finder == who, Error::<T>::NotFinder); Reasons::<T>::remove(&tip.reason); Tips::<T>::remove(&hash); if !tip.deposit.is_zero() { let err_amount = T::Currency::unreserve(&who, tip.deposit); debug_assert!(err_amount.is_zero()); } Self::deposit_event(Event::TipRetracted(hash)); Ok(()) } /// Give a tip for something new; no finder's fee will be taken. /// /// The dispatch origin for this call must be _Signed_ and the signing account must be a /// member of the `Tippers` set. /// /// - `reason`: The reason for, or the thing that deserves, the tip; generally this will be /// a UTF-8-encoded URL. /// - `who`: The account which should be credited for the tip. /// - `tip_value`: The amount of tip that the sender would like to give. The median tip /// value of active tippers will be given to the `who`. /// /// Emits `NewTip` if successful. 
/// /// # <weight> /// - Complexity: `O(R + T)` where `R` length of `reason`, `T` is the number of tippers. /// - `O(T)`: decoding `Tipper` vec of length `T`. `T` is charged as upper bound given by /// `ContainsLengthBound`. The actual cost depends on the implementation of /// `T::Tippers`. /// - `O(R)`: hashing and encoding of reason of length `R` /// - DbReads: `Tippers`, `Reasons` /// - DbWrites: `Reasons`, `Tips` /// # </weight> #[pallet::weight(<T as Config>::WeightInfo::tip_new(reason.len() as u32, T::Tippers::max_len() as u32))] pub fn tip_new( origin: OriginFor<T>, reason: Vec<u8>, who: T::AccountId, #[pallet::compact] tip_value: BalanceOf<T>, ) -> DispatchResult { let tipper = ensure_signed(origin)?; ensure!(T::Tippers::contains(&tipper), BadOrigin); let reason_hash = T::Hashing::hash(&reason[..]); ensure!( !Reasons::<T>::contains_key(&reason_hash), Error::<T>::AlreadyKnown ); let hash = T::Hashing::hash_of(&(&reason_hash, &who)); Reasons::<T>::insert(&reason_hash, &reason); Self::deposit_event(Event::NewTip(hash.clone())); let tips = vec![(tipper.clone(), tip_value)]; let tip = OpenTip { reason: reason_hash, who, finder: tipper, deposit: Zero::zero(), closes: None, tips, finders_fee: false, credits: vec![], }; Tips::<T>::insert(&hash, tip); Ok(()) } /// Declare a tip value for an already-open tip. /// /// The dispatch origin for this call must be _Signed_ and the signing account must be a /// member of the `Tippers` set. /// /// - `hash`: The identity of the open tip for which a tip value is declared. This is formed /// as the hash of the tuple of the hash of the original tip `reason` and the beneficiary /// account ID. /// - `tip_value`: The amount of tip that the sender would like to give. The median tip /// value of active tippers will be given to the `who`. /// /// Emits `TipClosing` if the threshold of tippers has been reached and the countdown period /// has started. /// /// # <weight> /// - Complexity: `O(T)` where `T` is the number of tippers. 
decoding `Tipper` vec of length /// `T`, insert tip and check closing, `T` is charged as upper bound given by /// `ContainsLengthBound`. The actual cost depends on the implementation of `T::Tippers`. /// /// Actually weight could be lower as it depends on how many tips are in `OpenTip` but it /// is weighted as if almost full i.e of length `T-1`. /// - DbReads: `Tippers`, `Tips` /// - DbWrites: `Tips` /// # </weight> #[pallet::weight(<T as Config>::WeightInfo::tip(T::Tippers::max_len() as u32))] pub fn tip( origin: OriginFor<T>, hash: T::Hash, #[pallet::compact] tip_value: BalanceOf<T>, ) -> DispatchResult { let tipper = ensure_signed(origin)?; ensure!(T::Tippers::contains(&tipper), BadOrigin); let mut tip = Tips::<T>::get(hash).ok_or(Error::<T>::UnknownTip)?; if Self::insert_tip_and_check_closing(&mut tip, tipper, tip_value) { Self::deposit_event(Event::TipClosing(hash.clone())); } Tips::<T>::insert(&hash, tip); Ok(()) } /// Close and payout a tip. /// /// The dispatch origin for this call must be _Signed_. /// /// The tip identified by `hash` must have finished its countdown period. /// /// - `hash`: The identity of the open tip for which a tip value is declared. This is formed /// as the hash of the tuple of the original tip `reason` and the beneficiary account ID. /// /// # <weight> /// - Complexity: `O(T)` where `T` is the number of tippers. decoding `Tipper` vec of length /// `T`. `T` is charged as upper bound given by `ContainsLengthBound`. The actual cost /// depends on the implementation of `T::Tippers`. 
/// - DbReads: `Tips`, `Tippers`, `tip finder` /// - DbWrites: `Reasons`, `Tips`, `Tippers`, `tip finder` /// # </weight> #[pallet::weight(<T as Config>::WeightInfo::close_tip(T::Tippers::max_len() as u32))] pub fn close_tip(origin: OriginFor<T>, hash: T::Hash) -> DispatchResult { ensure_signed(origin)?; let tip = Tips::<T>::get(hash).ok_or(Error::<T>::UnknownTip)?; let n = tip.closes.as_ref().ok_or(Error::<T>::StillOpen)?; ensure!( frame_system::Pallet::<T>::block_number() >= *n, Error::<T>::Premature ); // closed. Reasons::<T>::remove(&tip.reason); Tips::<T>::remove(hash); Self::payout_tip(hash, tip); Ok(()) } /// Remove and slash an already-open tip. /// /// May only be called from `T::RejectOrigin`. /// /// As a result, the finder is slashed and the deposits are lost. /// /// Emits `TipSlashed` if successful. /// /// # <weight> /// `T` is charged as upper bound given by `ContainsLengthBound`. /// The actual cost depends on the implementation of `T::Tippers`. /// # </weight> #[pallet::weight(<T as Config>::WeightInfo::slash_tip(T::Tippers::max_len() as u32))] pub fn slash_tip(origin: OriginFor<T>, hash: T::Hash) -> DispatchResult { T::RejectOrigin::ensure_origin(origin)?; let tip = Tips::<T>::take(hash).ok_or(Error::<T>::UnknownTip)?; if !tip.deposit.is_zero() { let imbalance = T::Currency::slash_reserved(&tip.finder, tip.deposit).0; T::OnSlash::on_unbalanced(imbalance); } Reasons::<T>::remove(&tip.reason); Self::deposit_event(Event::TipSlashed(hash, tip.finder, tip.deposit)); Ok(()) } #[pallet::weight(<T as Config>::WeightInfo::slash_tip(T::Tippers::max_len() as u32))] pub fn credit_tip_new( origin: OriginFor<T>, reason: Vec<u8>, who: T::AccountId, add_credit: u64, ) -> DispatchResult { let tipper = ensure_signed(origin)?; ensure!(T::Tippers::contains(&tipper), BadOrigin); let reason_hash = T::Hashing::hash(&reason[..]); ensure!( !Reasons::<T>::contains_key(&reason_hash), Error::<T>::AlreadyKnown ); let hash = T::Hashing::hash_of(&(&reason_hash, &who)); 
Reasons::<T>::insert(&reason_hash, &reason); let credits = vec![(tipper.clone(), add_credit)]; let tip = OpenTip { reason: reason_hash, who, finder: tipper, deposit: Zero::zero(), closes: None, tips: vec![], finders_fee: false, credits, }; Tips::<T>::insert(&hash, tip); Self::deposit_event(Event::NewCreditTip(hash.clone())); Ok(()) } #[pallet::weight(<T as Config>::WeightInfo::tip(T::Tippers::max_len() as u32))] pub fn credit_tip(origin: OriginFor<T>, hash: T::Hash, add_credit: u64) -> DispatchResult { let tipper = ensure_signed(origin)?; ensure!(T::Tippers::contains(&tipper), BadOrigin); let mut tip = Tips::<T>::get(hash).ok_or(Error::<T>::UnknownTip)?; if Self::insert_credit_tip_and_check_closing(&mut tip, tipper, add_credit) { Self::deposit_event(Event::TipClosing(hash.clone())); } Tips::<T>::insert(&hash, tip); Ok(()) } #[pallet::weight(<T as Config>::WeightInfo::close_tip(T::Tippers::max_len() as u32))] pub fn close_credit_tip(origin: OriginFor<T>, hash: T::Hash) -> DispatchResult { ensure_signed(origin)?; let tip = Tips::<T>::get(hash).ok_or(Error::<T>::UnknownTip)?; let n = tip.closes.as_ref().ok_or(Error::<T>::StillOpen)?; ensure!( frame_system::Pallet::<T>::block_number() >= *n, Error::<T>::Premature ); // closed. Reasons::<T>::remove(&tip.reason); Tips::<T>::remove(hash); Self::payout_credit_tip(hash, tip); Ok(()) } #[pallet::weight(10_000 + T::DbWeight::get().reads_writes(1,1))] pub fn set_tip_owner_address(origin: OriginFor<T>, owner: T::AccountId) -> DispatchResult { ensure_root(origin)?; TipPaymentAddress::<T>::put(owner.clone()); Self::deposit_event(Event::SetPaymentAddress(owner)); Ok(()) } } } impl<T: Config> Pallet<T> { // Add public immutables and private mutables. /// The account ID of the treasury pot. /// /// This actually does computation. If you need to keep using it, then make sure you cache the /// value and only call this once. 
pub fn account_id() -> T::AccountId { let zero_account = T::AccountId::decode(&mut TrailingZeroInput::new(&[][..])) .expect("infinite input; qed"); if let Some(account) = Self::tip_payment_address() { account } else { zero_account } } /// Given a mutable reference to an `OpenTip`, insert the tip into it and check whether it /// closes, if so, then deposit the relevant event and set closing accordingly. /// /// `O(T)` and one storage access. fn insert_tip_and_check_closing( tip: &mut OpenTip<T::AccountId, BalanceOf<T>, T::BlockNumber, T::Hash>, tipper: T::AccountId, tip_value: BalanceOf<T>, ) -> bool { match tip.tips.binary_search_by_key(&&tipper, |x| &x.0) { Ok(pos) => tip.tips[pos] = (tipper, tip_value), Err(pos) => tip.tips.insert(pos, (tipper, tip_value)), } Self::retain_active_tips(&mut tip.tips); let threshold = (T::Tippers::count() + 1) / 2; if tip.tips.len() >= threshold && tip.closes.is_none() { tip.closes = Some(frame_system::Pallet::<T>::block_number() + T::TipCountdown::get()); true } else { false } } /// Remove any non-members of `Tippers` from a `tips` vector. `O(T)`. fn retain_active_tips(tips: &mut Vec<(T::AccountId, BalanceOf<T>)>) { let members = T::Tippers::sorted_members(); let mut members_iter = members.iter(); let mut member = members_iter.next(); tips.retain(|(ref a, _)| loop { match member { None => break false, Some(m) if m > a => break false, Some(m) => { member = members_iter.next(); if m < a { continue; } else { break true; } } } }); } /// Execute the payout of a tip. /// /// Up to three balance operations. /// Plus `O(T)` (`T` is Tippers length). 
fn payout_tip( hash: T::Hash, tip: OpenTip<T::AccountId, BalanceOf<T>, T::BlockNumber, T::Hash>, ) { let mut tips = tip.tips; Self::retain_active_tips(&mut tips); tips.sort_by_key(|i| i.1); let treasury = Self::account_id(); let mut payout = tips[tips.len() / 2].1; if !tip.deposit.is_zero() { let err_amount = T::Currency::unreserve(&tip.finder, tip.deposit); debug_assert!(err_amount.is_zero()); } if tip.finders_fee && tip.finder != tip.who { // pay out the finder's fee. let finders_fee = T::TipFindersFee::get() * payout; payout -= finders_fee; // this should go through given we checked it's at most the free balance, but still // we only make a best-effort. let res = T::Currency::transfer(&treasury, &tip.finder, finders_fee, KeepAlive); debug_assert!(res.is_ok()); } // same as above: best-effort only. let res = T::Currency::transfer(&treasury, &tip.who, payout, KeepAlive); debug_assert!(res.is_ok()); Self::deposit_event(Event::TipClosed(hash, tip.who, payout)); } fn insert_credit_tip_and_check_closing( tip: &mut OpenTip<T::AccountId, BalanceOf<T>, T::BlockNumber, T::Hash>, tipper: T::AccountId, add_credit: u64, ) -> bool { match tip.credits.binary_search_by_key(&&tipper, |x| &x.0) { Ok(pos) => tip.credits[pos] = (tipper, add_credit), Err(pos) => tip.credits.insert(pos, (tipper, add_credit)), } Self::retain_active_credit_tips(&mut tip.credits); let threshold = (T::Tippers::count() + 1) / 2; if tip.credits.len() >= threshold && tip.closes.is_none() { tip.closes = Some(frame_system::Pallet::<T>::block_number() + T::TipCountdown::get()); true } else { false } } fn retain_active_credit_tips(tips: &mut Vec<(T::AccountId, u64)>) { let members = T::Tippers::sorted_members(); let mut members_iter = members.iter(); let mut member = members_iter.next(); tips.retain(|(ref a, _)| loop { match member { None => break false, Some(m) if m > a => break false, Some(m) => { member = members_iter.next(); if m < a { continue; } else { break true; } } } }); } /// Execute the payout of a 
credit tip. fn payout_credit_tip( hash: T::Hash, tip: OpenTip<T::AccountId, BalanceOf<T>, T::BlockNumber, T::Hash>, ) { let mut tips = tip.credits; Self::retain_active_credit_tips(&mut tips); tips.sort_by_key(|i| i.1); let pay_credit = tips[tips.len() / 2].1.min(T::MaximumCreditReward::get()); if !tip.deposit.is_zero() { let err_amount = T::Currency::unreserve(&tip.finder, tip.deposit); debug_assert!(err_amount.is_zero()); } T::CreditInterface::update_credit_by_tip(tip.who.clone(), pay_credit); Self::deposit_event(Event::CreditTipClosed(hash, tip.who, pay_credit)); } pub fn migrate_retract_tip_for_tip_new(module: &[u8], item: &[u8]) { /// An open tipping "motion". Retains all details of a tip including information on the /// finder and the members who have voted. #[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug)] pub struct OldOpenTip< AccountId: Parameter, Balance: Parameter, BlockNumber: Parameter, Hash: Parameter, > { /// The hash of the reason for the tip. The reason should be a human-readable UTF-8 /// encoded string. A URL would be sensible. reason: Hash, /// The account to be tipped. who: AccountId, /// The account who began this tip and the amount held on deposit. finder: Option<(AccountId, Balance)>, /// The block number at which this tip will close if `Some`. If `None`, then no closing /// is scheduled. closes: Option<BlockNumber>, /// The members who have voted for this tip. Sorted by AccountId. 
tips: Vec<(AccountId, Balance)>, } use frame_support::{migration::storage_key_iter, Twox64Concat}; for (hash, old_tip) in storage_key_iter::< T::Hash, OldOpenTip<T::AccountId, BalanceOf<T>, T::BlockNumber, T::Hash>, Twox64Concat, >(module, item) .drain() { let zero_account = T::AccountId::decode(&mut TrailingZeroInput::new(&[][..])) .expect("infinite input; qed"); let (finder, deposit, finders_fee) = match old_tip.finder { Some((finder, deposit)) => (finder, deposit, true), None => (zero_account, Zero::zero(), false), }; let new_tip = OpenTip { reason: old_tip.reason, who: old_tip.who, finder, deposit, closes: old_tip.closes, tips: old_tip.tips, finders_fee, credits: vec![], }; Tips::<T>::insert(hash, new_tip) } } }
40.13394
112
0.567119
903807c353d0357b8a909a554f4282702663873c
2,035
use std::future::Future; use std::{ops::Deref, pin::Pin, sync::Arc}; #[cfg(feature = "futures")] use futures_io::{AsyncRead, AsyncWrite}; #[cfg(feature = "tokio")] use tokio::io::{AsyncRead, AsyncWrite}; #[cfg(feature = "async-std")] use async_std::io::{Read as AsyncRead, Write as AsyncWrite}; use crate::{Result, Url}; /// Boxed connection pub type BoxedConnection = Box<dyn Connection>; /// The establishing connection between backend and device pub type BoxedConnect = Pin<Box<dyn Connect>>; /// Unified connector object pub type BoxedConnector = Arc<dyn Connector>; /// Unified backend object pub type BoxedBackend = Box<dyn Backend>; /// The establishing connect between backend and device pub trait Connect: Future<Output = Result<BoxedConnection>> + Send {} impl<T> Connect for T where T: Future<Output = Result<BoxedConnection>> + Send {} /// The established connection between backend and device pub trait Connection: AsyncRead + AsyncWrite + Send + Unpin {} impl<T> Connection for T where T: AsyncRead + AsyncWrite + Send + Unpin {} /// Backend connector interface pub trait Connector: Send + Sync { /// Get device URL fn url(&self) -> &Url; /// Establish connection to device fn connect(&self) -> BoxedConnect; } /// Backend interface pub trait Backend { /// The name of backend /// /// Examples: tcp, serial. fn name(&self) -> &str; /// The backend description fn description(&self) -> &str; /// Create connector /// /// This method should check URL and extract connection options from it. /// /// Method returns connector instance when URL is compatible with backend. fn connector(&self, url: &Url) -> Option<BoxedConnector>; } impl<T> Backend for T where T: Deref<Target = dyn Backend>, { fn name(&self) -> &str { self.deref().name() } fn description(&self) -> &str { self.deref().description() } fn connector(&self, url: &Url) -> Option<BoxedConnector> { self.deref().connector(url) } }
26.776316
81
0.665848
f9389ba8ce39077e5b1bc16fcfc32dbf77030285
220
// check that we do not report a type like this as uninstantiable, // even though it would be if the nxt field had type @foo: enum foo = {x: uint, nxt: *foo}; fn main() { let x = foo({x: 0u, nxt: ptr::null()}); }
22
66
0.622727
d6c916e920747926fa496f1318a1d3cea4f209eb
9,137
use serde::{ser, Serialize}; use crate::error::{Error, Result}; use crate::ser::flavors::SerFlavor; use crate::varint::VarintUsize; /// A `serde` compatible serializer, generic over "Flavors" of serializing plugins. /// /// It should rarely be necessary to directly use this type unless you are implementing your /// own [`SerFlavor`]. /// /// See the docs for [`SerFlavor`] for more information about "flavors" of serialization /// /// [`SerFlavor`]: trait.SerFlavor.html pub struct Serializer<F> where F: SerFlavor, { /// This is the Flavor(s) that will be used to modify or store any bytes generated /// by serialization pub output: F, } impl<'a, F> ser::Serializer for &'a mut Serializer<F> where F: SerFlavor, { type Ok = (); type Error = Error; // Associated types for keeping track of additional state while serializing // compound data structures like sequences and maps. In this case no // additional state is required beyond what is already stored in the // Serializer struct. type SerializeSeq = Self; type SerializeTuple = Self; type SerializeTupleStruct = Self; type SerializeTupleVariant = Self; type SerializeMap = Self; type SerializeStruct = Self; type SerializeStructVariant = Self; fn is_human_readable(&self) -> bool { false } fn serialize_bool(self, v: bool) -> Result<()> { self.serialize_u8(if v { 1 } else { 0 }) } fn serialize_i8(self, v: i8) -> Result<()> { self.serialize_u8(v.to_le_bytes()[0]) } fn serialize_i16(self, v: i16) -> Result<()> { self.output .try_extend(&v.to_le_bytes()) .map_err(|_| Error::SerializeBufferFull) } fn serialize_i32(self, v: i32) -> Result<()> { self.output .try_extend(&v.to_le_bytes()) .map_err(|_| Error::SerializeBufferFull) } fn serialize_i64(self, v: i64) -> Result<()> { self.output .try_extend(&v.to_le_bytes()) .map_err(|_| Error::SerializeBufferFull) } fn serialize_u8(self, v: u8) -> Result<()> { self.output .try_push(v) .map_err(|_| Error::SerializeBufferFull) } fn serialize_u16(self, v: u16) -> Result<()> { self.output 
.try_extend(&v.to_le_bytes()) .map_err(|_| Error::SerializeBufferFull) } fn serialize_u32(self, v: u32) -> Result<()> { self.output .try_extend(&v.to_le_bytes()) .map_err(|_| Error::SerializeBufferFull) } fn serialize_u64(self, v: u64) -> Result<()> { self.output .try_extend(&v.to_le_bytes()) .map_err(|_| Error::SerializeBufferFull) } fn serialize_f32(self, v: f32) -> Result<()> { let buf = v.to_bits().to_le_bytes(); self.output .try_extend(&buf) .map_err(|_| Error::SerializeBufferFull) } fn serialize_f64(self, v: f64) -> Result<()> { let buf = v.to_bits().to_le_bytes(); self.output .try_extend(&buf) .map_err(|_| Error::SerializeBufferFull) } fn serialize_char(self, v: char) -> Result<()> { let mut buf = [0u8; 4]; let strsl = v.encode_utf8(&mut buf); strsl.serialize(self) } fn serialize_str(self, v: &str) -> Result<()> { VarintUsize(v.len()).serialize(&mut *self)?; self.output .try_extend(v.as_bytes()) .map_err(|_| Error::SerializeBufferFull)?; Ok(()) } fn serialize_bytes(self, v: &[u8]) -> Result<()> { self.output .try_extend(v) .map_err(|_| Error::SerializeBufferFull) } fn serialize_none(self) -> Result<()> { self.serialize_u8(0) } fn serialize_some<T>(self, value: &T) -> Result<()> where T: ?Sized + Serialize, { self.serialize_u8(1)?; value.serialize(self) } fn serialize_unit(self) -> Result<()> { Ok(()) } fn serialize_unit_struct(self, _name: &'static str) -> Result<()> { Ok(()) } fn serialize_unit_variant( self, _name: &'static str, variant_index: u32, _variant: &'static str, ) -> Result<()> { VarintUsize(variant_index as usize).serialize(self) } fn serialize_newtype_struct<T>(self, _name: &'static str, value: &T) -> Result<()> where T: ?Sized + Serialize, { value.serialize(self) } fn serialize_newtype_variant<T>( self, _name: &'static str, variant_index: u32, _variant: &'static str, value: &T, ) -> Result<()> where T: ?Sized + Serialize, { VarintUsize(variant_index as usize).serialize(&mut *self)?; value.serialize(self) } fn serialize_seq(self, len: 
Option<usize>) -> Result<Self::SerializeSeq> { VarintUsize(len.ok_or(Error::SerializeSeqLengthUnknown)?).serialize(&mut *self)?; Ok(self) } fn serialize_tuple(self, _len: usize) -> Result<Self::SerializeTuple> { Ok(self) } fn serialize_tuple_struct( self, _name: &'static str, _len: usize, ) -> Result<Self::SerializeTupleStruct> { Ok(self) } fn serialize_tuple_variant( self, _name: &'static str, variant_index: u32, _variant: &'static str, _len: usize, ) -> Result<Self::SerializeTupleVariant> { VarintUsize(variant_index as usize).serialize(&mut *self)?; Ok(self) } fn serialize_map(self, _len: Option<usize>) -> Result<Self::SerializeMap> { // self.output += "{"; // Ok(self) Err(Error::NotYetImplemented) } fn serialize_struct(self, _name: &'static str, _len: usize) -> Result<Self::SerializeStruct> { Ok(self) } fn serialize_struct_variant( self, _name: &'static str, variant_index: u32, _variant: &'static str, _len: usize, ) -> Result<Self::SerializeStructVariant> { VarintUsize(variant_index as usize).serialize(&mut *self)?; Ok(self) } fn collect_str<T: ?Sized>(self, _value: &T) -> Result<Self::Ok> where T: core::fmt::Display, { unreachable!() } } impl<'a, F> ser::SerializeSeq for &'a mut Serializer<F> where F: SerFlavor, { // Must match the `Ok` type of the serializer. type Ok = (); // Must match the `Error` type of the serializer. type Error = Error; // Serialize a single element of the sequence. fn serialize_element<T>(&mut self, value: &T) -> Result<()> where T: ?Sized + Serialize, { value.serialize(&mut **self) } // Close the sequence. 
fn end(self) -> Result<()> { Ok(()) } } impl<'a, F> ser::SerializeTuple for &'a mut Serializer<F> where F: SerFlavor, { type Ok = (); type Error = Error; fn serialize_element<T>(&mut self, value: &T) -> Result<()> where T: ?Sized + Serialize, { value.serialize(&mut **self) } fn end(self) -> Result<()> { Ok(()) } } impl<'a, F> ser::SerializeTupleStruct for &'a mut Serializer<F> where F: SerFlavor, { type Ok = (); type Error = Error; fn serialize_field<T>(&mut self, value: &T) -> Result<()> where T: ?Sized + Serialize, { value.serialize(&mut **self) } fn end(self) -> Result<()> { Ok(()) } } impl<'a, F> ser::SerializeTupleVariant for &'a mut Serializer<F> where F: SerFlavor, { type Ok = (); type Error = Error; fn serialize_field<T>(&mut self, value: &T) -> Result<()> where T: ?Sized + Serialize, { value.serialize(&mut **self) } fn end(self) -> Result<()> { Ok(()) } } impl<'a, F> ser::SerializeMap for &'a mut Serializer<F> where F: SerFlavor, { type Ok = (); type Error = Error; fn serialize_key<T>(&mut self, _key: &T) -> Result<()> where T: ?Sized + Serialize, { Err(Error::NotYetImplemented) } fn serialize_value<T>(&mut self, _value: &T) -> Result<()> where T: ?Sized + Serialize, { Err(Error::NotYetImplemented) } fn end(self) -> Result<()> { Err(Error::NotYetImplemented) } } impl<'a, F> ser::SerializeStruct for &'a mut Serializer<F> where F: SerFlavor, { type Ok = (); type Error = Error; fn serialize_field<T>(&mut self, _key: &'static str, value: &T) -> Result<()> where T: ?Sized + Serialize, { value.serialize(&mut **self) } fn end(self) -> Result<()> { Ok(()) } } impl<'a, F> ser::SerializeStructVariant for &'a mut Serializer<F> where F: SerFlavor, { type Ok = (); type Error = Error; fn serialize_field<T>(&mut self, _key: &'static str, value: &T) -> Result<()> where T: ?Sized + Serialize, { value.serialize(&mut **self) } fn end(self) -> Result<()> { Ok(()) } }
23.918848
98
0.560578
fe79323434b28b30a148317c34c139109e1d2c2a
2,055
use bincode; use libactionkv::ActionKV; use std::collections::HashMap; use std::path::Path; type ByteString = Vec<u8>; type ByteStr = [u8]; #[cfg(target_os = "windows")] const USAGE: &str = " Usage: akv_disk.exe FILE get KEY akv_disk.exe FILE delete KEY akv_disk.exe FILE insert KEY VALUE akv_disk.exe FILE update KEY VALUE "; #[cfg(not(target_os = "windows"))] const USAGE: &str = " Usage: akv_disk FILE get KEY akv_disk FILE delete KEY akv_disk FILE insert KEY VALUE akv_disk FILE update KEY VALUE "; fn store_index_on_disk(a: &mut ActionKV, index_key: &ByteStr) { a.index.remove(index_key); let index_as_bytes = bincode::serialize(&a.index).unwrap(); a.index = std::collections::HashMap::new(); a.insert(index_key, &index_as_bytes).unwrap(); } fn main() { const INDEX_KEY: &ByteStr = b"+index"; let args: Vec<String> = std::env::args().collect(); let fname = args.get(1).expect(&USAGE); let action = args.get(2).expect(&USAGE).as_ref(); let key = args.get(3).expect(&USAGE).as_ref(); let maybe_value = args.get(4); let path = Path::new(&fname); let mut store = ActionKV::open(path).expect("unable to open file"); store.load().expect("unable to load data"); store_index_on_disk(&mut store, INDEX_KEY); match action { "get" => { let index_as_bytes = store.get(&INDEX_KEY).unwrap().unwrap(); let index: HashMap<ByteString, u64> = bincode::deserialize(&index_as_bytes).unwrap(); match index.get(key) { None => eprintln!("{:?} not found", key), Some(value) => println!("{:?}", value), } } "delete" => store.delete(key).unwrap(), "insert" => { let value = maybe_value.expect(&USAGE).as_ref(); store.insert(key, value).unwrap() } "update" => { let value = maybe_value.expect(&USAGE).as_ref(); store.update(key, value).unwrap() } _ => eprintln!("{}", &USAGE), } }
29.782609
97
0.598054
d7991f2d76942998df031c2df70f2d3d719f7099
1,685
use float_extras; /// Define the maximum rounding error for arithmetic operations. Depending on the /// platform the mantissa precision may be different than others, so we choose to /// use specific values to be consistent across all. /// The values come from the C++ implementation. /// EPSILON is a small number that represents a reasonable level of noise between two /// values that can be considered to be equal. pub const EPSILON: f64 = 1e-14; /// DBL_EPSILON is a smaller number for values that require more precision. pub const DBL_EPSILON: f64 = 2.220446049250313e-16; #[macro_export] macro_rules! f64_eq { ($x:expr, $y:expr) => { ($x - $y).abs() < EPSILON }; } #[macro_export] macro_rules! assert_f64_eq { ($x:expr, $y:expr) => { assert!(($x - $y).abs() < EPSILON) }; } /// f64_eq reports whether the two values are within the default epsilon. pub fn f64_eq(x: f64, y: f64) -> bool { f64_near(x, y, EPSILON) } /// f64_near reports whether the two values are within the specified epsilon. pub fn f64_near(x: f64, y: f64, eps: f64) -> bool { (x - y).abs() <= eps } ///TODO: to util module? pub fn remainder(x: f64, y: f64) -> f64 { float_extras::f64::remainder(x, y) } pub fn clamp<T>(val: T, min: T, max: T) -> T where T: PartialOrd, { if val < min { min } else if val > max { max } else { val } } pub fn search_lower_by<F>(len: usize, f: F) -> usize where F: Fn(usize) -> bool, { let mut i = 0; let mut j = len; while i < j { let h = i + (j - i) / 2; if !f(h) { i = h + 1; } else { j = h; } } i }
22.77027
85
0.592878
1c1e149be4ffc764721cfe2eaa5f597ef5cd60c3
5,216
// Copyright Amazon.com, Inc. or its affiliates. //! Provides convenient integration with `Error` and `Result` for Ion C. use crate::*; use std::error::Error; use std::ffi::CStr; use std::fmt; use std::num::TryFromIntError; /// IonC Error code and its associated error message. /// /// `position` is populated when errors come from readers. See [`Position`] for /// more information. #[derive(Copy, Clone, Debug, PartialEq)] pub struct IonCError { pub code: i32, pub message: &'static str, pub additional: &'static str, pub position: Position, } /// Represents a position in a data source. For example, consider a file /// containing Ion data that is being parsed using an [`IonCReader`]. /// /// If a position is set, `bytes` will always be hydrated while `lines` and /// `offset` will only be populated for text readers. #[derive(Copy, Clone, Debug, PartialEq)] pub enum Position { Unknown, Offset(i64), OffsetLineColumn(i64, LineColumn), } // see above #[derive(Copy, Clone, Debug, PartialEq)] pub struct LineColumn(pub i32, pub i32); impl Position { /// Make a new position based on Ion text data. `line` and `offset` are /// known for text sources. pub fn text(bytes: i64, line: i32, offset: i32) -> Position { Position::OffsetLineColumn(bytes, LineColumn(line, offset)) } /// Make a new position based on Ion binary data. Only the byte offset is /// known. pub fn binary(bytes: i64) -> Position { Position::Offset(bytes) } } impl IonCError { /// Constructs an `IonCError` from an `iERR` error code. 
pub fn from(code: i32) -> Self { Self::with_additional(code, "iERR Result") } /// Constructs an `IonCError` from an `iERR` error code and its own message pub fn with_additional(code: i32, additional: &'static str) -> Self { match code { ion_error_code_IERR_NOT_IMPL..=ion_error_code_IERR_INVALID_LOB_TERMINATOR => { unsafe { // this gives us static storage pointer so it doesn't violate lifetime let c_str = CStr::from_ptr(ion_error_to_str(code)); // the error codes are all ASCII so a panic here is a bug let message = c_str.to_str().unwrap(); Self { code, message, additional, position: Position::Unknown, } } } _ => Self { code, message: "Unknown Ion C Error Code", additional, position: Position::Unknown, }, } } /// Adds a `Position` to an existing `IonCError`. pub fn with_position(mut self, pos: Position) -> Self { self.position = pos; self } } impl fmt::Display for IonCError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, "Error {}: {} ({})", self.code, self.message, self.additional ) } } impl Error for IonCError {} impl From<TryFromIntError> for IonCError { /// Due to the way Ion C works with sizes as i32, it is convenient to be able to coerce /// a TryFromIntError to `IonCError`. fn from(_: TryFromIntError) -> Self { IonCError::from(ion_error_code_IERR_NUMERIC_OVERFLOW) } } impl From<Utf8Error> for IonCError { /// Due to the way Ion C works with raw UTF-8 byte sequences, it is convenient to be able /// to coerce a `Utf8Error` to `IonCError`. fn from(_: Utf8Error) -> Self { IonCError::from(ion_error_code_IERR_INVALID_UTF8) } } /// A type alias to results from Ion C API, the result value is generally `()` to signify /// `ion_error_code_IERR_OK` since Ion C doesn't return results but generally takes /// output parameters. pub type IonCResult<T> = Result<T, IonCError>; /// Macro to transform Ion C error code expressions into `Result<(), IonCError>`. 
/// Higher-level facades over Ion C functions could map this to `Result<T, IonCError>` /// or the like. /// /// NB: `ionc!` implies `unsafe` code. /// /// ## Usage /// /// ``` /// # use std::ptr; /// # use ion_c_sys::*; /// # use ion_c_sys::result::*; /// # fn main() -> IonCResult<()> { /// let mut data = String::from("42"); /// let mut ion_reader: hREADER = ptr::null_mut(); /// let mut ion_type: ION_TYPE = ptr::null_mut(); /// ionc!( /// ion_reader_open_buffer( /// &mut ion_reader, /// data.as_mut_ptr(), /// data.len() as i32, /// ptr::null_mut() /// ) /// )?; /// /// ionc!(ion_reader_next(ion_reader, &mut ion_type))?; /// assert_eq!(ion_type as u32, tid_INT_INT); /// /// let mut value = 0; /// ionc!(ion_reader_read_int64(ion_reader, &mut value))?; /// assert_eq!(value, 42); /// /// ionc!(ion_reader_close(ion_reader)) /// # } /// ``` #[macro_export] macro_rules! ionc { ($e:expr) => { unsafe { let err: i32 = $e; match err { $crate::ion_error_code_IERR_OK => Ok(()), code => Err($crate::result::IonCError::from(code)), } } }; }
30.150289
93
0.58819
89a9028782470c4daadcacb66eabb5350067d8ab
2,216
use crate::Test;
use io_uring::{opcode, IoUring};

/// Submits a single no-op SQE and checks that exactly one CQE comes back
/// with the matching user_data and a zero result.
pub fn test_nop(ring: &mut IoUring, test: &Test) -> anyhow::Result<()> {
    require! {
        test;
    }

    println!("test nop");

    // Tag the entry so its completion can be identified below.
    let nop_e = opcode::Nop::new().build().user_data(0x42);

    unsafe {
        let mut queue = ring.submission();
        queue.push(&nop_e).expect("queue is full");
    }

    ring.submit_and_wait(1)?;

    let cqes = ring.completion().collect::<Vec<_>>();

    assert_eq!(cqes.len(), 1);
    assert_eq!(cqes[0].user_data(), 0x42);
    assert_eq!(cqes[0].result(), 0);

    Ok(())
}

/// Exercises the multi-entry push/fill APIs: pushing past capacity must
/// fail without enqueueing, and every submitted entry must complete.
#[cfg(feature = "unstable")]
pub fn test_batch(ring: &mut IoUring, test: &Test) -> anyhow::Result<()> {
    use std::mem::MaybeUninit;

    require! {
        test;
    }

    println!("test batch");

    assert!(ring.completion().is_empty());

    unsafe {
        let sqes = vec![opcode::Nop::new().build().user_data(0x09); 5];
        let mut sq = ring.submission();

        assert_eq!(sq.capacity(), 8);

        // 5 of 8 slots used.
        sq.push_multiple(&sqes).unwrap();
        assert_eq!(sq.len(), 5);

        // Pushing 5 more would overflow; must fail and leave the queue
        // unchanged (all-or-nothing).
        let ret = sq.push_multiple(&sqes);
        assert!(ret.is_err());
        assert_eq!(sq.len(), 5);

        // Topping up to exactly full (5 + 3 = 8) succeeds.
        sq.push_multiple(&sqes[..3]).unwrap();
    }

    ring.submit_and_wait(8)?;

    // Offer 10 slots but expect `fill` to hand back only the 8 completions
    // that actually exist.
    let mut cqes = (0..10).map(|_| MaybeUninit::uninit()).collect::<Vec<_>>();
    let cqes = ring.completion().fill(&mut cqes);

    assert_eq!(cqes.len(), 8);

    for entry in cqes {
        assert_eq!(entry.user_data(), 0x09);
    }

    Ok(())
}

/// Exercises the split-borrow API (submitter / SQ / CQ held at once),
/// checking that queue state is only observed after an explicit `sync`.
pub fn test_queue_split(ring: &mut IoUring, test: &Test) -> anyhow::Result<()> {
    require! {
        test;
    }

    println!("test queue_split");

    let (submitter, mut sq, mut cq) = ring.split();

    // Fill the submission queue to capacity with no-ops.
    assert!(sq.is_empty());
    for _ in 0..sq.capacity() {
        unsafe {
            sq.push(&opcode::Nop::new().build()).expect("queue is full");
        }
    }
    assert!(sq.is_full());
    sq.sync();

    assert_eq!(submitter.submit()?, sq.capacity());

    // The local view still shows a full queue until re-synced; after
    // sync it reflects that the kernel consumed every entry.
    assert!(sq.is_full());
    sq.sync();
    assert!(sq.is_empty());

    // Likewise the local completion view is empty until synced.
    assert!(cq.is_empty());
    cq.sync();
    assert_eq!(cq.len(), sq.capacity());
    // Draining the iterator yields one completion per submitted entry.
    assert_eq!(cq.by_ref().count(), sq.capacity());
    cq.sync();

    Ok(())
}
20.330275
80
0.546931