column              type      range
hexsha              string    length 40
size                int64     4 to 1.05M
content             string    length 4 to 1.05M
avg_line_length     float64   1.33 to 100
max_line_length     int64     1 to 1k
alphanum_fraction   float64   0.25 to 1
abe94159888a6b6a31e6b06576bd3e3fba54fe34
2,150
use js_sys::{Function, Object, Promise};
use serde::Deserialize;
use serde::Serialize;
use wasm_bindgen::prelude::*;
use wasm_bindgen::JsCast;

#[wasm_bindgen]
extern "C" {
    #[derive(Debug)]
    pub type Window;

    pub static window: Window;

    #[wasm_bindgen(method)]
    pub fn open(this: &Window, url: String) -> Window;
}

#[wasm_bindgen]
extern "C" {
    pub type Browser;

    pub static browser: Browser;

    #[wasm_bindgen(method, getter)]
    pub fn windows(this: &Browser) -> Windows;

    #[wasm_bindgen(method, getter)]
    pub fn runtime(this: &Browser) -> Runtime;
}

#[wasm_bindgen]
extern "C" {
    pub type Sidebar;

    #[wasm_bindgen(method, getter)]
    pub fn open(this: &Sidebar) -> Promise;
}

#[wasm_bindgen]
extern "C" {
    pub type Windows;

    #[wasm_bindgen(method)]
    pub fn create(this: &Windows, info: &Object) -> Promise;
}

#[wasm_bindgen]
extern "C" {
    pub type Runtime;

    #[wasm_bindgen(method, getter, js_name = onMessage)]
    pub fn on_message(this: &Runtime) -> Event;
}

#[wasm_bindgen]
extern "C" {
    pub type Event;

    #[wasm_bindgen(method, js_name = addListener)]
    pub fn add_listener(this: &Event, callback: &Function);
}

#[wasm_bindgen(start)]
pub fn main() {
    wasm_logger::init(wasm_logger::Config::new(log::Level::Debug));
    log::info!("Hello World from Background Script");

    let closure = Closure::wrap(Box::new(|msg: JsValue| {
        log::info!("Received: {}", msg.as_string().unwrap());
        let popup = Popup {
            url: "popup.html".to_string(),
            type_: "popup".to_string(),
            height: 200,
            width: 200,
        };
        let js_value = JsValue::from_serde(&popup).unwrap();
        let object = Object::try_from(&js_value).unwrap();
        let _x = browser.windows().create(&object);
    }) as Box<dyn FnMut(_)>);

    browser
        .runtime()
        .on_message()
        .add_listener(closure.as_ref().unchecked_ref());
    closure.forget();
}

#[derive(Serialize, Deserialize)]
struct Popup {
    pub url: String,
    #[serde(rename = "type")]
    pub type_: String,
    pub height: u8,
    pub width: u8,
}
21.938776
67
0.614884
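A note on the `#[serde(rename = "type")]` attribute in the file above: it is what lets the Rust field `type_` (named that way because `type` is a keyword) serialize under the `type` key that `windows.create` expects. A minimal round-trip sketch, assuming `serde_json` as the serializer; the file itself goes through `JsValue::from_serde`, which honours the same serde attributes.

use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize)]
struct Popup {
    pub url: String,
    #[serde(rename = "type")]
    pub type_: String, // `type` is a Rust keyword, hence the rename
    pub height: u8,
    pub width: u8,
}

fn main() {
    let popup = Popup {
        url: "popup.html".to_string(),
        type_: "popup".to_string(),
        height: 200,
        width: 200,
    };
    // The `type_` field is emitted under the key `type`.
    let json = serde_json::to_string(&popup).unwrap();
    assert_eq!(
        json,
        r#"{"url":"popup.html","type":"popup","height":200,"width":200}"#
    );
}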
fca0f231c2587900c82060922e316d93c8a43cb2
9,250
#[doc(hidden)]
#[macro_export]
macro_rules! fiat_define_weierstrass_curve {
    ($FE:ident) => {
        lazy_static! {
            static ref A: $FE = $FE::from_bytes(&A_BYTES).unwrap();
            static ref B: $FE = $FE::from_bytes(&B_BYTES).unwrap();
            static ref B3: $FE = $FE::from_bytes(&B3_BYTES).unwrap();
            static ref GX: $FE = $FE::from_bytes(&GX_BYTES).unwrap();
            static ref GY: $FE = $FE::from_bytes(&GY_BYTES).unwrap();
            static ref ORDER: &'static [u8] = &ORDER_BYTES;
        }

        /// The Weierstrass elliptic curve object itself
        #[derive(Debug, Clone, Copy)]
        pub struct Curve;

        impl Curve {
            /// Get the group order as an array of bytes in big endian representation
            pub fn group_order(self) -> &'static [u8] {
                &ORDER
            }

            /// Return the generator field element in affine coordinates (X,Y)
            pub fn generator() -> (&'static $FE, &'static $FE) {
                (&GX, &GY)
            }
        }

        impl WeierstrassCurve for Curve {
            type FieldElement = $FE;

            fn a(self) -> &'static Self::FieldElement {
                &A
            }
            fn b(self) -> &'static Self::FieldElement {
                &B
            }
            fn b3(self) -> &'static Self::FieldElement {
                &B3
            }
        }
    };
}

#[doc(hidden)]
#[macro_export]
macro_rules! fiat_define_weierstrass_points {
    ($FE:ident) => {
        /// Affine Point on the curve of type (X,Y)
        ///
        /// Note that this representation cannot handle the point at infinity
        #[derive(Clone, Debug, PartialEq, Eq)]
        pub struct PointAffine(affine::Point<$FE>);

        /// Point on the curve using a more optimised representation
        ///
        /// This implementation uses projective coordinates (X:Y:Z)
        #[derive(Clone, Debug, PartialEq, Eq)]
        pub struct Point(projective::Point<$FE>);

        impl PointAffine {
            /// Curve generator point in affine coordinates
            pub fn generator() -> Self {
                PointAffine(affine::Point {
                    x: GX.clone(),
                    y: GY.clone(),
                })
            }

            /// Try to create an affine point with X, Y coordinates.
            ///
            /// Checks whether the equation y^2 = x^3 + a*x + b (mod p) holds
            /// for this curve; if it doesn't, None is returned
            pub fn from_coordinate(x: &FieldElement, y: &FieldElement) -> Option<Self> {
                affine::Point::from_coordinate(x, y, Curve).map(PointAffine)
            }

            /// Return the tuple of coordinates (x, y) associated with this
            /// affine point
            pub fn to_coordinate(&self) -> (&FieldElement, &FieldElement) {
                (&self.0.x, &self.0.y)
            }

            /// Double the affine point Self
            ///
            /// This is equivalent to Self + Self at the mathematical level,
            /// but is implemented more quickly than the normal addition
            /// of two possibly arbitrary points
            pub fn double(&self) -> PointAffine {
                PointAffine(affine::Point::double(&self.0, Curve))
            }

            /// Turn an affine point into the X component and the sign of the Y component
            ///
            /// This is often referred to as point compression, and relies on the
            /// fact that there are two points on the curve for a valid x component,
            /// (x,y) and (x,-y), unless y is 0. So it is sufficient to know just the
            /// sign of y to know which point is in use for a given x component
            pub fn compress(&self) -> (&FieldElement, Sign) {
                self.0.compress()
            }

            /// Try to create an affine point given an X component and the sign
            /// of the Y component.
            ///
            /// This is often referred to as point decompression
            pub fn decompress(x: &FieldElement, sign: Sign) -> Option<Self> {
                affine::Point::decompress(x, sign, Curve).map(PointAffine)
            }
        }

        impl<'a, 'b> std::ops::Add<&'b PointAffine> for &'a PointAffine {
            type Output = PointAffine;

            fn add(self, other: &'b PointAffine) -> PointAffine {
                PointAffine(&self.0 + &other.0)
            }
        }

        impl Point {
            /// Curve generator point
            pub fn generator() -> Self {
                Point(projective::Point {
                    x: GX.clone(),
                    y: GY.clone(),
                    z: FieldElement::one(),
                })
            }

            /// Point at infinity, used as additive zero
            pub fn infinity() -> Self {
                Point(projective::Point::infinity())
            }

            /// Convert an affine point to the optimised point representation
            ///
            /// In projective coordinates this means (X,Y) => (X:Y:1)
            pub fn from_affine(p: &PointAffine) -> Self {
                Point(projective::Point::from_affine(&p.0))
            }

            /// Convert a point to the affine point
            ///
            /// In projective coordinates this means (X:Y:Z) => (X/Z, Y/Z)
            pub fn to_affine(&self) -> Option<PointAffine> {
                self.0.to_affine().map(PointAffine)
            }

            /// Normalize the point, keeping the same representation
            ///
            /// In projective coordinates this means (X:Y:Z) => (X/Z:Y/Z:1)
            pub fn normalize(&mut self) {
                self.0.normalize()
            }
        }

        impl From<PointAffine> for Point {
            fn from(p: PointAffine) -> Self {
                Point(projective::Point::from_affine(&p.0))
            }
        }

        impl From<&PointAffine> for Point {
            fn from(p: &PointAffine) -> Self {
                Point(projective::Point::from_affine(&p.0))
            }
        }

        // **************
        // Point Negation
        // **************

        impl std::ops::Neg for Point {
            type Output = Point;

            fn neg(self) -> Self::Output {
                Point(self.0.neg())
            }
        }

        impl<'a> std::ops::Neg for &'a Point {
            type Output = Point;

            fn neg(self) -> Self::Output {
                Point(self.0.clone().neg())
            }
        }

        // *************
        // Point Scaling
        // *************

        // note that scalar multiplication is really defined for arbitrary scalars
        // (of any size), not just the *field element* scalar defined in F(p).
        // this semantic abuse makes it easier to use.

        impl<'a, 'b> std::ops::Mul<&'b Scalar> for &'a Point {
            type Output = Point;

            fn mul(self, other: &'b Scalar) -> Point {
                self.scale(other)
                //Point(self.0.scale_a0(&other.to_bytes(), Curve))
            }
        }

        impl<'a, 'b> std::ops::Mul<&'b Point> for &'a Scalar {
            type Output = Point;

            fn mul(self, other: &'b Point) -> Point {
                other * self
            }
        }

        // **************
        // Point Addition
        // **************

        impl<'a, 'b> std::ops::Add<&'b Point> for &'a Point {
            type Output = Point;

            fn add(self, other: &'b Point) -> Point {
                self.add_or_double(other)
            }
        }

        impl<'b> std::ops::Add<&'b Point> for Point {
            type Output = Point;

            fn add(self, other: &'b Point) -> Point {
                &self + other
            }
        }

        impl<'a> std::ops::Add<Point> for &'a Point {
            type Output = Point;

            fn add(self, other: Point) -> Point {
                self + &other
            }
        }

        impl std::ops::Add<Point> for Point {
            type Output = Point;

            fn add(self, other: Point) -> Point {
                &self + &other
            }
        }

        impl<'a, 'b> std::ops::Sub<&'b Point> for &'a Point {
            type Output = Point;

            fn sub(self, other: &'b Point) -> Point {
                self + (-other)
            }
        }

        impl std::ops::Sub<Point> for Point {
            type Output = Point;

            fn sub(self, other: Point) -> Point {
                &self - &other
            }
        }
    };
}

/*
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn bytes() {
        let b: [u8; 32] = [
            1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 7, 8, 9, 0,
            1, 2, 3, 4,
        ];
        let s = Scalar::from_bytes(&b).unwrap();
        assert_eq!(b, s.to_bytes())
    }

    #[test]
    fn bytes_u64() {
        let b: [u8; 32] = [
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0xce,
        ];
        let s1 = Scalar::from_bytes(&b).unwrap();
        let s2 = Scalar::from_u64(0xce);
        assert_eq!(s1, s2)
    }
}
*/
31.040268
101
0.464973
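Two facts from the doc comments above, stated precisely. A point (x, y) lies on the short Weierstrass curve iff

$$y^2 \equiv x^3 + ax + b \pmod{p},$$

and for a valid x the only solutions are (x, y) and (x, -y), which coincide only when y = 0; this is why `compress` can represent a point as x plus the sign of y. The projective representation used by `Point` relates to the affine one by

$$(X : Y : Z) \sim \left(\tfrac{X}{Z},\ \tfrac{Y}{Z}\right), \qquad (x, y) \mapsto (x : y : 1),$$

which lets repeated additions avoid a field inversion per step, deferring division to a single `to_affine` or `normalize` call.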
87645e20fabe8a3b78bc033654a1dc20dfa2383d
2,152
#![feature(test)]
extern crate test;
extern crate variable_size_byte_writer;

use test::Bencher;
use variable_size_byte_writer::*;

#[bench]
fn write_59bits_vec(bench: &mut Bencher) {
    let mut target = std::io::Cursor::new(vec![]);
    let mut writer = VariableSizeByteWriter::new(target);
    let bits = test::black_box(59);

    bench.iter(|| writer.write::<Max64>(0x7A2_5555_ABAB_FFFF, bits));
}

#[bench]
fn write_59bits_file(bench: &mut Bencher) {
    let _res = std::fs::create_dir("benches/temp");
    let file = std::fs::File::create("benches/temp/write_64_file.temp").unwrap();
    let mut writer = VariableSizeByteWriter::new(file);
    let bits = test::black_box(59);

    bench.iter(|| writer.write::<Max64>(0x7A2_5555_ABAB_FFFF, bits));

    std::fs::remove_file("benches/temp/write_64_file.temp").unwrap();
}

#[bench]
fn write_21bits_vec(bench: &mut Bencher) {
    let mut target = std::io::Cursor::new(vec![]);
    let mut writer = VariableSizeByteWriter::new(target);
    let bits = test::black_box(21);

    bench.iter(|| writer.write::<Max24>(0x7_F1F0, bits));
}

#[bench]
fn write_21bits_file(bench: &mut Bencher) {
    let _res = std::fs::create_dir("benches/temp");
    let file = std::fs::File::create("benches/temp/write_32_file.temp").unwrap();
    let mut writer = VariableSizeByteWriter::new(file);
    let bits = test::black_box(21);

    bench.iter(|| writer.write::<Max24>(0x7_F1F0, bits));

    std::fs::remove_file("benches/temp/write_32_file.temp").unwrap();
}

#[bench]
fn write_7bits_vec(bench: &mut Bencher) {
    let mut target = std::io::Cursor::new(vec![]);
    let mut writer = VariableSizeByteWriter::new(target);
    let bits = test::black_box(7);

    bench.iter(|| writer.write::<Max8>(0x1A, bits));
}

#[bench]
fn write_7bits_file(bench: &mut Bencher) {
    let _res = std::fs::create_dir("benches/temp");
    let file = std::fs::File::create("benches/temp/write_8_file.temp").unwrap();
    let mut writer = VariableSizeByteWriter::new(file);
    let bits = test::black_box(7);

    bench.iter(|| writer.write::<Max8>(0x1A, bits));

    std::fs::remove_file("benches/temp/write_8_file.temp").unwrap();
}
30.742857
81
0.675186
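The benchmarks above write values that are not byte-aligned (7, 21, and 59 bits). For readers unfamiliar with the technique, here is a minimal, self-contained sketch of how a variable-size bit writer typically packs bits across byte boundaries. This illustrates the general idea only; it is not the `variable_size_byte_writer` implementation, and whether that crate packs LSB-first or MSB-first is not visible from the benchmarks, so the sketch picks LSB-first arbitrarily.

/// Minimal illustration of packing arbitrary-width values into bytes.
struct BitWriter {
    out: Vec<u8>,
    current: u8, // partially filled byte
    filled: u32, // number of valid bits in `current`
}

impl BitWriter {
    fn new() -> Self {
        BitWriter { out: Vec::new(), current: 0, filled: 0 }
    }

    /// Write the low `bits` bits of `value`, least significant bits first.
    fn write(&mut self, mut value: u64, mut bits: u32) {
        while bits > 0 {
            let free = 8 - self.filled;
            let take = bits.min(free);
            let mask = (1u64 << take) - 1;
            // Append `take` bits into the partially filled byte.
            self.current |= ((value & mask) as u8) << self.filled;
            self.filled += take;
            value >>= take;
            bits -= take;
            if self.filled == 8 {
                self.out.push(self.current);
                self.current = 0;
                self.filled = 0;
            }
        }
    }
}

fn main() {
    let mut w = BitWriter::new();
    w.write(0x1A, 7); // fills 7 bits of the first byte
    w.write(0x3, 2);  // 1 bit completes byte 0, 1 bit starts byte 1
    assert_eq!(w.out, vec![0x1A | 0x80]); // 0b1001_1010
}

The shift/mask work at byte boundaries is the cost such benchmarks probe, which is presumably why the suite splits writes by width.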
ed43549d43f916988e123172d857d8852fea07d1
765
use rune::diagnostics::WarningDiagnosticKind::*;
use rune::span;
use rune_tests::*;

#[test]
fn test_let_pattern_might_panic() {
    assert_warnings! {
        r#"pub fn main() { let [0, 1, 3] = []; }"#,
        LetPatternMightPanic { span, .. } => {
            assert_eq!(span, span!(16, 35));
        }
    };
}

#[test]
fn test_template_without_variables() {
    assert_warnings! {
        r#"pub fn main() { `Hello World` }"#,
        TemplateWithoutExpansions { span, .. } => {
            assert_eq!(span, span!(16, 29));
        }
    };
}

#[test]
fn test_remove_variant_parens() {
    assert_warnings! {
        r#"pub fn main() { None() }"#,
        RemoveTupleCallParams { span, .. } => {
            assert_eq!(span, span!(16, 22));
        }
    };
}
22.5
51
0.52549
1dc780d9484f1806dc27fbb7e4cfc33ca0412a5b
3,568
mod activity_tasks;
mod child_workflows;
mod determinism;
mod local_activities;
mod queries;
mod replay_flag;
mod retry;
mod workers;
mod workflow_cancels;
mod workflow_tasks;

use crate::{
    errors::{PollActivityError, PollWfError},
    pollers::MockManualGateway,
    test_help::{
        build_fake_core, canned_histories, fake_sg_opts, hist_to_poll_resp, ResponseType, TEST_Q,
    },
    Core, CoreInitOptionsBuilder, CoreSDK, WorkerConfigBuilder,
};
use futures::FutureExt;
use std::time::Duration;
use temporal_sdk_core_protos::{
    coresdk::workflow_completion::WfActivationCompletion,
    temporal::api::workflowservice::v1::PollActivityTaskQueueResponse,
};
use tokio::{sync::Barrier, time::sleep};

#[tokio::test]
async fn after_shutdown_server_is_not_polled() {
    let t = canned_histories::single_timer("fake_timer");
    let core = build_fake_core("fake_wf_id", t, &[1]);

    let res = core.poll_workflow_activation(TEST_Q).await.unwrap();
    assert_eq!(res.jobs.len(), 1);
    core.complete_workflow_activation(WfActivationCompletion::empty(TEST_Q, res.run_id))
        .await
        .unwrap();
    core.shutdown().await;

    assert_matches!(
        core.poll_workflow_activation(TEST_Q).await.unwrap_err(),
        PollWfError::ShutDown
    );
}

// Better than cloning a billion arcs...
lazy_static::lazy_static! {
    static ref BARR: Barrier = Barrier::new(3);
}

#[tokio::test]
async fn shutdown_interrupts_both_polls() {
    let mut mock_gateway = MockManualGateway::new();
    mock_gateway
        .expect_poll_activity_task()
        .times(1)
        .returning(move |_| {
            async move {
                BARR.wait().await;
                sleep(Duration::from_secs(1)).await;
                Ok(PollActivityTaskQueueResponse {
                    task_token: vec![1],
                    heartbeat_timeout: Some(Duration::from_secs(1).into()),
                    ..Default::default()
                })
            }
            .boxed()
        });
    mock_gateway
        .expect_poll_workflow_task()
        .times(1)
        .returning(move |_, _| {
            async move {
                BARR.wait().await;
                sleep(Duration::from_secs(1)).await;
                let t = canned_histories::single_timer("hi");
                Ok(hist_to_poll_resp(
                    &t,
                    "wf".to_string(),
                    ResponseType::AllHistory,
                    TEST_Q.to_string(),
                ))
            }
            .boxed()
        });

    let core = CoreSDK::new(
        mock_gateway,
        CoreInitOptionsBuilder::default()
            .gateway_opts(fake_sg_opts())
            .build()
            .unwrap(),
    );
    core.register_worker(
        WorkerConfigBuilder::default()
            .task_queue(TEST_Q)
            // Need only 1 concurrent poller for mock expectations to work here
            .max_concurrent_wft_polls(1_usize)
            .max_concurrent_at_polls(1_usize)
            .build()
            .unwrap(),
    )
    .await
    .unwrap();

    tokio::join! {
        async {
            assert_matches!(
                core.poll_activity_task(TEST_Q).await.unwrap_err(),
                PollActivityError::ShutDown
            );
        },
        async {
            assert_matches!(
                core.poll_workflow_activation(TEST_Q).await.unwrap_err(),
                PollWfError::ShutDown
            );
        },
        async {
            // Give polling a bit to get stuck, then shutdown
            BARR.wait().await;
            core.shutdown().await;
        }
    };
}
30.237288
97
0.580717
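The `Barrier::new(3)` static above encodes the coordination at the heart of `shutdown_interrupts_both_polls`: two pollers park on a barrier, and a third task passes the same barrier and then triggers shutdown, interrupting both. A self-contained sketch of that pattern with plain tokio primitives; a `watch` channel stands in for core's shutdown signal, and none of these names are the temporal-sdk-core API.

use std::sync::Arc;
use tokio::sync::{watch, Barrier};

#[tokio::main]
async fn main() {
    let barrier = Arc::new(Barrier::new(3));
    let (tx, rx) = watch::channel(false);

    let mut handles = vec![];
    for i in 0..2 {
        let b = barrier.clone();
        let mut rx = rx.clone();
        handles.push(tokio::spawn(async move {
            b.wait().await;
            // A real poller would select between its poll future and this signal.
            while !*rx.borrow() {
                rx.changed().await.unwrap();
            }
            println!("poller {i} interrupted");
        }));
    }

    barrier.wait().await; // let both pollers get "stuck" first
    tx.send(true).unwrap(); // then signal shutdown
    for h in handles {
        h.await.unwrap();
    }
}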
f8acc7bbc5cc6c7c5ac042cbb9fd201e8bbabb0e
2,500
pub mod rust;

/// This trait is responsible for providing a list of members,
/// which are directories to be linted against.
pub trait Config {
    /// This function should return a list of relative paths
    /// a linter will iterate on.
    ///
    /// If only the current working directory must be checked, it must return `vec![".".to_string()]`
    ///
    /// If several directories must be checked,
    /// return their relative path as strings.
    ///
    /// For example if your current working directory is `foo`,
    /// and you want to check `./bar` and `./baz`,
    /// return `vec!["bar".to_string(), "baz".to_string()]`
    ///
    /// # Example with the root directory
    /// ```
    /// # use cargo_scout_lib::config::Config;
    /// # struct CustomConfig{}
    /// # impl CustomConfig {
    /// #     fn new() -> Self {
    /// #         Self {}
    /// #     }
    /// # }
    /// # // Your own implementation goes here
    /// # impl Config for CustomConfig {
    /// #     fn members(&self) -> Vec<String> {
    /// #         vec![".".to_string()]
    /// #     }
    /// # }
    /// let config = CustomConfig::new();
    /// // Only the current directory must be linted
    /// assert_eq!(vec![".".to_string()], config.members());
    /// ```
    ///
    /// # Example with two subdirectories
    /// ```
    /// # use cargo_scout_lib::config::Config;
    /// # struct CustomConfig{}
    /// # impl CustomConfig {
    /// #     fn new() -> Self {
    /// #         Self {}
    /// #     }
    /// # }
    /// # // Your own implementation goes here
    /// # impl Config for CustomConfig {
    /// #     fn members(&self) -> Vec<String> {
    /// #         vec!["foo".to_string(), "bar".to_string()]
    /// #     }
    /// # }
    /// let config = CustomConfig::new();
    /// // Directories ./foo and ./bar must be linted
    /// assert_eq!(vec!["foo".to_string(), "bar".to_string()], config.members());
    /// ```
    ///
    /// # Implementing your own Config
    /// ```
    /// use cargo_scout_lib::config::Config;
    ///
    /// struct CustomConfig{}
    ///
    /// # impl CustomConfig {
    /// #     fn new() -> Self {
    /// #         Self {}
    /// #     }
    /// # }
    /// impl Config for CustomConfig {
    ///     fn members(&self) -> Vec<String> {
    ///         // Your own code to fetch the list of
    ///         // directories to iterate on goes here
    ///         # vec![".".to_string()]
    ///     }
    /// }
    /// ```
    fn members(&self) -> Vec<String>;
}
31.64557
101
0.4988
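Beyond the doctests above, the trait is small enough that a realistic implementation fits in a few lines. A sketch of a hypothetical `Config` that lints every immediate subdirectory of the working directory, using only `std`; the `SubdirConfig` name is invented for illustration.

use cargo_scout_lib::config::Config;
use std::fs;

/// Hypothetical Config: every immediate subdirectory is a member.
struct SubdirConfig;

impl Config for SubdirConfig {
    fn members(&self) -> Vec<String> {
        fs::read_dir(".")
            .map(|entries| {
                entries
                    .filter_map(Result::ok)
                    .filter(|e| e.path().is_dir())
                    .filter_map(|e| e.file_name().into_string().ok())
                    .collect()
            })
            // fall back to linting the current directory only
            .unwrap_or_else(|_| vec![".".to_string()])
    }
}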
7a0a12925fdf55ff825e1b52d79af992099a068b
15,113
//! Errors, type aliases, and functions related to working with `Result`. use std::convert::From; use std::error::Error as StdError; use std::ffi::NulError; use std::fmt::{self, Display}; #[derive(Debug)] #[allow(clippy::enum_variant_names)] /// Represents all the ways that a query can fail. /// /// This type is not intended to be exhaustively matched, and new variants may /// be added in the future without a major version bump. #[non_exhaustive] pub enum Error { /// The query contained a nul byte. /// /// This should never occur in normal usage. InvalidCString(NulError), /// The database returned an error. /// /// While Diesel prevents almost all sources of runtime errors at compile /// time, it does not attempt to prevent 100% of them. Typically this error /// will occur from insert or update statements due to a constraint /// violation. DatabaseError( DatabaseErrorKind, Box<dyn DatabaseErrorInformation + Send + Sync>, ), /// No rows were returned by a query expected to return at least one row. /// /// This variant is only returned by [`get_result`] and [`first`]. [`load`] /// does not treat 0 rows as an error. If you would like to allow either 0 /// or 1 rows, call [`optional`] on the result. /// /// [`get_result`]: crate::query_dsl::RunQueryDsl::get_result() /// [`first`]: crate::query_dsl::RunQueryDsl::first() /// [`load`]: crate::query_dsl::RunQueryDsl::load() /// [`optional`]: OptionalExtension::optional NotFound, /// The query could not be constructed /// /// An example of when this error could occur is if you are attempting to /// construct an update statement with no changes (e.g. all fields on the /// struct are `None`). QueryBuilderError(Box<dyn StdError + Send + Sync>), /// An error occurred deserializing the data being sent to the database. /// /// Typically this error means that the stated type of the query is /// incorrect. An example of when this error might occur in normal usage is /// attempting to deserialize an infinite date into chrono. DeserializationError(Box<dyn StdError + Send + Sync>), /// An error occurred serializing the data being sent to the database. /// /// An example of when this error would be returned is if you attempted to /// serialize a `chrono::NaiveDate` earlier than the earliest date supported /// by PostgreSQL. SerializationError(Box<dyn StdError + Send + Sync>), /// An error occurred during the rollback of a transaction. /// /// An example of when this error would be returned is if a rollback has /// already be called on the current transaction. RollbackError(Box<Error>), /// Roll back the current transaction. /// /// You can return this variant inside of a transaction when you want to /// roll it back, but have no actual error to return. Diesel will never /// return this variant unless you gave it to us, and it can be safely /// ignored in error handling. RollbackTransaction, /// Attempted to perform an operation that cannot be done inside a transaction /// when a transaction was already open. AlreadyInTransaction, /// Attempted to perform an operation that can only be done inside a transaction /// when no transaction was open NotInTransaction, /// Transaction broken, likely due to a broken connection. No other operations are possible. BrokenTransaction, /// Commiting a transaction failed /// /// The transaction manager will try to perform /// a rollback in such cases. 
Indications about the success /// of this can be extracted from this error variant CommitTransactionFailed { /// Failure message of the commit attempt commit_error: Box<Error>, /// Outcome of the rollback attempt rollback_result: Box<QueryResult<()>>, }, } #[derive(Debug, Clone, Copy)] /// The kind of database error that occurred. /// /// This is not meant to exhaustively cover all possible errors, but is used to /// identify errors which are commonly recovered from programmatically. This enum /// is not intended to be exhaustively matched, and new variants may be added in /// the future without a major version bump. #[non_exhaustive] pub enum DatabaseErrorKind { /// A unique constraint was violated. UniqueViolation, /// A foreign key constraint was violated. ForeignKeyViolation, /// The query could not be sent to the database due to a protocol violation. /// /// An example of a case where this would occur is if you attempted to send /// a query with more than 65000 bind parameters using PostgreSQL. UnableToSendCommand, /// A serializable transaction failed to commit due to a read/write /// dependency on a concurrent transaction. /// /// Corresponds to SQLSTATE code 40001 /// /// This error is only detected for PostgreSQL, as we do not yet support /// transaction isolation levels for other backends. SerializationFailure, /// The command could not be completed because the transaction was read /// only. /// /// This error will also be returned for `SELECT` statements which attempted /// to lock the rows. ReadOnlyTransaction, /// A not null constraint was violated. NotNullViolation, /// A check constraint was violated. CheckViolation, /// The connection to the server was unexpectedly closed. /// /// This error is only detected for PostgreSQL and is emitted on a best-effort basis /// and may be missed. ClosedConnection, #[doc(hidden)] Unknown, // Match against _ instead, more variants may be added in the future } /// Information about an error that was returned by the database. pub trait DatabaseErrorInformation { /// The primary human-readable error message. Typically one line. fn message(&self) -> &str; /// An optional secondary error message providing more details about the /// problem, if it was provided by the database. Might span multiple lines. fn details(&self) -> Option<&str>; /// An optional suggestion of what to do about the problem, if one was /// provided by the database. fn hint(&self) -> Option<&str>; /// The name of the table the error was associated with, if the error was /// associated with a specific table and the backend supports retrieving /// that information. /// /// Currently this method will return `None` for all backends other than /// PostgreSQL. fn table_name(&self) -> Option<&str>; /// The name of the column the error was associated with, if the error was /// associated with a specific column and the backend supports retrieving /// that information. /// /// Currently this method will return `None` for all backends other than /// PostgreSQL. fn column_name(&self) -> Option<&str>; /// The constraint that was violated if this error is a constraint violation /// and the backend supports retrieving that information. /// /// Currently this method will return `None` for all backends other than /// PostgreSQL. fn constraint_name(&self) -> Option<&str>; /// An optional integer indicating an error cursor position as an index into /// the original statement string. 
fn statement_position(&self) -> Option<i32>; } impl fmt::Debug for dyn DatabaseErrorInformation + Send + Sync { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Debug::fmt(&self.message(), f) } } impl DatabaseErrorInformation for String { fn message(&self) -> &str { self } fn details(&self) -> Option<&str> { None } fn hint(&self) -> Option<&str> { None } fn table_name(&self) -> Option<&str> { None } fn column_name(&self) -> Option<&str> { None } fn constraint_name(&self) -> Option<&str> { None } fn statement_position(&self) -> Option<i32> { None } } /// Errors which can occur during [`Connection::establish`] /// /// [`Connection::establish`]: crate::connection::Connection::establish #[derive(Debug, PartialEq)] #[non_exhaustive] pub enum ConnectionError { /// The connection URL contained a `NUL` byte. InvalidCString(NulError), /// The database returned an error. BadConnection(String), /// The connection URL could not be parsed. InvalidConnectionUrl(String), /// Diesel could not configure the database connection. /// /// Diesel may try to automatically set session specific configuration /// values, such as UTF8 encoding, or enabling the `||` operator on MySQL. /// This variant is returned if an error occurred executing the query to set /// those options. Diesel will never affect global configuration. CouldntSetupConfiguration(Error), } /// A specialized result type for queries. /// /// This type is exported by `diesel::prelude`, and is generally used by any /// code which is interacting with Diesel. This type exists to avoid writing out /// `diesel::result::Error`, and is otherwise a direct mapping to `Result`. pub type QueryResult<T> = Result<T, Error>; /// A specialized result type for establishing connections. /// /// This type exists to avoid writing out `diesel::result::ConnectionError`, and /// is otherwise a direct mapping to `Result`. pub type ConnectionResult<T> = Result<T, ConnectionError>; /// See the [method documentation](OptionalExtension::optional). pub trait OptionalExtension<T> { /// Converts a `QueryResult<T>` into a `QueryResult<Option<T>>`. /// /// By default, Diesel treats 0 rows being returned from a query that is expected to return 1 /// row as an error (e.g. the return value of [`get_result`] or [`first`]). This method will /// handle that error, and give you back an `Option<T>` instead. 
/// /// [`get_result`]: crate::query_dsl::RunQueryDsl::get_result() /// [`first`]: crate::query_dsl::RunQueryDsl::first() /// /// # Example /// /// ```rust /// use diesel::{QueryResult, NotFound, OptionalExtension}; /// /// let result: QueryResult<i32> = Ok(1); /// assert_eq!(Ok(Some(1)), result.optional()); /// /// let result: QueryResult<i32> = Err(NotFound); /// assert_eq!(Ok(None), result.optional()); /// ``` fn optional(self) -> Result<Option<T>, Error>; } impl<T> OptionalExtension<T> for QueryResult<T> { fn optional(self) -> Result<Option<T>, Error> { match self { Ok(value) => Ok(Some(value)), Err(Error::NotFound) => Ok(None), Err(e) => Err(e), } } } impl From<NulError> for ConnectionError { fn from(e: NulError) -> Self { ConnectionError::InvalidCString(e) } } impl From<NulError> for Error { fn from(e: NulError) -> Self { Error::InvalidCString(e) } } impl Display for Error { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { Error::InvalidCString(ref nul_err) => write!(f, "{}", nul_err), Error::DatabaseError(_, ref e) => write!(f, "{}", e.message()), Error::NotFound => f.write_str("Record not found"), Error::QueryBuilderError(ref e) => e.fmt(f), Error::DeserializationError(ref e) => e.fmt(f), Error::SerializationError(ref e) => e.fmt(f), Error::RollbackError(ref e) => e.fmt(f), Error::RollbackTransaction => write!(f, "The current transaction was aborted"), Error::BrokenTransaction => write!(f, "The current transaction is broken"), Error::AlreadyInTransaction => write!( f, "Cannot perform this operation while a transaction is open", ), Error::NotInTransaction => { write!(f, "Cannot perform this operation outside of a transaction",) } Error::CommitTransactionFailed { ref commit_error, ref rollback_result, } => { write!( f, "Commiting the current transaction failed: {}", commit_error )?; match &**rollback_result { Ok(()) => write!(f, " Rollback attempt was succesful"), Err(e) => write!(f, " Rollback attempt failed with {}", e), } } } } } impl StdError for Error { fn cause(&self) -> Option<&dyn StdError> { match *self { Error::InvalidCString(ref e) => Some(e), Error::QueryBuilderError(ref e) => Some(&**e), Error::DeserializationError(ref e) => Some(&**e), Error::SerializationError(ref e) => Some(&**e), _ => None, } } } impl Display for ConnectionError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { ConnectionError::InvalidCString(ref nul_err) => nul_err.fmt(f), ConnectionError::BadConnection(ref s) => write!(f, "{}", s), ConnectionError::InvalidConnectionUrl(ref s) => write!(f, "{}", s), ConnectionError::CouldntSetupConfiguration(ref e) => e.fmt(f), } } } impl StdError for ConnectionError { fn cause(&self) -> Option<&dyn StdError> { match *self { ConnectionError::InvalidCString(ref e) => Some(e), ConnectionError::CouldntSetupConfiguration(ref e) => Some(e), _ => None, } } } impl PartialEq for Error { fn eq(&self, other: &Error) -> bool { match (self, other) { (&Error::InvalidCString(ref a), &Error::InvalidCString(ref b)) => a == b, (&Error::DatabaseError(_, ref a), &Error::DatabaseError(_, ref b)) => { a.message() == b.message() } (&Error::NotFound, &Error::NotFound) => true, (&Error::RollbackTransaction, &Error::RollbackTransaction) => true, (&Error::AlreadyInTransaction, &Error::AlreadyInTransaction) => true, _ => false, } } } #[cfg(test)] #[allow(warnings)] fn error_impls_send() { let err: Error = unimplemented!(); let x: &Send = &err; } /// An unexpected `NULL` was encountered during deserialization #[derive(Debug, Clone, Copy)] pub 
struct UnexpectedNullError; impl fmt::Display for UnexpectedNullError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "Unexpected null for non-null column") } } impl StdError for UnexpectedNullError {} /// Expected more fields than present in the current row while deserialising results #[derive(Debug, Clone, Copy)] pub struct UnexpectedEndOfRow; impl fmt::Display for UnexpectedEndOfRow { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "Unexpected end of row") } } impl StdError for UnexpectedEndOfRow {}
35.728132
97
0.634619
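As the `DatabaseErrorKind` docs in this file say, the kinds exist so that common failures can be recovered from programmatically. A short usage sketch of matching them in application code, assuming the surrounding crate is `diesel` (this file is its `result` module); `SignupError` and `interpret` are hypothetical names for illustration.

use diesel::result::{DatabaseErrorKind, Error, QueryResult};

// Hypothetical domain error for the sketch.
#[derive(Debug)]
enum SignupError {
    UsernameTaken,
    Other(Error),
}

// Map a unique-constraint violation onto a domain error; everything
// else is passed through. Both enums are #[non_exhaustive], so the
// catch-all arm is required.
fn interpret(result: QueryResult<usize>) -> Result<usize, SignupError> {
    result.map_err(|e| match e {
        Error::DatabaseError(DatabaseErrorKind::UniqueViolation, _) => {
            SignupError::UsernameTaken
        }
        other => SignupError::Other(other),
    })
}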
395d584c434068cf0709adc1fcb62aa34a2df37e
4,009
//! Non-interactive Zero Knowledge proof for correct Hybrid
//! decryption key generation. We use the notation and scheme
//! presented in Figure 5 of the Treasury voting protocol spec.
//!
//! The proof is the following:
//!
//! `NIZK{(pk, C = (C1, C2), D), (sk): D = C1^sk AND pk = g^sk}`
//!
//! which is a proof of discrete log equality. We can therefore prove
//! correct decryption using a proof of discrete log equality.

use crate::cryptography::dl_equality::DleqZkp;
use crate::cryptography::elgamal::{HybridCiphertext, SymmetricKey};
use crate::dkg::procedure_keys::{MemberCommunicationKey, MemberCommunicationPublicKey};
use crate::errors::ProofError;
use crate::traits::{PrimeGroupElement, Scalar};
use rand_core::{CryptoRng, RngCore};

/// Proof of correct decryption.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Zkp<G: PrimeGroupElement> {
    hybrid_dec_key_proof: DleqZkp<G>,
}

impl<G> Zkp<G>
where
    G: PrimeGroupElement,
{
    /// Generate a decryption zero knowledge proof.
    pub fn generate<R>(
        c: &HybridCiphertext<G>,
        pk: &MemberCommunicationPublicKey<G>,
        symmetric_key: &SymmetricKey<G>,
        sk: &MemberCommunicationKey<G>,
        rng: &mut R,
    ) -> Self
    where
        R: CryptoRng + RngCore,
        [(); G::SIZE]:,
    {
        let hybrid_dec_key_proof = DleqZkp::generate(
            &G::generator(),
            &c.e1,
            &pk.0.pk,
            &symmetric_key.group_repr,
            &sk.0.sk,
            rng,
        );
        Zkp {
            hybrid_dec_key_proof,
        }
    }

    /// Verify a decryption zero knowledge proof
    pub fn verify(
        &self,
        c: &HybridCiphertext<G>,
        symmetric_key: &SymmetricKey<G>,
        pk: &MemberCommunicationPublicKey<G>,
    ) -> Result<(), ProofError>
    where
        [(); G::SIZE]:,
    {
        self.hybrid_dec_key_proof.verify(
            &G::generator(),
            &c.e1,
            &pk.0.pk,
            &symmetric_key.group_repr,
        )
    }

    pub fn to_bytes(&self) -> [u8; 2 * G::SIZE]
    where
        [(); <G::CorrespondingScalar as Scalar>::SIZE]:,
    {
        self.hybrid_dec_key_proof.to_bytes()
    }

    pub fn from_bytes(bytes: &[u8]) -> Option<Self> {
        DleqZkp::from_bytes(bytes).map(|x| Self {
            hybrid_dec_key_proof: x,
        })
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use curve25519_dalek::ristretto::RistrettoPoint;
    use rand_chacha::ChaCha20Rng;
    use rand_core::SeedableRng;

    #[test]
    pub fn it_works() {
        let mut r = ChaCha20Rng::from_seed([0u8; 32]);

        let comm_key = MemberCommunicationKey::<RistrettoPoint>::new(&mut r);
        let comm_pkey = comm_key.to_public();

        let plaintext = [10u8; 43];
        let ciphertext = comm_pkey.hybrid_encrypt(&plaintext, &mut r);

        let decryption_key = comm_key.0.recover_symmetric_key(&ciphertext);

        let proof = Zkp::generate(&ciphertext, &comm_pkey, &decryption_key, &comm_key, &mut r);
        assert!(proof
            .verify(&ciphertext, &decryption_key, &comm_pkey)
            .is_ok())
    }

    #[test]
    fn serialisation() {
        let mut r = ChaCha20Rng::from_seed([0u8; 32]);

        let comm_key = MemberCommunicationKey::<RistrettoPoint>::new(&mut r);
        let comm_pkey = comm_key.to_public();

        let plaintext = [10u8; 43];
        let ciphertext = comm_pkey.hybrid_encrypt(&plaintext, &mut r);

        let decryption_key = comm_key.0.recover_symmetric_key(&ciphertext);

        let proof = Zkp::<RistrettoPoint>::generate(
            &ciphertext,
            &comm_pkey,
            &decryption_key,
            &comm_key,
            &mut r,
        );
        let bytes = proof.to_bytes();
        let deserialised = Zkp::from_bytes(&bytes);
        assert!(deserialised.is_some());

        let unwrapped = deserialised.unwrap();
        assert!(unwrapped
            .verify(&ciphertext, &decryption_key, &comm_pkey)
            .is_ok())
    }
}
28.841727
95
0.594662
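The relation proved above is easier to read typeset. In the file's notation, with g the group generator, C = (C1, C2) the hybrid ciphertext, D the recovered symmetric key, and sk the member's secret key:

$$\mathrm{NIZK}\{(pk,\ C = (C_1, C_2),\ D),\ (sk) : D = C_1^{sk} \ \wedge\ pk = g^{sk}\}$$

Knowledge of a single exponent sk satisfying both equations is exactly a discrete-log-equality (DLEQ) statement, which is why a `DleqZkp` over the tuple (g, C1, pk, D), as passed to `DleqZkp::generate` above, suffices to prove correct decryption.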
f7c090b5135e9b5354a12386ac8344b36c96af3d
74,229
use super::{Parser, PResult, Restrictions, PrevTokenKind, TokenType, PathStyle}; use super::{BlockMode, SemiColonMode}; use super::{SeqSep, TokenExpectType}; use super::pat::{GateOr, PARAM_EXPECTED}; use crate::maybe_recover_from_interpolated_ty_qpath; use crate::ptr::P; use crate::ast::{self, Attribute, AttrStyle, Ident, CaptureBy, BlockCheckMode}; use crate::ast::{Expr, ExprKind, RangeLimits, Label, Movability, IsAsync, Arm}; use crate::ast::{Ty, TyKind, FunctionRetTy, Arg, FnDecl}; use crate::ast::{BinOpKind, BinOp, UnOp}; use crate::ast::{Mac, AnonConst, Field}; use crate::parse::classify; use crate::parse::token::{self, Token}; use crate::parse::diagnostics::{Error}; use crate::print::pprust; use crate::source_map::{self, Span}; use crate::symbol::{kw, sym}; use crate::util::parser::{AssocOp, Fixity, prec_let_scrutinee_needs_par}; use std::mem; use errors::Applicability; use rustc_data_structures::thin_vec::ThinVec; /// Possibly accepts an `token::Interpolated` expression (a pre-parsed expression /// dropped into the token stream, which happens while parsing the result of /// macro expansion). Placement of these is not as complex as I feared it would /// be. The important thing is to make sure that lookahead doesn't balk at /// `token::Interpolated` tokens. macro_rules! maybe_whole_expr { ($p:expr) => { if let token::Interpolated(nt) = &$p.token.kind { match &**nt { token::NtExpr(e) | token::NtLiteral(e) => { let e = e.clone(); $p.bump(); return Ok(e); } token::NtPath(path) => { let path = path.clone(); $p.bump(); return Ok($p.mk_expr( $p.token.span, ExprKind::Path(None, path), ThinVec::new() )); } token::NtBlock(block) => { let block = block.clone(); $p.bump(); return Ok($p.mk_expr( $p.token.span, ExprKind::Block(block, None), ThinVec::new() )); } // N.B: `NtIdent(ident)` is normalized to `Ident` in `fn bump`. _ => {}, }; } } } #[derive(Debug)] pub(super) enum LhsExpr { NotYetParsed, AttributesParsed(ThinVec<Attribute>), AlreadyParsed(P<Expr>), } impl From<Option<ThinVec<Attribute>>> for LhsExpr { fn from(o: Option<ThinVec<Attribute>>) -> Self { if let Some(attrs) = o { LhsExpr::AttributesParsed(attrs) } else { LhsExpr::NotYetParsed } } } impl From<P<Expr>> for LhsExpr { fn from(expr: P<Expr>) -> Self { LhsExpr::AlreadyParsed(expr) } } impl<'a> Parser<'a> { /// Parses an expression. #[inline] pub fn parse_expr(&mut self) -> PResult<'a, P<Expr>> { self.parse_expr_res(Restrictions::empty(), None) } fn parse_paren_expr_seq(&mut self) -> PResult<'a, Vec<P<Expr>>> { self.parse_paren_comma_seq(|p| { match p.parse_expr() { Ok(expr) => Ok(expr), Err(mut err) => match p.token.kind { token::Ident(name, false) if name == kw::Underscore && p.look_ahead(1, |t| { t == &token::Comma }) => { // Special-case handling of `foo(_, _, _)` err.emit(); let sp = p.token.span; p.bump(); Ok(p.mk_expr(sp, ExprKind::Err, ThinVec::new())) } _ => Err(err), }, } }).map(|(r, _)| r) } /// Parses an expression, subject to the given restrictions. #[inline] pub(super) fn parse_expr_res( &mut self, r: Restrictions, already_parsed_attrs: Option<ThinVec<Attribute>> ) -> PResult<'a, P<Expr>> { self.with_res(r, |this| this.parse_assoc_expr(already_parsed_attrs)) } /// Parses an associative expression. /// /// This parses an expression accounting for associativity and precedence of the operators in /// the expression. 
#[inline] fn parse_assoc_expr( &mut self, already_parsed_attrs: Option<ThinVec<Attribute>>, ) -> PResult<'a, P<Expr>> { self.parse_assoc_expr_with(0, already_parsed_attrs.into()) } /// Parses an associative expression with operators of at least `min_prec` precedence. pub(super) fn parse_assoc_expr_with( &mut self, min_prec: usize, lhs: LhsExpr, ) -> PResult<'a, P<Expr>> { let mut lhs = if let LhsExpr::AlreadyParsed(expr) = lhs { expr } else { let attrs = match lhs { LhsExpr::AttributesParsed(attrs) => Some(attrs), _ => None, }; if [token::DotDot, token::DotDotDot, token::DotDotEq].contains(&self.token.kind) { return self.parse_prefix_range_expr(attrs); } else { self.parse_prefix_expr(attrs)? } }; let last_type_ascription_set = self.last_type_ascription.is_some(); match (self.expr_is_complete(&lhs), AssocOp::from_token(&self.token)) { (true, None) => { self.last_type_ascription = None; // Semi-statement forms are odd. See https://github.com/rust-lang/rust/issues/29071 return Ok(lhs); } (false, _) => {} // continue parsing the expression // An exhaustive check is done in the following block, but these are checked first // because they *are* ambiguous but also reasonable looking incorrect syntax, so we // want to keep their span info to improve diagnostics in these cases in a later stage. (true, Some(AssocOp::Multiply)) | // `{ 42 } *foo = bar;` or `{ 42 } * 3` (true, Some(AssocOp::Subtract)) | // `{ 42 } -5` (true, Some(AssocOp::LAnd)) | // `{ 42 } &&x` (#61475) (true, Some(AssocOp::Add)) // `{ 42 } + 42 // If the next token is a keyword, then the tokens above *are* unambiguously incorrect: // `if x { a } else { b } && if y { c } else { d }` if !self.look_ahead(1, |t| t.is_reserved_ident()) => { self.last_type_ascription = None; // These cases are ambiguous and can't be identified in the parser alone let sp = self.sess.source_map().start_point(self.token.span); self.sess.ambiguous_block_expr_parse.borrow_mut().insert(sp, lhs.span); return Ok(lhs); } (true, Some(ref op)) if !op.can_continue_expr_unambiguously() => { self.last_type_ascription = None; return Ok(lhs); } (true, Some(_)) => { // We've found an expression that would be parsed as a statement, but the next // token implies this should be parsed as an expression. // For example: `if let Some(x) = x { x } else { 0 } / 2` let mut err = self.struct_span_err(self.token.span, &format!( "expected expression, found `{}`", pprust::token_to_string(&self.token), )); err.span_label(self.token.span, "expected expression"); self.sess.expr_parentheses_needed( &mut err, lhs.span, Some(pprust::expr_to_string(&lhs), )); err.emit(); } } self.expected_tokens.push(TokenType::Operator); while let Some(op) = AssocOp::from_token(&self.token) { // Adjust the span for interpolated LHS to point to the `$lhs` token and not to what // it refers to. Interpolated identifiers are unwrapped early and never show up here // as `PrevTokenKind::Interpolated` so if LHS is a single identifier we always process // it as "interpolated", it doesn't change the answer for non-interpolated idents. 
let lhs_span = match (self.prev_token_kind, &lhs.node) { (PrevTokenKind::Interpolated, _) => self.prev_span, (PrevTokenKind::Ident, &ExprKind::Path(None, ref path)) if path.segments.len() == 1 => self.prev_span, _ => lhs.span, }; let cur_op_span = self.token.span; let restrictions = if op.is_assign_like() { self.restrictions & Restrictions::NO_STRUCT_LITERAL } else { self.restrictions }; let prec = op.precedence(); if prec < min_prec { break; } // Check for deprecated `...` syntax if self.token == token::DotDotDot && op == AssocOp::DotDotEq { self.err_dotdotdot_syntax(self.token.span); } if self.token == token::LArrow { self.err_larrow_operator(self.token.span); } self.bump(); if op.is_comparison() { self.check_no_chained_comparison(&lhs, &op); } // Special cases: if op == AssocOp::As { lhs = self.parse_assoc_op_cast(lhs, lhs_span, ExprKind::Cast)?; continue } else if op == AssocOp::Colon { let maybe_path = self.could_ascription_be_path(&lhs.node); self.last_type_ascription = Some((self.prev_span, maybe_path)); lhs = self.parse_assoc_op_cast(lhs, lhs_span, ExprKind::Type)?; continue } else if op == AssocOp::DotDot || op == AssocOp::DotDotEq { // If we didn’t have to handle `x..`/`x..=`, it would be pretty easy to // generalise it to the Fixity::None code. // // We have 2 alternatives here: `x..y`/`x..=y` and `x..`/`x..=` The other // two variants are handled with `parse_prefix_range_expr` call above. let rhs = if self.is_at_start_of_range_notation_rhs() { Some(self.parse_assoc_expr_with(prec + 1, LhsExpr::NotYetParsed)?) } else { None }; let (lhs_span, rhs_span) = (lhs.span, if let Some(ref x) = rhs { x.span } else { cur_op_span }); let limits = if op == AssocOp::DotDot { RangeLimits::HalfOpen } else { RangeLimits::Closed }; let r = self.mk_range(Some(lhs), rhs, limits)?; lhs = self.mk_expr(lhs_span.to(rhs_span), r, ThinVec::new()); break } let fixity = op.fixity(); let prec_adjustment = match fixity { Fixity::Right => 0, Fixity::Left => 1, // We currently have no non-associative operators that are not handled above by // the special cases. The code is here only for future convenience. Fixity::None => 1, }; let rhs = self.with_res( restrictions - Restrictions::STMT_EXPR, |this| this.parse_assoc_expr_with(prec + prec_adjustment, LhsExpr::NotYetParsed) )?; // Make sure that the span of the parent node is larger than the span of lhs and rhs, // including the attributes. 
let lhs_span = lhs .attrs .iter() .filter(|a| a.style == AttrStyle::Outer) .next() .map_or(lhs_span, |a| a.span); let span = lhs_span.to(rhs.span); lhs = match op { AssocOp::Add | AssocOp::Subtract | AssocOp::Multiply | AssocOp::Divide | AssocOp::Modulus | AssocOp::LAnd | AssocOp::LOr | AssocOp::BitXor | AssocOp::BitAnd | AssocOp::BitOr | AssocOp::ShiftLeft | AssocOp::ShiftRight | AssocOp::Equal | AssocOp::Less | AssocOp::LessEqual | AssocOp::NotEqual | AssocOp::Greater | AssocOp::GreaterEqual => { let ast_op = op.to_ast_binop().unwrap(); let binary = self.mk_binary(source_map::respan(cur_op_span, ast_op), lhs, rhs); self.mk_expr(span, binary, ThinVec::new()) } AssocOp::Assign => self.mk_expr(span, ExprKind::Assign(lhs, rhs), ThinVec::new()), AssocOp::AssignOp(k) => { let aop = match k { token::Plus => BinOpKind::Add, token::Minus => BinOpKind::Sub, token::Star => BinOpKind::Mul, token::Slash => BinOpKind::Div, token::Percent => BinOpKind::Rem, token::Caret => BinOpKind::BitXor, token::And => BinOpKind::BitAnd, token::Or => BinOpKind::BitOr, token::Shl => BinOpKind::Shl, token::Shr => BinOpKind::Shr, }; let aopexpr = self.mk_assign_op(source_map::respan(cur_op_span, aop), lhs, rhs); self.mk_expr(span, aopexpr, ThinVec::new()) } AssocOp::As | AssocOp::Colon | AssocOp::DotDot | AssocOp::DotDotEq => { self.bug("AssocOp should have been handled by special case") } }; if let Fixity::None = fixity { break } } if last_type_ascription_set { self.last_type_ascription = None; } Ok(lhs) } /// Checks if this expression is a successfully parsed statement. fn expr_is_complete(&self, e: &Expr) -> bool { self.restrictions.contains(Restrictions::STMT_EXPR) && !classify::expr_requires_semi_to_be_stmt(e) } fn is_at_start_of_range_notation_rhs(&self) -> bool { if self.token.can_begin_expr() { // parse `for i in 1.. { }` as infinite loop, not as `for i in (1..{})`. if self.token == token::OpenDelim(token::Brace) { return !self.restrictions.contains(Restrictions::NO_STRUCT_LITERAL); } true } else { false } } /// Parse prefix-forms of range notation: `..expr`, `..`, `..=expr` fn parse_prefix_range_expr( &mut self, already_parsed_attrs: Option<ThinVec<Attribute>> ) -> PResult<'a, P<Expr>> { // Check for deprecated `...` syntax if self.token == token::DotDotDot { self.err_dotdotdot_syntax(self.token.span); } debug_assert!([token::DotDot, token::DotDotDot, token::DotDotEq].contains(&self.token.kind), "parse_prefix_range_expr: token {:?} is not DotDot/DotDotEq", self.token); let tok = self.token.clone(); let attrs = self.parse_or_use_outer_attributes(already_parsed_attrs)?; let lo = self.token.span; let mut hi = self.token.span; self.bump(); let opt_end = if self.is_at_start_of_range_notation_rhs() { // RHS must be parsed with more associativity than the dots. let next_prec = AssocOp::from_token(&tok).unwrap().precedence() + 1; Some(self.parse_assoc_expr_with(next_prec, LhsExpr::NotYetParsed) .map(|x| { hi = x.span; x })?) 
} else { None }; let limits = if tok == token::DotDot { RangeLimits::HalfOpen } else { RangeLimits::Closed }; let r = self.mk_range(None, opt_end, limits)?; Ok(self.mk_expr(lo.to(hi), r, attrs)) } /// Parse a prefix-unary-operator expr fn parse_prefix_expr( &mut self, already_parsed_attrs: Option<ThinVec<Attribute>> ) -> PResult<'a, P<Expr>> { let attrs = self.parse_or_use_outer_attributes(already_parsed_attrs)?; let lo = self.token.span; // Note: when adding new unary operators, don't forget to adjust TokenKind::can_begin_expr() let (hi, ex) = match self.token.kind { token::Not => { self.bump(); let e = self.parse_prefix_expr(None); let (span, e) = self.interpolated_or_expr_span(e)?; (lo.to(span), self.mk_unary(UnOp::Not, e)) } // Suggest `!` for bitwise negation when encountering a `~` token::Tilde => { self.bump(); let e = self.parse_prefix_expr(None); let (span, e) = self.interpolated_or_expr_span(e)?; let span_of_tilde = lo; self.struct_span_err(span_of_tilde, "`~` cannot be used as a unary operator") .span_suggestion_short( span_of_tilde, "use `!` to perform bitwise negation", "!".to_owned(), Applicability::MachineApplicable ) .emit(); (lo.to(span), self.mk_unary(UnOp::Not, e)) } token::BinOp(token::Minus) => { self.bump(); let e = self.parse_prefix_expr(None); let (span, e) = self.interpolated_or_expr_span(e)?; (lo.to(span), self.mk_unary(UnOp::Neg, e)) } token::BinOp(token::Star) => { self.bump(); let e = self.parse_prefix_expr(None); let (span, e) = self.interpolated_or_expr_span(e)?; (lo.to(span), self.mk_unary(UnOp::Deref, e)) } token::BinOp(token::And) | token::AndAnd => { self.expect_and()?; let m = self.parse_mutability(); let e = self.parse_prefix_expr(None); let (span, e) = self.interpolated_or_expr_span(e)?; (lo.to(span), ExprKind::AddrOf(m, e)) } token::Ident(..) if self.token.is_keyword(kw::Box) => { self.bump(); let e = self.parse_prefix_expr(None); let (span, e) = self.interpolated_or_expr_span(e)?; (lo.to(span), ExprKind::Box(e)) } token::Ident(..) if self.token.is_ident_named(sym::not) => { // `not` is just an ordinary identifier in Rust-the-language, // but as `rustc`-the-compiler, we can issue clever diagnostics // for confused users who really want to say `!` let token_cannot_continue_expr = |t: &Token| match t.kind { // These tokens can start an expression after `!`, but // can't continue an expression after an ident token::Ident(name, is_raw) => token::ident_can_begin_expr(name, t.span, is_raw), token::Literal(..) | token::Pound => true, _ => t.is_whole_expr(), }; let cannot_continue_expr = self.look_ahead(1, token_cannot_continue_expr); if cannot_continue_expr { self.bump(); // Emit the error ... self.struct_span_err( self.token.span, &format!("unexpected {} after identifier",self.this_token_descr()) ) .span_suggestion_short( // Span the `not` plus trailing whitespace to avoid // trailing whitespace after the `!` in our suggestion self.sess.source_map() .span_until_non_whitespace(lo.to(self.token.span)), "use `!` to perform logical negation", "!".to_owned(), Applicability::MachineApplicable ) .emit(); // —and recover! 
(just as if we were in the block // for the `token::Not` arm) let e = self.parse_prefix_expr(None); let (span, e) = self.interpolated_or_expr_span(e)?; (lo.to(span), self.mk_unary(UnOp::Not, e)) } else { return self.parse_dot_or_call_expr(Some(attrs)); } } _ => { return self.parse_dot_or_call_expr(Some(attrs)); } }; return Ok(self.mk_expr(lo.to(hi), ex, attrs)); } /// Returns the span of expr, if it was not interpolated or the span of the interpolated token. fn interpolated_or_expr_span( &self, expr: PResult<'a, P<Expr>>, ) -> PResult<'a, (Span, P<Expr>)> { expr.map(|e| { if self.prev_token_kind == PrevTokenKind::Interpolated { (self.prev_span, e) } else { (e.span, e) } }) } fn parse_assoc_op_cast(&mut self, lhs: P<Expr>, lhs_span: Span, expr_kind: fn(P<Expr>, P<Ty>) -> ExprKind) -> PResult<'a, P<Expr>> { let mk_expr = |this: &mut Self, rhs: P<Ty>| { this.mk_expr(lhs_span.to(rhs.span), expr_kind(lhs, rhs), ThinVec::new()) }; // Save the state of the parser before parsing type normally, in case there is a // LessThan comparison after this cast. let parser_snapshot_before_type = self.clone(); match self.parse_ty_no_plus() { Ok(rhs) => { Ok(mk_expr(self, rhs)) } Err(mut type_err) => { // Rewind to before attempting to parse the type with generics, to recover // from situations like `x as usize < y` in which we first tried to parse // `usize < y` as a type with generic arguments. let parser_snapshot_after_type = self.clone(); mem::replace(self, parser_snapshot_before_type); match self.parse_path(PathStyle::Expr) { Ok(path) => { let (op_noun, op_verb) = match self.token.kind { token::Lt => ("comparison", "comparing"), token::BinOp(token::Shl) => ("shift", "shifting"), _ => { // We can end up here even without `<` being the next token, for // example because `parse_ty_no_plus` returns `Err` on keywords, // but `parse_path` returns `Ok` on them due to error recovery. // Return original error and parser state. mem::replace(self, parser_snapshot_after_type); return Err(type_err); } }; // Successfully parsed the type path leaving a `<` yet to parse. type_err.cancel(); // Report non-fatal diagnostics, keep `x as usize` as an expression // in AST and continue parsing. let msg = format!("`<` is interpreted as a start of generic \ arguments for `{}`, not a {}", path, op_noun); let span_after_type = parser_snapshot_after_type.token.span; let expr = mk_expr(self, P(Ty { span: path.span, node: TyKind::Path(None, path), id: ast::DUMMY_NODE_ID })); let expr_str = self.span_to_snippet(expr.span) .unwrap_or_else(|_| pprust::expr_to_string(&expr)); self.struct_span_err(self.token.span, &msg) .span_label( self.look_ahead(1, |t| t.span).to(span_after_type), "interpreted as generic arguments" ) .span_label(self.token.span, format!("not interpreted as {}", op_noun)) .span_suggestion( expr.span, &format!("try {} the cast value", op_verb), format!("({})", expr_str), Applicability::MachineApplicable ) .emit(); Ok(expr) } Err(mut path_err) => { // Couldn't parse as a path, return original error and parser state. path_err.cancel(); mem::replace(self, parser_snapshot_after_type); Err(type_err) } } } } } /// Parses `a.b` or `a(13)` or `a[4]` or just `a`. 
fn parse_dot_or_call_expr( &mut self, already_parsed_attrs: Option<ThinVec<Attribute>>, ) -> PResult<'a, P<Expr>> { let attrs = self.parse_or_use_outer_attributes(already_parsed_attrs)?; let b = self.parse_bottom_expr(); let (span, b) = self.interpolated_or_expr_span(b)?; self.parse_dot_or_call_expr_with(b, span, attrs) } pub(super) fn parse_dot_or_call_expr_with( &mut self, e0: P<Expr>, lo: Span, mut attrs: ThinVec<Attribute>, ) -> PResult<'a, P<Expr>> { // Stitch the list of outer attributes onto the return value. // A little bit ugly, but the best way given the current code // structure self.parse_dot_or_call_expr_with_(e0, lo).map(|expr| expr.map(|mut expr| { attrs.extend::<Vec<_>>(expr.attrs.into()); expr.attrs = attrs; match expr.node { ExprKind::If(..) if !expr.attrs.is_empty() => { // Just point to the first attribute in there... let span = expr.attrs[0].span; self.span_err(span, "attributes are not yet allowed on `if` expressions"); } _ => {} } expr }) ) } fn parse_dot_or_call_expr_with_(&mut self, e0: P<Expr>, lo: Span) -> PResult<'a, P<Expr>> { let mut e = e0; let mut hi; loop { // expr? while self.eat(&token::Question) { let hi = self.prev_span; e = self.mk_expr(lo.to(hi), ExprKind::Try(e), ThinVec::new()); } // expr.f if self.eat(&token::Dot) { match self.token.kind { token::Ident(..) => { e = self.parse_dot_suffix(e, lo)?; } token::Literal(token::Lit { kind: token::Integer, symbol, suffix }) => { let span = self.token.span; self.bump(); let field = ExprKind::Field(e, Ident::new(symbol, span)); e = self.mk_expr(lo.to(span), field, ThinVec::new()); self.expect_no_suffix(span, "a tuple index", suffix); } token::Literal(token::Lit { kind: token::Float, symbol, .. }) => { self.bump(); let fstr = symbol.as_str(); let msg = format!("unexpected token: `{}`", symbol); let mut err = self.diagnostic().struct_span_err(self.prev_span, &msg); err.span_label(self.prev_span, "unexpected token"); if fstr.chars().all(|x| "0123456789.".contains(x)) { let float = match fstr.parse::<f64>().ok() { Some(f) => f, None => continue, }; let sugg = pprust::to_string(|s| { s.popen(); s.print_expr(&e); s.s.word( "."); s.print_usize(float.trunc() as usize); s.pclose(); s.s.word("."); s.s.word(fstr.splitn(2, ".").last().unwrap().to_string()) }); err.span_suggestion( lo.to(self.prev_span), "try parenthesizing the first index", sugg, Applicability::MachineApplicable ); } return Err(err); } _ => { // FIXME Could factor this out into non_fatal_unexpected or something. let actual = self.this_token_to_string(); self.span_err(self.token.span, &format!("unexpected token: `{}`", actual)); } } continue; } if self.expr_is_complete(&e) { break; } match self.token.kind { // expr(...) token::OpenDelim(token::Paren) => { let seq = self.parse_paren_expr_seq().map(|es| { let nd = self.mk_call(e, es); let hi = self.prev_span; self.mk_expr(lo.to(hi), nd, ThinVec::new()) }); e = self.recover_seq_parse_error(token::Paren, lo, seq); } // expr[...] // Could be either an index expression or a slicing expression. token::OpenDelim(token::Bracket) => { self.bump(); let ix = self.parse_expr()?; hi = self.token.span; self.expect(&token::CloseDelim(token::Bracket))?; let index = self.mk_index(e, ix); e = self.mk_expr(lo.to(hi), index, ThinVec::new()) } _ => return Ok(e) } } return Ok(e); } /// Assuming we have just parsed `.`, continue parsing into an expression. 
fn parse_dot_suffix(&mut self, self_arg: P<Expr>, lo: Span) -> PResult<'a, P<Expr>> { if self.token.span.rust_2018() && self.eat_keyword(kw::Await) { return self.mk_await_expr(self_arg, lo); } let segment = self.parse_path_segment(PathStyle::Expr)?; self.check_trailing_angle_brackets(&segment, token::OpenDelim(token::Paren)); Ok(match self.token.kind { token::OpenDelim(token::Paren) => { // Method call `expr.f()` let mut args = self.parse_paren_expr_seq()?; args.insert(0, self_arg); let span = lo.to(self.prev_span); self.mk_expr(span, ExprKind::MethodCall(segment, args), ThinVec::new()) } _ => { // Field access `expr.f` if let Some(args) = segment.args { self.span_err(args.span(), "field expressions may not have generic arguments"); } let span = lo.to(self.prev_span); self.mk_expr(span, ExprKind::Field(self_arg, segment.ident), ThinVec::new()) } }) } /// At the bottom (top?) of the precedence hierarchy, /// Parses things like parenthesized exprs, macros, `return`, etc. /// /// N.B., this does not parse outer attributes, and is private because it only works /// correctly if called from `parse_dot_or_call_expr()`. fn parse_bottom_expr(&mut self) -> PResult<'a, P<Expr>> { maybe_recover_from_interpolated_ty_qpath!(self, true); maybe_whole_expr!(self); // Outer attributes are already parsed and will be // added to the return value after the fact. // // Therefore, prevent sub-parser from parsing // attributes by giving them a empty "already parsed" list. let mut attrs = ThinVec::new(); let lo = self.token.span; let mut hi = self.token.span; let ex: ExprKind; macro_rules! parse_lit { () => { match self.parse_lit() { Ok(literal) => { hi = self.prev_span; ex = ExprKind::Lit(literal); } Err(mut err) => { self.cancel(&mut err); return Err(self.expected_expression_found()); } } } } // Note: when adding new syntax here, don't forget to adjust TokenKind::can_begin_expr(). match self.token.kind { // This match arm is a special-case of the `_` match arm below and // could be removed without changing functionality, but it's faster // to have it here, especially for programs with large constants. 
token::Literal(_) => { parse_lit!() } token::OpenDelim(token::Paren) => { self.bump(); attrs.extend(self.parse_inner_attributes()?); // (e) is parenthesized e // (e,) is a tuple with only one field, e let mut es = vec![]; let mut trailing_comma = false; let mut recovered = false; while self.token != token::CloseDelim(token::Paren) { es.push(match self.parse_expr() { Ok(es) => es, Err(mut err) => { // recover from parse error in tuple list match self.token.kind { token::Ident(name, false) if name == kw::Underscore && self.look_ahead(1, |t| { t == &token::Comma }) => { // Special-case handling of `Foo<(_, _, _)>` err.emit(); let sp = self.token.span; self.bump(); self.mk_expr(sp, ExprKind::Err, ThinVec::new()) } _ => return Ok( self.recover_seq_parse_error(token::Paren, lo, Err(err)), ), } } }); recovered = self.expect_one_of( &[], &[token::Comma, token::CloseDelim(token::Paren)], )?; if self.eat(&token::Comma) { trailing_comma = true; } else { trailing_comma = false; break; } } if !recovered { self.bump(); } hi = self.prev_span; ex = if es.len() == 1 && !trailing_comma { ExprKind::Paren(es.into_iter().nth(0).unwrap()) } else { ExprKind::Tup(es) }; } token::OpenDelim(token::Brace) => { return self.parse_block_expr(None, lo, BlockCheckMode::Default, attrs); } token::BinOp(token::Or) | token::OrOr => { return self.parse_lambda_expr(attrs); } token::OpenDelim(token::Bracket) => { self.bump(); attrs.extend(self.parse_inner_attributes()?); if self.eat(&token::CloseDelim(token::Bracket)) { // Empty vector. ex = ExprKind::Array(Vec::new()); } else { // Nonempty vector. let first_expr = self.parse_expr()?; if self.eat(&token::Semi) { // Repeating array syntax: [ 0; 512 ] let count = AnonConst { id: ast::DUMMY_NODE_ID, value: self.parse_expr()?, }; self.expect(&token::CloseDelim(token::Bracket))?; ex = ExprKind::Repeat(first_expr, count); } else if self.eat(&token::Comma) { // Vector with two or more elements. let remaining_exprs = self.parse_seq_to_end( &token::CloseDelim(token::Bracket), SeqSep::trailing_allowed(token::Comma), |p| Ok(p.parse_expr()?) )?; let mut exprs = vec![first_expr]; exprs.extend(remaining_exprs); ex = ExprKind::Array(exprs); } else { // Vector with one element. 
self.expect(&token::CloseDelim(token::Bracket))?; ex = ExprKind::Array(vec![first_expr]); } } hi = self.prev_span; } _ => { if self.eat_lt() { let (qself, path) = self.parse_qpath(PathStyle::Expr)?; hi = path.span; return Ok(self.mk_expr(lo.to(hi), ExprKind::Path(Some(qself), path), attrs)); } if self.check_keyword(kw::Move) || self.check_keyword(kw::Static) { return self.parse_lambda_expr(attrs); } if self.eat_keyword(kw::If) { return self.parse_if_expr(attrs); } if self.eat_keyword(kw::For) { let lo = self.prev_span; return self.parse_for_expr(None, lo, attrs); } if self.eat_keyword(kw::While) { let lo = self.prev_span; return self.parse_while_expr(None, lo, attrs); } if let Some(label) = self.eat_label() { let lo = label.ident.span; self.expect(&token::Colon)?; if self.eat_keyword(kw::While) { return self.parse_while_expr(Some(label), lo, attrs) } if self.eat_keyword(kw::For) { return self.parse_for_expr(Some(label), lo, attrs) } if self.eat_keyword(kw::Loop) { return self.parse_loop_expr(Some(label), lo, attrs) } if self.token == token::OpenDelim(token::Brace) { return self.parse_block_expr(Some(label), lo, BlockCheckMode::Default, attrs); } let msg = "expected `while`, `for`, `loop` or `{` after a label"; let mut err = self.fatal(msg); err.span_label(self.token.span, msg); return Err(err); } if self.eat_keyword(kw::Loop) { let lo = self.prev_span; return self.parse_loop_expr(None, lo, attrs); } if self.eat_keyword(kw::Continue) { let label = self.eat_label(); let ex = ExprKind::Continue(label); let hi = self.prev_span; return Ok(self.mk_expr(lo.to(hi), ex, attrs)); } if self.eat_keyword(kw::Match) { let match_sp = self.prev_span; return self.parse_match_expr(attrs).map_err(|mut err| { err.span_label(match_sp, "while parsing this match expression"); err }); } if self.eat_keyword(kw::Unsafe) { return self.parse_block_expr( None, lo, BlockCheckMode::Unsafe(ast::UserProvided), attrs); } if self.is_do_catch_block() { let mut db = self.fatal("found removed `do catch` syntax"); db.help("Following RFC #2388, the new non-placeholder syntax is `try`"); return Err(db); } if self.is_try_block() { let lo = self.token.span; assert!(self.eat_keyword(kw::Try)); return self.parse_try_block(lo, attrs); } // Span::rust_2018() is somewhat expensive; don't get it repeatedly. let is_span_rust_2018 = self.token.span.rust_2018(); if is_span_rust_2018 && self.check_keyword(kw::Async) { return if self.is_async_block() { // check for `async {` and `async move {` self.parse_async_block(attrs) } else { self.parse_lambda_expr(attrs) }; } if self.eat_keyword(kw::Return) { if self.token.can_begin_expr() { let e = self.parse_expr()?; hi = e.span; ex = ExprKind::Ret(Some(e)); } else { ex = ExprKind::Ret(None); } } else if self.eat_keyword(kw::Break) { let label = self.eat_label(); let e = if self.token.can_begin_expr() && !(self.token == token::OpenDelim(token::Brace) && self.restrictions.contains( Restrictions::NO_STRUCT_LITERAL)) { Some(self.parse_expr()?) 
} else { None }; ex = ExprKind::Break(label, e); hi = self.prev_span; } else if self.eat_keyword(kw::Yield) { if self.token.can_begin_expr() { let e = self.parse_expr()?; hi = e.span; ex = ExprKind::Yield(Some(e)); } else { ex = ExprKind::Yield(None); } let span = lo.to(hi); self.sess.gated_spans.yields.borrow_mut().push(span); } else if self.eat_keyword(kw::Let) { return self.parse_let_expr(attrs); } else if is_span_rust_2018 && self.eat_keyword(kw::Await) { let (await_hi, e_kind) = self.parse_incorrect_await_syntax(lo, self.prev_span)?; hi = await_hi; ex = e_kind; } else if self.token.is_path_start() { let path = self.parse_path(PathStyle::Expr)?; // `!`, as an operator, is prefix, so we know this isn't that if self.eat(&token::Not) { // MACRO INVOCATION expression let (delim, tts) = self.expect_delimited_token_tree()?; hi = self.prev_span; ex = ExprKind::Mac(Mac { path, tts, delim, span: lo.to(hi), prior_type_ascription: self.last_type_ascription, }); } else if self.check(&token::OpenDelim(token::Brace)) { if let Some(expr) = self.maybe_parse_struct_expr(lo, &path, &attrs) { return expr; } else { hi = path.span; ex = ExprKind::Path(None, path); } } else { hi = path.span; ex = ExprKind::Path(None, path); } } else { if !self.unclosed_delims.is_empty() && self.check(&token::Semi) { // Don't complain about bare semicolons after unclosed braces // recovery in order to keep the error count down. Fixing the // delimiters will possibly also fix the bare semicolon found in // expression context. For example, silence the following error: // ``` // error: expected expression, found `;` // --> file.rs:2:13 // | // 2 | foo(bar(; // | ^ expected expression // ``` self.bump(); return Ok(self.mk_expr(self.token.span, ExprKind::Err, ThinVec::new())); } parse_lit!() } } } let expr = self.mk_expr(lo.to(hi), ex, attrs); self.maybe_recover_from_bad_qpath(expr, true) } /// Matches `'-' lit | lit` (cf. `ast_validation::AstValidator::check_expr_within_pat`). crate fn parse_literal_maybe_minus(&mut self) -> PResult<'a, P<Expr>> { maybe_whole_expr!(self); let minus_lo = self.token.span; let minus_present = self.eat(&token::BinOp(token::Minus)); let lo = self.token.span; let literal = self.parse_lit()?; let hi = self.prev_span; let expr = self.mk_expr(lo.to(hi), ExprKind::Lit(literal), ThinVec::new()); if minus_present { let minus_hi = self.prev_span; let unary = self.mk_unary(UnOp::Neg, expr); Ok(self.mk_expr(minus_lo.to(minus_hi), unary, ThinVec::new())) } else { Ok(expr) } } /// Parses a block or unsafe block. crate fn parse_block_expr( &mut self, opt_label: Option<Label>, lo: Span, blk_mode: BlockCheckMode, outer_attrs: ThinVec<Attribute>, ) -> PResult<'a, P<Expr>> { self.expect(&token::OpenDelim(token::Brace))?; let mut attrs = outer_attrs; attrs.extend(self.parse_inner_attributes()?); let blk = self.parse_block_tail(lo, blk_mode)?; return Ok(self.mk_expr(blk.span, ExprKind::Block(blk, opt_label), attrs)); } /// Parses `move |args| expr`. fn parse_lambda_expr(&mut self, attrs: ThinVec<Attribute>) -> PResult<'a, P<Expr>> { let lo = self.token.span; let movability = if self.eat_keyword(kw::Static) { Movability::Static } else { Movability::Movable }; let asyncness = if self.token.span.rust_2018() { self.parse_asyncness() } else { IsAsync::NotAsync }; if asyncness.is_async() { // Feature gate `async ||` closures. 
self.sess.gated_spans.async_closure.borrow_mut().push(self.prev_span);
        }

        let capture_clause = self.parse_capture_clause();
        let decl = self.parse_fn_block_decl()?;
        let decl_hi = self.prev_span;
        let body = match decl.output {
            FunctionRetTy::Default(_) => {
                let restrictions = self.restrictions - Restrictions::STMT_EXPR;
                self.parse_expr_res(restrictions, None)?
            },
            _ => {
                // If an explicit return type is given, require a
                // block to appear (RFC 968).
                let body_lo = self.token.span;
                self.parse_block_expr(None, body_lo, BlockCheckMode::Default, ThinVec::new())?
            }
        };

        Ok(self.mk_expr(
            lo.to(body.span),
            ExprKind::Closure(capture_clause, asyncness, movability, decl, body, lo.to(decl_hi)),
            attrs))
    }

    /// Parse an optional `move` prefix to a closure like construct.
    fn parse_capture_clause(&mut self) -> CaptureBy {
        if self.eat_keyword(kw::Move) {
            CaptureBy::Value
        } else {
            CaptureBy::Ref
        }
    }

    /// Parses the `|arg, arg|` header of a closure.
    fn parse_fn_block_decl(&mut self) -> PResult<'a, P<FnDecl>> {
        let inputs_captures = {
            if self.eat(&token::OrOr) {
                Vec::new()
            } else {
                self.expect(&token::BinOp(token::Or))?;
                let args = self.parse_seq_to_before_tokens(
                    &[&token::BinOp(token::Or), &token::OrOr],
                    SeqSep::trailing_allowed(token::Comma),
                    TokenExpectType::NoExpect,
                    |p| p.parse_fn_block_arg()
                )?.0;
                self.expect_or()?;
                args
            }
        };
        let output = self.parse_ret_ty(true)?;

        Ok(P(FnDecl {
            inputs: inputs_captures,
            output,
            c_variadic: false
        }))
    }

    /// Parses an argument in a lambda header (e.g., `|arg, arg|`).
    fn parse_fn_block_arg(&mut self) -> PResult<'a, Arg> {
        let lo = self.token.span;
        let attrs = self.parse_arg_attributes()?;
        let pat = self.parse_pat(PARAM_EXPECTED)?;
        let t = if self.eat(&token::Colon) {
            self.parse_ty()?
        } else {
            P(Ty {
                id: ast::DUMMY_NODE_ID,
                node: TyKind::Infer,
                span: self.prev_span,
            })
        };
        let span = lo.to(self.token.span);
        Ok(Arg {
            attrs: attrs.into(),
            ty: t,
            pat,
            span,
            id: ast::DUMMY_NODE_ID,
        })
    }

    /// Parses an `if` expression (`if` token already eaten).
    fn parse_if_expr(&mut self, attrs: ThinVec<Attribute>) -> PResult<'a, P<Expr>> {
        let lo = self.prev_span;
        let cond = self.parse_cond_expr()?;

        // Verify that the parsed `if` condition makes sense as a condition. If it is a block, then
        // verify that the last statement is either an implicit return (no `;`) or an explicit
        // return. This won't catch blocks with an explicit `return`, but that would be caught by
        // the dead code lint.
        if self.eat_keyword(kw::Else) || !cond.returns() {
            let sp = self.sess.source_map().next_point(lo);
            let mut err = self.diagnostic()
                .struct_span_err(sp, "missing condition for `if` expression");
            err.span_label(sp, "expected if condition here");
            return Err(err)
        }
        let not_block = self.token != token::OpenDelim(token::Brace);
        let thn = self.parse_block().map_err(|mut err| {
            if not_block {
                err.span_label(lo, "this `if` statement has a condition, but no block");
            }
            err
        })?;
        let mut els: Option<P<Expr>> = None;
        let mut hi = thn.span;
        if self.eat_keyword(kw::Else) {
            let elexpr = self.parse_else_expr()?;
            hi = elexpr.span;
            els = Some(elexpr);
        }
        Ok(self.mk_expr(lo.to(hi), ExprKind::If(cond, thn, els), attrs))
    }

    /// Parse the condition of an `if`- or `while`-expression
    fn parse_cond_expr(&mut self) -> PResult<'a, P<Expr>> {
        let cond = self.parse_expr_res(Restrictions::NO_STRUCT_LITERAL, None)?;

        if let ExprKind::Let(..) = cond.node {
            // Remove the last feature gating of a `let` expression since it's stable.
let last = self.sess.gated_spans.let_chains.borrow_mut().pop(); debug_assert_eq!(cond.span, last.unwrap()); } Ok(cond) } /// Parses a `let $pat = $expr` pseudo-expression. /// The `let` token has already been eaten. fn parse_let_expr(&mut self, attrs: ThinVec<Attribute>) -> PResult<'a, P<Expr>> { let lo = self.prev_span; // FIXME(or_patterns, Centril | dlrobertson): use `parse_top_pat` instead. let pat = self.parse_top_pat_unpack(GateOr::No)?; self.expect(&token::Eq)?; let expr = self.with_res( Restrictions::NO_STRUCT_LITERAL, |this| this.parse_assoc_expr_with(1 + prec_let_scrutinee_needs_par(), None.into()) )?; let span = lo.to(expr.span); self.sess.gated_spans.let_chains.borrow_mut().push(span); Ok(self.mk_expr(span, ExprKind::Let(pat, expr), attrs)) } /// `else` token already eaten fn parse_else_expr(&mut self) -> PResult<'a, P<Expr>> { if self.eat_keyword(kw::If) { return self.parse_if_expr(ThinVec::new()); } else { let blk = self.parse_block()?; return Ok(self.mk_expr(blk.span, ExprKind::Block(blk, None), ThinVec::new())); } } /// Parse a 'for' .. 'in' expression ('for' token already eaten) fn parse_for_expr( &mut self, opt_label: Option<Label>, span_lo: Span, mut attrs: ThinVec<Attribute> ) -> PResult<'a, P<Expr>> { // Parse: `for <src_pat> in <src_expr> <src_loop_block>` // Record whether we are about to parse `for (`. // This is used below for recovery in case of `for ( $stuff ) $block` // in which case we will suggest `for $stuff $block`. let begin_paren = match self.token.kind { token::OpenDelim(token::Paren) => Some(self.token.span), _ => None, }; let pat = self.parse_top_pat(GateOr::Yes)?; if !self.eat_keyword(kw::In) { let in_span = self.prev_span.between(self.token.span); self.struct_span_err(in_span, "missing `in` in `for` loop") .span_suggestion_short( in_span, "try adding `in` here", " in ".into(), // has been misleading, at least in the past (closed Issue #48492) Applicability::MaybeIncorrect ) .emit(); } let in_span = self.prev_span; self.check_for_for_in_in_typo(in_span); let expr = self.parse_expr_res(Restrictions::NO_STRUCT_LITERAL, None)?; let pat = self.recover_parens_around_for_head(pat, &expr, begin_paren); let (iattrs, loop_block) = self.parse_inner_attrs_and_block()?; attrs.extend(iattrs); let hi = self.prev_span; Ok(self.mk_expr(span_lo.to(hi), ExprKind::ForLoop(pat, expr, loop_block, opt_label), attrs)) } /// Parses a `while` or `while let` expression (`while` token already eaten). fn parse_while_expr( &mut self, opt_label: Option<Label>, span_lo: Span, mut attrs: ThinVec<Attribute> ) -> PResult<'a, P<Expr>> { let cond = self.parse_cond_expr()?; let (iattrs, body) = self.parse_inner_attrs_and_block()?; attrs.extend(iattrs); let span = span_lo.to(body.span); Ok(self.mk_expr(span, ExprKind::While(cond, body, opt_label), attrs)) } /// Parse `loop {...}`, `loop` token already eaten. 
fn parse_loop_expr( &mut self, opt_label: Option<Label>, span_lo: Span, mut attrs: ThinVec<Attribute> ) -> PResult<'a, P<Expr>> { let (iattrs, body) = self.parse_inner_attrs_and_block()?; attrs.extend(iattrs); let span = span_lo.to(body.span); Ok(self.mk_expr(span, ExprKind::Loop(body, opt_label), attrs)) } fn eat_label(&mut self) -> Option<Label> { if let Some(ident) = self.token.lifetime() { let span = self.token.span; self.bump(); Some(Label { ident: Ident::new(ident.name, span) }) } else { None } } // `match` token already eaten fn parse_match_expr(&mut self, mut attrs: ThinVec<Attribute>) -> PResult<'a, P<Expr>> { let match_span = self.prev_span; let lo = self.prev_span; let discriminant = self.parse_expr_res(Restrictions::NO_STRUCT_LITERAL, None)?; if let Err(mut e) = self.expect(&token::OpenDelim(token::Brace)) { if self.token == token::Semi { e.span_suggestion_short( match_span, "try removing this `match`", String::new(), Applicability::MaybeIncorrect // speculative ); } return Err(e) } attrs.extend(self.parse_inner_attributes()?); let mut arms: Vec<Arm> = Vec::new(); while self.token != token::CloseDelim(token::Brace) { match self.parse_arm() { Ok(arm) => arms.push(arm), Err(mut e) => { // Recover by skipping to the end of the block. e.emit(); self.recover_stmt(); let span = lo.to(self.token.span); if self.token == token::CloseDelim(token::Brace) { self.bump(); } return Ok(self.mk_expr(span, ExprKind::Match(discriminant, arms), attrs)); } } } let hi = self.token.span; self.bump(); return Ok(self.mk_expr(lo.to(hi), ExprKind::Match(discriminant, arms), attrs)); } crate fn parse_arm(&mut self) -> PResult<'a, Arm> { let attrs = self.parse_outer_attributes()?; let lo = self.token.span; // FIXME(or_patterns, Centril | dlrobertson): use `parse_top_pat` instead. let pat = self.parse_top_pat_unpack(GateOr::No)?; let guard = if self.eat_keyword(kw::If) { Some(self.parse_expr()?) } else { None }; let arrow_span = self.token.span; self.expect(&token::FatArrow)?; let arm_start_span = self.token.span; let expr = self.parse_expr_res(Restrictions::STMT_EXPR, None) .map_err(|mut err| { err.span_label(arrow_span, "while parsing the `match` arm starting here"); err })?; let require_comma = classify::expr_requires_semi_to_be_stmt(&expr) && self.token != token::CloseDelim(token::Brace); let hi = self.token.span; if require_comma { let cm = self.sess.source_map(); self.expect_one_of(&[token::Comma], &[token::CloseDelim(token::Brace)]) .map_err(|mut err| { match (cm.span_to_lines(expr.span), cm.span_to_lines(arm_start_span)) { (Ok(ref expr_lines), Ok(ref arm_start_lines)) if arm_start_lines.lines[0].end_col == expr_lines.lines[0].end_col && expr_lines.lines.len() == 2 && self.token == token::FatArrow => { // We check whether there's any trailing code in the parse span, // if there isn't, we very likely have the following: // // X | &Y => "y" // | -- - missing comma // | | // | arrow_span // X | &X => "x" // | - ^^ self.token.span // | | // | parsed until here as `"y" & X` err.span_suggestion_short( cm.next_point(arm_start_span), "missing a comma here to end this `match` arm", ",".to_owned(), Applicability::MachineApplicable ); } _ => { err.span_label(arrow_span, "while parsing the `match` arm starting here"); } } err })?; } else { self.eat(&token::Comma); } Ok(ast::Arm { attrs, pats: pat, // FIXME(or_patterns, Centril | dlrobertson): this should just be `pat,`. guard, body: expr, span: lo.to(hi), id: ast::DUMMY_NODE_ID, }) } /// Parses a `try {...}` expression (`try` token already eaten). 
fn parse_try_block( &mut self, span_lo: Span, mut attrs: ThinVec<Attribute> ) -> PResult<'a, P<Expr>> { let (iattrs, body) = self.parse_inner_attrs_and_block()?; attrs.extend(iattrs); if self.eat_keyword(kw::Catch) { let mut error = self.struct_span_err(self.prev_span, "keyword `catch` cannot follow a `try` block"); error.help("try using `match` on the result of the `try` block instead"); error.emit(); Err(error) } else { Ok(self.mk_expr(span_lo.to(body.span), ExprKind::TryBlock(body), attrs)) } } fn is_do_catch_block(&self) -> bool { self.token.is_keyword(kw::Do) && self.is_keyword_ahead(1, &[kw::Catch]) && self.look_ahead(2, |t| *t == token::OpenDelim(token::Brace)) && !self.restrictions.contains(Restrictions::NO_STRUCT_LITERAL) } fn is_try_block(&self) -> bool { self.token.is_keyword(kw::Try) && self.look_ahead(1, |t| *t == token::OpenDelim(token::Brace)) && self.token.span.rust_2018() && // prevent `while try {} {}`, `if try {} {} else {}`, etc. !self.restrictions.contains(Restrictions::NO_STRUCT_LITERAL) } /// Parses an `async move? {...}` expression. pub fn parse_async_block(&mut self, mut attrs: ThinVec<Attribute>) -> PResult<'a, P<Expr>> { let span_lo = self.token.span; self.expect_keyword(kw::Async)?; let capture_clause = self.parse_capture_clause(); let (iattrs, body) = self.parse_inner_attrs_and_block()?; attrs.extend(iattrs); Ok(self.mk_expr( span_lo.to(body.span), ExprKind::Async(capture_clause, ast::DUMMY_NODE_ID, body), attrs)) } fn is_async_block(&self) -> bool { self.token.is_keyword(kw::Async) && ( ( // `async move {` self.is_keyword_ahead(1, &[kw::Move]) && self.look_ahead(2, |t| *t == token::OpenDelim(token::Brace)) ) || ( // `async {` self.look_ahead(1, |t| *t == token::OpenDelim(token::Brace)) ) ) } fn maybe_parse_struct_expr( &mut self, lo: Span, path: &ast::Path, attrs: &ThinVec<Attribute>, ) -> Option<PResult<'a, P<Expr>>> { let struct_allowed = !self.restrictions.contains(Restrictions::NO_STRUCT_LITERAL); let certainly_not_a_block = || self.look_ahead(1, |t| t.is_ident()) && ( // `{ ident, ` cannot start a block self.look_ahead(2, |t| t == &token::Comma) || self.look_ahead(2, |t| t == &token::Colon) && ( // `{ ident: token, ` cannot start a block self.look_ahead(4, |t| t == &token::Comma) || // `{ ident: ` cannot start a block unless it's a type ascription `ident: Type` self.look_ahead(3, |t| !t.can_begin_type()) ) ); if struct_allowed || certainly_not_a_block() { // This is a struct literal, but we don't can't accept them here let expr = self.parse_struct_expr(lo, path.clone(), attrs.clone()); if let (Ok(expr), false) = (&expr, struct_allowed) { self.struct_span_err( expr.span, "struct literals are not allowed here", ) .multipart_suggestion( "surround the struct literal with parentheses", vec![ (lo.shrink_to_lo(), "(".to_string()), (expr.span.shrink_to_hi(), ")".to_string()), ], Applicability::MachineApplicable, ) .emit(); } return Some(expr); } None } pub(super) fn parse_struct_expr( &mut self, lo: Span, pth: ast::Path, mut attrs: ThinVec<Attribute> ) -> PResult<'a, P<Expr>> { let struct_sp = lo.to(self.prev_span); self.bump(); let mut fields = Vec::new(); let mut base = None; attrs.extend(self.parse_inner_attributes()?); while self.token != token::CloseDelim(token::Brace) { if self.eat(&token::DotDot) { let exp_span = self.prev_span; match self.parse_expr() { Ok(e) => { base = Some(e); } Err(mut e) => { e.emit(); self.recover_stmt(); } } if self.token == token::Comma { self.struct_span_err( exp_span.to(self.prev_span), "cannot use a comma after the base 
struct", ) .span_suggestion_short( self.token.span, "remove this comma", String::new(), Applicability::MachineApplicable ) .note("the base struct must always be the last field") .emit(); self.recover_stmt(); } break; } let mut recovery_field = None; if let token::Ident(name, _) = self.token.kind { if !self.token.is_reserved_ident() && self.look_ahead(1, |t| *t == token::Colon) { // Use in case of error after field-looking code: `S { foo: () with a }` recovery_field = Some(ast::Field { ident: Ident::new(name, self.token.span), span: self.token.span, expr: self.mk_expr(self.token.span, ExprKind::Err, ThinVec::new()), is_shorthand: false, attrs: ThinVec::new(), id: ast::DUMMY_NODE_ID, }); } } let mut parsed_field = None; match self.parse_field() { Ok(f) => parsed_field = Some(f), Err(mut e) => { e.span_label(struct_sp, "while parsing this struct"); e.emit(); // If the next token is a comma, then try to parse // what comes next as additional fields, rather than // bailing out until next `}`. if self.token != token::Comma { self.recover_stmt_(SemiColonMode::Comma, BlockMode::Ignore); if self.token != token::Comma { break; } } } } match self.expect_one_of(&[token::Comma], &[token::CloseDelim(token::Brace)]) { Ok(_) => if let Some(f) = parsed_field.or(recovery_field) { // only include the field if there's no parse error for the field name fields.push(f); } Err(mut e) => { if let Some(f) = recovery_field { fields.push(f); } e.span_label(struct_sp, "while parsing this struct"); e.emit(); self.recover_stmt_(SemiColonMode::Comma, BlockMode::Ignore); self.eat(&token::Comma); } } } let span = lo.to(self.token.span); self.expect(&token::CloseDelim(token::Brace))?; return Ok(self.mk_expr(span, ExprKind::Struct(pth, fields, base), attrs)); } /// Parse ident (COLON expr)? fn parse_field(&mut self) -> PResult<'a, Field> { let attrs = self.parse_outer_attributes()?; let lo = self.token.span; // Check if a colon exists one ahead. This means we're parsing a fieldname. let (fieldname, expr, is_shorthand) = if self.look_ahead(1, |t| { t == &token::Colon || t == &token::Eq }) { let fieldname = self.parse_field_name()?; // Check for an equals token. This means the source incorrectly attempts to // initialize a field with an eq rather than a colon. if self.token == token::Eq { self.diagnostic() .struct_span_err(self.token.span, "expected `:`, found `=`") .span_suggestion( fieldname.span.shrink_to_hi().to(self.token.span), "replace equals symbol with a colon", ":".to_string(), Applicability::MachineApplicable, ) .emit(); } self.bump(); // `:` (fieldname, self.parse_expr()?, false) } else { let fieldname = self.parse_ident_common(false)?; // Mimic `x: x` for the `x` field shorthand. 
let path = ast::Path::from_ident(fieldname); let expr = self.mk_expr(fieldname.span, ExprKind::Path(None, path), ThinVec::new()); (fieldname, expr, true) }; Ok(ast::Field { ident: fieldname, span: lo.to(expr.span), expr, is_shorthand, attrs: attrs.into(), id: ast::DUMMY_NODE_ID, }) } fn err_dotdotdot_syntax(&self, span: Span) { self.struct_span_err(span, "unexpected token: `...`") .span_suggestion( span, "use `..` for an exclusive range", "..".to_owned(), Applicability::MaybeIncorrect ) .span_suggestion( span, "or `..=` for an inclusive range", "..=".to_owned(), Applicability::MaybeIncorrect ) .emit(); } fn err_larrow_operator(&self, span: Span) { self.struct_span_err( span, "unexpected token: `<-`" ).span_suggestion( span, "if you meant to write a comparison against a negative value, add a \ space in between `<` and `-`", "< -".to_string(), Applicability::MaybeIncorrect ).emit(); } fn mk_assign_op(&self, binop: BinOp, lhs: P<Expr>, rhs: P<Expr>) -> ExprKind { ExprKind::AssignOp(binop, lhs, rhs) } fn mk_range( &self, start: Option<P<Expr>>, end: Option<P<Expr>>, limits: RangeLimits ) -> PResult<'a, ExprKind> { if end.is_none() && limits == RangeLimits::Closed { Err(self.span_fatal_err(self.token.span, Error::InclusiveRangeWithNoEnd)) } else { Ok(ExprKind::Range(start, end, limits)) } } fn mk_unary(&self, unop: UnOp, expr: P<Expr>) -> ExprKind { ExprKind::Unary(unop, expr) } fn mk_binary(&self, binop: BinOp, lhs: P<Expr>, rhs: P<Expr>) -> ExprKind { ExprKind::Binary(binop, lhs, rhs) } fn mk_index(&self, expr: P<Expr>, idx: P<Expr>) -> ExprKind { ExprKind::Index(expr, idx) } fn mk_call(&self, f: P<Expr>, args: Vec<P<Expr>>) -> ExprKind { ExprKind::Call(f, args) } fn mk_await_expr(&mut self, self_arg: P<Expr>, lo: Span) -> PResult<'a, P<Expr>> { let span = lo.to(self.prev_span); let await_expr = self.mk_expr(span, ExprKind::Await(self_arg), ThinVec::new()); self.recover_from_await_method_call(); Ok(await_expr) } crate fn mk_expr(&self, span: Span, node: ExprKind, attrs: ThinVec<Attribute>) -> P<Expr> { P(Expr { node, span, attrs, id: ast::DUMMY_NODE_ID }) } }
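// Illustrative note (added commentary, not part of the original source):
// for input like `-42`, `parse_literal_maybe_minus` above yields
// `Unary(Neg, Lit(42))` with a span covering the `-`, rather than a single
// negative literal token. This is why pattern positions such as
// `match x { -1 => ... }` accept a leading minus even though `-1` is not
// one literal in the token stream.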
41.795608
100
0.477873
64733f0e03de2a57f9fa20696ba019dc2ddcd4d8
4,421
use std::collections::BTreeMap; use chrono::Utc; use futures::StreamExt; use k8s_openapi::{ api::{ core::v1::{Pod, Service}, networking::v1::Ingress, }, apimachinery::pkg::apis::meta::v1::OwnerReference, }; use kube::{api::ListParams, Api, Client, Resource, ResourceExt}; use kube_runtime::controller::{Context, Controller, ReconcilerAction}; use snafu::{ResultExt, Snafu}; use tracing::{trace, warn}; use super::Ephemeron; mod conditions; mod endpoints; mod expiry; mod ingress; mod pod; mod service; const PROJECT_NAME: &str = "ephemeron"; #[derive(Debug, Snafu)] pub enum Error { #[snafu(display("Failed to delete expired resource: {}", source))] DeleteExpired { source: expiry::Error }, #[snafu(display("Failed to reconcile pod: {}", source))] ReconcilePod { source: pod::Error }, #[snafu(display("Failed to reconcile service: {}", source))] ReconcileService { source: service::Error }, #[snafu(display("Failed to reconcile ingress: {}", source))] ReconcileIngress { source: ingress::Error }, #[snafu(display("Failed to reconcile endpoints: {}", source))] ReconcileEndpoints { source: endpoints::Error }, } pub type Result<T, E = Error> = std::result::Result<T, E>; // TODO Configurable const NS: &str = "default"; pub async fn run(client: Client, domain: String) { let context = Context::new(ContextData { client: client.clone(), domain, }); let lp = ListParams::default(); Controller::<Ephemeron>::new(Api::all(client.clone()), lp.clone()) .owns::<Pod>(Api::namespaced(client.clone(), NS), lp.clone()) .owns::<Service>(Api::namespaced(client.clone(), NS), lp.clone()) .owns::<Ingress>(Api::namespaced(client.clone(), NS), lp) .run(reconciler, error_policy, context) .filter_map(|x| async move { x.ok() }) .for_each(|(_, action)| async move { trace!("Reconciled: requeue after {:?}", action.requeue_after); }) .await; } // Data to store in context struct ContextData { client: Client, domain: String, } #[tracing::instrument(skip(eph, ctx), level = "trace")] async fn reconciler(eph: Ephemeron, ctx: Context<ContextData>) -> Result<ReconcilerAction> { if let Some(conditions) = eph.status.as_ref().map(|s| &s.conditions) { trace!("conditions: {:?}", conditions); } if let Some(action) = expiry::reconcile(&eph, ctx.clone()) .await .context(DeleteExpired)? { return Ok(action); } if let Some(action) = pod::reconcile(&eph, ctx.clone()) .await .context(ReconcilePod)? { return Ok(action); } if let Some(action) = service::reconcile(&eph, ctx.clone()) .await .context(ReconcileService)? { return Ok(action); } if let Some(action) = ingress::reconcile(&eph, ctx.clone()) .await .context(ReconcileIngress)? { return Ok(action); } if let Some(action) = endpoints::reconcile(&eph, ctx.clone()) .await .context(ReconcileEndpoints)? { return Ok(action); } // Nothing happened in this loop, so the resource is in the desired state. // Requeue around when this expires unless something else triggers reconciliation. Ok(ReconcilerAction { requeue_after: Some( (eph.spec.expiration_time - Utc::now()) .to_std() .unwrap_or_default(), ), }) } #[allow(clippy::needless_pass_by_value)] /// An error handler called when the reconciler fails. 
fn error_policy(error: &Error, _ctx: Context<ContextData>) -> ReconcilerAction { warn!("reconciler failed: {}", error); ReconcilerAction { requeue_after: None, } } fn make_common_labels(name: &str) -> BTreeMap<String, String> { BTreeMap::from([ ("app.kubernetes.io/name".to_owned(), name.to_owned()), ( "app.kubernetes.io/managed-by".to_owned(), PROJECT_NAME.to_owned(), ), ]) } fn to_owner_reference(eph: &Ephemeron) -> OwnerReference { OwnerReference { api_version: Ephemeron::api_version(&()).into_owned(), kind: Ephemeron::kind(&()).into_owned(), name: eph.name(), uid: eph.metadata.uid.clone().expect(".metadata.uid"), controller: Some(true), block_owner_deletion: Some(true), } }
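// Usage sketch (an illustrative addition, not part of the original module):
// starting the controller from an async entrypoint. `Client::try_default`
// resolves kubeconfig or in-cluster configuration; the domain value below is
// a placeholder and a surrounding `tokio` runtime is assumed.
#[allow(dead_code)]
async fn example_entrypoint() -> Result<(), kube::Error> {
    let client = Client::try_default().await?;
    // `run` drives the reconcile loop until the runtime shuts down.
    run(client, "eph.example.com".to_owned()).await;
    Ok(())
}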
29.278146
92
0.615698
23a079fd5e3714fc2a3385049a4d6f37cf4dcac4
581
<?xml version="1.0" encoding="UTF-8"?> <WebElementEntity> <description></description> <name>entitlements_no</name> <tag></tag> <elementGuidId>e8679a28-6141-4dc3-b446-e94b777f0790</elementGuidId> <selectorCollection> <entry> <key>BASIC</key> <value></value> </entry> <entry> <key>XPATH</key> <value>//input[@id=&quot;entitlements_entitlement&quot;]</value> </entry> </selectorCollection> <selectorMethod>XPATH</selectorMethod> <useRalativeImagePath>false</useRalativeImagePath> </WebElementEntity>
29.05
73
0.654045
bf145c9205b0b7fd11a3978bdc1cb1104012c123
42,982
//! Provides a builder API for generating Rust code. //! //! The general strategy for using the crate is as follows: //! //! 1. Create a `Scope` instance. //! 2. Use the builder API to add elements to the scope. //! 3. Call `Scope::to_string()` to get the generated code. //! //! For example: //! //! ```rust //! use codegen::Scope; //! //! let mut scope = Scope::new(); //! //! scope.new_struct("Foo") //! .derive("Debug") //! .field("one", "usize") //! .field("two", "String"); //! //! println!("{}", scope.to_string()); //! ``` #![deny(warnings, missing_debug_implementations, missing_docs)] #![doc(html_root_url = "https://docs.rs/codegen/0.1.0")] extern crate indexmap; use indexmap::IndexMap; use std::fmt::{self, Write}; /// Defines a scope. /// /// A scope contains modules, types, etc... #[derive(Debug, Clone)] pub struct Scope { /// Scope documentation docs: Option<Docs>, /// Imports imports: IndexMap<String, IndexMap<String, Import>>, /// Contents of the documentation, items: Vec<Item>, } #[derive(Debug, Clone)] enum Item { Module(Module), Struct(Struct), Trait(Trait), Enum(Enum), Impl(Impl), Raw(String), } /// Defines a module. #[derive(Debug, Clone)] pub struct Module { /// Module name name: String, /// Visibility vis: Option<String>, /// Module documentation docs: Option<Docs>, /// Contents of the module scope: Scope, } /// Defines an enumeration. #[derive(Debug, Clone)] pub struct Enum { type_def: TypeDef, variants: Vec<Variant>, } /// Defines a struct. #[derive(Debug, Clone)] pub struct Struct { type_def: TypeDef, /// Struct fields fields: Fields, } /// Define a trait. #[derive(Debug, Clone)] pub struct Trait { type_def: TypeDef, parents: Vec<Type>, associated_tys: Vec<AssociatedType>, fns: Vec<Function>, } /// Defines a type. #[derive(Debug, Clone)] pub struct Type { name: String, generics: Vec<Type>, } /// Defines a type definition. #[derive(Debug, Clone)] struct TypeDef { ty: Type, vis: Option<String>, docs: Option<Docs>, derive: Vec<String>, bounds: Vec<Bound>, } /// Defines an enum variant. #[derive(Debug, Clone)] pub struct Variant { name: String, fields: Fields, } /// Defines a set of fields. #[derive(Debug, Clone)] enum Fields { Empty, Tuple(Vec<Type>), Named(Vec<Field>), } /// Defines a struct field. #[derive(Debug, Clone)] struct Field { /// Field name name: String, /// Field type ty: Type, } /// Defines an associated type. #[derive(Debug, Clone)] pub struct AssociatedType(Bound); #[derive(Debug, Clone)] struct Bound { name: String, bound: Vec<Type>, } /// Defines an impl block. #[derive(Debug, Clone)] pub struct Impl { /// The struct being implemented target: Type, /// Impl level generics generics: Vec<String>, /// If implementing a trait impl_trait: Option<Type>, /// Associated types assoc_tys: Vec<Field>, /// Bounds bounds: Vec<Bound>, fns: Vec<Function>, } /// Defines an import (`use` statement). #[derive(Debug, Clone)] pub struct Import { line: String, vis: Option<String>, } /// Defines a function. #[derive(Debug, Clone)] pub struct Function { /// Name of the function name: String, /// Function documentation docs: Option<Docs>, /// Function visibility vis: Option<String>, /// Function generics generics: Vec<String>, /// If the function takes `&self` or `&mut self` arg_self: Option<String>, /// Function arguments args: Vec<Field>, /// Return type ret: Option<Type>, /// Where bounds bounds: Vec<Bound>, /// Body contents body: Option<Vec<Body>>, } /// Defines a code block. This is used to define a function body. 
#[derive(Debug, Clone)]
pub struct Block {
    before: Option<String>,
    after: Option<String>,
    body: Vec<Body>,
}

#[derive(Debug, Clone)]
enum Body {
    String(String),
    Block(Block),
}

#[derive(Debug, Clone)]
struct Docs {
    docs: String,
}

/// Configures how a scope is formatted.
#[derive(Debug)]
pub struct Formatter<'a> {
    /// Write destination
    dst: &'a mut String,

    /// Number of spaces to start a new line with.
    spaces: usize,

    /// Number of spaces per indentation
    indent: usize,
}

const DEFAULT_INDENT: usize = 4;

// ===== impl Scope =====

impl Scope {
    /// Returns a new scope
    pub fn new() -> Self {
        Scope {
            docs: None,
            imports: IndexMap::new(),
            items: vec![],
        }
    }

    /// Import a type into the scope.
    ///
    /// This results in a new `use` statement being added to the beginning of
    /// the scope.
    pub fn import(&mut self, path: &str, ty: &str) -> &mut Import {
        // handle cases where the caller wants to refer to a type namespaced
        // within the containing namespace, like "a::B".
        let ty = ty.split("::").next().unwrap_or(ty);
        self.imports.entry(path.to_string())
            .or_insert(IndexMap::new())
            .entry(ty.to_string())
            .or_insert_with(|| Import::new(path, ty))
    }

    /// Push a new module definition, returning a mutable reference to it.
    ///
    /// # Panics
    ///
    /// Since a module's name must uniquely identify it within the scope in
    /// which it is defined, pushing a module whose name is already defined
    /// in this scope will cause this function to panic.
    ///
    /// In many cases, the [`get_or_new_module`] function is preferable, as it
    /// will return the existing definition instead.
    ///
    /// [`get_or_new_module`]: #method.get_or_new_module
    pub fn new_module(&mut self, name: &str) -> &mut Module {
        self.push_module(Module::new(name));

        match *self.items.last_mut().unwrap() {
            Item::Module(ref mut v) => v,
            _ => unreachable!(),
        }
    }

    /// Returns a mutable reference to a module if it exists in this scope.
    pub fn get_module_mut<Q: ?Sized>(&mut self, name: &Q) -> Option<&mut Module>
    where
        String: PartialEq<Q>,
    {
        self.items.iter_mut()
            .filter_map(|item| match item {
                &mut Item::Module(ref mut module) if module.name == *name => Some(module),
                _ => None,
            })
            .next()
    }

    /// Returns a reference to a module if it exists in this scope.
    pub fn get_module<Q: ?Sized>(&self, name: &Q) -> Option<&Module>
    where
        String: PartialEq<Q>,
    {
        self.items.iter()
            .filter_map(|item| match item {
                &Item::Module(ref module) if module.name == *name => Some(module),
                _ => None,
            })
            .next()
    }

    /// Returns a mutable reference to a module, creating it if it does
    /// not exist.
    pub fn get_or_new_module(&mut self, name: &str) -> &mut Module {
        if self.get_module(name).is_some() {
            self.get_module_mut(name).unwrap()
        } else {
            self.new_module(name)
        }
    }

    /// Push a module definition.
    ///
    /// # Panics
    ///
    /// Since a module's name must uniquely identify it within the scope in
    /// which it is defined, pushing a module whose name is already defined
    /// in this scope will cause this function to panic.
    ///
    /// In many cases, the [`get_or_new_module`] function is preferable, as it will
    /// return the existing definition instead.
    ///
    /// [`get_or_new_module`]: #method.get_or_new_module
    pub fn push_module(&mut self, item: Module) -> &mut Self {
        assert!(self.get_module(&item.name).is_none());
        self.items.push(Item::Module(item));
        self
    }

    /// Push a new struct definition, returning a mutable reference to it.
pub fn new_struct(&mut self, name: &str) -> &mut Struct {
        self.push_struct(Struct::new(name));

        match *self.items.last_mut().unwrap() {
            Item::Struct(ref mut v) => v,
            _ => unreachable!(),
        }
    }

    /// Push a struct definition
    pub fn push_struct(&mut self, item: Struct) -> &mut Self {
        self.items.push(Item::Struct(item));
        self
    }

    /// Push a new trait definition, returning a mutable reference to it.
    pub fn new_trait(&mut self, name: &str) -> &mut Trait {
        self.push_trait(Trait::new(name));

        match *self.items.last_mut().unwrap() {
            Item::Trait(ref mut v) => v,
            _ => unreachable!(),
        }
    }

    /// Push a trait definition
    pub fn push_trait(&mut self, item: Trait) -> &mut Self {
        self.items.push(Item::Trait(item));
        self
    }

    /// Push a new enum definition, returning a mutable reference to it.
    pub fn new_enum(&mut self, name: &str) -> &mut Enum {
        self.push_enum(Enum::new(name));

        match *self.items.last_mut().unwrap() {
            Item::Enum(ref mut v) => v,
            _ => unreachable!(),
        }
    }

    /// Push an enum definition
    pub fn push_enum(&mut self, item: Enum) -> &mut Self {
        self.items.push(Item::Enum(item));
        self
    }

    /// Push a new `impl` block, returning a mutable reference to it.
    pub fn new_impl(&mut self, target: &str) -> &mut Impl {
        self.push_impl(Impl::new(target));

        match *self.items.last_mut().unwrap() {
            Item::Impl(ref mut v) => v,
            _ => unreachable!(),
        }
    }

    /// Push an `impl` block.
    pub fn push_impl(&mut self, item: Impl) -> &mut Self {
        self.items.push(Item::Impl(item));
        self
    }

    /// Push a raw string to the scope.
    ///
    /// This string will be included verbatim in the formatted string.
    pub fn raw(&mut self, val: &str) -> &mut Self {
        self.items.push(Item::Raw(val.to_string()));
        self
    }

    /// Return a string representation of the scope.
    pub fn to_string(&self) -> String {
        let mut ret = String::new();

        self.fmt(&mut Formatter::new(&mut ret)).unwrap();

        // Remove the trailing newline
        if ret.as_bytes().last() == Some(&b'\n') {
            ret.pop();
        }

        ret
    }

    /// Formats the scope using the given formatter.
    pub fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
        self.fmt_imports(fmt)?;

        if !self.imports.is_empty() {
            write!(fmt, "\n")?;
        }

        for (i, item) in self.items.iter().enumerate() {
            if i != 0 {
                write!(fmt, "\n")?;
            }

            match *item {
                Item::Module(ref v) => v.fmt(fmt)?,
                Item::Struct(ref v) => v.fmt(fmt)?,
                Item::Trait(ref v) => v.fmt(fmt)?,
                Item::Enum(ref v) => v.fmt(fmt)?,
                Item::Impl(ref v) => v.fmt(fmt)?,
                Item::Raw(ref v) => {
                    write!(fmt, "{}\n", v)?;
                }
            }
        }

        Ok(())
    }

    fn fmt_imports(&self, fmt: &mut Formatter) -> fmt::Result {
        // First, collect all visibilities
        let mut visibilities = vec![];

        for (_, imports) in &self.imports {
            for (_, import) in imports {
                if !visibilities.contains(&import.vis) {
                    visibilities.push(import.vis.clone());
                }
            }
        }

        let mut tys = vec![];

        // Loop over all visibilities and format the associated imports
        for vis in &visibilities {
            for (path, imports) in &self.imports {
                tys.clear();

                for (ty, import) in imports {
                    if *vis == import.vis {
                        tys.push(ty);
                    }
                }

                if !tys.is_empty() {
                    if let Some(ref vis) = *vis {
                        write!(fmt, "{} ", vis)?;
                    }

                    write!(fmt, "use {}::", path)?;

                    if tys.len() > 1 {
                        write!(fmt, "{{")?;

                        for (i, ty) in tys.iter().enumerate() {
                            if i != 0 {
                                write!(fmt, ", ")?;
                            }
                            write!(fmt, "{}", ty)?;
                        }

                        write!(fmt, "}};\n")?;
                    } else if tys.len() == 1 {
                        write!(fmt, "{};\n", tys[0])?;
                    }
                }
            }
        }

        Ok(())
    }
}

// ===== impl Module =====

impl Module {
    /// Return a new, blank module
    pub fn new(name: &str) -> Self {
        Module {
            name: name.to_string(),
            vis: None,
            docs: None,
            scope: Scope::new(),
        }
    }

    /// Returns a mutable reference to the module's scope.
pub fn scope(&mut self) -> &mut Scope {
        &mut self.scope
    }

    /// Set the module visibility.
    pub fn vis(&mut self, vis: &str) -> &mut Self {
        self.vis = Some(vis.to_string());
        self
    }

    /// Import a type into the module's scope.
    ///
    /// This results in a new `use` statement being added to the beginning of the
    /// module.
    pub fn import(&mut self, path: &str, ty: &str) -> &mut Self {
        self.scope.import(path, ty);
        self
    }

    /// Push a new module definition, returning a mutable reference to it.
    ///
    /// # Panics
    ///
    /// Since a module's name must uniquely identify it within the scope in
    /// which it is defined, pushing a module whose name is already defined
    /// in this scope will cause this function to panic.
    ///
    /// In many cases, the [`get_or_new_module`] function is preferable, as it
    /// will return the existing definition instead.
    ///
    /// [`get_or_new_module`]: #method.get_or_new_module
    pub fn new_module(&mut self, name: &str) -> &mut Module {
        self.scope.new_module(name)
    }

    /// Returns a reference to a module if it exists in this scope.
    pub fn get_module<Q: ?Sized>(&self, name: &Q) -> Option<&Module>
    where
        String: PartialEq<Q>,
    {
        self.scope.get_module(name)
    }

    /// Returns a mutable reference to a module if it exists in this scope.
    pub fn get_module_mut<Q: ?Sized>(&mut self, name: &Q) -> Option<&mut Module>
    where
        String: PartialEq<Q>,
    {
        self.scope.get_module_mut(name)
    }

    /// Returns a mutable reference to a module, creating it if it does
    /// not exist.
    pub fn get_or_new_module(&mut self, name: &str) -> &mut Module {
        self.scope.get_or_new_module(name)
    }

    /// Push a module definition.
    ///
    /// # Panics
    ///
    /// Since a module's name must uniquely identify it within the scope in
    /// which it is defined, pushing a module whose name is already defined
    /// in this scope will cause this function to panic.
    ///
    /// In many cases, the [`get_or_new_module`] function is preferable, as it will
    /// return the existing definition instead.
    ///
    /// [`get_or_new_module`]: #method.get_or_new_module
    pub fn push_module(&mut self, item: Module) -> &mut Self {
        self.scope.push_module(item);
        self
    }

    /// Push a new struct definition, returning a mutable reference to it.
    pub fn new_struct(&mut self, name: &str) -> &mut Struct {
        self.scope.new_struct(name)
    }

    /// Push a structure definition
    pub fn push_struct(&mut self, item: Struct) -> &mut Self {
        self.scope.push_struct(item);
        self
    }

    /// Push a new enum definition, returning a mutable reference to it.
    pub fn new_enum(&mut self, name: &str) -> &mut Enum {
        self.scope.new_enum(name)
    }

    /// Push an enum definition
    pub fn push_enum(&mut self, item: Enum) -> &mut Self {
        self.scope.push_enum(item);
        self
    }

    /// Push a new `impl` block, returning a mutable reference to it.
    pub fn new_impl(&mut self, target: &str) -> &mut Impl {
        self.scope.new_impl(target)
    }

    /// Push an `impl` block.
    pub fn push_impl(&mut self, item: Impl) -> &mut Self {
        self.scope.push_impl(item);
        self
    }

    /// Formats the module using the given formatter.
    pub fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
        if let Some(ref vis) = self.vis {
            write!(fmt, "{} ", vis)?;
        }

        write!(fmt, "mod {}", self.name)?;
        fmt.block(|fmt| {
            self.scope.fmt(fmt)
        })
    }
}

// ===== impl Struct =====

impl Struct {
    /// Return a structure definition with the provided name
    pub fn new(name: &str) -> Self {
        Struct {
            type_def: TypeDef::new(name),
            fields: Fields::Empty,
        }
    }

    /// Returns a reference to the type
    pub fn ty(&self) -> &Type {
        &self.type_def.ty
    }

    /// Set the structure visibility.
pub fn vis(&mut self, vis: &str) -> &mut Self { self.type_def.vis(vis); self } /// Add a generic to the struct. pub fn generic(&mut self, name: &str) -> &mut Self { self.type_def.ty.generic(name); self } /// Add a `where` bound to the struct. pub fn bound<T>(&mut self, name: &str, ty: T) -> &mut Self where T: Into<Type>, { self.type_def.bound(name, ty); self } /// Set the structure documentation. pub fn doc(&mut self, docs: &str) -> &mut Self { self.type_def.doc(docs); self } /// Add a new type that the struct should derive. pub fn derive(&mut self, name: &str) -> &mut Self { self.type_def.derive(name); self } /// Add a named field to the struct. /// /// A struct can either set named fields with this function or tuple fields /// with `tuple_field`, but not both. pub fn field<T>(&mut self, name: &str, ty: T) -> &mut Self where T: Into<Type>, { self.fields.named(name, ty); self } /// Add a tuple field to the struct. /// /// A struct can either set tuple fields with this function or named fields /// with `field`, but not both. pub fn tuple_field<T>(&mut self, ty: T) -> &mut Self where T: Into<Type>, { self.fields.tuple(ty); self } /// Formats the struct using the given formatter. pub fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { self.type_def.fmt_head("struct", &[], fmt)?; self.fields.fmt(fmt)?; match self.fields { Fields::Empty => { write!(fmt, ";\n")?; } Fields::Tuple(..) => { write!(fmt, ";\n")?; } _ => {} } Ok(()) } } // ===== impl Trait ===== impl Trait { /// Return a trait definition with the provided name pub fn new(name: &str) -> Self { Trait { type_def: TypeDef::new(name), parents: vec![], associated_tys: vec![], fns: vec![], } } /// Returns a reference to the type pub fn ty(&self) -> &Type { &self.type_def.ty } /// Set the trait visibility. pub fn vis(&mut self, vis: &str) -> &mut Self { self.type_def.vis(vis); self } /// Add a generic to the trait pub fn generic(&mut self, name: &str) -> &mut Self { self.type_def.ty.generic(name); self } /// Add a `where` bound to the trait. pub fn bound<T>(&mut self, name: &str, ty: T) -> &mut Self where T: Into<Type>, { self.type_def.bound(name, ty); self } /// Add a parent trait. pub fn parent<T>(&mut self, ty: T) -> &mut Self where T: Into<Type>, { self.parents.push(ty.into()); self } /// Set the trait documentation. pub fn doc(&mut self, docs: &str) -> &mut Self { self.type_def.doc(docs); self } /// Add an associated type. Returns a mutable reference to the new /// associated type for futher configuration. pub fn associated_type(&mut self, name: &str) -> &mut AssociatedType { self.associated_tys.push(AssociatedType(Bound { name: name.to_string(), bound: vec![], })); self.associated_tys.last_mut().unwrap() } /// Push a new function definition, returning a mutable reference to it. pub fn new_fn(&mut self, name: &str) -> &mut Function { let mut func = Function::new(name); func.body = None; self.push_fn(func); self.fns.last_mut().unwrap() } /// Push a function definition. pub fn push_fn(&mut self, item: Function) -> &mut Self { self.fns.push(item); self } /// Formats the scope using the given formatter. 
pub fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { self.type_def.fmt_head("trait", &self.parents, fmt)?; fmt.block(|fmt| { let assoc = &self.associated_tys; // format associated types if !assoc.is_empty() { for ty in assoc { let ty = &ty.0; write!(fmt, "type {}", ty.name)?; if !ty.bound.is_empty() { write!(fmt, ": ")?; fmt_bound_rhs(&ty.bound, fmt)?; } write!(fmt, ";\n")?; } } for (i, func) in self.fns.iter().enumerate() { if i != 0 || !assoc.is_empty() { write!(fmt, "\n")?; } func.fmt(true, fmt)?; } Ok(()) }) } } // ===== impl Enum ===== impl Enum { /// Return a enum definition with the provided name. pub fn new(name: &str) -> Self { Enum { type_def: TypeDef::new(name), variants: vec![], } } /// Returns a reference to the type. pub fn ty(&self) -> &Type { &self.type_def.ty } /// Set the enum visibility. pub fn vis(&mut self, vis: &str) -> &mut Self { self.type_def.vis(vis); self } /// Add a generic to the enum. pub fn generic(&mut self, name: &str) -> &mut Self { self.type_def.ty.generic(name); self } /// Add a `where` bound to the enum. pub fn bound<T>(&mut self, name: &str, ty: T) -> &mut Self where T: Into<Type>, { self.type_def.bound(name, ty); self } /// Set the enum documentation. pub fn doc(&mut self, docs: &str) -> &mut Self { self.type_def.doc(docs); self } /// Add a new type that the struct should derive. pub fn derive(&mut self, name: &str) -> &mut Self { self.type_def.derive(name); self } /// Push a variant to the enum, returning a mutable reference to it. pub fn new_variant(&mut self, name: &str) -> &mut Variant { self.push_variant(Variant::new(name)); self.variants.last_mut().unwrap() } /// Push a variant to the enum. pub fn push_variant(&mut self, item: Variant) -> &mut Self { self.variants.push(item); self } /// Formats the enum using the given formatter. pub fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { self.type_def.fmt_head("enum", &[], fmt)?; fmt.block(|fmt| { for variant in &self.variants { variant.fmt(fmt)?; } Ok(()) }) } } // ===== impl Variant ===== impl Variant { /// Return a new enum variant with the given name. pub fn new(name: &str) -> Self { Variant { name: name.to_string(), fields: Fields::Empty, } } /// Add a named field to the variant. pub fn named<T>(&mut self, name: &str, ty: T) -> &mut Self where T: Into<Type>, { self.fields.named(name, ty); self } /// Add a tuple field to the variant. pub fn tuple(&mut self, ty: &str) -> &mut Self { self.fields.tuple(ty); self } /// Formats the variant using the given formatter. pub fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { write!(fmt, "{}", self.name)?; self.fields.fmt(fmt)?; write!(fmt, ",\n")?; Ok(()) } } // ===== impl Type ===== impl Type { /// Return a new type with the given name. pub fn new(name: &str) -> Self { Type { name: name.to_string(), generics: vec![], } } /// Add a generic to the type. pub fn generic<T>(&mut self, ty: T) -> &mut Self where T: Into<Type>, { // Make sure that the name doesn't already include generics assert!(!self.name.contains("<"), "type name already includes generics"); self.generics.push(ty.into()); self } /// Rewrite the `Type` with the provided path /// /// TODO: Is this needed? pub fn path(&self, path: &str) -> Type { // TODO: This isn't really correct assert!(!self.name.contains("::")); let mut name = path.to_string(); name.push_str("::"); name.push_str(&self.name); Type { name, generics: self.generics.clone(), } } /// Formats the struct using the given formatter. 
pub fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { write!(fmt, "{}", self.name)?; Type::fmt_slice(&self.generics, fmt) } fn fmt_slice(generics: &[Type], fmt: &mut Formatter) -> fmt::Result { if !generics.is_empty() { write!(fmt, "<")?; for (i, ty) in generics.iter().enumerate() { if i != 0 { write!(fmt, ", ")? } ty.fmt(fmt)?; } write!(fmt, ">")?; } Ok(()) } } impl<'a> From<&'a str> for Type { fn from(src: &'a str) -> Self { Type::new(src) } } impl From<String> for Type { fn from(src: String) -> Self { Type { name: src, generics: vec![], } } } impl<'a> From<&'a String> for Type { fn from(src: &'a String) -> Self { Type::new(src) } } impl<'a> From<&'a Type> for Type { fn from(src: &'a Type) -> Self { src.clone() } } // ===== impl TypeDef ===== impl TypeDef { /// Return a structure definition with the provided name fn new(name: &str) -> Self { TypeDef { ty: Type::new(name), vis: None, docs: None, derive: vec![], bounds: vec![], } } fn vis(&mut self, vis: &str) { self.vis = Some(vis.to_string()); } fn bound<T>(&mut self, name: &str, ty: T) where T: Into<Type>, { self.bounds.push(Bound { name: name.to_string(), bound: vec![ty.into()], }); } fn doc(&mut self, docs: &str) { self.docs = Some(Docs::new(docs)); } fn derive(&mut self, name: &str) { self.derive.push(name.to_string()); } fn fmt_head(&self, keyword: &str, parents: &[Type], fmt: &mut Formatter) -> fmt::Result { if let Some(ref docs) = self.docs { docs.fmt(fmt)?; } self.fmt_derive(fmt)?; if let Some(ref vis) = self.vis { write!(fmt, "{} ", vis)?; } write!(fmt, "{} ", keyword)?; self.ty.fmt(fmt)?; if !parents.is_empty() { for (i, ty) in parents.iter().enumerate() { if i == 0 { write!(fmt, ": ")?; } else { write!(fmt, " + ")?; } ty.fmt(fmt)?; } } fmt_bounds(&self.bounds, fmt)?; Ok(()) } fn fmt_derive(&self, fmt: &mut Formatter) -> fmt::Result { if !self.derive.is_empty() { write!(fmt, "#[derive(")?; for (i, name) in self.derive.iter().enumerate() { if i != 0 { write!(fmt, ", ")? } write!(fmt, "{}", name)?; } write!(fmt, ")]\n")?; } Ok(()) } } fn fmt_generics(generics: &[String], fmt: &mut Formatter) -> fmt::Result { if !generics.is_empty() { write!(fmt, "<")?; for (i, ty) in generics.iter().enumerate() { if i != 0 { write!(fmt, ", ")? } write!(fmt, "{}", ty)?; } write!(fmt, ">")?; } Ok(()) } fn fmt_bounds(bounds: &[Bound], fmt: &mut Formatter) -> fmt::Result { if !bounds.is_empty() { write!(fmt, "\n")?; // Write first bound write!(fmt, "where {}: ", bounds[0].name)?; fmt_bound_rhs(&bounds[0].bound, fmt)?; write!(fmt, ",\n")?; for bound in &bounds[1..] { write!(fmt, " {}: ", bound.name)?; fmt_bound_rhs(&bound.bound, fmt)?; write!(fmt, ",\n")?; } } Ok(()) } fn fmt_bound_rhs(tys: &[Type], fmt: &mut Formatter) -> fmt::Result { for (i, ty) in tys.iter().enumerate() { if i != 0 { write!(fmt, " + ")? } ty.fmt(fmt)?; } Ok(()) } // ===== impl AssociatedType ===== impl AssociatedType { /// Add a bound to the associated type. 
pub fn bound<T>(&mut self, ty: T) -> &mut Self where T: Into<Type>, { self.0.bound.push(ty.into()); self } } // ===== impl Fields ===== impl Fields { fn named<T>(&mut self, name: &str, ty: T) -> &mut Self where T: Into<Type>, { match *self { Fields::Empty => { *self = Fields::Named(vec![Field { name: name.to_string(), ty: ty.into(), }]); } Fields::Named(ref mut fields) => { fields.push(Field { name: name.to_string(), ty: ty.into(), }); } _ => panic!("field list is named"), } self } fn tuple<T>(&mut self, ty: T) -> &mut Self where T: Into<Type>, { match *self { Fields::Empty => { *self = Fields::Tuple(vec![ty.into()]); } Fields::Tuple(ref mut fields) => { fields.push(ty.into()); } _ => panic!("field list is tuple"), } self } fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { match *self { Fields::Named(ref fields) => { assert!(!fields.is_empty()); fmt.block(|fmt| { for f in fields { write!(fmt, "{}: ", f.name)?; f.ty.fmt(fmt)?; write!(fmt, ",\n")?; } Ok(()) })?; } Fields::Tuple(ref tys) => { assert!(!tys.is_empty()); write!(fmt, "(")?; for (i, ty) in tys.iter().enumerate() { if i != 0 { write!(fmt, ", ")?; } ty.fmt(fmt)?; } write!(fmt, ")")?; } Fields::Empty => {} } Ok(()) } } // ===== impl Impl ===== impl Impl { /// Return a new impl definition pub fn new<T>(target: T) -> Self where T: Into<Type>, { Impl { target: target.into(), generics: vec![], impl_trait: None, assoc_tys: vec![], bounds: vec![], fns: vec![], } } /// Add a generic to the impl block. /// /// This adds the generic for the block (`impl<T>`) and not the target type. pub fn generic(&mut self, name: &str) -> &mut Self { self.generics.push(name.to_string()); self } /// Add a generic to the target type. pub fn target_generic<T>(&mut self, ty: T) -> &mut Self where T: Into<Type>, { self.target.generic(ty); self } /// Set the trait that the impl block is implementing. pub fn impl_trait<T>(&mut self, ty: T) -> &mut Self where T: Into<Type>, { self.impl_trait = Some(ty.into()); self } /// Set an associated type. pub fn associate_type<T>(&mut self, name: &str, ty: T) -> &mut Self where T: Into<Type>, { self.assoc_tys.push(Field { name: name.to_string(), ty: ty.into(), }); self } /// Add a `where` bound to the impl block. pub fn bound<T>(&mut self, name: &str, ty: T) -> &mut Self where T: Into<Type>, { self.bounds.push(Bound { name: name.to_string(), bound: vec![ty.into()], }); self } /// Push a new function definition, returning a mutable reference to it. pub fn new_fn(&mut self, name: &str) -> &mut Function { self.push_fn(Function::new(name)); self.fns.last_mut().unwrap() } /// Push a function definition. pub fn push_fn(&mut self, item: Function) -> &mut Self { self.fns.push(item); self } /// Formats the impl block using the given formatter. pub fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { write!(fmt, "impl")?; fmt_generics(&self.generics[..], fmt)?; if let Some(ref t) = self.impl_trait { write!(fmt, " ")?; t.fmt(fmt)?; write!(fmt, " for")?; } write!(fmt, " ")?; self.target.fmt(fmt)?; fmt_bounds(&self.bounds, fmt)?; fmt.block(|fmt| { // format associated types if !self.assoc_tys.is_empty() { for ty in &self.assoc_tys { write!(fmt, "type {} = ", ty.name)?; ty.ty.fmt(fmt)?; write!(fmt, ";\n")?; } } for (i, func) in self.fns.iter().enumerate() { if i != 0 || !self.assoc_tys.is_empty() { write!(fmt, "\n")?; } func.fmt(false, fmt)?; } Ok(()) }) } } // ===== impl Import ===== impl Import { /// Return a new import. 
pub fn new(path: &str, ty: &str) -> Self { Import { line: format!("{}::{}", path, ty), vis: None, } } /// Set the import visibility. pub fn vis(&mut self, vis: &str) -> &mut Self { self.vis = Some(vis.to_string()); self } } // ===== impl Func ===== impl Function { /// Return a new function definition. pub fn new(name: &str) -> Self { Function { name: name.to_string(), docs: None, vis: None, generics: vec![], arg_self: None, args: vec![], ret: None, bounds: vec![], body: Some(vec![]), } } /// Set the function documentation. pub fn doc(&mut self, docs: &str) -> &mut Self { self.docs = Some(Docs::new(docs)); self } /// Set the function visibility. pub fn vis(&mut self, vis: &str) -> &mut Self { self.vis = Some(vis.to_string()); self } /// Add a generic to the function. pub fn generic(&mut self, name: &str) -> &mut Self { self.generics.push(name.to_string()); self } /// Add `self` as a function argument. pub fn arg_self(&mut self) -> &mut Self { self.arg_self = Some("self".to_string()); self } /// Add `&self` as a function argument. pub fn arg_ref_self(&mut self) -> &mut Self { self.arg_self = Some("&self".to_string()); self } /// Add `&mut self` as a function argument. pub fn arg_mut_self(&mut self) -> &mut Self { self.arg_self = Some("&mut self".to_string()); self } /// Add a function argument. pub fn arg<T>(&mut self, name: &str, ty: T) -> &mut Self where T: Into<Type>, { self.args.push(Field { name: name.to_string(), ty: ty.into(), }); self } /// Set the function return type. pub fn ret<T>(&mut self, ty: T) -> &mut Self where T: Into<Type>, { self.ret = Some(ty.into()); self } /// Add a `where` bound to the function. pub fn bound<T>(&mut self, name: &str, ty: T) -> &mut Self where T: Into<Type>, { self.bounds.push(Bound { name: name.to_string(), bound: vec![ty.into()], }); self } /// Push a line to the function implementation. pub fn line<T>(&mut self, line: T) -> &mut Self where T: ToString, { self.body.get_or_insert(vec![]) .push(Body::String(line.to_string())); self } /// Push a block to the function implementation pub fn push_block(&mut self, block: Block) -> &mut Self { self.body.get_or_insert(vec![]) .push(Body::Block(block)); self } /// Formats the function using the given formatter. pub fn fmt(&self, is_trait: bool, fmt: &mut Formatter) -> fmt::Result { if let Some(ref docs) = self.docs { docs.fmt(fmt)?; } if is_trait { assert!(self.vis.is_none(), "trait fns do not have visibility modifiers"); } if let Some(ref vis) = self.vis { write!(fmt, "{} ", vis)?; } write!(fmt, "fn {}", self.name)?; fmt_generics(&self.generics, fmt)?; write!(fmt, "(")?; if let Some(ref s) = self.arg_self { write!(fmt, "{}", s)?; } for (i, arg) in self.args.iter().enumerate() { if i != 0 || self.arg_self.is_some() { write!(fmt, ", ")?; } write!(fmt, "{}: ", arg.name)?; arg.ty.fmt(fmt)?; } write!(fmt, ")")?; if let Some(ref ret) = self.ret { write!(fmt, " -> ")?; ret.fmt(fmt)?; } fmt_bounds(&self.bounds, fmt)?; match self.body { Some(ref body) => { fmt.block(|fmt| { for b in body { b.fmt(fmt)?; } Ok(()) }) } None => { if !is_trait { panic!("impl blocks must define fn bodies"); } write!(fmt, ";\n") } } } } // ===== impl Block ===== impl Block { /// Returns an empty code block. pub fn new(before: &str) -> Self { Block { before: Some(before.to_string()), after: None, body: vec![], } } /// Push a line to the code block. pub fn line<T>(&mut self, line: T) -> &mut Self where T: ToString, { self.body.push(Body::String(line.to_string())); self } /// Push a nested block to this block. 
pub fn push_block(&mut self, block: Block) -> &mut Self { self.body.push(Body::Block(block)); self } /// Add a snippet after the block. pub fn after(&mut self, after: &str) -> &mut Self { self.after = Some(after.to_string()); self } /// Formats the block using the given formatter. pub fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { if let Some(ref before) = self.before { write!(fmt, "{}", before)?; } // Inlined `Formatter::fmt` if !fmt.is_start_of_line() { write!(fmt, " ")?; } write!(fmt, "{{\n")?; fmt.indent(|fmt| { for b in &self.body { b.fmt(fmt)?; } Ok(()) })?; write!(fmt, "}}")?; if let Some(ref after) = self.after { write!(fmt, "{}", after)?; } write!(fmt, "\n")?; Ok(()) } } // ===== impl Body ===== impl Body { fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { match *self { Body::String(ref s) => { write!(fmt, "{}\n", s) } Body::Block(ref b) => { b.fmt(fmt) } } } } // ===== impl Docs ===== impl Docs { fn new(docs: &str) -> Self { Docs { docs: docs.to_string() } } fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { for line in self.docs.lines() { write!(fmt, "/// {}\n", line)?; } Ok(()) } } // ===== impl Formatter ===== impl<'a> Formatter<'a> { /// Return a new formatter that writes to the given string. pub fn new(dst: &'a mut String) -> Self { Formatter { dst, spaces: 0, indent: DEFAULT_INDENT, } } fn block<F>(&mut self, f: F) -> fmt::Result where F: FnOnce(&mut Self) -> fmt::Result { if !self.is_start_of_line() { write!(self, " ")?; } write!(self, "{{\n")?; self.indent(f)?; write!(self, "}}\n")?; Ok(()) } /// Call the given function with the indentation level incremented by one. fn indent<F, R>(&mut self, f: F) -> R where F: FnOnce(&mut Self) -> R { self.spaces += self.indent; let ret = f(self); self.spaces -= self.indent; ret } fn is_start_of_line(&self) -> bool { self.dst.is_empty() || self.dst.as_bytes().last() == Some(&b'\n') } fn push_spaces(&mut self) { for _ in 0..self.spaces { self.dst.push_str(" "); } } } impl<'a> fmt::Write for Formatter<'a> { fn write_str(&mut self, s: &str) -> fmt::Result { let mut first = true; let mut should_indent = self.is_start_of_line(); for line in s.lines() { if !first { self.dst.push_str("\n"); } first = false; let do_indent = should_indent && !line.is_empty() && line.as_bytes()[0] != b'\n'; if do_indent { self.push_spaces(); } // If this loops again, then we just wrote a new line should_indent = true; self.dst.push_str(line); } if s.as_bytes().last() == Some(&b'\n') { self.dst.push_str("\n"); } Ok(()) } }
24.759217
86
0.493835
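The record above defines a small code-generation builder: `Impl`, `Function`, and `Block` values are assembled fluently and rendered through `Formatter`. A minimal usage sketch of that API follows; it assumes `&str: Into<Type>` (the `Type` conversions are defined elsewhere in the crate and are not shown here), and `Point`/`fmt::Display` are purely illustrative names:

```rust
// Sketch: render `impl fmt::Display for Point` with the builders defined above.
// Assumes `Impl`, `Function`, `Block`, and `Formatter` are in scope and that
// `&str: Into<Type>` holds, as the string-taking builder signatures suggest.
fn render_display_impl() -> String {
    let mut imp = Impl::new("Point");
    imp.impl_trait("fmt::Display");
    imp.new_fn("fmt")
        .arg_ref_self()
        .arg("f", "&mut fmt::Formatter")
        .ret("fmt::Result")
        .line("write!(f, \"({}, {})\", self.x, self.y)");

    // Renders roughly:
    //   impl fmt::Display for Point {
    //       fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
    //           write!(f, "({}, {})", self.x, self.y)
    //       }
    //   }
    let mut out = String::new();
    imp.fmt(&mut Formatter::new(&mut out)).unwrap();
    out
}
```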
9121057a8e1340f40c6072ae22926491d2438415
28,459
use std::collections::VecDeque; use std::ops::{Deref, DerefMut}; use std::sync::Arc; use std::time::Instant; use parking_lot::Mutex; use thiserror; use tokio::select; use google_cloud_googleapis::spanner::v1::{BatchCreateSessionsRequest, DeleteSessionRequest, Session}; use crate::apiv1::conn_pool::ConnectionManager; use crate::apiv1::spanner_client::{ping_query_request, Client}; use google_cloud_gax::cancel::CancellationToken; use google_cloud_gax::grpc::{Code, Status}; use tokio::sync::broadcast; use tokio::sync::oneshot; use tokio::task::JoinHandle; use tokio::time::{sleep, timeout, Duration}; type Waiters = Mutex<VecDeque<oneshot::Sender<SessionHandle>>>; /// Session pub struct SessionHandle { pub session: Session, pub spanner_client: Client, valid: bool, last_used_at: std::time::Instant, last_checked_at: std::time::Instant, last_pong_at: std::time::Instant, } impl SessionHandle { pub(crate) fn new(session: Session, spanner_client: Client, now: Instant) -> SessionHandle { SessionHandle { session, spanner_client, valid: true, last_used_at: now, last_checked_at: now, last_pong_at: now, } } pub async fn invalidate_if_needed<T>(&mut self, arg: Result<T, Status>) -> Result<T, Status> { return match arg { Ok(s) => Ok(s), Err(e) => { if e.code() == Code::NotFound && e.message().contains("Session not found:") { self.invalidate().await; } Err(e) } }; } async fn invalidate(&mut self) { tracing::debug!("session invalidate {}", self.session.name); let request = DeleteSessionRequest { name: self.session.name.to_string(), }; match self.spanner_client.delete_session(request, None, None).await { Ok(_s) => self.valid = false, Err(e) => { tracing::error!("session remove error {} error={:?}", self.session.name, e); } } } } /// ManagedSession pub struct ManagedSession { session_pool: SessionPool, session: Option<SessionHandle>, } impl ManagedSession { pub(crate) fn new(session_pool: SessionPool, session: SessionHandle) -> Self { ManagedSession { session_pool, session: Some(session), } } } impl Drop for ManagedSession { fn drop(&mut self) { let session = self.session.take().unwrap(); self.session_pool.recycle(session); } } impl Deref for ManagedSession { type Target = SessionHandle; fn deref(&self) -> &Self::Target { self.session.as_ref().unwrap() } } impl DerefMut for ManagedSession { fn deref_mut(&mut self) -> &mut Self::Target { self.session.as_mut().unwrap() } } pub struct Sessions { sessions: VecDeque<SessionHandle>, inuse: usize, } impl Sessions { fn grow(&mut self, session: SessionHandle) { self.sessions.push_back(session); } fn num_opened(&self) -> usize { self.inuse + self.sessions.len() } fn take(&mut self) -> Option<SessionHandle> { match self.sessions.pop_front() { None => None, Some(s) => { self.inuse += 1; Some(s) } } } fn release(&mut self, session: SessionHandle) { self.inuse -= 1; if session.valid { self.sessions.push_back(session); } } } pub struct SessionPool { inner: Arc<Mutex<Sessions>>, waiters: Arc<Waiters>, allocation_request_sender: broadcast::Sender<bool>, } impl SessionPool { async fn new( database: String, conn_pool: &ConnectionManager, min_opened: usize, allocation_request_sender: broadcast::Sender<bool>, ) -> Result<Self, Status> { let init_pool = Self::init_pool(database, conn_pool, min_opened).await?; let waiters = Arc::new(Waiters::new(VecDeque::new())); Ok(SessionPool { inner: Arc::new(Mutex::new(Sessions { sessions: init_pool, inuse: 0, })), waiters, allocation_request_sender, }) } async fn init_pool( database: String, conn_pool: &ConnectionManager, min_opened: usize, ) -> 
Result<VecDeque<SessionHandle>, Status> {
        let channel_num = conn_pool.num();
        let creation_count_per_channel = min_opened / channel_num;
        let mut sessions = Vec::<SessionHandle>::new();
        for _ in 0..channel_num {
            let next_client = conn_pool.conn();
            match batch_create_session(next_client, database.clone(), creation_count_per_channel).await {
                Ok(r) => {
                    for i in r {
                        sessions.push(i);
                    }
                }
                Err(e) => return Err(e),
            }
        }
        tracing::debug!("initial session created count = {}", sessions.len());
        Ok(sessions.into())
    }

    fn request(&self) -> oneshot::Receiver<SessionHandle> {
        let (sender, receiver) = oneshot::channel();
        {
            self.waiters.lock().push_back(sender);
        }
        let _ = self.allocation_request_sender.send(true);
        return receiver;
    }

    fn num_opened(&self) -> usize {
        self.inner.lock().num_opened()
    }

    fn num_waiting(&self) -> usize {
        self.waiters.lock().len()
    }

    fn grow(&self, mut sessions: Vec<SessionHandle>) {
        while let Some(session) = sessions.pop() {
            match { self.waiters.lock().pop_front() } {
                Some(c) => {
                    let mut inner = self.inner.lock();
                    match c.send(session) {
                        Err(session) => inner.grow(session),
                        _ => {
                            // Mark the session as in use when it is handed directly to a waiter.
                            inner.inuse += 1
                        }
                    };
                }
                None => self.inner.lock().grow(session),
            };
        }
    }

    fn recycle(&self, session: SessionHandle) {
        if session.valid {
            tracing::trace!("recycled name={}", session.session.name.to_string());
            match { self.waiters.lock().pop_front() } {
                Some(c) => {
                    if let Err(session) = c.send(session) {
                        self.inner.lock().release(session)
                    }
                }
                None => self.inner.lock().release(session),
            };
        } else {
            self.inner.lock().release(session);
            // request session creation
            let _ = self.allocation_request_sender.send(true);
        }
    }
}

impl Clone for SessionPool {
    fn clone(&self) -> Self {
        SessionPool {
            inner: Arc::clone(&self.inner),
            waiters: Arc::clone(&self.waiters),
            allocation_request_sender: self.allocation_request_sender.clone(),
        }
    }
}

#[derive(Clone)]
pub struct SessionConfig {
    /// max_opened is the maximum number of opened sessions allowed by the session
    /// pool. If the client tries to open a session and there are already
    /// max_opened sessions, it will block until one becomes available or the
    /// context passed to the client method is canceled or times out.
    pub max_opened: usize,

    /// min_opened is the minimum number of opened sessions that the session pool
    /// tries to maintain. The session pool won't continue to expire sessions if the
    /// number of opened connections drops below min_opened. However, if a session
    /// is found to be broken, it will still be evicted from the session pool,
    /// therefore it is possible that the number of opened sessions drops below
    /// min_opened.
    pub min_opened: usize,

    /// max_idle is the maximum number of idle sessions that the pool is allowed to keep.
    pub max_idle: usize,

    /// idle_timeout is the wait time before discarding an idle session.
    /// Sessions that have gone unused for longer than this value are discarded.
    /// However, sessions are not discarded while the number of open sessions is
    /// less than or equal to min_opened.
    pub idle_timeout: std::time::Duration,

    pub session_alive_trust_duration: std::time::Duration,

    /// session_get_timeout is the maximum time to wait for a session when the
    /// pool has no idle session available.
    pub session_get_timeout: std::time::Duration,

    /// refresh_interval is the interval of the cleanup and health check functions.
    pub refresh_interval: std::time::Duration,

    /// inc_step is the number of sessions to create in one batch when at least
    /// one more session is needed.
inc_step: usize, } impl Default for SessionConfig { fn default() -> Self { SessionConfig { max_opened: 400, min_opened: 10, max_idle: 300, inc_step: 25, idle_timeout: std::time::Duration::from_secs(30 * 60), session_alive_trust_duration: std::time::Duration::from_secs(55 * 60), session_get_timeout: std::time::Duration::from_secs(1), refresh_interval: std::time::Duration::from_secs(5 * 60), } } } pub struct SessionManager { session_pool: SessionPool, session_get_timeout: Duration, cancel: CancellationToken, tasks: Vec<JoinHandle<()>>, } #[derive(thiserror::Error, Debug)] pub enum SessionError { #[error("session get time out")] SessionGetTimeout, #[error("failed to create session")] FailedToCreateSession, #[error(transparent)] GRPC(#[from] Status), } impl SessionManager { pub async fn new( database: impl Into<String>, conn_pool: ConnectionManager, config: SessionConfig, ) -> Result<SessionManager, Status> { let database = database.into(); let (sender, receiver) = broadcast::channel(1); let session_pool = SessionPool::new(database.clone(), &conn_pool, config.min_opened, sender).await?; let cancel = CancellationToken::new(); let session_get_timeout = config.session_get_timeout; let task_cleaner = schedule_refresh(config.clone(), session_pool.clone(), cancel.clone()); let task_listener = listen_session_creation_request( config, session_pool.clone(), database, conn_pool, receiver, cancel.clone(), ); let sm = SessionManager { session_get_timeout, session_pool, cancel, tasks: vec![task_cleaner, task_listener], }; Ok(sm) } pub fn num_opened(&self) -> usize { self.session_pool.num_opened() } pub fn session_waiters(&self) -> usize { self.session_pool.num_waiting() } pub async fn get(&self) -> Result<ManagedSession, SessionError> { if let Some(mut s) = self.session_pool.inner.lock().take() { s.last_used_at = Instant::now(); return Ok(ManagedSession::new(self.session_pool.clone(), s)); } // Wait for the session creation. return match timeout(self.session_get_timeout, self.session_pool.request()).await { Ok(Ok(mut session)) => { session.last_used_at = Instant::now(); Ok(ManagedSession { session_pool: self.session_pool.clone(), session: Some(session), }) } _ => Err(SessionError::SessionGetTimeout), }; } pub(crate) async fn close(&self) { if self.cancel.is_cancelled() { return; } self.cancel.cancel(); sleep(Duration::from_secs(1)).await; for task in &self.tasks { task.abort(); } let mut sessions = self.session_pool.inner.lock(); while let Some(mut session) = sessions.take() { delete_session(&mut session).await; session.valid = false; sessions.release(session); } } } fn listen_session_creation_request( config: SessionConfig, session_pool: SessionPool, database: String, conn_pool: ConnectionManager, mut rx: broadcast::Receiver<bool>, cancel: CancellationToken, ) -> JoinHandle<()> { tokio::spawn(async move { let mut allocation_request_size = 0; loop { select! 
{ _ = rx.recv() => {}, _ = cancel.cancelled() => break } let num_opened = session_pool.num_opened(); if num_opened >= config.min_opened && allocation_request_size >= session_pool.num_waiting() { continue; } let mut creation_count = config.max_opened - num_opened; if creation_count > config.inc_step { creation_count = config.inc_step; } if creation_count == 0 { continue; } allocation_request_size += creation_count; let database = database.clone(); let next_client = conn_pool.conn(); match batch_create_session(next_client, database, creation_count).await { Ok(fresh_sessions) => { allocation_request_size -= creation_count; session_pool.grow(fresh_sessions) } Err(e) => { allocation_request_size -= creation_count; tracing::error!("failed to create new sessions {:?}", e) } }; } tracing::trace!("stop session creating listener") }) } fn schedule_refresh(config: SessionConfig, session_pool: SessionPool, cancel: CancellationToken) -> JoinHandle<()> { let start = Instant::now() + config.refresh_interval; let mut interval = tokio::time::interval_at(start.into(), config.refresh_interval); tokio::spawn(async move { loop { select! { _ = interval.tick() => {}, _ = cancel.cancelled() => break } let max_removing_count = session_pool.num_opened() as i64 - config.max_idle as i64; if max_removing_count < 0 { continue; } let now = Instant::now(); shrink_idle_sessions( now, config.idle_timeout, &session_pool, max_removing_count as usize, cancel.clone(), ) .await; health_check( now + Duration::from_nanos(1), config.session_alive_trust_duration, &session_pool, cancel.clone(), ) .await; } tracing::trace!("stop session cleaner") }) } async fn health_check( now: Instant, session_alive_trust_duration: Duration, sessions: &SessionPool, cancel: CancellationToken, ) { let sleep_duration = Duration::from_millis(10); loop { select! { _ = sleep(sleep_duration) => {}, _ = cancel.cancelled() => break } let mut s = { // temporary take let mut locked = sessions.inner.lock(); match locked.take() { Some(mut s) => { // all the session check complete. if s.last_checked_at == now { locked.release(s); break; } if std::cmp::max(s.last_used_at, s.last_pong_at) + session_alive_trust_duration >= now { s.last_checked_at = now; locked.release(s); continue; } s } None => break, } }; let request = ping_query_request(s.session.name.clone()); match s.spanner_client.execute_sql(request, None, None).await { Ok(_) => { s.last_checked_at = now; s.last_pong_at = now; sessions.recycle(s); } Err(_) => { delete_session(&mut s).await; s.valid = false; sessions.recycle(s); } } } } async fn shrink_idle_sessions( now: Instant, idle_timeout: Duration, session_pool: &SessionPool, max_shrink_count: usize, cancel: CancellationToken, ) { let mut removed_count = 0; let sleep_duration = Duration::from_millis(10); loop { if removed_count >= max_shrink_count { break; } select! { _ = sleep(sleep_duration) => {}, _ = cancel.cancelled() => break } // get old session let mut s = { // temporary take let mut locked = session_pool.inner.lock(); match locked.take() { Some(mut s) => { // all the session check complete. 
if s.last_checked_at == now { locked.release(s); break; } if s.last_used_at + idle_timeout >= now { s.last_checked_at = now; locked.release(s); continue; } s } None => break, } }; removed_count += 1; delete_session(&mut s).await; s.valid = false; session_pool.recycle(s); } } async fn delete_session(session: &mut SessionHandle) { let session_name = &session.session.name; let request = DeleteSessionRequest { name: session_name.to_string(), }; match session.spanner_client.delete_session(request, None, None).await { Ok(_) => {} Err(e) => tracing::error!("failed to delete session {}, {:?}", session_name, e), } } async fn batch_create_session( mut spanner_client: Client, database: String, creation_count: usize, ) -> Result<Vec<SessionHandle>, Status> { let request = BatchCreateSessionsRequest { database, session_template: None, session_count: creation_count as i32, }; tracing::debug!("spawn session creation request : count to create = {}", creation_count); let response = spanner_client .batch_create_sessions(request, None, None) .await? .into_inner(); let now = Instant::now(); Ok(response .session .into_iter() .map(|s| SessionHandle::new(s, spanner_client.clone(), now)) .collect::<Vec<SessionHandle>>()) } #[cfg(test)] mod tests { use crate::apiv1::conn_pool::ConnectionManager; use crate::session::{health_check, shrink_idle_sessions, SessionConfig, SessionManager}; use serial_test::serial; use google_cloud_gax::cancel::CancellationToken; use google_cloud_gax::conn::Environment; use std::sync::atomic::{AtomicI64, Ordering}; use std::sync::Arc; use std::time::Instant; use tokio::time::{sleep, Duration}; pub const DATABASE: &str = "projects/local-project/instances/test-instance/databases/local-database"; async fn assert_rush(use_invalidate: bool, config: SessionConfig) { let cm = ConnectionManager::new(1, &Environment::Emulator("localhost:9010".to_string())) .await .unwrap(); let max = config.max_opened; let min = config.min_opened; let sm = std::sync::Arc::new(SessionManager::new(DATABASE, cm, config).await.unwrap()); let counter = Arc::new(AtomicI64::new(0)); for _ in 0..100 { let sm = sm.clone(); let counter = Arc::clone(&counter); tokio::spawn(async move { let mut session = sm.get().await.unwrap(); if use_invalidate { session.invalidate().await; } counter.fetch_add(1, Ordering::SeqCst); }); } while counter.load(Ordering::SeqCst) < 100 { sleep(Duration::from_millis(5)).await; } sleep(tokio::time::Duration::from_millis(100)).await; assert_eq!(sm.session_pool.inner.lock().inuse, 0); let num_opened = sm.num_opened(); assert!(num_opened <= max, "idle session must be lteq {} now is {}", max, num_opened); assert!(num_opened >= min, "idle session must be gteq {} now is {}", min, num_opened); } #[tokio::test(flavor = "multi_thread")] #[serial] async fn test_shrink_sessions_not_expired() { let cm = ConnectionManager::new(1, &Environment::Emulator("localhost:9010".to_string())) .await .unwrap(); let idle_timeout = Duration::from_secs(100); let mut config = SessionConfig::default(); config.min_opened = 5; config.idle_timeout = idle_timeout; config.max_opened = 5; let sm = std::sync::Arc::new(SessionManager::new(DATABASE, cm, config).await.unwrap()); sleep(Duration::from_secs(1)).await; let cancel = CancellationToken::new(); shrink_idle_sessions(Instant::now(), idle_timeout, &sm.session_pool, 5, cancel.clone()).await; assert_eq!(sm.num_opened(), 5); cancel.cancel(); } #[tokio::test(flavor = "multi_thread")] #[serial] async fn test_shrink_sessions_all_expired() { let cm = ConnectionManager::new(1, 
&Environment::Emulator("localhost:9010".to_string())) .await .unwrap(); let idle_timeout = Duration::from_millis(1); let mut config = SessionConfig::default(); config.min_opened = 5; config.idle_timeout = idle_timeout; config.max_opened = 5; let sm = std::sync::Arc::new(SessionManager::new(DATABASE, cm, config).await.unwrap()); sleep(Duration::from_secs(1)).await; let cancel = CancellationToken::new(); shrink_idle_sessions(Instant::now(), idle_timeout, &sm.session_pool, 100, cancel.clone()).await; tokio::time::sleep(tokio::time::Duration::from_millis(500)).await; cancel.cancel(); // expired but created by allocation batch assert_eq!(sm.num_opened(), 5); } #[tokio::test(flavor = "multi_thread")] #[serial] async fn test_health_check_checked() { let cm = ConnectionManager::new(1, &Environment::Emulator("localhost:9010".to_string())) .await .unwrap(); let session_alive_trust_duration = Duration::from_millis(10); let mut config = SessionConfig::default(); config.min_opened = 5; config.session_alive_trust_duration = session_alive_trust_duration; config.max_opened = 5; let sm = std::sync::Arc::new(SessionManager::new(DATABASE, cm, config).await.unwrap()); sleep(Duration::from_secs(1)).await; let cancel = CancellationToken::new(); health_check(Instant::now(), session_alive_trust_duration, &sm.session_pool, cancel.clone()).await; assert_eq!(sm.num_opened(), 5); tokio::time::sleep(tokio::time::Duration::from_millis(500)).await; cancel.cancel(); } #[tokio::test(flavor = "multi_thread")] #[serial] async fn test_health_check_not_checked() { let cm = ConnectionManager::new(1, &Environment::Emulator("localhost:9010".to_string())) .await .unwrap(); let session_alive_trust_duration = Duration::from_secs(10); let mut config = SessionConfig::default(); config.min_opened = 5; config.session_alive_trust_duration = session_alive_trust_duration; config.max_opened = 5; let sm = std::sync::Arc::new(SessionManager::new(DATABASE, cm, config).await.unwrap()); sleep(Duration::from_secs(1)).await; let cancel = CancellationToken::new(); health_check(Instant::now(), session_alive_trust_duration, &sm.session_pool, cancel.clone()).await; assert_eq!(sm.num_opened(), 5); tokio::time::sleep(tokio::time::Duration::from_millis(500)).await; cancel.cancel(); } #[tokio::test(flavor = "multi_thread")] #[serial] async fn test_schedule_refresh() { let conn_pool = ConnectionManager::new(1, &Environment::Emulator("localhost:9010".to_string())) .await .unwrap(); let mut config = SessionConfig::default(); config.idle_timeout = Duration::from_millis(10); config.session_alive_trust_duration = Duration::from_millis(10); config.refresh_interval = Duration::from_millis(250); config.min_opened = 10; config.max_idle = 20; config.max_opened = 45; let sm = SessionManager::new(DATABASE, conn_pool, config).await.unwrap(); { let mut sessions = Vec::new(); for _ in 0..45 { sessions.push(sm.get().await.unwrap()); } // all the session are using assert_eq!(sm.num_opened(), 45); { assert_eq!(sm.session_pool.inner.lock().inuse, 45, "all the session are using"); } sleep(tokio::time::Duration::from_secs(1)).await; } // idle session removed after cleanup sleep(tokio::time::Duration::from_secs(3)).await; { let available_sessions = sm.session_pool.inner.lock().sessions.len(); assert!( available_sessions == 19 || available_sessions == 20, "available sessions are 19 or 20 (19 means that the cleaner is popping session)" ); } assert_eq!(sm.num_opened(), 20, "num sessions are 20"); assert_eq!(sm.session_waiters(), 0, "session waiters is 0"); } #[tokio::test(flavor 
= "multi_thread")] #[serial] async fn test_rush_invalidate() { let mut config = SessionConfig::default(); config.session_get_timeout = Duration::from_secs(20); config.min_opened = 10; config.max_idle = 20; config.max_opened = 45; assert_rush(true, config).await; } #[tokio::test(flavor = "multi_thread")] #[serial] async fn test_rush_invalidate_with_cleanup() { let mut config = SessionConfig::default(); config.idle_timeout = Duration::from_millis(10); config.session_alive_trust_duration = Duration::from_millis(10); config.refresh_interval = Duration::from_millis(250); config.session_get_timeout = Duration::from_secs(20); config.min_opened = 10; config.max_idle = 20; config.max_opened = 45; assert_rush(true, config).await; } #[tokio::test(flavor = "multi_thread")] #[serial] async fn test_rush() { let mut config = SessionConfig::default(); config.session_get_timeout = Duration::from_secs(20); config.min_opened = 10; config.max_idle = 20; config.max_opened = 45; assert_rush(false, config).await; } #[tokio::test(flavor = "multi_thread")] #[serial] async fn test_rush_with_cleanup() { let mut config = SessionConfig::default(); config.idle_timeout = Duration::from_millis(10); config.session_alive_trust_duration = Duration::from_millis(10); config.refresh_interval = Duration::from_millis(250); config.session_get_timeout = Duration::from_secs(20); config.min_opened = 10; config.max_idle = 20; config.max_opened = 45; assert_rush(false, config).await; } #[tokio::test(flavor = "multi_thread")] #[serial] async fn test_close() { let cm = ConnectionManager::new(1, &Environment::Emulator("localhost:9010".to_string())) .await .unwrap(); let config = SessionConfig::default(); let sm = SessionManager::new(DATABASE, cm, config.clone()).await.unwrap(); assert_eq!(sm.num_opened(), config.min_opened); sm.close().await; assert_eq!(sm.num_opened(), 0) } }
33.324356
152
0.573105
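The session pool above hands sessions to waiters through a queue of `oneshot::Sender`s: `request` parks a sender, while `grow`/`recycle` fulfill the oldest waiter first and only store the session when nobody is waiting. A runnable, self-contained sketch of that hand-off pattern, simplified to a single lock over both queues; every name here is illustrative rather than part of the library, and it assumes tokio with the `rt`, `sync`, and `macros` features:

```rust
use std::collections::VecDeque;
use std::sync::{Arc, Mutex};
use tokio::sync::oneshot;

#[derive(Debug)]
struct Resource(u32);

#[derive(Default)]
struct Inner {
    idle: VecDeque<Resource>,
    waiters: VecDeque<oneshot::Sender<Resource>>,
}

#[derive(Clone, Default)]
struct Pool(Arc<Mutex<Inner>>);

impl Pool {
    // Take an idle resource immediately, or atomically park a waiter.
    async fn get(&self) -> Resource {
        let rx = {
            let mut inner = self.0.lock().unwrap();
            if let Some(r) = inner.idle.pop_front() {
                return r; // guard drops here
            }
            let (tx, rx) = oneshot::channel();
            inner.waiters.push_back(tx);
            rx
        }; // guard drops before awaiting
        rx.await.expect("pool closed")
    }

    // Hand a resource to the oldest waiter, or store it as idle.
    fn put(&self, r: Resource) {
        let mut inner = self.0.lock().unwrap();
        match inner.waiters.pop_front() {
            // `send` hands the value back on failure (the waiter gave up),
            // in which case the resource is kept, as `grow` does above.
            Some(tx) => {
                if let Err(r) = tx.send(r) {
                    inner.idle.push_back(r);
                }
            }
            None => inner.idle.push_back(r),
        }
    }
}

#[tokio::main]
async fn main() {
    let pool = Pool::default();
    let waiter = {
        let pool = pool.clone();
        tokio::spawn(async move { pool.get().await })
    };
    pool.put(Resource(7));
    println!("handed off: {:?}", waiter.await.unwrap());
}
```

Holding one lock over both queues closes the check-then-park race in this sketch; the library instead uses separate `waiters` and `inner` mutexes plus a broadcast channel that triggers batch session creation, which serves a similar purpose.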
cc917022b2d42480460bfc44d2f44502e1b9a55c
32,101
// pathfinder/simd/src/x86/swizzle_f32x4.rs // // Copyright © 2019 The Pathfinder Project Developers. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use crate::x86::F32x4; use std::arch::x86_64; impl F32x4 { #[inline] pub fn xxxx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 0)) } } #[inline] pub fn yxxx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 1)) } } #[inline] pub fn zxxx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 2)) } } #[inline] pub fn wxxx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 3)) } } #[inline] pub fn xyxx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 4)) } } #[inline] pub fn yyxx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 5)) } } #[inline] pub fn zyxx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 6)) } } #[inline] pub fn wyxx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 7)) } } #[inline] pub fn xzxx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 8)) } } #[inline] pub fn yzxx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 9)) } } #[inline] pub fn zzxx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 10)) } } #[inline] pub fn wzxx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 11)) } } #[inline] pub fn xwxx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 12)) } } #[inline] pub fn ywxx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 13)) } } #[inline] pub fn zwxx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 14)) } } #[inline] pub fn wwxx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 15)) } } #[inline] pub fn xxyx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 16)) } } #[inline] pub fn yxyx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 17)) } } #[inline] pub fn zxyx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 18)) } } #[inline] pub fn wxyx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 19)) } } #[inline] pub fn xyyx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 20)) } } #[inline] pub fn yyyx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 21)) } } #[inline] pub fn zyyx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 22)) } } #[inline] pub fn wyyx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 23)) } } #[inline] pub fn xzyx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 24)) } } #[inline] pub fn yzyx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 25)) } } #[inline] pub fn zzyx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 26)) } } #[inline] pub fn wzyx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 27)) } } #[inline] pub fn xwyx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 28)) } } #[inline] pub fn ywyx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 29)) } } #[inline] pub fn zwyx(self) -> F32x4 { 
unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 30)) } } #[inline] pub fn wwyx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 31)) } } #[inline] pub fn xxzx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 32)) } } #[inline] pub fn yxzx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 33)) } } #[inline] pub fn zxzx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 34)) } } #[inline] pub fn wxzx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 35)) } } #[inline] pub fn xyzx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 36)) } } #[inline] pub fn yyzx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 37)) } } #[inline] pub fn zyzx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 38)) } } #[inline] pub fn wyzx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 39)) } } #[inline] pub fn xzzx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 40)) } } #[inline] pub fn yzzx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 41)) } } #[inline] pub fn zzzx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 42)) } } #[inline] pub fn wzzx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 43)) } } #[inline] pub fn xwzx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 44)) } } #[inline] pub fn ywzx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 45)) } } #[inline] pub fn zwzx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 46)) } } #[inline] pub fn wwzx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 47)) } } #[inline] pub fn xxwx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 48)) } } #[inline] pub fn yxwx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 49)) } } #[inline] pub fn zxwx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 50)) } } #[inline] pub fn wxwx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 51)) } } #[inline] pub fn xywx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 52)) } } #[inline] pub fn yywx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 53)) } } #[inline] pub fn zywx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 54)) } } #[inline] pub fn wywx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 55)) } } #[inline] pub fn xzwx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 56)) } } #[inline] pub fn yzwx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 57)) } } #[inline] pub fn zzwx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 58)) } } #[inline] pub fn wzwx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 59)) } } #[inline] pub fn xwwx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 60)) } } #[inline] pub fn ywwx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 61)) } } #[inline] pub fn zwwx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 62)) } } #[inline] pub fn wwwx(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 63)) } } #[inline] pub fn xxxy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 64)) } } #[inline] pub fn 
yxxy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 65)) } } #[inline] pub fn zxxy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 66)) } } #[inline] pub fn wxxy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 67)) } } #[inline] pub fn xyxy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 68)) } } #[inline] pub fn yyxy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 69)) } } #[inline] pub fn zyxy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 70)) } } #[inline] pub fn wyxy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 71)) } } #[inline] pub fn xzxy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 72)) } } #[inline] pub fn yzxy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 73)) } } #[inline] pub fn zzxy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 74)) } } #[inline] pub fn wzxy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 75)) } } #[inline] pub fn xwxy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 76)) } } #[inline] pub fn ywxy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 77)) } } #[inline] pub fn zwxy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 78)) } } #[inline] pub fn wwxy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 79)) } } #[inline] pub fn xxyy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 80)) } } #[inline] pub fn yxyy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 81)) } } #[inline] pub fn zxyy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 82)) } } #[inline] pub fn wxyy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 83)) } } #[inline] pub fn xyyy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 84)) } } #[inline] pub fn yyyy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 85)) } } #[inline] pub fn zyyy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 86)) } } #[inline] pub fn wyyy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 87)) } } #[inline] pub fn xzyy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 88)) } } #[inline] pub fn yzyy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 89)) } } #[inline] pub fn zzyy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 90)) } } #[inline] pub fn wzyy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 91)) } } #[inline] pub fn xwyy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 92)) } } #[inline] pub fn ywyy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 93)) } } #[inline] pub fn zwyy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 94)) } } #[inline] pub fn wwyy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 95)) } } #[inline] pub fn xxzy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 96)) } } #[inline] pub fn yxzy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 97)) } } #[inline] pub fn zxzy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 98)) } } #[inline] pub fn wxzy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 99)) } } 
#[inline] pub fn xyzy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 100)) } } #[inline] pub fn yyzy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 101)) } } #[inline] pub fn zyzy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 102)) } } #[inline] pub fn wyzy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 103)) } } #[inline] pub fn xzzy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 104)) } } #[inline] pub fn yzzy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 105)) } } #[inline] pub fn zzzy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 106)) } } #[inline] pub fn wzzy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 107)) } } #[inline] pub fn xwzy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 108)) } } #[inline] pub fn ywzy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 109)) } } #[inline] pub fn zwzy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 110)) } } #[inline] pub fn wwzy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 111)) } } #[inline] pub fn xxwy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 112)) } } #[inline] pub fn yxwy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 113)) } } #[inline] pub fn zxwy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 114)) } } #[inline] pub fn wxwy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 115)) } } #[inline] pub fn xywy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 116)) } } #[inline] pub fn yywy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 117)) } } #[inline] pub fn zywy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 118)) } } #[inline] pub fn wywy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 119)) } } #[inline] pub fn xzwy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 120)) } } #[inline] pub fn yzwy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 121)) } } #[inline] pub fn zzwy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 122)) } } #[inline] pub fn wzwy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 123)) } } #[inline] pub fn xwwy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 124)) } } #[inline] pub fn ywwy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 125)) } } #[inline] pub fn zwwy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 126)) } } #[inline] pub fn wwwy(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 127)) } } #[inline] pub fn xxxz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 128)) } } #[inline] pub fn yxxz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 129)) } } #[inline] pub fn zxxz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 130)) } } #[inline] pub fn wxxz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 131)) } } #[inline] pub fn xyxz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 132)) } } #[inline] pub fn yyxz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 133)) } } #[inline] pub fn zyxz(self) -> F32x4 { unsafe { 
F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 134)) } } #[inline] pub fn wyxz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 135)) } } #[inline] pub fn xzxz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 136)) } } #[inline] pub fn yzxz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 137)) } } #[inline] pub fn zzxz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 138)) } } #[inline] pub fn wzxz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 139)) } } #[inline] pub fn xwxz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 140)) } } #[inline] pub fn ywxz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 141)) } } #[inline] pub fn zwxz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 142)) } } #[inline] pub fn wwxz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 143)) } } #[inline] pub fn xxyz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 144)) } } #[inline] pub fn yxyz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 145)) } } #[inline] pub fn zxyz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 146)) } } #[inline] pub fn wxyz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 147)) } } #[inline] pub fn xyyz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 148)) } } #[inline] pub fn yyyz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 149)) } } #[inline] pub fn zyyz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 150)) } } #[inline] pub fn wyyz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 151)) } } #[inline] pub fn xzyz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 152)) } } #[inline] pub fn yzyz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 153)) } } #[inline] pub fn zzyz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 154)) } } #[inline] pub fn wzyz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 155)) } } #[inline] pub fn xwyz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 156)) } } #[inline] pub fn ywyz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 157)) } } #[inline] pub fn zwyz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 158)) } } #[inline] pub fn wwyz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 159)) } } #[inline] pub fn xxzz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 160)) } } #[inline] pub fn yxzz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 161)) } } #[inline] pub fn zxzz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 162)) } } #[inline] pub fn wxzz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 163)) } } #[inline] pub fn xyzz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 164)) } } #[inline] pub fn yyzz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 165)) } } #[inline] pub fn zyzz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 166)) } } #[inline] pub fn wyzz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 167)) } } #[inline] pub fn xzzz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 168)) } 
} #[inline] pub fn yzzz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 169)) } } #[inline] pub fn zzzz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 170)) } } #[inline] pub fn wzzz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 171)) } } #[inline] pub fn xwzz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 172)) } } #[inline] pub fn ywzz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 173)) } } #[inline] pub fn zwzz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 174)) } } #[inline] pub fn wwzz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 175)) } } #[inline] pub fn xxwz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 176)) } } #[inline] pub fn yxwz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 177)) } } #[inline] pub fn zxwz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 178)) } } #[inline] pub fn wxwz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 179)) } } #[inline] pub fn xywz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 180)) } } #[inline] pub fn yywz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 181)) } } #[inline] pub fn zywz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 182)) } } #[inline] pub fn wywz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 183)) } } #[inline] pub fn xzwz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 184)) } } #[inline] pub fn yzwz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 185)) } } #[inline] pub fn zzwz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 186)) } } #[inline] pub fn wzwz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 187)) } } #[inline] pub fn xwwz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 188)) } } #[inline] pub fn ywwz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 189)) } } #[inline] pub fn zwwz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 190)) } } #[inline] pub fn wwwz(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 191)) } } #[inline] pub fn xxxw(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 192)) } } #[inline] pub fn yxxw(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 193)) } } #[inline] pub fn zxxw(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 194)) } } #[inline] pub fn wxxw(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 195)) } } #[inline] pub fn xyxw(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 196)) } } #[inline] pub fn yyxw(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 197)) } } #[inline] pub fn zyxw(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 198)) } } #[inline] pub fn wyxw(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 199)) } } #[inline] pub fn xzxw(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 200)) } } #[inline] pub fn yzxw(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 201)) } } #[inline] pub fn zzxw(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 202)) } } #[inline] pub fn wzxw(self) -> F32x4 { unsafe { 
F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 203)) } } #[inline] pub fn xwxw(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 204)) } } #[inline] pub fn ywxw(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 205)) } } #[inline] pub fn zwxw(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 206)) } } #[inline] pub fn wwxw(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 207)) } } #[inline] pub fn xxyw(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 208)) } } #[inline] pub fn yxyw(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 209)) } } #[inline] pub fn zxyw(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 210)) } } #[inline] pub fn wxyw(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 211)) } } #[inline] pub fn xyyw(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 212)) } } #[inline] pub fn yyyw(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 213)) } } #[inline] pub fn zyyw(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 214)) } } #[inline] pub fn wyyw(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 215)) } } #[inline] pub fn xzyw(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 216)) } } #[inline] pub fn yzyw(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 217)) } } #[inline] pub fn zzyw(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 218)) } } #[inline] pub fn wzyw(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 219)) } } #[inline] pub fn xwyw(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 220)) } } #[inline] pub fn ywyw(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 221)) } } #[inline] pub fn zwyw(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 222)) } } #[inline] pub fn wwyw(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 223)) } } #[inline] pub fn xxzw(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 224)) } } #[inline] pub fn yxzw(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 225)) } } #[inline] pub fn zxzw(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 226)) } } #[inline] pub fn wxzw(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 227)) } } #[inline] pub fn xyzw(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 228)) } } #[inline] pub fn yyzw(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 229)) } } #[inline] pub fn zyzw(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 230)) } } #[inline] pub fn wyzw(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 231)) } } #[inline] pub fn xzzw(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 232)) } } #[inline] pub fn yzzw(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 233)) } } #[inline] pub fn zzzw(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 234)) } } #[inline] pub fn wzzw(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 235)) } } #[inline] pub fn xwzw(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 236)) } } #[inline] pub fn ywzw(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 237)) } 
} #[inline] pub fn zwzw(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 238)) } } #[inline] pub fn wwzw(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 239)) } } #[inline] pub fn xxww(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 240)) } } #[inline] pub fn yxww(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 241)) } } #[inline] pub fn zxww(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 242)) } } #[inline] pub fn wxww(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 243)) } } #[inline] pub fn xyww(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 244)) } } #[inline] pub fn yyww(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 245)) } } #[inline] pub fn zyww(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 246)) } } #[inline] pub fn wyww(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 247)) } } #[inline] pub fn xzww(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 248)) } } #[inline] pub fn yzww(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 249)) } } #[inline] pub fn zzww(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 250)) } } #[inline] pub fn wzww(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 251)) } } #[inline] pub fn xwww(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 252)) } } #[inline] pub fn ywww(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 253)) } } #[inline] pub fn zwww(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 254)) } } #[inline] pub fn wwww(self) -> F32x4 { unsafe { F32x4(x86_64::_mm_shuffle_ps(self.0, self.0, 255)) } } }
24.788417
69
0.549516
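Every swizzle in the record above is `_mm_shuffle_ps` with a fixed 8-bit immediate: two bits per output lane, least significant pair first, with lane codes x=0, y=1, z=2, w=3, so `imm = c0 | (c1 << 2) | (c2 << 4) | (c3 << 6)`. A small runnable sketch re-deriving the constants used above (the helper name is illustrative):

```rust
// Recompute the `_mm_shuffle_ps` immediates from the swizzle names: the
// first letter of a name picks output lane 0 and occupies the low bit pair.
const fn shuffle_imm(c0: u32, c1: u32, c2: u32, c3: u32) -> u32 {
    c0 | (c1 << 2) | (c2 << 4) | (c3 << 6)
}

const X: u32 = 0;
const Y: u32 = 1;
const Z: u32 = 2;
const W: u32 = 3;

fn main() {
    assert_eq!(shuffle_imm(X, X, X, X), 0);   // xxxx
    assert_eq!(shuffle_imm(Y, Z, W, X), 57);  // yzwx
    assert_eq!(shuffle_imm(X, Y, Z, W), 228); // xyzw (the identity shuffle)
    assert_eq!(shuffle_imm(W, W, W, W), 255); // wwww
}
```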
9c40b39554303d4c234357625ce1756bfd90e04f
12,176
//!
//! # UAVCAN acceptance filter configuration
//!
//! This library implements the automatic hardware acceptance filter configuration described
//! in section 4.2.4.4 of the UAVCAN specification.
//!
//! To reduce the amount of CPU time spent processing messages, a UAVCAN device can use hardware
//! acceptance filters to ignore CAN messages that it is not interested in. When the application
//! is interested in more message IDs than the number of filters that the hardware supports,
//! this library can find a quasi-optimal set of filters that accepts all interesting message
//! IDs and the minimum number of non-interesting message IDs.
//!
//! ## Basic operation
//!
//! 1. Find the set of message IDs the application is interested in, based on the topics, requests,
//!    and responses it wants to receive
//! 2. For each interesting message ID, create a filter that matches exactly that ID. Optimize those
//!    filters down to the number of filters the hardware supports:
//!
//! ```
//! use canadensis_filter_config::{optimize, Filter};
//!
//! let interesting_message_ids = [0x107d552a, 0x11733775, 0x136b957b, 0x126bbdaa, 0x1073373b];
//! let mut ideal_filters = [
//!     Filter::exact_match(interesting_message_ids[0]),
//!     Filter::exact_match(interesting_message_ids[1]),
//!     Filter::exact_match(interesting_message_ids[2]),
//!     Filter::exact_match(interesting_message_ids[3]),
//!     Filter::exact_match(interesting_message_ids[4]),
//! ];
//! // Using an imaginary CAN peripheral that supports only 2 receive filters
//! let max_hardware_filters = 2;
//! let optimized_filters = optimize(&mut ideal_filters, max_hardware_filters);
//! assert_eq!(optimized_filters.len(), 2);
//!
//! // Each interesting message ID will be accepted by at least one of the optimized filters
//! for &id in interesting_message_ids.iter() {
//!     assert!(optimized_filters.iter().any(|filter| filter.accepts(id)));
//! }
//! ```
//!
//! 3. Apply the resulting filters to the CAN hardware

#![no_std]

/// Mask of allowed extended CAN IDs
const EXTENDED_ID_MASK: u32 = 0x1fff_ffff;
/// Bit 31, used to mark a filter as valid
const VALID_BIT: u32 = 0x8000_0000;

/// A generic mask-based filter for extended CAN IDs
///
/// A filter will accept a message if (message_id & filter.mask) == (filter.id & filter.mask).
#[derive(Clone)]
#[cfg_attr(test, derive(PartialEq))]
pub struct Filter {
    /// Mask of bits to compare (0x1fff_ffff requires all ID bits to match, 0x0 accepts any ID)
    mask: u32,
    /// Message ID to accept
    ///
    /// The most significant bit (bit 31) indicates that this filter is valid. The optimize function
    /// uses this bit to keep track of empty slots in the slice of filters.
    id_and_valid: u32,
}

impl Filter {
    /// Creates a filter
    ///
    /// If the mask or ID is too large to fit into 29 bits, it will be silently truncated.
    #[inline]
    pub fn new(mask: u32, id: u32) -> Self {
        Filter {
            mask: mask & EXTENDED_ID_MASK,
            id_and_valid: (id & EXTENDED_ID_MASK) | VALID_BIT,
        }
    }

    /// Creates a filter that matches exactly one message ID
    ///
    /// If the ID is too large to fit into 29 bits, it will be silently truncated.
pub fn exact_match(id: u32) -> Self { Filter::new(EXTENDED_ID_MASK, id) } /// Returns the mask of this filter, which indicates the bits that are checked #[inline] pub fn mask(&self) -> u32 { self.mask } /// Returns the message ID that this filter (partially) matches #[inline] pub fn id(&self) -> u32 { self.id_and_valid & EXTENDED_ID_MASK } /// Returns true if this filter is valid fn is_valid(&self) -> bool { (self.id_and_valid & VALID_BIT) != 0 } /// Marks this filter as not valid and resets its mask and ID fn invalidate(&mut self) { self.mask = 0; self.id_and_valid = 0; } /// Returns the number of 1 bits in the mask /// /// A higher rank means that the filter will reject more messages. fn rank(&self) -> u32 { self.mask.count_ones() } /// Returns true if this filter accepts a message with the provided ID pub fn accepts(&self, id: u32) -> bool { (self.mask() & id) == (self.mask() & self.id()) } } mod debug_impl { use super::Filter; use core::fmt::{Debug, Formatter, Result}; impl Debug for Filter { fn fmt(&self, f: &mut Formatter<'_>) -> Result { f.debug_struct("Filter") .field("valid", &self.is_valid()) .field("mask", &DebugHex(self.mask())) .field("id", &DebugHex(self.id())) .finish() } } struct DebugHex(u32); impl Debug for DebugHex { fn fmt(&self, f: &mut Formatter<'_>) -> Result { write!(f, "{:#010x}", self.0) } } } fn merge_masks(a: &Filter, b: &Filter) -> u32 { a.mask() & b.mask() & !(a.id() ^ b.id()) } /// Merges two filters, producing a new filter that accepts the union of the IDs accepted by the /// two input filters (and possibly more IDs) fn merge(a: &Filter, b: &Filter) -> Filter { let mask = merge_masks(a, b); Filter::new(mask, a.id() & mask) } /// Combines a slice of ideal filters down to max_filters filters that will accept a superset /// of the message IDs of the ideal filters /// /// The returned slice will be a sub-slice of ideal_filters. /// /// If max_filters is zero, this function returns an empty slice. If max_filters is greater than /// the length of ideal_filters, this function returns ideal_filters. 
pub fn optimize(ideal_filters: &mut [Filter], max_filters: usize) -> &[Filter] {
    if max_filters == 0 {
        // Can't really do anything when nothing can be filtered
        return &[];
    }
    let working_filters = ideal_filters;
    // Step 1: Merge filters
    merge_filters(working_filters, max_filters);
    // In debug mode, check that not too many filters remain
    debug_assert!(
        working_filters
            .iter()
            .filter(|filter| filter.is_valid())
            .count()
            <= max_filters
    );
    // Step 2: Compact
    compact(working_filters);
    // Step 3: Return only the beginning part of the slice that contains valid filters
    let first_invalid = working_filters
        .iter()
        .position(|filter| !filter.is_valid())
        .unwrap_or(working_filters.len());
    let (valid_filters, invalid_filters) = working_filters.split_at(first_invalid);
    #[cfg(debug_assertions)]
    {
        // Check that the filters have been correctly split into valid and invalid
        assert!(valid_filters.iter().all(Filter::is_valid));
        assert!(!invalid_filters.iter().any(Filter::is_valid));
    }
    #[cfg(not(debug_assertions))]
    let _ = invalid_filters;
    valid_filters
}

/// Merges filters so that a maximum of max_filters are valid
fn merge_filters(working_filters: &mut [Filter], max_filters: usize) {
    assert_ne!(max_filters, 0);
    let mut valid_filters = working_filters.len();
    while valid_filters > max_filters {
        // Find the pair of valid filters with the maximum rank when merged
        let mut max_rank = 0;
        let mut max_rank_indices = (0, 0);
        for i in 0..working_filters.len() {
            for j in (i + 1)..working_filters.len() {
                let filter1 = &working_filters[i];
                let filter2 = &working_filters[j];
                if filter1.is_valid() && filter2.is_valid() {
                    let rank = merge(filter1, filter2).rank();
                    if rank >= max_rank {
                        max_rank_indices = (i, j);
                        max_rank = rank;
                    }
                }
            }
        }
        // Merge those filters into the first, invalidate the second
        working_filters[max_rank_indices.0] = merge(
            &working_filters[max_rank_indices.0],
            &working_filters[max_rank_indices.1],
        );
        working_filters[max_rank_indices.1].invalidate();
        valid_filters -= 1;
        debug_assert_eq!(
            valid_filters,
            working_filters
                .iter()
                .filter(|filter| filter.is_valid())
                .count()
        );
    }
}

/// Moves all valid filters to the beginning of filters, and all invalid filters to the end
fn compact(filters: &mut [Filter]) {
    // This could use the core library sort functions, but they add a lot of code size
    // (about 5000 bytes for thumbv7em-none-eabihf).
    // Do this simple thing, based on insertion sort, instead.
    // This is O(n^2), but the number of filters is likely to be less than 100.
for i in 1..filters.len() { let mut j = i; while j != 0 && !filters[j - 1].is_valid() && filters[j].is_valid() { filters.swap(j - 1, j); j -= 1; } } } #[cfg(test)] mod test_compact { use super::{compact, Filter}; /// Returns an invalid filter fn invalid() -> Filter { let mut filter = Filter::new(0, 0); filter.invalidate(); filter } /// Returns a valid filter fn valid() -> Filter { Filter::new(0, 0) } fn check(inputs: &mut [Filter]) { compact(inputs); let first_invalid_index = inputs .iter() .position(|filter| !filter.is_valid()) .unwrap_or(inputs.len()); let (valid, invalid) = inputs.split_at(first_invalid_index); assert!(valid.iter().all(Filter::is_valid)); assert!(!invalid.iter().any(Filter::is_valid)); } #[test] fn basics() { check(&mut []); check(&mut [valid()]); check(&mut [invalid()]); check(&mut [valid(), invalid()]); check(&mut [invalid(), valid()]); check(&mut [invalid(), invalid()]); check(&mut [valid(), valid()]); } fn valid_from_bit_8(value: u8, bit: u8) -> Filter { if ((value >> bit) & 1) == 1 { valid() } else { invalid() } } #[test] fn longer() { // Check all combinations of 8 valid and invalid filters for permutation in 0..=u8::MAX { let mut filters = [ valid_from_bit_8(permutation, 0), valid_from_bit_8(permutation, 1), valid_from_bit_8(permutation, 2), valid_from_bit_8(permutation, 3), valid_from_bit_8(permutation, 4), valid_from_bit_8(permutation, 5), valid_from_bit_8(permutation, 6), valid_from_bit_8(permutation, 7), ]; check(&mut filters); } } } #[cfg(test)] mod test_single_merge { use super::{merge, Filter, EXTENDED_ID_MASK}; /// Merge two filters that are the same, result should be equal to the inputs #[test] fn merge_two_equal() { let test_filters = [ Filter::new(0x0, 0x0), Filter::new(0x1, 0x1), Filter::new(0x1, 0x0), Filter::new(0x3, 0x0), Filter::new(0x3, 0x1), Filter::new(0x3, 0x2), Filter::new(0x3, 0x3), ]; for filter in test_filters.iter() { let combined = merge(filter, filter); assert_eq!(&combined, filter); } } /// Merge two filters that accept exactly one ID each /// /// The combination should accept both IDs #[test] fn merge_two_exact_id() { let ids = [ (0x0, 0x0), (0x10, 0x0), (0x0, 0x10), (EXTENDED_ID_MASK, 0x0), (0x0, EXTENDED_ID_MASK), (0x12933, 0x12932), ]; for &(id1, id2) in ids.iter() { let filter1 = Filter::new(EXTENDED_ID_MASK, id1); let filter2 = Filter::new(EXTENDED_ID_MASK, id2); assert!(filter1.accepts(id1)); assert!(filter2.accepts(id2)); let merged = merge(&filter1, &filter2); assert!(merged.accepts(id1)); assert!(merged.accepts(id2)); } } }
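// A minimal usage sketch (added here for illustration, not part of the original
// file): it exercises the full pipeline documented above — merge down to the
// hardware budget, compact, and slice off the valid prefix. The IDs below are
// arbitrary examples; by design the merged filters may accept extra IDs beyond
// the ideal set.
#[cfg(test)]
mod test_optimize_sketch {
    use super::{optimize, Filter};

    #[test]
    fn reduces_three_exact_filters_to_two() {
        let mut ideal = [
            Filter::exact_match(0x100),
            Filter::exact_match(0x101),
            Filter::exact_match(0x200),
        ];
        // Pretend the hardware only offers two filter banks, so two of the
        // three ideal filters must merge (0x100 and 0x101 merge cheapest,
        // since they differ in a single bit).
        let reduced = optimize(&mut ideal, 2);
        assert_eq!(reduced.len(), 2);
        // Every ideal ID is still accepted by at least one surviving filter.
        for &id in [0x100u32, 0x101, 0x200].iter() {
            assert!(reduced.iter().any(|f| f.accepts(id)));
        }
    }
}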
33.5427
100
0.596501
de891cb5bf677d2267bf1b255d18b6cbbd5c4d1c
24,848
use glow::HasContext; use std::sync::Arc; // https://webgl2fundamentals.org/webgl/lessons/webgl-data-textures.html const GL_UNMASKED_VENDOR_WEBGL: u32 = 0x9245; const GL_UNMASKED_RENDERER_WEBGL: u32 = 0x9246; impl super::Adapter { /// According to the OpenGL specification, the version information is /// expected to follow the following syntax: /// /// ~~~bnf /// <major> ::= <number> /// <minor> ::= <number> /// <revision> ::= <number> /// <vendor-info> ::= <string> /// <release> ::= <major> "." <minor> ["." <release>] /// <version> ::= <release> [" " <vendor-info>] /// ~~~ /// /// Note that this function is intentionally lenient in regards to parsing, /// and will try to recover at least the first two version numbers without /// resulting in an `Err`. /// # Notes /// `WebGL 2` version returned as `OpenGL ES 3.0` fn parse_version(mut src: &str) -> Result<(u8, u8), crate::InstanceError> { let webgl_sig = "WebGL "; // According to the WebGL specification // VERSION WebGL<space>1.0<space><vendor-specific information> // SHADING_LANGUAGE_VERSION WebGL<space>GLSL<space>ES<space>1.0<space><vendor-specific information> let is_webgl = src.starts_with(webgl_sig); if is_webgl { let pos = src.rfind(webgl_sig).unwrap_or(0); src = &src[pos + webgl_sig.len()..]; } else { let es_sig = " ES "; match src.rfind(es_sig) { Some(pos) => { src = &src[pos + es_sig.len()..]; } None => { log::warn!("ES not found in '{}'", src); return Err(crate::InstanceError); } } }; let glsl_es_sig = "GLSL ES "; let is_glsl = match src.find(glsl_es_sig) { Some(pos) => { src = &src[pos + glsl_es_sig.len()..]; true } None => false, }; let (version, _vendor_info) = match src.find(' ') { Some(i) => (&src[..i], src[i + 1..].to_string()), None => (src, String::new()), }; // TODO: make this even more lenient so that we can also accept // `<major> "." 
<minor> [<???>]` let mut it = version.split('.'); let major = it.next().and_then(|s| s.parse().ok()); let minor = it.next().and_then(|s| { let trimmed = if s.starts_with('0') { "0" } else { s.trim_end_matches('0') }; trimmed.parse().ok() }); match (major, minor) { (Some(major), Some(minor)) => Ok(( // Return WebGL 2.0 version as OpenGL ES 3.0 if is_webgl && !is_glsl { major + 1 } else { major }, minor, )), _ => { log::warn!("Unable to extract the version from '{}'", version); Err(crate::InstanceError) } } } fn make_info(vendor_orig: String, renderer_orig: String) -> wgt::AdapterInfo { let vendor = vendor_orig.to_lowercase(); let renderer = renderer_orig.to_lowercase(); // opengl has no way to discern device_type, so we can try to infer it from the renderer string let strings_that_imply_integrated = [ " xpress", // space here is on purpose so we don't match express "radeon hd 4200", "radeon hd 4250", "radeon hd 4290", "radeon hd 4270", "radeon hd 4225", "radeon hd 3100", "radeon hd 3200", "radeon hd 3000", "radeon hd 3300", "radeon(tm) r4 graphics", "radeon(tm) r5 graphics", "radeon(tm) r6 graphics", "radeon(tm) r7 graphics", "radeon r7 graphics", "nforce", // all nvidia nforce are integrated "tegra", // all nvidia tegra are integrated "shield", // all nvidia shield are integrated "igp", "mali", "intel", "v3d", ]; let strings_that_imply_cpu = ["mesa offscreen", "swiftshader", "llvmpipe"]; //TODO: handle Intel Iris XE as discreet let inferred_device_type = if vendor.contains("qualcomm") || vendor.contains("intel") || strings_that_imply_integrated .iter() .any(|&s| renderer.contains(s)) { wgt::DeviceType::IntegratedGpu } else if strings_that_imply_cpu.iter().any(|&s| renderer.contains(s)) { wgt::DeviceType::Cpu } else { wgt::DeviceType::DiscreteGpu }; // source: Sascha Willems at Vulkan let vendor_id = if vendor.contains("amd") { 0x1002 } else if vendor.contains("imgtec") { 0x1010 } else if vendor.contains("nvidia") { 0x10DE } else if vendor.contains("arm") { 0x13B5 } else if vendor.contains("qualcomm") { 0x5143 } else if vendor.contains("intel") { 0x8086 } else if vendor.contains("broadcom") { 0x14e4 } else { 0 }; wgt::AdapterInfo { name: renderer_orig, vendor: vendor_id, device: 0, device_type: inferred_device_type, backend: wgt::Backend::Gl, } } pub(super) unsafe fn expose( context: super::AdapterContext, ) -> Option<crate::ExposedAdapter<super::Api>> { let gl = context.lock(); let extensions = gl.supported_extensions(); let (vendor_const, renderer_const) = if extensions.contains("WEBGL_debug_renderer_info") { (GL_UNMASKED_VENDOR_WEBGL, GL_UNMASKED_RENDERER_WEBGL) } else { (glow::VENDOR, glow::RENDERER) }; let (vendor, renderer) = { let vendor = gl.get_parameter_string(vendor_const); let renderer = gl.get_parameter_string(renderer_const); (vendor, renderer) }; let version = gl.get_parameter_string(glow::VERSION); log::info!("Vendor: {}", vendor); log::info!("Renderer: {}", renderer); log::info!("Version: {}", version); log::debug!("Extensions: {:#?}", extensions); let ver = Self::parse_version(&version).ok()?; let shading_language_version = { let sl_version = gl.get_parameter_string(glow::SHADING_LANGUAGE_VERSION); log::info!("SL version: {}", sl_version); let (sl_major, sl_minor) = Self::parse_version(&sl_version).ok()?; let value = sl_major as u16 * 100 + sl_minor as u16 * 10; naga::back::glsl::Version::Embedded(value) }; let vertex_shader_storage_blocks = gl.get_parameter_i32(glow::MAX_VERTEX_SHADER_STORAGE_BLOCKS) as u32; let fragment_shader_storage_blocks = 
gl.get_parameter_i32(glow::MAX_FRAGMENT_SHADER_STORAGE_BLOCKS) as u32; let vertex_shader_storage_textures = gl.get_parameter_i32(glow::MAX_VERTEX_IMAGE_UNIFORMS) as u32; let fragment_shader_storage_textures = gl.get_parameter_i32(glow::MAX_FRAGMENT_IMAGE_UNIFORMS) as u32; // WORKAROUND: // In order to work around an issue with GL on RPI4 and similar, we ignore a zero vertex ssbo count if there are vertex sstos. (more info: https://github.com/gfx-rs/wgpu/pull/1607#issuecomment-874938961) // The hardware does not want us to write to these SSBOs, but GLES cannot express that. We detect this case and disable writing to SSBOs. let vertex_ssbo_false_zero = vertex_shader_storage_blocks == 0 && vertex_shader_storage_textures != 0; if vertex_ssbo_false_zero { // We only care about fragment here as the 0 is a lie. log::warn!("Max vertex shader SSBO == 0 and SSTO != 0. Interpreting as false zero."); } let max_storage_buffers_per_shader_stage = if vertex_shader_storage_blocks == 0 { fragment_shader_storage_blocks } else { vertex_shader_storage_blocks.min(fragment_shader_storage_blocks) }; let max_storage_textures_per_shader_stage = if vertex_shader_storage_textures == 0 { fragment_shader_storage_textures } else { vertex_shader_storage_textures.min(fragment_shader_storage_textures) }; let mut features = wgt::Features::empty() | wgt::Features::TEXTURE_COMPRESSION_ETC2 | wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES; features.set( wgt::Features::DEPTH_CLAMPING, extensions.contains("GL_EXT_depth_clamp"), ); features.set( wgt::Features::VERTEX_WRITABLE_STORAGE, ver >= (3, 1) && (vertex_shader_storage_blocks != 0 || vertex_ssbo_false_zero) && vertex_shader_storage_textures != 0, ); let mut downlevel_flags = wgt::DownlevelFlags::empty() | wgt::DownlevelFlags::DEVICE_LOCAL_IMAGE_COPIES | wgt::DownlevelFlags::NON_POWER_OF_TWO_MIPMAPPED_TEXTURES | wgt::DownlevelFlags::CUBE_ARRAY_TEXTURES | wgt::DownlevelFlags::COMPARISON_SAMPLERS; downlevel_flags.set(wgt::DownlevelFlags::COMPUTE_SHADERS, ver >= (3, 1)); downlevel_flags.set( wgt::DownlevelFlags::FRAGMENT_WRITABLE_STORAGE, ver >= (3, 1), ); downlevel_flags.set(wgt::DownlevelFlags::INDIRECT_EXECUTION, ver >= (3, 1)); //TODO: we can actually support positive `base_vertex` in the same way // as we emulate the `start_instance`. But we can't deal with negatives... 
downlevel_flags.set(wgt::DownlevelFlags::BASE_VERTEX, ver >= (3, 2)); downlevel_flags.set( wgt::DownlevelFlags::INDEPENDENT_BLENDING, ver >= (3, 2) || extensions.contains("GL_EXT_draw_buffers_indexed"), ); downlevel_flags.set( wgt::DownlevelFlags::VERTEX_STORAGE, ver >= (3, 1) && (vertex_shader_storage_blocks > 0 || vertex_ssbo_false_zero), ); let max_texture_size = gl.get_parameter_i32(glow::MAX_TEXTURE_SIZE) as u32; let max_texture_3d_size = gl.get_parameter_i32(glow::MAX_3D_TEXTURE_SIZE) as u32; let min_uniform_buffer_offset_alignment = gl.get_parameter_i32(glow::UNIFORM_BUFFER_OFFSET_ALIGNMENT); let min_storage_buffer_offset_alignment = if ver >= (3, 1) { gl.get_parameter_i32(glow::SHADER_STORAGE_BUFFER_OFFSET_ALIGNMENT) as u32 } else { 256 }; let max_uniform_buffers_per_shader_stage = gl.get_parameter_i32(glow::MAX_VERTEX_UNIFORM_BLOCKS) .min(gl.get_parameter_i32(glow::MAX_FRAGMENT_UNIFORM_BLOCKS)) as u32; let limits = wgt::Limits { max_texture_dimension_1d: max_texture_size, max_texture_dimension_2d: max_texture_size, max_texture_dimension_3d: max_texture_3d_size, max_texture_array_layers: gl.get_parameter_i32(glow::MAX_ARRAY_TEXTURE_LAYERS) as u32, max_bind_groups: crate::MAX_BIND_GROUPS as u32, max_dynamic_uniform_buffers_per_pipeline_layout: max_uniform_buffers_per_shader_stage, max_dynamic_storage_buffers_per_pipeline_layout: max_storage_buffers_per_shader_stage, max_sampled_textures_per_shader_stage: super::MAX_TEXTURE_SLOTS as u32, max_samplers_per_shader_stage: super::MAX_SAMPLERS as u32, max_storage_buffers_per_shader_stage, max_storage_textures_per_shader_stage, max_uniform_buffers_per_shader_stage, max_uniform_buffer_binding_size: gl.get_parameter_i32(glow::MAX_UNIFORM_BLOCK_SIZE) as u32, max_storage_buffer_binding_size: if ver >= (3, 1) { gl.get_parameter_i32(glow::MAX_SHADER_STORAGE_BLOCK_SIZE) } else { 0 } as u32, max_vertex_buffers: gl.get_parameter_i32(glow::MAX_VERTEX_ATTRIB_BINDINGS) as u32, max_vertex_attributes: (gl.get_parameter_i32(glow::MAX_VERTEX_ATTRIBS) as u32) .min(super::MAX_VERTEX_ATTRIBUTES as u32), max_vertex_buffer_array_stride: gl.get_parameter_i32(glow::MAX_VERTEX_ATTRIB_STRIDE) as u32, max_push_constant_size: 0, }; let mut private_caps = super::PrivateCapabilities::empty(); private_caps.set( super::PrivateCapabilities::SHADER_BINDING_LAYOUT, ver >= (3, 1), ); private_caps.set( super::PrivateCapabilities::SHADER_TEXTURE_SHADOW_LOD, extensions.contains("GL_EXT_texture_shadow_lod"), ); private_caps.set(super::PrivateCapabilities::MEMORY_BARRIERS, ver >= (3, 1)); private_caps.set( super::PrivateCapabilities::VERTEX_BUFFER_LAYOUT, ver >= (3, 1), ); let mut workarounds = super::Workarounds::empty(); let r = renderer.to_lowercase(); // Check for Mesa sRGB clear bug. See // [`super::PrivateCapabilities::MESA_I915_SRGB_SHADER_CLEAR`]. if r.contains("mesa") && r.contains("intel") && r.split(&[' ', '(', ')'][..]) .any(|substr| substr.len() == 3 && substr.chars().nth(2) == Some('l')) { log::warn!( "Detected skylake derivative running on mesa i915. Clears to srgb textures will \ use manual shader clears." 
); workarounds.set(super::Workarounds::MESA_I915_SRGB_SHADER_CLEAR, true); } let downlevel_defaults = wgt::DownlevelLimits {}; // Drop the GL guard so we can move the context into AdapterShared drop(gl); Some(crate::ExposedAdapter { adapter: super::Adapter { shared: Arc::new(super::AdapterShared { context, private_caps, workarounds, shading_language_version, }), }, info: Self::make_info(vendor, renderer), features, capabilities: crate::Capabilities { limits, downlevel: wgt::DownlevelCapabilities { flags: downlevel_flags, limits: downlevel_defaults, shader_model: wgt::ShaderModel::Sm5, }, alignments: crate::Alignments { buffer_copy_offset: wgt::BufferSize::new(4).unwrap(), buffer_copy_pitch: wgt::BufferSize::new(4).unwrap(), uniform_buffer_offset: wgt::BufferSize::new( min_storage_buffer_offset_alignment as u64, ) .unwrap(), storage_buffer_offset: wgt::BufferSize::new( min_uniform_buffer_offset_alignment as u64, ) .unwrap(), }, }, }) } unsafe fn create_shader_clear_program( gl: &glow::Context, ) -> (glow::Program, glow::UniformLocation) { let program = gl .create_program() .expect("Could not create shader program"); let vertex = gl .create_shader(glow::VERTEX_SHADER) .expect("Could not create shader"); gl.shader_source(vertex, include_str!("./shader_clear.vert")); gl.compile_shader(vertex); let fragment = gl .create_shader(glow::FRAGMENT_SHADER) .expect("Could not create shader"); gl.shader_source(fragment, include_str!("./shader_clear.frag")); gl.compile_shader(fragment); gl.attach_shader(program, vertex); gl.attach_shader(program, fragment); gl.link_program(program); let color_uniform_location = gl .get_uniform_location(program, "color") .expect("Could not find color uniform in shader clear shader"); gl.delete_shader(vertex); gl.delete_shader(fragment); (program, color_uniform_location) } } impl crate::Adapter<super::Api> for super::Adapter { unsafe fn open( &self, features: wgt::Features, ) -> Result<crate::OpenDevice<super::Api>, crate::DeviceError> { let gl = &self.shared.context.lock(); gl.pixel_store_i32(glow::UNPACK_ALIGNMENT, 1); gl.pixel_store_i32(glow::PACK_ALIGNMENT, 1); let main_vao = gl .create_vertex_array() .map_err(|_| crate::DeviceError::OutOfMemory)?; gl.bind_vertex_array(Some(main_vao)); let zero_buffer = gl .create_buffer() .map_err(|_| crate::DeviceError::OutOfMemory)?; gl.bind_buffer(glow::COPY_READ_BUFFER, Some(zero_buffer)); let zeroes = vec![0u8; super::ZERO_BUFFER_SIZE]; gl.buffer_data_u8_slice(glow::COPY_READ_BUFFER, &zeroes, glow::STATIC_DRAW); // Compile the shader program we use for doing manual clears to work around Mesa fastclear // bug. let (shader_clear_program, shader_clear_program_color_uniform_location) = Self::create_shader_clear_program(gl); Ok(crate::OpenDevice { device: super::Device { shared: Arc::clone(&self.shared), main_vao, #[cfg(feature = "renderdoc")] render_doc: Default::default(), }, queue: super::Queue { shared: Arc::clone(&self.shared), features, draw_fbo: gl .create_framebuffer() .map_err(|_| crate::DeviceError::OutOfMemory)?, copy_fbo: gl .create_framebuffer() .map_err(|_| crate::DeviceError::OutOfMemory)?, shader_clear_program, shader_clear_program_color_uniform_location, zero_buffer, temp_query_results: Vec::new(), draw_buffer_count: 1, }, }) } unsafe fn texture_format_capabilities( &self, format: wgt::TextureFormat, ) -> crate::TextureFormatCapabilities { use crate::TextureFormatCapabilities as Tfc; use wgt::TextureFormat as Tf; // The storage types are sprinkled based on section // "TEXTURE IMAGE LOADS AND STORES" of GLES-3.2 spec. 
let unfiltered_color = Tfc::SAMPLED | Tfc::COLOR_ATTACHMENT; let filtered_color = unfiltered_color | Tfc::SAMPLED_LINEAR | Tfc::COLOR_ATTACHMENT_BLEND; match format { Tf::R8Unorm | Tf::R8Snorm => filtered_color, Tf::R8Uint | Tf::R8Sint | Tf::R16Uint | Tf::R16Sint => unfiltered_color, Tf::R16Float | Tf::Rg8Unorm | Tf::Rg8Snorm => filtered_color, Tf::Rg8Uint | Tf::Rg8Sint | Tf::R32Uint | Tf::R32Sint => { unfiltered_color | Tfc::STORAGE } Tf::R32Float => unfiltered_color, Tf::Rg16Uint | Tf::Rg16Sint => unfiltered_color, Tf::Rg16Float | Tf::Rgba8Unorm | Tf::Rgba8UnormSrgb => filtered_color | Tfc::STORAGE, Tf::Bgra8UnormSrgb | Tf::Rgba8Snorm | Tf::Bgra8Unorm => filtered_color, Tf::Rgba8Uint | Tf::Rgba8Sint => unfiltered_color | Tfc::STORAGE, Tf::Rgb10a2Unorm | Tf::Rg11b10Float => filtered_color, Tf::Rg32Uint | Tf::Rg32Sint => unfiltered_color, Tf::Rg32Float => unfiltered_color | Tfc::STORAGE, Tf::Rgba16Uint | Tf::Rgba16Sint => unfiltered_color | Tfc::STORAGE, Tf::Rgba16Float => filtered_color | Tfc::STORAGE, Tf::Rgba32Uint | Tf::Rgba32Sint => unfiltered_color | Tfc::STORAGE, Tf::Rgba32Float => unfiltered_color | Tfc::STORAGE, Tf::Depth32Float => Tfc::SAMPLED | Tfc::DEPTH_STENCIL_ATTACHMENT, Tf::Depth24Plus => Tfc::SAMPLED | Tfc::DEPTH_STENCIL_ATTACHMENT, Tf::Depth24PlusStencil8 => Tfc::SAMPLED | Tfc::DEPTH_STENCIL_ATTACHMENT, Tf::Rgb9e5Ufloat | Tf::Bc1RgbaUnorm | Tf::Bc1RgbaUnormSrgb | Tf::Bc2RgbaUnorm | Tf::Bc2RgbaUnormSrgb | Tf::Bc3RgbaUnorm | Tf::Bc3RgbaUnormSrgb | Tf::Bc4RUnorm | Tf::Bc4RSnorm | Tf::Bc5RgUnorm | Tf::Bc5RgSnorm | Tf::Bc6hRgbSfloat | Tf::Bc6hRgbUfloat | Tf::Bc7RgbaUnorm | Tf::Bc7RgbaUnormSrgb | Tf::Etc2RgbUnorm | Tf::Etc2RgbUnormSrgb | Tf::Etc2RgbA1Unorm | Tf::Etc2RgbA1UnormSrgb | Tf::EacRUnorm | Tf::EacRSnorm | Tf::EacRgUnorm | Tf::EacRgSnorm | Tf::Astc4x4RgbaUnorm | Tf::Astc4x4RgbaUnormSrgb | Tf::Astc5x4RgbaUnorm | Tf::Astc5x4RgbaUnormSrgb | Tf::Astc5x5RgbaUnorm | Tf::Astc5x5RgbaUnormSrgb | Tf::Astc6x5RgbaUnorm | Tf::Astc6x5RgbaUnormSrgb | Tf::Astc6x6RgbaUnorm | Tf::Astc6x6RgbaUnormSrgb | Tf::Astc8x5RgbaUnorm | Tf::Astc8x5RgbaUnormSrgb | Tf::Astc8x6RgbaUnorm | Tf::Astc8x6RgbaUnormSrgb | Tf::Astc10x5RgbaUnorm | Tf::Astc10x5RgbaUnormSrgb | Tf::Astc10x6RgbaUnorm | Tf::Astc10x6RgbaUnormSrgb | Tf::Astc8x8RgbaUnorm | Tf::Astc8x8RgbaUnormSrgb | Tf::Astc10x8RgbaUnorm | Tf::Astc10x8RgbaUnormSrgb | Tf::Astc10x10RgbaUnorm | Tf::Astc10x10RgbaUnormSrgb | Tf::Astc12x10RgbaUnorm | Tf::Astc12x10RgbaUnormSrgb | Tf::Astc12x12RgbaUnorm | Tf::Astc12x12RgbaUnormSrgb => Tfc::SAMPLED | Tfc::SAMPLED_LINEAR, } } unsafe fn surface_capabilities( &self, surface: &super::Surface, ) -> Option<crate::SurfaceCapabilities> { if surface.presentable { Some(crate::SurfaceCapabilities { formats: if surface.enable_srgb { vec![ wgt::TextureFormat::Rgba8UnormSrgb, wgt::TextureFormat::Bgra8UnormSrgb, ] } else { vec![ wgt::TextureFormat::Rgba8Unorm, wgt::TextureFormat::Bgra8Unorm, ] }, present_modes: vec![wgt::PresentMode::Fifo], //TODO composite_alpha_modes: vec![crate::CompositeAlphaMode::Opaque], //TODO swap_chain_sizes: 2..=2, current_extent: None, extents: wgt::Extent3d { width: 4, height: 4, depth_or_array_layers: 1, }..=wgt::Extent3d { width: 4096, height: 4096, depth_or_array_layers: 1, }, usage: crate::TextureUses::COLOR_TARGET, }) } else { None } } } #[cfg(test)] mod tests { use super::super::Adapter; #[test] fn test_version_parse() { let error = Err(crate::InstanceError); assert_eq!(Adapter::parse_version("1"), error); assert_eq!(Adapter::parse_version("1."), error); assert_eq!(Adapter::parse_version("1 
h3l1o. W0rld"), error); assert_eq!(Adapter::parse_version("1. h3l1o. W0rld"), error); assert_eq!(Adapter::parse_version("1.2.3"), error); assert_eq!(Adapter::parse_version("OpenGL ES 3.1"), Ok((3, 1))); assert_eq!( Adapter::parse_version("OpenGL ES 2.0 Google Nexus"), Ok((2, 0)) ); assert_eq!(Adapter::parse_version("GLSL ES 1.1"), Ok((1, 1))); assert_eq!(Adapter::parse_version("OpenGL ES GLSL ES 3.20"), Ok((3, 2))); assert_eq!( // WebGL 2.0 should parse as OpenGL ES 3.0 Adapter::parse_version("WebGL 2.0 (OpenGL ES 3.0 Chromium)"), Ok((3, 0)) ); assert_eq!( Adapter::parse_version("WebGL GLSL ES 3.00 (OpenGL ES GLSL ES 3.0 Chromium)"), Ok((3, 0)) ); } }
40.403252
211
0.562782
db15186c9afc734e81ecc6bc1a34f8207c1f1036
99
pub mod graph; mod header; mod login; mod me; pub use header::*; pub use login::*; pub use me::*;
11
18
0.646465
22cf4499cbdabb8eaf91b13841edce0309770f98
507
fn main() {
    let input = aoc_2020_1::read_numbers("input.txt").unwrap();

    let part_one = solve_part_one(&input);
    let part_two = solve_part_two(&input);

    println!("Part one: {}", part_one.unwrap_or(-1));
    println!("Part two: {}", part_two.unwrap_or(-1));
}

fn solve_part_one(input: &Vec<i32>) -> Result<i32, String> {
    aoc_2020_1::find_two_numbers(input)
}

fn solve_part_two(input: &Vec<i32>) -> Result<i32, String> {
    aoc_2020_1::find_three_numbers(input)
}
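// Illustrative sketch only: `find_two_numbers` lives in the `aoc_2020_1`
// library and its body is not shown here. Under the usual Advent of Code 2020
// day 1 rules (find two entries summing to 2020 and return their product), it
// could plausibly be implemented with a HashSet of previously seen values —
// this is an assumption about the library, kept in comments so it does not
// shadow the real function:
//
//     use std::collections::HashSet;
//
//     fn find_two_numbers(input: &Vec<i32>) -> Result<i32, String> {
//         let mut seen = HashSet::new();
//         for &n in input {
//             // If the complement was seen earlier, we have our pair.
//             if seen.contains(&(2020 - n)) {
//                 return Ok(n * (2020 - n));
//             }
//             seen.insert(n);
//         }
//         Err("no pair sums to 2020".to_string())
//     }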
29.823529
63
0.664694
916f525c76b8ee44efec6526ee597bf7b3c730f3
1,505
//============================================================================= // // WARNING: This file is AUTO-GENERATED // // Do not make changes directly to this file. // // If you would like to make a change to the library, please update the schema // definitions at https://github.com/slack-rs/slack-api-schemas // // If you would like to make a change how the library was generated, // please edit https://github.com/slack-rs/slack-rs-api/tree/master/codegen // //============================================================================= pub use crate::mod_types::team_profile_types::*; use crate::sync::requests::SlackWebRequestSender; /// Retrieve a team's profile. /// /// Wraps https://api.slack.com/methods/team.profile.get pub fn get<R>( client: &R, token: &str, request: &GetRequest<'_>, ) -> Result<GetResponse, GetError<R::Error>> where R: SlackWebRequestSender, { let params = vec![ Some(("token", token)), request .visibility .map(|visibility| ("visibility", visibility)), ]; let params = params.into_iter().filter_map(|x| x).collect::<Vec<_>>(); let url = crate::get_slack_url_for_method("team.profile.get"); client .send(&url, &params[..]) .map_err(GetError::Client) .and_then(|result| { serde_json::from_str::<GetResponse>(&result) .map_err(|e| GetError::MalformedResponse(result, e)) }) .and_then(|o| o.into()) }
32.021277
79
0.55814
69bf979c66c272cbba0e6f2ff8495fbede85e145
4,718
#[derive(Default)] pub struct Camera { view: ViewInfo, proj: ProjInfo, } impl Camera { fn new(view: ViewInfo, proj: ProjInfo) -> Self { Self { view, proj } } pub fn view_transform(&self) -> Transform { self.view.transform().to_cols_array_2d() } pub fn proj_transform(&self) -> Transform { self.proj.transform().to_cols_array_2d() } pub fn view_proj_transform(&self) -> Transform { (self.proj.transform() * self.view.transform()).to_cols_array_2d() } pub fn set_position(&mut self, position: Vec3) -> &mut Self { self.view.set_position(position.into()); self } pub fn set_target(&mut self, target: Vec3) -> &mut Self { self.view.set_target(target.into()); self } pub fn set_up(&mut self, up: Vec3) -> &mut Self { self.view.set_up(up.into()); self } } pub type Vec3 = [f32; 3]; pub type Transform = [[f32; 4]; 4]; #[derive(Default)] pub struct CameraBuilder { view: Option<ViewInfo>, proj: Option<ProjInfo>, } impl CameraBuilder { pub fn look_at(mut self, position: Vec3, target: Vec3, up: Vec3) -> Self { self.view = Some(ViewInfo::new(position.into(), target.into(), up.into())); self } pub fn perspective(mut self, fov_y: f32, aspect: f32, near: f32, far: Option<f32>) -> Self { self.proj = Some(ProjInfo::new_perspective(fov_y, aspect, near, far)); self } pub fn orthographic( mut self, left: f32, right: f32, bottom: f32, top: f32, near: f32, far: f32, ) -> Self { self.proj = Some(ProjInfo::new_orthographic( left, right, bottom, top, near, far, )); self } pub fn build(self) -> Camera { let view = self.view.unwrap_or_default(); let proj = self.proj.unwrap_or_default(); Camera::new(view, proj) } } struct ViewInfo { position: glam::Vec3, target: glam::Vec3, up: glam::Vec3, } impl Default for ViewInfo { fn default() -> Self { let pos = glam::Vec3::new(0.0, 0.0, 0.0); let target = glam::Vec3::new(0.0, 0.0, 1.0); let up = glam::Vec3::new(0.0, 1.0, 0.0); Self::new(pos, target, up) } } impl ViewInfo { pub fn new(position: glam::Vec3, mut target: glam::Vec3, up: glam::Vec3) -> Self { if position == target { // watch to z axis by default target = position + glam::Vec3::new(0.0, 0.0, 1.0); } Self { position, target, up, } } pub fn transform(&self) -> glam::Mat4 { glam::Mat4::look_at_lh(self.position, self.target, self.up) } pub fn set_position(&mut self, position: glam::Vec3) { self.position = position } pub fn set_target(&mut self, target: glam::Vec3) { self.target = target } pub fn set_up(&mut self, up: glam::Vec3) { self.up = up } } enum ProjInfo { Perspective(Perspective), Orthographic(Orthographic), } impl ProjInfo { pub fn new_perspective(fov_y: f32, aspect: f32, near: f32, far: Option<f32>) -> Self { Self::Perspective(Perspective { fov_y, aspect, near, far, }) } pub fn new_orthographic( left: f32, right: f32, bottom: f32, top: f32, near: f32, far: f32, ) -> Self { Self::Orthographic(Orthographic { left, right, bottom, top, near, far, }) } pub fn transform(&self) -> glam::Mat4 { match self { ProjInfo::Perspective(p) => { if let Some(far) = p.far { glam::Mat4::perspective_lh(p.fov_y, p.aspect, p.near, far) } else { glam::Mat4::perspective_infinite_lh(p.fov_y, p.aspect, p.near) } } ProjInfo::Orthographic(o) => { glam::Mat4::orthographic_lh(o.left, o.right, o.bottom, o.top, o.near, o.far) } } } } impl Default for ProjInfo { fn default() -> Self { Self::Orthographic(Orthographic { left: -1.0, right: 1.0, bottom: -1.0, top: 1.0, near: 0.0, far: 1.0, }) } } struct Perspective { pub fov_y: f32, pub aspect: f32, pub near: f32, pub far: Option<f32>, } struct Orthographic { pub left: f32, pub right: f32, pub bottom: f32, 
pub top: f32, pub near: f32, pub far: f32, }
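// A short usage sketch (added for illustration, not part of the original file)
// tying the builder to the transform accessors above. The numeric arguments
// are arbitrary examples; unset view/projection fall back to the defaults
// documented in `ViewInfo::default` and `ProjInfo::default`.
#[cfg(test)]
mod camera_usage_sketch {
    use super::*;

    #[test]
    fn build_and_query() {
        let mut camera = CameraBuilder::default()
            .look_at([0.0, 2.0, -5.0], [0.0, 0.0, 0.0], [0.0, 1.0, 0.0])
            .perspective(std::f32::consts::FRAC_PI_4, 16.0 / 9.0, 0.1, Some(100.0))
            .build();
        // Setters return &mut Self, so they chain.
        camera.set_position([0.0, 3.0, -6.0]).set_target([0.0, 1.0, 0.0]);
        // The combined transform is a plain 4x4 column-major array.
        let mvp: Transform = camera.view_proj_transform();
        assert_eq!(mvp.len(), 4);
    }
}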
23.014634
96
0.519288
fccccc43ccc8a6d0ae01a83dc8b418b9ee508564
2,366
/*
Copyright 2019 Supercomputing Systems AG
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

//! Very simple example that shows how to fetch and pretty print finalized and
//! latest headers and blocks from a node. Has proven to be a helpful
//! debugging tool.

#[macro_use]
extern crate clap;
use clap::App;

use sp_core::sr25519;

use node_template_runtime::{Block, Header, SignedBlock};
use std::sync::mpsc::channel;
use substrate_api_client::Api;

fn main() {
    env_logger::init();
    let url = get_node_url_from_cli();

    let api = Api::<sr25519::Pair>::new(format!("ws://{}", url));

    let head = api.get_finalized_head().unwrap();
    println!("Finalized Head:\n {} \n", head);

    let h: Header = api.get_header(Some(head)).unwrap();
    println!("Finalized header:\n {:?} \n", h);

    let b: SignedBlock = api.get_signed_block(Some(head)).unwrap();
    println!("Finalized signed block:\n {:?} \n", b);

    println!(
        "Latest Header: \n {:?} \n",
        api.get_header::<Header>(None).unwrap()
    );

    println!(
        "Latest block: \n {:?} \n",
        api.get_block::<Block>(None).unwrap()
    );

    println!("Subscribing to finalized heads");
    let (sender, receiver) = channel();
    api.subscribe_finalized_heads(sender);

    for _ in 0..5 {
        let head: Header = receiver
            .recv()
            .map(|header| serde_json::from_str(&header).unwrap())
            .unwrap();
        println!("Got new Block {:?}", head);
    }
}

pub fn get_node_url_from_cli() -> String {
    let yml = load_yaml!("../../src/examples/cli.yml");
    let matches = App::from_yaml(yml).get_matches();

    let node_ip = matches.value_of("node-server").unwrap_or("127.0.0.1");
    let node_port = matches.value_of("node-port").unwrap_or("9944");
    let url = format!("{}:{}", node_ip, node_port);
    println!("Interacting with node on {}\n", url);
    url
}
29.949367
96
0.639899
1dbac99270811374a96b63a2c38c41ddff46e154
96,502
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct UpdateStreamModeOutput {} impl std::fmt::Debug for UpdateStreamModeOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("UpdateStreamModeOutput"); formatter.finish() } } /// See [`UpdateStreamModeOutput`](crate::output::UpdateStreamModeOutput) pub mod update_stream_mode_output { /// A builder for [`UpdateStreamModeOutput`](crate::output::UpdateStreamModeOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder {} impl Builder { /// Consumes the builder and constructs a [`UpdateStreamModeOutput`](crate::output::UpdateStreamModeOutput) pub fn build(self) -> crate::output::UpdateStreamModeOutput { crate::output::UpdateStreamModeOutput {} } } } impl UpdateStreamModeOutput { /// Creates a new builder-style object to manufacture [`UpdateStreamModeOutput`](crate::output::UpdateStreamModeOutput) pub fn builder() -> crate::output::update_stream_mode_output::Builder { crate::output::update_stream_mode_output::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct UpdateShardCountOutput { /// <p>The name of the stream.</p> pub stream_name: std::option::Option<std::string::String>, /// <p>The current number of shards.</p> pub current_shard_count: std::option::Option<i32>, /// <p>The updated number of shards.</p> pub target_shard_count: std::option::Option<i32>, } impl UpdateShardCountOutput { /// <p>The name of the stream.</p> pub fn stream_name(&self) -> std::option::Option<&str> { self.stream_name.as_deref() } /// <p>The current number of shards.</p> pub fn current_shard_count(&self) -> std::option::Option<i32> { self.current_shard_count } /// <p>The updated number of shards.</p> pub fn target_shard_count(&self) -> std::option::Option<i32> { self.target_shard_count } } impl std::fmt::Debug for UpdateShardCountOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("UpdateShardCountOutput"); formatter.field("stream_name", &self.stream_name); formatter.field("current_shard_count", &self.current_shard_count); formatter.field("target_shard_count", &self.target_shard_count); formatter.finish() } } /// See [`UpdateShardCountOutput`](crate::output::UpdateShardCountOutput) pub mod update_shard_count_output { /// A builder for [`UpdateShardCountOutput`](crate::output::UpdateShardCountOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) stream_name: std::option::Option<std::string::String>, pub(crate) current_shard_count: std::option::Option<i32>, pub(crate) target_shard_count: std::option::Option<i32>, } impl Builder { /// <p>The name of the stream.</p> pub fn stream_name(mut self, input: impl Into<std::string::String>) -> Self { self.stream_name = Some(input.into()); self } /// <p>The name of the stream.</p> pub fn set_stream_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.stream_name = input; self } /// <p>The current number of shards.</p> pub fn current_shard_count(mut self, input: i32) -> Self { self.current_shard_count = Some(input); self } /// <p>The current number of 
shards.</p> pub fn set_current_shard_count(mut self, input: std::option::Option<i32>) -> Self { self.current_shard_count = input; self } /// <p>The updated number of shards.</p> pub fn target_shard_count(mut self, input: i32) -> Self { self.target_shard_count = Some(input); self } /// <p>The updated number of shards.</p> pub fn set_target_shard_count(mut self, input: std::option::Option<i32>) -> Self { self.target_shard_count = input; self } /// Consumes the builder and constructs a [`UpdateShardCountOutput`](crate::output::UpdateShardCountOutput) pub fn build(self) -> crate::output::UpdateShardCountOutput { crate::output::UpdateShardCountOutput { stream_name: self.stream_name, current_shard_count: self.current_shard_count, target_shard_count: self.target_shard_count, } } } } impl UpdateShardCountOutput { /// Creates a new builder-style object to manufacture [`UpdateShardCountOutput`](crate::output::UpdateShardCountOutput) pub fn builder() -> crate::output::update_shard_count_output::Builder { crate::output::update_shard_count_output::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct StopStreamEncryptionOutput {} impl std::fmt::Debug for StopStreamEncryptionOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("StopStreamEncryptionOutput"); formatter.finish() } } /// See [`StopStreamEncryptionOutput`](crate::output::StopStreamEncryptionOutput) pub mod stop_stream_encryption_output { /// A builder for [`StopStreamEncryptionOutput`](crate::output::StopStreamEncryptionOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder {} impl Builder { /// Consumes the builder and constructs a [`StopStreamEncryptionOutput`](crate::output::StopStreamEncryptionOutput) pub fn build(self) -> crate::output::StopStreamEncryptionOutput { crate::output::StopStreamEncryptionOutput {} } } } impl StopStreamEncryptionOutput { /// Creates a new builder-style object to manufacture [`StopStreamEncryptionOutput`](crate::output::StopStreamEncryptionOutput) pub fn builder() -> crate::output::stop_stream_encryption_output::Builder { crate::output::stop_stream_encryption_output::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct StartStreamEncryptionOutput {} impl std::fmt::Debug for StartStreamEncryptionOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("StartStreamEncryptionOutput"); formatter.finish() } } /// See [`StartStreamEncryptionOutput`](crate::output::StartStreamEncryptionOutput) pub mod start_stream_encryption_output { /// A builder for [`StartStreamEncryptionOutput`](crate::output::StartStreamEncryptionOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder {} impl Builder { /// Consumes the builder and constructs a [`StartStreamEncryptionOutput`](crate::output::StartStreamEncryptionOutput) pub fn build(self) -> crate::output::StartStreamEncryptionOutput { crate::output::StartStreamEncryptionOutput {} } } } impl StartStreamEncryptionOutput { /// Creates a new builder-style object to manufacture [`StartStreamEncryptionOutput`](crate::output::StartStreamEncryptionOutput) pub fn builder() -> 
crate::output::start_stream_encryption_output::Builder { crate::output::start_stream_encryption_output::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct SplitShardOutput {} impl std::fmt::Debug for SplitShardOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("SplitShardOutput"); formatter.finish() } } /// See [`SplitShardOutput`](crate::output::SplitShardOutput) pub mod split_shard_output { /// A builder for [`SplitShardOutput`](crate::output::SplitShardOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder {} impl Builder { /// Consumes the builder and constructs a [`SplitShardOutput`](crate::output::SplitShardOutput) pub fn build(self) -> crate::output::SplitShardOutput { crate::output::SplitShardOutput {} } } } impl SplitShardOutput { /// Creates a new builder-style object to manufacture [`SplitShardOutput`](crate::output::SplitShardOutput) pub fn builder() -> crate::output::split_shard_output::Builder { crate::output::split_shard_output::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct RemoveTagsFromStreamOutput {} impl std::fmt::Debug for RemoveTagsFromStreamOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("RemoveTagsFromStreamOutput"); formatter.finish() } } /// See [`RemoveTagsFromStreamOutput`](crate::output::RemoveTagsFromStreamOutput) pub mod remove_tags_from_stream_output { /// A builder for [`RemoveTagsFromStreamOutput`](crate::output::RemoveTagsFromStreamOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder {} impl Builder { /// Consumes the builder and constructs a [`RemoveTagsFromStreamOutput`](crate::output::RemoveTagsFromStreamOutput) pub fn build(self) -> crate::output::RemoveTagsFromStreamOutput { crate::output::RemoveTagsFromStreamOutput {} } } } impl RemoveTagsFromStreamOutput { /// Creates a new builder-style object to manufacture [`RemoveTagsFromStreamOutput`](crate::output::RemoveTagsFromStreamOutput) pub fn builder() -> crate::output::remove_tags_from_stream_output::Builder { crate::output::remove_tags_from_stream_output::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct RegisterStreamConsumerOutput { /// <p>An object that represents the details of the consumer you registered. When you register a consumer, it gets an ARN that is generated by Kinesis Data Streams.</p> pub consumer: std::option::Option<crate::model::Consumer>, } impl RegisterStreamConsumerOutput { /// <p>An object that represents the details of the consumer you registered. 
When you register a consumer, it gets an ARN that is generated by Kinesis Data Streams.</p> pub fn consumer(&self) -> std::option::Option<&crate::model::Consumer> { self.consumer.as_ref() } } impl std::fmt::Debug for RegisterStreamConsumerOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("RegisterStreamConsumerOutput"); formatter.field("consumer", &self.consumer); formatter.finish() } } /// See [`RegisterStreamConsumerOutput`](crate::output::RegisterStreamConsumerOutput) pub mod register_stream_consumer_output { /// A builder for [`RegisterStreamConsumerOutput`](crate::output::RegisterStreamConsumerOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) consumer: std::option::Option<crate::model::Consumer>, } impl Builder { /// <p>An object that represents the details of the consumer you registered. When you register a consumer, it gets an ARN that is generated by Kinesis Data Streams.</p> pub fn consumer(mut self, input: crate::model::Consumer) -> Self { self.consumer = Some(input); self } /// <p>An object that represents the details of the consumer you registered. When you register a consumer, it gets an ARN that is generated by Kinesis Data Streams.</p> pub fn set_consumer(mut self, input: std::option::Option<crate::model::Consumer>) -> Self { self.consumer = input; self } /// Consumes the builder and constructs a [`RegisterStreamConsumerOutput`](crate::output::RegisterStreamConsumerOutput) pub fn build(self) -> crate::output::RegisterStreamConsumerOutput { crate::output::RegisterStreamConsumerOutput { consumer: self.consumer, } } } } impl RegisterStreamConsumerOutput { /// Creates a new builder-style object to manufacture [`RegisterStreamConsumerOutput`](crate::output::RegisterStreamConsumerOutput) pub fn builder() -> crate::output::register_stream_consumer_output::Builder { crate::output::register_stream_consumer_output::Builder::default() } } /// <p> <code>PutRecords</code> results.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct PutRecordsOutput { /// <p>The number of unsuccessfully processed records in a <code>PutRecords</code> request.</p> pub failed_record_count: std::option::Option<i32>, /// <p>An array of successfully and unsuccessfully processed record results. A record that is successfully added to a stream includes <code>SequenceNumber</code> and <code>ShardId</code> in the result. A record that fails to be added to a stream includes <code>ErrorCode</code> and <code>ErrorMessage</code> in the result.</p> pub records: std::option::Option<std::vec::Vec<crate::model::PutRecordsResultEntry>>, /// <p>The encryption type used on the records. This parameter can be one of the following values:</p> /// <ul> /// <li> <p> <code>NONE</code>: Do not encrypt the records.</p> </li> /// <li> <p> <code>KMS</code>: Use server-side encryption on the records using a customer-managed Amazon Web Services KMS key.</p> </li> /// </ul> pub encryption_type: std::option::Option<crate::model::EncryptionType>, } impl PutRecordsOutput { /// <p>The number of unsuccessfully processed records in a <code>PutRecords</code> request.</p> pub fn failed_record_count(&self) -> std::option::Option<i32> { self.failed_record_count } /// <p>An array of successfully and unsuccessfully processed record results. A record that is successfully added to a stream includes <code>SequenceNumber</code> and <code>ShardId</code> in the result. 
A record that fails to be added to a stream includes <code>ErrorCode</code> and <code>ErrorMessage</code> in the result.</p> pub fn records(&self) -> std::option::Option<&[crate::model::PutRecordsResultEntry]> { self.records.as_deref() } /// <p>The encryption type used on the records. This parameter can be one of the following values:</p> /// <ul> /// <li> <p> <code>NONE</code>: Do not encrypt the records.</p> </li> /// <li> <p> <code>KMS</code>: Use server-side encryption on the records using a customer-managed Amazon Web Services KMS key.</p> </li> /// </ul> pub fn encryption_type(&self) -> std::option::Option<&crate::model::EncryptionType> { self.encryption_type.as_ref() } } impl std::fmt::Debug for PutRecordsOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("PutRecordsOutput"); formatter.field("failed_record_count", &self.failed_record_count); formatter.field("records", &self.records); formatter.field("encryption_type", &self.encryption_type); formatter.finish() } } /// See [`PutRecordsOutput`](crate::output::PutRecordsOutput) pub mod put_records_output { /// A builder for [`PutRecordsOutput`](crate::output::PutRecordsOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) failed_record_count: std::option::Option<i32>, pub(crate) records: std::option::Option<std::vec::Vec<crate::model::PutRecordsResultEntry>>, pub(crate) encryption_type: std::option::Option<crate::model::EncryptionType>, } impl Builder { /// <p>The number of unsuccessfully processed records in a <code>PutRecords</code> request.</p> pub fn failed_record_count(mut self, input: i32) -> Self { self.failed_record_count = Some(input); self } /// <p>The number of unsuccessfully processed records in a <code>PutRecords</code> request.</p> pub fn set_failed_record_count(mut self, input: std::option::Option<i32>) -> Self { self.failed_record_count = input; self } /// Appends an item to `records`. /// /// To override the contents of this collection use [`set_records`](Self::set_records). /// /// <p>An array of successfully and unsuccessfully processed record results. A record that is successfully added to a stream includes <code>SequenceNumber</code> and <code>ShardId</code> in the result. A record that fails to be added to a stream includes <code>ErrorCode</code> and <code>ErrorMessage</code> in the result.</p> pub fn records(mut self, input: crate::model::PutRecordsResultEntry) -> Self { let mut v = self.records.unwrap_or_default(); v.push(input); self.records = Some(v); self } /// <p>An array of successfully and unsuccessfully processed record results. A record that is successfully added to a stream includes <code>SequenceNumber</code> and <code>ShardId</code> in the result. A record that fails to be added to a stream includes <code>ErrorCode</code> and <code>ErrorMessage</code> in the result.</p> pub fn set_records( mut self, input: std::option::Option<std::vec::Vec<crate::model::PutRecordsResultEntry>>, ) -> Self { self.records = input; self } /// <p>The encryption type used on the records. 
This parameter can be one of the following values:</p> /// <ul> /// <li> <p> <code>NONE</code>: Do not encrypt the records.</p> </li> /// <li> <p> <code>KMS</code>: Use server-side encryption on the records using a customer-managed Amazon Web Services KMS key.</p> </li> /// </ul> pub fn encryption_type(mut self, input: crate::model::EncryptionType) -> Self { self.encryption_type = Some(input); self } /// <p>The encryption type used on the records. This parameter can be one of the following values:</p> /// <ul> /// <li> <p> <code>NONE</code>: Do not encrypt the records.</p> </li> /// <li> <p> <code>KMS</code>: Use server-side encryption on the records using a customer-managed Amazon Web Services KMS key.</p> </li> /// </ul> pub fn set_encryption_type( mut self, input: std::option::Option<crate::model::EncryptionType>, ) -> Self { self.encryption_type = input; self } /// Consumes the builder and constructs a [`PutRecordsOutput`](crate::output::PutRecordsOutput) pub fn build(self) -> crate::output::PutRecordsOutput { crate::output::PutRecordsOutput { failed_record_count: self.failed_record_count, records: self.records, encryption_type: self.encryption_type, } } } } impl PutRecordsOutput { /// Creates a new builder-style object to manufacture [`PutRecordsOutput`](crate::output::PutRecordsOutput) pub fn builder() -> crate::output::put_records_output::Builder { crate::output::put_records_output::Builder::default() } } /// <p>Represents the output for <code>PutRecord</code>.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct PutRecordOutput { /// <p>The shard ID of the shard where the data record was placed.</p> pub shard_id: std::option::Option<std::string::String>, /// <p>The sequence number identifier that was assigned to the put data record. The sequence number for the record is unique across all records in the stream. A sequence number is the identifier associated with every record put into the stream.</p> pub sequence_number: std::option::Option<std::string::String>, /// <p>The encryption type to use on the record. This parameter can be one of the following values:</p> /// <ul> /// <li> <p> <code>NONE</code>: Do not encrypt the records in the stream.</p> </li> /// <li> <p> <code>KMS</code>: Use server-side encryption on the records in the stream using a customer-managed Amazon Web Services KMS key.</p> </li> /// </ul> pub encryption_type: std::option::Option<crate::model::EncryptionType>, } impl PutRecordOutput { /// <p>The shard ID of the shard where the data record was placed.</p> pub fn shard_id(&self) -> std::option::Option<&str> { self.shard_id.as_deref() } /// <p>The sequence number identifier that was assigned to the put data record. The sequence number for the record is unique across all records in the stream. A sequence number is the identifier associated with every record put into the stream.</p> pub fn sequence_number(&self) -> std::option::Option<&str> { self.sequence_number.as_deref() } /// <p>The encryption type to use on the record. 
This parameter can be one of the following values:</p> /// <ul> /// <li> <p> <code>NONE</code>: Do not encrypt the records in the stream.</p> </li> /// <li> <p> <code>KMS</code>: Use server-side encryption on the records in the stream using a customer-managed Amazon Web Services KMS key.</p> </li> /// </ul> pub fn encryption_type(&self) -> std::option::Option<&crate::model::EncryptionType> { self.encryption_type.as_ref() } } impl std::fmt::Debug for PutRecordOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("PutRecordOutput"); formatter.field("shard_id", &self.shard_id); formatter.field("sequence_number", &self.sequence_number); formatter.field("encryption_type", &self.encryption_type); formatter.finish() } } /// See [`PutRecordOutput`](crate::output::PutRecordOutput) pub mod put_record_output { /// A builder for [`PutRecordOutput`](crate::output::PutRecordOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) shard_id: std::option::Option<std::string::String>, pub(crate) sequence_number: std::option::Option<std::string::String>, pub(crate) encryption_type: std::option::Option<crate::model::EncryptionType>, } impl Builder { /// <p>The shard ID of the shard where the data record was placed.</p> pub fn shard_id(mut self, input: impl Into<std::string::String>) -> Self { self.shard_id = Some(input.into()); self } /// <p>The shard ID of the shard where the data record was placed.</p> pub fn set_shard_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.shard_id = input; self } /// <p>The sequence number identifier that was assigned to the put data record. The sequence number for the record is unique across all records in the stream. A sequence number is the identifier associated with every record put into the stream.</p> pub fn sequence_number(mut self, input: impl Into<std::string::String>) -> Self { self.sequence_number = Some(input.into()); self } /// <p>The sequence number identifier that was assigned to the put data record. The sequence number for the record is unique across all records in the stream. A sequence number is the identifier associated with every record put into the stream.</p> pub fn set_sequence_number( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.sequence_number = input; self } /// <p>The encryption type to use on the record. This parameter can be one of the following values:</p> /// <ul> /// <li> <p> <code>NONE</code>: Do not encrypt the records in the stream.</p> </li> /// <li> <p> <code>KMS</code>: Use server-side encryption on the records in the stream using a customer-managed Amazon Web Services KMS key.</p> </li> /// </ul> pub fn encryption_type(mut self, input: crate::model::EncryptionType) -> Self { self.encryption_type = Some(input); self } /// <p>The encryption type to use on the record. 
This parameter can be one of the following values:</p> /// <ul> /// <li> <p> <code>NONE</code>: Do not encrypt the records in the stream.</p> </li> /// <li> <p> <code>KMS</code>: Use server-side encryption on the records in the stream using a customer-managed Amazon Web Services KMS key.</p> </li> /// </ul> pub fn set_encryption_type( mut self, input: std::option::Option<crate::model::EncryptionType>, ) -> Self { self.encryption_type = input; self } /// Consumes the builder and constructs a [`PutRecordOutput`](crate::output::PutRecordOutput) pub fn build(self) -> crate::output::PutRecordOutput { crate::output::PutRecordOutput { shard_id: self.shard_id, sequence_number: self.sequence_number, encryption_type: self.encryption_type, } } } } impl PutRecordOutput { /// Creates a new builder-style object to manufacture [`PutRecordOutput`](crate::output::PutRecordOutput) pub fn builder() -> crate::output::put_record_output::Builder { crate::output::put_record_output::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct MergeShardsOutput {} impl std::fmt::Debug for MergeShardsOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("MergeShardsOutput"); formatter.finish() } } /// See [`MergeShardsOutput`](crate::output::MergeShardsOutput) pub mod merge_shards_output { /// A builder for [`MergeShardsOutput`](crate::output::MergeShardsOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder {} impl Builder { /// Consumes the builder and constructs a [`MergeShardsOutput`](crate::output::MergeShardsOutput) pub fn build(self) -> crate::output::MergeShardsOutput { crate::output::MergeShardsOutput {} } } } impl MergeShardsOutput { /// Creates a new builder-style object to manufacture [`MergeShardsOutput`](crate::output::MergeShardsOutput) pub fn builder() -> crate::output::merge_shards_output::Builder { crate::output::merge_shards_output::Builder::default() } } /// <p>Represents the output for <code>ListTagsForStream</code>.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct ListTagsForStreamOutput { /// <p>A list of tags associated with <code>StreamName</code>, starting with the first tag after <code>ExclusiveStartTagKey</code> and up to the specified <code>Limit</code>. </p> pub tags: std::option::Option<std::vec::Vec<crate::model::Tag>>, /// <p>If set to <code>true</code>, more tags are available. To request additional tags, set <code>ExclusiveStartTagKey</code> to the key of the last tag returned.</p> pub has_more_tags: std::option::Option<bool>, } impl ListTagsForStreamOutput { /// <p>A list of tags associated with <code>StreamName</code>, starting with the first tag after <code>ExclusiveStartTagKey</code> and up to the specified <code>Limit</code>. </p> pub fn tags(&self) -> std::option::Option<&[crate::model::Tag]> { self.tags.as_deref() } /// <p>If set to <code>true</code>, more tags are available. 
To request additional tags, set <code>ExclusiveStartTagKey</code> to the key of the last tag returned.</p> pub fn has_more_tags(&self) -> std::option::Option<bool> { self.has_more_tags } } impl std::fmt::Debug for ListTagsForStreamOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("ListTagsForStreamOutput"); formatter.field("tags", &self.tags); formatter.field("has_more_tags", &self.has_more_tags); formatter.finish() } } /// See [`ListTagsForStreamOutput`](crate::output::ListTagsForStreamOutput) pub mod list_tags_for_stream_output { /// A builder for [`ListTagsForStreamOutput`](crate::output::ListTagsForStreamOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) tags: std::option::Option<std::vec::Vec<crate::model::Tag>>, pub(crate) has_more_tags: std::option::Option<bool>, } impl Builder { /// Appends an item to `tags`. /// /// To override the contents of this collection use [`set_tags`](Self::set_tags). /// /// <p>A list of tags associated with <code>StreamName</code>, starting with the first tag after <code>ExclusiveStartTagKey</code> and up to the specified <code>Limit</code>. </p> pub fn tags(mut self, input: crate::model::Tag) -> Self { let mut v = self.tags.unwrap_or_default(); v.push(input); self.tags = Some(v); self } /// <p>A list of tags associated with <code>StreamName</code>, starting with the first tag after <code>ExclusiveStartTagKey</code> and up to the specified <code>Limit</code>. </p> pub fn set_tags( mut self, input: std::option::Option<std::vec::Vec<crate::model::Tag>>, ) -> Self { self.tags = input; self } /// <p>If set to <code>true</code>, more tags are available. To request additional tags, set <code>ExclusiveStartTagKey</code> to the key of the last tag returned.</p> pub fn has_more_tags(mut self, input: bool) -> Self { self.has_more_tags = Some(input); self } /// <p>If set to <code>true</code>, more tags are available. 
To request additional tags, set <code>ExclusiveStartTagKey</code> to the key of the last tag returned.</p> pub fn set_has_more_tags(mut self, input: std::option::Option<bool>) -> Self { self.has_more_tags = input; self } /// Consumes the builder and constructs a [`ListTagsForStreamOutput`](crate::output::ListTagsForStreamOutput) pub fn build(self) -> crate::output::ListTagsForStreamOutput { crate::output::ListTagsForStreamOutput { tags: self.tags, has_more_tags: self.has_more_tags, } } } } impl ListTagsForStreamOutput { /// Creates a new builder-style object to manufacture [`ListTagsForStreamOutput`](crate::output::ListTagsForStreamOutput) pub fn builder() -> crate::output::list_tags_for_stream_output::Builder { crate::output::list_tags_for_stream_output::Builder::default() } } /// <p>Represents the output for <code>ListStreams</code>.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct ListStreamsOutput { /// <p>The names of the streams that are associated with the Amazon Web Services account making the <code>ListStreams</code> request.</p> pub stream_names: std::option::Option<std::vec::Vec<std::string::String>>, /// <p>If set to <code>true</code>, there are more streams available to list.</p> pub has_more_streams: std::option::Option<bool>, } impl ListStreamsOutput { /// <p>The names of the streams that are associated with the Amazon Web Services account making the <code>ListStreams</code> request.</p> pub fn stream_names(&self) -> std::option::Option<&[std::string::String]> { self.stream_names.as_deref() } /// <p>If set to <code>true</code>, there are more streams available to list.</p> pub fn has_more_streams(&self) -> std::option::Option<bool> { self.has_more_streams } } impl std::fmt::Debug for ListStreamsOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("ListStreamsOutput"); formatter.field("stream_names", &self.stream_names); formatter.field("has_more_streams", &self.has_more_streams); formatter.finish() } } /// See [`ListStreamsOutput`](crate::output::ListStreamsOutput) pub mod list_streams_output { /// A builder for [`ListStreamsOutput`](crate::output::ListStreamsOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) stream_names: std::option::Option<std::vec::Vec<std::string::String>>, pub(crate) has_more_streams: std::option::Option<bool>, } impl Builder { /// Appends an item to `stream_names`. /// /// To override the contents of this collection use [`set_stream_names`](Self::set_stream_names). 
/// /// <p>The names of the streams that are associated with the Amazon Web Services account making the <code>ListStreams</code> request.</p> pub fn stream_names(mut self, input: impl Into<std::string::String>) -> Self { let mut v = self.stream_names.unwrap_or_default(); v.push(input.into()); self.stream_names = Some(v); self } /// <p>The names of the streams that are associated with the Amazon Web Services account making the <code>ListStreams</code> request.</p> pub fn set_stream_names( mut self, input: std::option::Option<std::vec::Vec<std::string::String>>, ) -> Self { self.stream_names = input; self } /// <p>If set to <code>true</code>, there are more streams available to list.</p> pub fn has_more_streams(mut self, input: bool) -> Self { self.has_more_streams = Some(input); self } /// <p>If set to <code>true</code>, there are more streams available to list.</p> pub fn set_has_more_streams(mut self, input: std::option::Option<bool>) -> Self { self.has_more_streams = input; self } /// Consumes the builder and constructs a [`ListStreamsOutput`](crate::output::ListStreamsOutput) pub fn build(self) -> crate::output::ListStreamsOutput { crate::output::ListStreamsOutput { stream_names: self.stream_names, has_more_streams: self.has_more_streams, } } } } impl ListStreamsOutput { /// Creates a new builder-style object to manufacture [`ListStreamsOutput`](crate::output::ListStreamsOutput) pub fn builder() -> crate::output::list_streams_output::Builder { crate::output::list_streams_output::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct ListStreamConsumersOutput { /// <p>An array of JSON objects. Each object represents one registered consumer.</p> pub consumers: std::option::Option<std::vec::Vec<crate::model::Consumer>>, /// <p>When the number of consumers that are registered with the data stream is greater than the default value for the <code>MaxResults</code> parameter, or if you explicitly specify a value for <code>MaxResults</code> that is less than the number of registered consumers, the response includes a pagination token named <code>NextToken</code>. You can specify this <code>NextToken</code> value in a subsequent call to <code>ListStreamConsumers</code> to list the next set of registered consumers. For more information about the use of this pagination token when calling the <code>ListStreamConsumers</code> operation, see <code>ListStreamConsumersInput$NextToken</code>.</p> <important> /// <p>Tokens expire after 300 seconds. When you obtain a value for <code>NextToken</code> in the response to a call to <code>ListStreamConsumers</code>, you have 300 seconds to use that value. If you specify an expired token in a call to <code>ListStreamConsumers</code>, you get <code>ExpiredNextTokenException</code>.</p> /// </important> pub next_token: std::option::Option<std::string::String>, } impl ListStreamConsumersOutput { /// <p>An array of JSON objects. Each object represents one registered consumer.</p> pub fn consumers(&self) -> std::option::Option<&[crate::model::Consumer]> { self.consumers.as_deref() } /// <p>When the number of consumers that are registered with the data stream is greater than the default value for the <code>MaxResults</code> parameter, or if you explicitly specify a value for <code>MaxResults</code> that is less than the number of registered consumers, the response includes a pagination token named <code>NextToken</code>. 
You can specify this <code>NextToken</code> value in a subsequent call to <code>ListStreamConsumers</code> to list the next set of registered consumers. For more information about the use of this pagination token when calling the <code>ListStreamConsumers</code> operation, see <code>ListStreamConsumersInput$NextToken</code>.</p> <important> /// <p>Tokens expire after 300 seconds. When you obtain a value for <code>NextToken</code> in the response to a call to <code>ListStreamConsumers</code>, you have 300 seconds to use that value. If you specify an expired token in a call to <code>ListStreamConsumers</code>, you get <code>ExpiredNextTokenException</code>.</p> /// </important> pub fn next_token(&self) -> std::option::Option<&str> { self.next_token.as_deref() } } impl std::fmt::Debug for ListStreamConsumersOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("ListStreamConsumersOutput"); formatter.field("consumers", &self.consumers); formatter.field("next_token", &self.next_token); formatter.finish() } } /// See [`ListStreamConsumersOutput`](crate::output::ListStreamConsumersOutput) pub mod list_stream_consumers_output { /// A builder for [`ListStreamConsumersOutput`](crate::output::ListStreamConsumersOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) consumers: std::option::Option<std::vec::Vec<crate::model::Consumer>>, pub(crate) next_token: std::option::Option<std::string::String>, } impl Builder { /// Appends an item to `consumers`. /// /// To override the contents of this collection use [`set_consumers`](Self::set_consumers). /// /// <p>An array of JSON objects. Each object represents one registered consumer.</p> pub fn consumers(mut self, input: crate::model::Consumer) -> Self { let mut v = self.consumers.unwrap_or_default(); v.push(input); self.consumers = Some(v); self } /// <p>An array of JSON objects. Each object represents one registered consumer.</p> pub fn set_consumers( mut self, input: std::option::Option<std::vec::Vec<crate::model::Consumer>>, ) -> Self { self.consumers = input; self } /// <p>When the number of consumers that are registered with the data stream is greater than the default value for the <code>MaxResults</code> parameter, or if you explicitly specify a value for <code>MaxResults</code> that is less than the number of registered consumers, the response includes a pagination token named <code>NextToken</code>. You can specify this <code>NextToken</code> value in a subsequent call to <code>ListStreamConsumers</code> to list the next set of registered consumers. For more information about the use of this pagination token when calling the <code>ListStreamConsumers</code> operation, see <code>ListStreamConsumersInput$NextToken</code>.</p> <important> /// <p>Tokens expire after 300 seconds. When you obtain a value for <code>NextToken</code> in the response to a call to <code>ListStreamConsumers</code>, you have 300 seconds to use that value. 
If you specify an expired token in a call to <code>ListStreamConsumers</code>, you get <code>ExpiredNextTokenException</code>.</p> /// </important> pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self { self.next_token = Some(input.into()); self } /// <p>When the number of consumers that are registered with the data stream is greater than the default value for the <code>MaxResults</code> parameter, or if you explicitly specify a value for <code>MaxResults</code> that is less than the number of registered consumers, the response includes a pagination token named <code>NextToken</code>. You can specify this <code>NextToken</code> value in a subsequent call to <code>ListStreamConsumers</code> to list the next set of registered consumers. For more information about the use of this pagination token when calling the <code>ListStreamConsumers</code> operation, see <code>ListStreamConsumersInput$NextToken</code>.</p> <important> /// <p>Tokens expire after 300 seconds. When you obtain a value for <code>NextToken</code> in the response to a call to <code>ListStreamConsumers</code>, you have 300 seconds to use that value. If you specify an expired token in a call to <code>ListStreamConsumers</code>, you get <code>ExpiredNextTokenException</code>.</p> /// </important> pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.next_token = input; self } /// Consumes the builder and constructs a [`ListStreamConsumersOutput`](crate::output::ListStreamConsumersOutput) pub fn build(self) -> crate::output::ListStreamConsumersOutput { crate::output::ListStreamConsumersOutput { consumers: self.consumers, next_token: self.next_token, } } } } impl ListStreamConsumersOutput { /// Creates a new builder-style object to manufacture [`ListStreamConsumersOutput`](crate::output::ListStreamConsumersOutput) pub fn builder() -> crate::output::list_stream_consumers_output::Builder { crate::output::list_stream_consumers_output::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct ListShardsOutput { /// <p>An array of JSON objects. Each object represents one shard and specifies the IDs of the shard, the shard's parent, and the shard that's adjacent to the shard's parent. Each object also contains the starting and ending hash keys and the starting and ending sequence numbers for the shard.</p> pub shards: std::option::Option<std::vec::Vec<crate::model::Shard>>, /// <p>When the number of shards in the data stream is greater than the default value for the <code>MaxResults</code> parameter, or if you explicitly specify a value for <code>MaxResults</code> that is less than the number of shards in the data stream, the response includes a pagination token named <code>NextToken</code>. You can specify this <code>NextToken</code> value in a subsequent call to <code>ListShards</code> to list the next set of shards. For more information about the use of this pagination token when calling the <code>ListShards</code> operation, see <code>ListShardsInput$NextToken</code>.</p> <important> /// <p>Tokens expire after 300 seconds. When you obtain a value for <code>NextToken</code> in the response to a call to <code>ListShards</code>, you have 300 seconds to use that value. 
If you specify an expired token in a call to <code>ListShards</code>, you get <code>ExpiredNextTokenException</code>.</p> /// </important> pub next_token: std::option::Option<std::string::String>, } impl ListShardsOutput { /// <p>An array of JSON objects. Each object represents one shard and specifies the IDs of the shard, the shard's parent, and the shard that's adjacent to the shard's parent. Each object also contains the starting and ending hash keys and the starting and ending sequence numbers for the shard.</p> pub fn shards(&self) -> std::option::Option<&[crate::model::Shard]> { self.shards.as_deref() } /// <p>When the number of shards in the data stream is greater than the default value for the <code>MaxResults</code> parameter, or if you explicitly specify a value for <code>MaxResults</code> that is less than the number of shards in the data stream, the response includes a pagination token named <code>NextToken</code>. You can specify this <code>NextToken</code> value in a subsequent call to <code>ListShards</code> to list the next set of shards. For more information about the use of this pagination token when calling the <code>ListShards</code> operation, see <code>ListShardsInput$NextToken</code>.</p> <important> /// <p>Tokens expire after 300 seconds. When you obtain a value for <code>NextToken</code> in the response to a call to <code>ListShards</code>, you have 300 seconds to use that value. If you specify an expired token in a call to <code>ListShards</code>, you get <code>ExpiredNextTokenException</code>.</p> /// </important> pub fn next_token(&self) -> std::option::Option<&str> { self.next_token.as_deref() } } impl std::fmt::Debug for ListShardsOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("ListShardsOutput"); formatter.field("shards", &self.shards); formatter.field("next_token", &self.next_token); formatter.finish() } } /// See [`ListShardsOutput`](crate::output::ListShardsOutput) pub mod list_shards_output { /// A builder for [`ListShardsOutput`](crate::output::ListShardsOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) shards: std::option::Option<std::vec::Vec<crate::model::Shard>>, pub(crate) next_token: std::option::Option<std::string::String>, } impl Builder { /// Appends an item to `shards`. /// /// To override the contents of this collection use [`set_shards`](Self::set_shards). /// /// <p>An array of JSON objects. Each object represents one shard and specifies the IDs of the shard, the shard's parent, and the shard that's adjacent to the shard's parent. Each object also contains the starting and ending hash keys and the starting and ending sequence numbers for the shard.</p> pub fn shards(mut self, input: crate::model::Shard) -> Self { let mut v = self.shards.unwrap_or_default(); v.push(input); self.shards = Some(v); self } /// <p>An array of JSON objects. Each object represents one shard and specifies the IDs of the shard, the shard's parent, and the shard that's adjacent to the shard's parent. 
Each object also contains the starting and ending hash keys and the starting and ending sequence numbers for the shard.</p> pub fn set_shards( mut self, input: std::option::Option<std::vec::Vec<crate::model::Shard>>, ) -> Self { self.shards = input; self } /// <p>When the number of shards in the data stream is greater than the default value for the <code>MaxResults</code> parameter, or if you explicitly specify a value for <code>MaxResults</code> that is less than the number of shards in the data stream, the response includes a pagination token named <code>NextToken</code>. You can specify this <code>NextToken</code> value in a subsequent call to <code>ListShards</code> to list the next set of shards. For more information about the use of this pagination token when calling the <code>ListShards</code> operation, see <code>ListShardsInput$NextToken</code>.</p> <important> /// <p>Tokens expire after 300 seconds. When you obtain a value for <code>NextToken</code> in the response to a call to <code>ListShards</code>, you have 300 seconds to use that value. If you specify an expired token in a call to <code>ListShards</code>, you get <code>ExpiredNextTokenException</code>.</p> /// </important> pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self { self.next_token = Some(input.into()); self } /// <p>When the number of shards in the data stream is greater than the default value for the <code>MaxResults</code> parameter, or if you explicitly specify a value for <code>MaxResults</code> that is less than the number of shards in the data stream, the response includes a pagination token named <code>NextToken</code>. You can specify this <code>NextToken</code> value in a subsequent call to <code>ListShards</code> to list the next set of shards. For more information about the use of this pagination token when calling the <code>ListShards</code> operation, see <code>ListShardsInput$NextToken</code>.</p> <important> /// <p>Tokens expire after 300 seconds. When you obtain a value for <code>NextToken</code> in the response to a call to <code>ListShards</code>, you have 300 seconds to use that value. 
If you specify an expired token in a call to <code>ListShards</code>, you get <code>ExpiredNextTokenException</code>.</p> /// </important> pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.next_token = input; self } /// Consumes the builder and constructs a [`ListShardsOutput`](crate::output::ListShardsOutput) pub fn build(self) -> crate::output::ListShardsOutput { crate::output::ListShardsOutput { shards: self.shards, next_token: self.next_token, } } } } impl ListShardsOutput { /// Creates a new builder-style object to manufacture [`ListShardsOutput`](crate::output::ListShardsOutput) pub fn builder() -> crate::output::list_shards_output::Builder { crate::output::list_shards_output::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct IncreaseStreamRetentionPeriodOutput {} impl std::fmt::Debug for IncreaseStreamRetentionPeriodOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("IncreaseStreamRetentionPeriodOutput"); formatter.finish() } } /// See [`IncreaseStreamRetentionPeriodOutput`](crate::output::IncreaseStreamRetentionPeriodOutput) pub mod increase_stream_retention_period_output { /// A builder for [`IncreaseStreamRetentionPeriodOutput`](crate::output::IncreaseStreamRetentionPeriodOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder {} impl Builder { /// Consumes the builder and constructs a [`IncreaseStreamRetentionPeriodOutput`](crate::output::IncreaseStreamRetentionPeriodOutput) pub fn build(self) -> crate::output::IncreaseStreamRetentionPeriodOutput { crate::output::IncreaseStreamRetentionPeriodOutput {} } } } impl IncreaseStreamRetentionPeriodOutput { /// Creates a new builder-style object to manufacture [`IncreaseStreamRetentionPeriodOutput`](crate::output::IncreaseStreamRetentionPeriodOutput) pub fn builder() -> crate::output::increase_stream_retention_period_output::Builder { crate::output::increase_stream_retention_period_output::Builder::default() } } /// <p>Represents the output for <code>GetShardIterator</code>.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct GetShardIteratorOutput { /// <p>The position in the shard from which to start reading data records sequentially. A shard iterator specifies this position using the sequence number of a data record in a shard.</p> pub shard_iterator: std::option::Option<std::string::String>, } impl GetShardIteratorOutput { /// <p>The position in the shard from which to start reading data records sequentially. 
A shard iterator specifies this position using the sequence number of a data record in a shard.</p> pub fn shard_iterator(&self) -> std::option::Option<&str> { self.shard_iterator.as_deref() } } impl std::fmt::Debug for GetShardIteratorOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("GetShardIteratorOutput"); formatter.field("shard_iterator", &self.shard_iterator); formatter.finish() } } /// See [`GetShardIteratorOutput`](crate::output::GetShardIteratorOutput) pub mod get_shard_iterator_output { /// A builder for [`GetShardIteratorOutput`](crate::output::GetShardIteratorOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) shard_iterator: std::option::Option<std::string::String>, } impl Builder { /// <p>The position in the shard from which to start reading data records sequentially. A shard iterator specifies this position using the sequence number of a data record in a shard.</p> pub fn shard_iterator(mut self, input: impl Into<std::string::String>) -> Self { self.shard_iterator = Some(input.into()); self } /// <p>The position in the shard from which to start reading data records sequentially. A shard iterator specifies this position using the sequence number of a data record in a shard.</p> pub fn set_shard_iterator( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.shard_iterator = input; self } /// Consumes the builder and constructs a [`GetShardIteratorOutput`](crate::output::GetShardIteratorOutput) pub fn build(self) -> crate::output::GetShardIteratorOutput { crate::output::GetShardIteratorOutput { shard_iterator: self.shard_iterator, } } } } impl GetShardIteratorOutput { /// Creates a new builder-style object to manufacture [`GetShardIteratorOutput`](crate::output::GetShardIteratorOutput) pub fn builder() -> crate::output::get_shard_iterator_output::Builder { crate::output::get_shard_iterator_output::Builder::default() } } /// <p>Represents the output for <code>GetRecords</code>.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct GetRecordsOutput { /// <p>The data records retrieved from the shard.</p> pub records: std::option::Option<std::vec::Vec<crate::model::Record>>, /// <p>The next position in the shard from which to start sequentially reading data records. If set to <code>null</code>, the shard has been closed and the requested iterator does not return any more data. </p> pub next_shard_iterator: std::option::Option<std::string::String>, /// <p>The number of milliseconds the <code>GetRecords</code> response is from the tip of the stream, indicating how far behind current time the consumer is. A value of zero indicates that record processing is caught up, and there are no new records to process at this moment.</p> pub millis_behind_latest: std::option::Option<i64>, /// <p>The list of the current shard's child shards, returned in the <code>GetRecords</code> API's response only when the end of the current shard is reached.</p> pub child_shards: std::option::Option<std::vec::Vec<crate::model::ChildShard>>, } impl GetRecordsOutput { /// <p>The data records retrieved from the shard.</p> pub fn records(&self) -> std::option::Option<&[crate::model::Record]> { self.records.as_deref() } /// <p>The next position in the shard from which to start sequentially reading data records. 
If set to <code>null</code>, the shard has been closed and the requested iterator does not return any more data. </p> pub fn next_shard_iterator(&self) -> std::option::Option<&str> { self.next_shard_iterator.as_deref() } /// <p>The number of milliseconds the <code>GetRecords</code> response is from the tip of the stream, indicating how far behind current time the consumer is. A value of zero indicates that record processing is caught up, and there are no new records to process at this moment.</p> pub fn millis_behind_latest(&self) -> std::option::Option<i64> { self.millis_behind_latest } /// <p>The list of the current shard's child shards, returned in the <code>GetRecords</code> API's response only when the end of the current shard is reached.</p> pub fn child_shards(&self) -> std::option::Option<&[crate::model::ChildShard]> { self.child_shards.as_deref() } } impl std::fmt::Debug for GetRecordsOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("GetRecordsOutput"); formatter.field("records", &self.records); formatter.field("next_shard_iterator", &self.next_shard_iterator); formatter.field("millis_behind_latest", &self.millis_behind_latest); formatter.field("child_shards", &self.child_shards); formatter.finish() } } /// See [`GetRecordsOutput`](crate::output::GetRecordsOutput) pub mod get_records_output { /// A builder for [`GetRecordsOutput`](crate::output::GetRecordsOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) records: std::option::Option<std::vec::Vec<crate::model::Record>>, pub(crate) next_shard_iterator: std::option::Option<std::string::String>, pub(crate) millis_behind_latest: std::option::Option<i64>, pub(crate) child_shards: std::option::Option<std::vec::Vec<crate::model::ChildShard>>, } impl Builder { /// Appends an item to `records`. /// /// To override the contents of this collection use [`set_records`](Self::set_records). /// /// <p>The data records retrieved from the shard.</p> pub fn records(mut self, input: crate::model::Record) -> Self { let mut v = self.records.unwrap_or_default(); v.push(input); self.records = Some(v); self } /// <p>The data records retrieved from the shard.</p> pub fn set_records( mut self, input: std::option::Option<std::vec::Vec<crate::model::Record>>, ) -> Self { self.records = input; self } /// <p>The next position in the shard from which to start sequentially reading data records. If set to <code>null</code>, the shard has been closed and the requested iterator does not return any more data. </p> pub fn next_shard_iterator(mut self, input: impl Into<std::string::String>) -> Self { self.next_shard_iterator = Some(input.into()); self } /// <p>The next position in the shard from which to start sequentially reading data records. If set to <code>null</code>, the shard has been closed and the requested iterator does not return any more data. </p> pub fn set_next_shard_iterator( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.next_shard_iterator = input; self } /// <p>The number of milliseconds the <code>GetRecords</code> response is from the tip of the stream, indicating how far behind current time the consumer is. 
A value of zero indicates that record processing is caught up, and there are no new records to process at this moment.</p> pub fn millis_behind_latest(mut self, input: i64) -> Self { self.millis_behind_latest = Some(input); self } /// <p>The number of milliseconds the <code>GetRecords</code> response is from the tip of the stream, indicating how far behind current time the consumer is. A value of zero indicates that record processing is caught up, and there are no new records to process at this moment.</p> pub fn set_millis_behind_latest(mut self, input: std::option::Option<i64>) -> Self { self.millis_behind_latest = input; self } /// Appends an item to `child_shards`. /// /// To override the contents of this collection use [`set_child_shards`](Self::set_child_shards). /// /// <p>The list of the current shard's child shards, returned in the <code>GetRecords</code> API's response only when the end of the current shard is reached.</p> pub fn child_shards(mut self, input: crate::model::ChildShard) -> Self { let mut v = self.child_shards.unwrap_or_default(); v.push(input); self.child_shards = Some(v); self } /// <p>The list of the current shard's child shards, returned in the <code>GetRecords</code> API's response only when the end of the current shard is reached.</p> pub fn set_child_shards( mut self, input: std::option::Option<std::vec::Vec<crate::model::ChildShard>>, ) -> Self { self.child_shards = input; self } /// Consumes the builder and constructs a [`GetRecordsOutput`](crate::output::GetRecordsOutput) pub fn build(self) -> crate::output::GetRecordsOutput { crate::output::GetRecordsOutput { records: self.records, next_shard_iterator: self.next_shard_iterator, millis_behind_latest: self.millis_behind_latest, child_shards: self.child_shards, } } } } impl GetRecordsOutput { /// Creates a new builder-style object to manufacture [`GetRecordsOutput`](crate::output::GetRecordsOutput) pub fn builder() -> crate::output::get_records_output::Builder { crate::output::get_records_output::Builder::default() } } /// <p>Represents the output for <code>EnableEnhancedMonitoring</code> and <code>DisableEnhancedMonitoring</code>.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct EnableEnhancedMonitoringOutput { /// <p>The name of the Kinesis data stream.</p> pub stream_name: std::option::Option<std::string::String>, /// <p>Represents the current state of the metrics that are in the enhanced state before the operation.</p> pub current_shard_level_metrics: std::option::Option<std::vec::Vec<crate::model::MetricsName>>, /// <p>Represents the list of all the metrics that would be in the enhanced state after the operation.</p> pub desired_shard_level_metrics: std::option::Option<std::vec::Vec<crate::model::MetricsName>>, } impl EnableEnhancedMonitoringOutput { /// <p>The name of the Kinesis data stream.</p> pub fn stream_name(&self) -> std::option::Option<&str> { self.stream_name.as_deref() } /// <p>Represents the current state of the metrics that are in the enhanced state before the operation.</p> pub fn current_shard_level_metrics(&self) -> std::option::Option<&[crate::model::MetricsName]> { self.current_shard_level_metrics.as_deref() } /// <p>Represents the list of all the metrics that would be in the enhanced state after the operation.</p> pub fn desired_shard_level_metrics(&self) -> std::option::Option<&[crate::model::MetricsName]> { self.desired_shard_level_metrics.as_deref() } } impl std::fmt::Debug for EnableEnhancedMonitoringOutput { fn fmt(&self, f: &mut 
std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("EnableEnhancedMonitoringOutput"); formatter.field("stream_name", &self.stream_name); formatter.field( "current_shard_level_metrics", &self.current_shard_level_metrics, ); formatter.field( "desired_shard_level_metrics", &self.desired_shard_level_metrics, ); formatter.finish() } } /// See [`EnableEnhancedMonitoringOutput`](crate::output::EnableEnhancedMonitoringOutput) pub mod enable_enhanced_monitoring_output { /// A builder for [`EnableEnhancedMonitoringOutput`](crate::output::EnableEnhancedMonitoringOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) stream_name: std::option::Option<std::string::String>, pub(crate) current_shard_level_metrics: std::option::Option<std::vec::Vec<crate::model::MetricsName>>, pub(crate) desired_shard_level_metrics: std::option::Option<std::vec::Vec<crate::model::MetricsName>>, } impl Builder { /// <p>The name of the Kinesis data stream.</p> pub fn stream_name(mut self, input: impl Into<std::string::String>) -> Self { self.stream_name = Some(input.into()); self } /// <p>The name of the Kinesis data stream.</p> pub fn set_stream_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.stream_name = input; self } /// Appends an item to `current_shard_level_metrics`. /// /// To override the contents of this collection use [`set_current_shard_level_metrics`](Self::set_current_shard_level_metrics). /// /// <p>Represents the current state of the metrics that are in the enhanced state before the operation.</p> pub fn current_shard_level_metrics(mut self, input: crate::model::MetricsName) -> Self { let mut v = self.current_shard_level_metrics.unwrap_or_default(); v.push(input); self.current_shard_level_metrics = Some(v); self } /// <p>Represents the current state of the metrics that are in the enhanced state before the operation.</p> pub fn set_current_shard_level_metrics( mut self, input: std::option::Option<std::vec::Vec<crate::model::MetricsName>>, ) -> Self { self.current_shard_level_metrics = input; self } /// Appends an item to `desired_shard_level_metrics`. /// /// To override the contents of this collection use [`set_desired_shard_level_metrics`](Self::set_desired_shard_level_metrics). 
/// /// <p>Represents the list of all the metrics that would be in the enhanced state after the operation.</p> pub fn desired_shard_level_metrics(mut self, input: crate::model::MetricsName) -> Self { let mut v = self.desired_shard_level_metrics.unwrap_or_default(); v.push(input); self.desired_shard_level_metrics = Some(v); self } /// <p>Represents the list of all the metrics that would be in the enhanced state after the operation.</p> pub fn set_desired_shard_level_metrics( mut self, input: std::option::Option<std::vec::Vec<crate::model::MetricsName>>, ) -> Self { self.desired_shard_level_metrics = input; self } /// Consumes the builder and constructs a [`EnableEnhancedMonitoringOutput`](crate::output::EnableEnhancedMonitoringOutput) pub fn build(self) -> crate::output::EnableEnhancedMonitoringOutput { crate::output::EnableEnhancedMonitoringOutput { stream_name: self.stream_name, current_shard_level_metrics: self.current_shard_level_metrics, desired_shard_level_metrics: self.desired_shard_level_metrics, } } } } impl EnableEnhancedMonitoringOutput { /// Creates a new builder-style object to manufacture [`EnableEnhancedMonitoringOutput`](crate::output::EnableEnhancedMonitoringOutput) pub fn builder() -> crate::output::enable_enhanced_monitoring_output::Builder { crate::output::enable_enhanced_monitoring_output::Builder::default() } } /// <p>Represents the output for <code>EnableEnhancedMonitoring</code> and <code>DisableEnhancedMonitoring</code>.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct DisableEnhancedMonitoringOutput { /// <p>The name of the Kinesis data stream.</p> pub stream_name: std::option::Option<std::string::String>, /// <p>Represents the current state of the metrics that are in the enhanced state before the operation.</p> pub current_shard_level_metrics: std::option::Option<std::vec::Vec<crate::model::MetricsName>>, /// <p>Represents the list of all the metrics that would be in the enhanced state after the operation.</p> pub desired_shard_level_metrics: std::option::Option<std::vec::Vec<crate::model::MetricsName>>, } impl DisableEnhancedMonitoringOutput { /// <p>The name of the Kinesis data stream.</p> pub fn stream_name(&self) -> std::option::Option<&str> { self.stream_name.as_deref() } /// <p>Represents the current state of the metrics that are in the enhanced state before the operation.</p> pub fn current_shard_level_metrics(&self) -> std::option::Option<&[crate::model::MetricsName]> { self.current_shard_level_metrics.as_deref() } /// <p>Represents the list of all the metrics that would be in the enhanced state after the operation.</p> pub fn desired_shard_level_metrics(&self) -> std::option::Option<&[crate::model::MetricsName]> { self.desired_shard_level_metrics.as_deref() } } impl std::fmt::Debug for DisableEnhancedMonitoringOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("DisableEnhancedMonitoringOutput"); formatter.field("stream_name", &self.stream_name); formatter.field( "current_shard_level_metrics", &self.current_shard_level_metrics, ); formatter.field( "desired_shard_level_metrics", &self.desired_shard_level_metrics, ); formatter.finish() } } /// See [`DisableEnhancedMonitoringOutput`](crate::output::DisableEnhancedMonitoringOutput) pub mod disable_enhanced_monitoring_output { /// A builder for [`DisableEnhancedMonitoringOutput`](crate::output::DisableEnhancedMonitoringOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, 
std::fmt::Debug)] pub struct Builder { pub(crate) stream_name: std::option::Option<std::string::String>, pub(crate) current_shard_level_metrics: std::option::Option<std::vec::Vec<crate::model::MetricsName>>, pub(crate) desired_shard_level_metrics: std::option::Option<std::vec::Vec<crate::model::MetricsName>>, } impl Builder { /// <p>The name of the Kinesis data stream.</p> pub fn stream_name(mut self, input: impl Into<std::string::String>) -> Self { self.stream_name = Some(input.into()); self } /// <p>The name of the Kinesis data stream.</p> pub fn set_stream_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.stream_name = input; self } /// Appends an item to `current_shard_level_metrics`. /// /// To override the contents of this collection use [`set_current_shard_level_metrics`](Self::set_current_shard_level_metrics). /// /// <p>Represents the current state of the metrics that are in the enhanced state before the operation.</p> pub fn current_shard_level_metrics(mut self, input: crate::model::MetricsName) -> Self { let mut v = self.current_shard_level_metrics.unwrap_or_default(); v.push(input); self.current_shard_level_metrics = Some(v); self } /// <p>Represents the current state of the metrics that are in the enhanced state before the operation.</p> pub fn set_current_shard_level_metrics( mut self, input: std::option::Option<std::vec::Vec<crate::model::MetricsName>>, ) -> Self { self.current_shard_level_metrics = input; self } /// Appends an item to `desired_shard_level_metrics`. /// /// To override the contents of this collection use [`set_desired_shard_level_metrics`](Self::set_desired_shard_level_metrics). /// /// <p>Represents the list of all the metrics that would be in the enhanced state after the operation.</p> pub fn desired_shard_level_metrics(mut self, input: crate::model::MetricsName) -> Self { let mut v = self.desired_shard_level_metrics.unwrap_or_default(); v.push(input); self.desired_shard_level_metrics = Some(v); self } /// <p>Represents the list of all the metrics that would be in the enhanced state after the operation.</p> pub fn set_desired_shard_level_metrics( mut self, input: std::option::Option<std::vec::Vec<crate::model::MetricsName>>, ) -> Self { self.desired_shard_level_metrics = input; self } /// Consumes the builder and constructs a [`DisableEnhancedMonitoringOutput`](crate::output::DisableEnhancedMonitoringOutput) pub fn build(self) -> crate::output::DisableEnhancedMonitoringOutput { crate::output::DisableEnhancedMonitoringOutput { stream_name: self.stream_name, current_shard_level_metrics: self.current_shard_level_metrics, desired_shard_level_metrics: self.desired_shard_level_metrics, } } } } impl DisableEnhancedMonitoringOutput { /// Creates a new builder-style object to manufacture [`DisableEnhancedMonitoringOutput`](crate::output::DisableEnhancedMonitoringOutput) pub fn builder() -> crate::output::disable_enhanced_monitoring_output::Builder { crate::output::disable_enhanced_monitoring_output::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct DescribeStreamSummaryOutput { /// <p>A <code>StreamDescriptionSummary</code> containing information about the stream.</p> pub stream_description_summary: std::option::Option<crate::model::StreamDescriptionSummary>, } impl DescribeStreamSummaryOutput { /// <p>A <code>StreamDescriptionSummary</code> containing information about the stream.</p> pub fn stream_description_summary( &self, ) 
-> std::option::Option<&crate::model::StreamDescriptionSummary> { self.stream_description_summary.as_ref() } } impl std::fmt::Debug for DescribeStreamSummaryOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("DescribeStreamSummaryOutput"); formatter.field( "stream_description_summary", &self.stream_description_summary, ); formatter.finish() } } /// See [`DescribeStreamSummaryOutput`](crate::output::DescribeStreamSummaryOutput) pub mod describe_stream_summary_output { /// A builder for [`DescribeStreamSummaryOutput`](crate::output::DescribeStreamSummaryOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) stream_description_summary: std::option::Option<crate::model::StreamDescriptionSummary>, } impl Builder { /// <p>A <code>StreamDescriptionSummary</code> containing information about the stream.</p> pub fn stream_description_summary( mut self, input: crate::model::StreamDescriptionSummary, ) -> Self { self.stream_description_summary = Some(input); self } /// <p>A <code>StreamDescriptionSummary</code> containing information about the stream.</p> pub fn set_stream_description_summary( mut self, input: std::option::Option<crate::model::StreamDescriptionSummary>, ) -> Self { self.stream_description_summary = input; self } /// Consumes the builder and constructs a [`DescribeStreamSummaryOutput`](crate::output::DescribeStreamSummaryOutput) pub fn build(self) -> crate::output::DescribeStreamSummaryOutput { crate::output::DescribeStreamSummaryOutput { stream_description_summary: self.stream_description_summary, } } } } impl DescribeStreamSummaryOutput { /// Creates a new builder-style object to manufacture [`DescribeStreamSummaryOutput`](crate::output::DescribeStreamSummaryOutput) pub fn builder() -> crate::output::describe_stream_summary_output::Builder { crate::output::describe_stream_summary_output::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct DescribeStreamConsumerOutput { /// <p>An object that represents the details of the consumer.</p> pub consumer_description: std::option::Option<crate::model::ConsumerDescription>, } impl DescribeStreamConsumerOutput { /// <p>An object that represents the details of the consumer.</p> pub fn consumer_description(&self) -> std::option::Option<&crate::model::ConsumerDescription> { self.consumer_description.as_ref() } } impl std::fmt::Debug for DescribeStreamConsumerOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("DescribeStreamConsumerOutput"); formatter.field("consumer_description", &self.consumer_description); formatter.finish() } } /// See [`DescribeStreamConsumerOutput`](crate::output::DescribeStreamConsumerOutput) pub mod describe_stream_consumer_output { /// A builder for [`DescribeStreamConsumerOutput`](crate::output::DescribeStreamConsumerOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) consumer_description: std::option::Option<crate::model::ConsumerDescription>, } impl Builder { /// <p>An object that represents the details of the consumer.</p> pub fn consumer_description(mut self, input: crate::model::ConsumerDescription) -> Self { self.consumer_description = Some(input); self } /// <p>An object that represents the details of 
the consumer.</p> pub fn set_consumer_description( mut self, input: std::option::Option<crate::model::ConsumerDescription>, ) -> Self { self.consumer_description = input; self } /// Consumes the builder and constructs a [`DescribeStreamConsumerOutput`](crate::output::DescribeStreamConsumerOutput) pub fn build(self) -> crate::output::DescribeStreamConsumerOutput { crate::output::DescribeStreamConsumerOutput { consumer_description: self.consumer_description, } } } } impl DescribeStreamConsumerOutput { /// Creates a new builder-style object to manufacture [`DescribeStreamConsumerOutput`](crate::output::DescribeStreamConsumerOutput) pub fn builder() -> crate::output::describe_stream_consumer_output::Builder { crate::output::describe_stream_consumer_output::Builder::default() } } /// <p>Represents the output for <code>DescribeStream</code>.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct DescribeStreamOutput { /// <p>The current status of the stream, the stream Amazon Resource Name (ARN), an array of shard objects that comprise the stream, and whether there are more shards available.</p> pub stream_description: std::option::Option<crate::model::StreamDescription>, } impl DescribeStreamOutput { /// <p>The current status of the stream, the stream Amazon Resource Name (ARN), an array of shard objects that comprise the stream, and whether there are more shards available.</p> pub fn stream_description(&self) -> std::option::Option<&crate::model::StreamDescription> { self.stream_description.as_ref() } } impl std::fmt::Debug for DescribeStreamOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("DescribeStreamOutput"); formatter.field("stream_description", &self.stream_description); formatter.finish() } } /// See [`DescribeStreamOutput`](crate::output::DescribeStreamOutput) pub mod describe_stream_output { /// A builder for [`DescribeStreamOutput`](crate::output::DescribeStreamOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) stream_description: std::option::Option<crate::model::StreamDescription>, } impl Builder { /// <p>The current status of the stream, the stream Amazon Resource Name (ARN), an array of shard objects that comprise the stream, and whether there are more shards available.</p> pub fn stream_description(mut self, input: crate::model::StreamDescription) -> Self { self.stream_description = Some(input); self } /// <p>The current status of the stream, the stream Amazon Resource Name (ARN), an array of shard objects that comprise the stream, and whether there are more shards available.</p> pub fn set_stream_description( mut self, input: std::option::Option<crate::model::StreamDescription>, ) -> Self { self.stream_description = input; self } /// Consumes the builder and constructs a [`DescribeStreamOutput`](crate::output::DescribeStreamOutput) pub fn build(self) -> crate::output::DescribeStreamOutput { crate::output::DescribeStreamOutput { stream_description: self.stream_description, } } } } impl DescribeStreamOutput { /// Creates a new builder-style object to manufacture [`DescribeStreamOutput`](crate::output::DescribeStreamOutput) pub fn builder() -> crate::output::describe_stream_output::Builder { crate::output::describe_stream_output::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct 
DescribeLimitsOutput { /// <p>The maximum number of shards.</p> pub shard_limit: std::option::Option<i32>, /// <p>The number of open shards.</p> pub open_shard_count: std::option::Option<i32>, /// <p> Indicates the number of data streams with the on-demand capacity mode.</p> pub on_demand_stream_count: std::option::Option<i32>, /// <p> The maximum number of data streams with the on-demand capacity mode. </p> pub on_demand_stream_count_limit: std::option::Option<i32>, } impl DescribeLimitsOutput { /// <p>The maximum number of shards.</p> pub fn shard_limit(&self) -> std::option::Option<i32> { self.shard_limit } /// <p>The number of open shards.</p> pub fn open_shard_count(&self) -> std::option::Option<i32> { self.open_shard_count } /// <p> Indicates the number of data streams with the on-demand capacity mode.</p> pub fn on_demand_stream_count(&self) -> std::option::Option<i32> { self.on_demand_stream_count } /// <p> The maximum number of data streams with the on-demand capacity mode. </p> pub fn on_demand_stream_count_limit(&self) -> std::option::Option<i32> { self.on_demand_stream_count_limit } } impl std::fmt::Debug for DescribeLimitsOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("DescribeLimitsOutput"); formatter.field("shard_limit", &self.shard_limit); formatter.field("open_shard_count", &self.open_shard_count); formatter.field("on_demand_stream_count", &self.on_demand_stream_count); formatter.field( "on_demand_stream_count_limit", &self.on_demand_stream_count_limit, ); formatter.finish() } } /// See [`DescribeLimitsOutput`](crate::output::DescribeLimitsOutput) pub mod describe_limits_output { /// A builder for [`DescribeLimitsOutput`](crate::output::DescribeLimitsOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) shard_limit: std::option::Option<i32>, pub(crate) open_shard_count: std::option::Option<i32>, pub(crate) on_demand_stream_count: std::option::Option<i32>, pub(crate) on_demand_stream_count_limit: std::option::Option<i32>, } impl Builder { /// <p>The maximum number of shards.</p> pub fn shard_limit(mut self, input: i32) -> Self { self.shard_limit = Some(input); self } /// <p>The maximum number of shards.</p> pub fn set_shard_limit(mut self, input: std::option::Option<i32>) -> Self { self.shard_limit = input; self } /// <p>The number of open shards.</p> pub fn open_shard_count(mut self, input: i32) -> Self { self.open_shard_count = Some(input); self } /// <p>The number of open shards.</p> pub fn set_open_shard_count(mut self, input: std::option::Option<i32>) -> Self { self.open_shard_count = input; self } /// <p> Indicates the number of data streams with the on-demand capacity mode.</p> pub fn on_demand_stream_count(mut self, input: i32) -> Self { self.on_demand_stream_count = Some(input); self } /// <p> Indicates the number of data streams with the on-demand capacity mode.</p> pub fn set_on_demand_stream_count(mut self, input: std::option::Option<i32>) -> Self { self.on_demand_stream_count = input; self } /// <p> The maximum number of data streams with the on-demand capacity mode. </p> pub fn on_demand_stream_count_limit(mut self, input: i32) -> Self { self.on_demand_stream_count_limit = Some(input); self } /// <p> The maximum number of data streams with the on-demand capacity mode. 
</p> pub fn set_on_demand_stream_count_limit(mut self, input: std::option::Option<i32>) -> Self { self.on_demand_stream_count_limit = input; self } /// Consumes the builder and constructs a [`DescribeLimitsOutput`](crate::output::DescribeLimitsOutput) pub fn build(self) -> crate::output::DescribeLimitsOutput { crate::output::DescribeLimitsOutput { shard_limit: self.shard_limit, open_shard_count: self.open_shard_count, on_demand_stream_count: self.on_demand_stream_count, on_demand_stream_count_limit: self.on_demand_stream_count_limit, } } } } impl DescribeLimitsOutput { /// Creates a new builder-style object to manufacture [`DescribeLimitsOutput`](crate::output::DescribeLimitsOutput) pub fn builder() -> crate::output::describe_limits_output::Builder { crate::output::describe_limits_output::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct DeregisterStreamConsumerOutput {} impl std::fmt::Debug for DeregisterStreamConsumerOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("DeregisterStreamConsumerOutput"); formatter.finish() } } /// See [`DeregisterStreamConsumerOutput`](crate::output::DeregisterStreamConsumerOutput) pub mod deregister_stream_consumer_output { /// A builder for [`DeregisterStreamConsumerOutput`](crate::output::DeregisterStreamConsumerOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder {} impl Builder { /// Consumes the builder and constructs a [`DeregisterStreamConsumerOutput`](crate::output::DeregisterStreamConsumerOutput) pub fn build(self) -> crate::output::DeregisterStreamConsumerOutput { crate::output::DeregisterStreamConsumerOutput {} } } } impl DeregisterStreamConsumerOutput { /// Creates a new builder-style object to manufacture [`DeregisterStreamConsumerOutput`](crate::output::DeregisterStreamConsumerOutput) pub fn builder() -> crate::output::deregister_stream_consumer_output::Builder { crate::output::deregister_stream_consumer_output::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct DeleteStreamOutput {} impl std::fmt::Debug for DeleteStreamOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("DeleteStreamOutput"); formatter.finish() } } /// See [`DeleteStreamOutput`](crate::output::DeleteStreamOutput) pub mod delete_stream_output { /// A builder for [`DeleteStreamOutput`](crate::output::DeleteStreamOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder {} impl Builder { /// Consumes the builder and constructs a [`DeleteStreamOutput`](crate::output::DeleteStreamOutput) pub fn build(self) -> crate::output::DeleteStreamOutput { crate::output::DeleteStreamOutput {} } } } impl DeleteStreamOutput { /// Creates a new builder-style object to manufacture [`DeleteStreamOutput`](crate::output::DeleteStreamOutput) pub fn builder() -> crate::output::delete_stream_output::Builder { crate::output::delete_stream_output::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct DecreaseStreamRetentionPeriodOutput {} impl std::fmt::Debug for DecreaseStreamRetentionPeriodOutput { fn 
fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("DecreaseStreamRetentionPeriodOutput"); formatter.finish() } } /// See [`DecreaseStreamRetentionPeriodOutput`](crate::output::DecreaseStreamRetentionPeriodOutput) pub mod decrease_stream_retention_period_output { /// A builder for [`DecreaseStreamRetentionPeriodOutput`](crate::output::DecreaseStreamRetentionPeriodOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder {} impl Builder { /// Consumes the builder and constructs a [`DecreaseStreamRetentionPeriodOutput`](crate::output::DecreaseStreamRetentionPeriodOutput) pub fn build(self) -> crate::output::DecreaseStreamRetentionPeriodOutput { crate::output::DecreaseStreamRetentionPeriodOutput {} } } } impl DecreaseStreamRetentionPeriodOutput { /// Creates a new builder-style object to manufacture [`DecreaseStreamRetentionPeriodOutput`](crate::output::DecreaseStreamRetentionPeriodOutput) pub fn builder() -> crate::output::decrease_stream_retention_period_output::Builder { crate::output::decrease_stream_retention_period_output::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct CreateStreamOutput {} impl std::fmt::Debug for CreateStreamOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("CreateStreamOutput"); formatter.finish() } } /// See [`CreateStreamOutput`](crate::output::CreateStreamOutput) pub mod create_stream_output { /// A builder for [`CreateStreamOutput`](crate::output::CreateStreamOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder {} impl Builder { /// Consumes the builder and constructs a [`CreateStreamOutput`](crate::output::CreateStreamOutput) pub fn build(self) -> crate::output::CreateStreamOutput { crate::output::CreateStreamOutput {} } } } impl CreateStreamOutput { /// Creates a new builder-style object to manufacture [`CreateStreamOutput`](crate::output::CreateStreamOutput) pub fn builder() -> crate::output::create_stream_output::Builder { crate::output::create_stream_output::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct AddTagsToStreamOutput {} impl std::fmt::Debug for AddTagsToStreamOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("AddTagsToStreamOutput"); formatter.finish() } } /// See [`AddTagsToStreamOutput`](crate::output::AddTagsToStreamOutput) pub mod add_tags_to_stream_output { /// A builder for [`AddTagsToStreamOutput`](crate::output::AddTagsToStreamOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder {} impl Builder { /// Consumes the builder and constructs a [`AddTagsToStreamOutput`](crate::output::AddTagsToStreamOutput) pub fn build(self) -> crate::output::AddTagsToStreamOutput { crate::output::AddTagsToStreamOutput {} } } } impl AddTagsToStreamOutput { /// Creates a new builder-style object to manufacture [`AddTagsToStreamOutput`](crate::output::AddTagsToStreamOutput) pub fn builder() -> crate::output::add_tags_to_stream_output::Builder { crate::output::add_tags_to_stream_output::Builder::default() } }
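// ---------------------------------------------------------------------------
// Illustrative usage (not part of the generated SDK surface): a minimal,
// hedged sketch of how these builder-style outputs can be constructed and
// read back, e.g. when stubbing responses in unit tests. Only API defined
// above is used (`builder()`, the field setters, `build()`, and the
// getters); the test-module shape itself is an assumption, not generated
// code.
#[cfg(test)]
mod builder_usage_sketch {
    #[test]
    fn list_shards_output_round_trip() {
        // Populate the builder the same way a response deserializer would,
        // then consume it with `build()`.
        let output = crate::output::ListShardsOutput::builder()
            .next_token("example-next-token")
            .build();

        // Getters mirror the field names and return `Option`s; fields that
        // were never set read back as `None`.
        assert_eq!(output.next_token(), Some("example-next-token"));
        assert!(output.shards().is_none());

        // `next_token` drives pagination: per the field docs above, a caller
        // would pass this value to a subsequent `ListShards` call (within
        // its 300-second lifetime) to fetch the next set of shards.
    }
}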
53.493348
694
0.672971
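A minimal sketch (not part of the generated file above) of the builder pattern these output types share. Only setters actually visible in the file are used; the function name and the limit value are placeholders.

fn example_describe_limits() -> crate::output::DescribeLimitsOutput {
    // Placeholder value; the remaining fields stay None.
    crate::output::DescribeLimitsOutput::builder()
        .set_on_demand_stream_count_limit(Some(50))
        .build()
}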
d6957808bdf8c6084c20afd2e831fabbacb6f0da
7,386
// Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. #[macro_use] extern crate criterion; use criterion::Criterion; use rand::seq::SliceRandom; use rand::Rng; use std::sync::{Arc, Mutex}; use tokio::runtime::Runtime; extern crate arrow; extern crate datafusion; use arrow::{ array::Float32Array, array::Float64Array, array::StringArray, array::UInt64Array, datatypes::{DataType, Field, Schema}, record_batch::RecordBatch, }; use datafusion::datasource::MemTable; use datafusion::error::Result; use datafusion::execution::context::ExecutionContext; fn query(ctx: Arc<Mutex<ExecutionContext>>, sql: &str) { let rt = Runtime::new().unwrap(); // execute the query let df = ctx.lock().unwrap().sql(&sql).unwrap(); rt.block_on(df.collect()).unwrap(); } fn create_data(size: usize, null_density: f64) -> Vec<Option<f64>> { // use random numbers to avoid spurious compiler optimizations wrt to branching let mut rng = rand::thread_rng(); (0..size) .map(|_| { if rng.gen::<f64>() > null_density { None } else { Some(rng.gen::<f64>()) } }) .collect() } fn create_integer_data(size: usize, value_density: f64) -> Vec<Option<u64>> { // use random numbers to avoid spurious compiler optimizations wrt to branching let mut rng = rand::thread_rng(); (0..size) .map(|_| { if rng.gen::<f64>() > value_density { None } else { Some(rng.gen::<u64>()) } }) .collect() } fn create_context( partitions_len: usize, array_len: usize, batch_size: usize, ) -> Result<Arc<Mutex<ExecutionContext>>> { // define a schema. let schema = Arc::new(Schema::new(vec![ Field::new("utf8", DataType::Utf8, false), Field::new("f32", DataType::Float32, false), Field::new("f64", DataType::Float64, false), // This field will contain integers randomly selected from a large // range of values, i.e. [0, u64::MAX], such that there are none (or // very few) repeated values. Field::new("u64_wide", DataType::UInt64, false), // This field will contain integers randomly selected from a narrow // range of values such that there are a few distinct values, but they // are repeated often. Field::new("u64_narrow", DataType::UInt64, false), ])); // define data. let partitions = (0..partitions_len) .map(|_| { (0..array_len / batch_size / partitions_len) .map(|i| { // the 4 here is the number of different keys. // a higher number increase sparseness let vs = vec![0, 1, 2, 3]; let keys: Vec<String> = (0..batch_size) .map( // use random numbers to avoid spurious compiler optimizations wrt to branching |_| format!("hi{:?}", vs.choose(&mut rand::thread_rng())), ) .collect(); let keys: Vec<&str> = keys.iter().map(|e| &**e).collect(); let values = create_data(batch_size, 0.5); // Integer values between [0, u64::MAX]. let integer_values_wide = create_integer_data(batch_size, 9.0); // Integer values between [0, 9]. 
let integer_values_narrow_choices = (0..10).collect::<Vec<u64>>(); let integer_values_narrow = (0..batch_size) .map(|_| { *integer_values_narrow_choices .choose(&mut rand::thread_rng()) .unwrap() }) .collect::<Vec<u64>>(); RecordBatch::try_new( schema.clone(), vec![ Arc::new(StringArray::from(keys)), Arc::new(Float32Array::from(vec![i as f32; batch_size])), Arc::new(Float64Array::from(values)), Arc::new(UInt64Array::from(integer_values_wide)), Arc::new(UInt64Array::from(integer_values_narrow)), ], ) .unwrap() }) .collect::<Vec<_>>() }) .collect::<Vec<_>>(); let mut ctx = ExecutionContext::new(); // declare a table in memory. In spark API, this corresponds to createDataFrame(...). let provider = MemTable::new(schema, partitions)?; ctx.register_table("t", Box::new(provider)); Ok(Arc::new(Mutex::new(ctx))) } fn criterion_benchmark(c: &mut Criterion) { let partitions_len = 8; let array_len = 32768 * 2; // 2^16 let batch_size = 2048; // 2^11 let ctx = create_context(partitions_len, array_len, batch_size).unwrap(); c.bench_function("aggregate_query_no_group_by 15 12", |b| { b.iter(|| { query( ctx.clone(), "SELECT MIN(f64), AVG(f64), COUNT(f64) \ FROM t", ) }) }); c.bench_function( "aggregate_query_no_group_by_count_distinct_wide 15 12", |b| { b.iter(|| { query( ctx.clone(), "SELECT COUNT(DISTINCT u64_wide) \ FROM t", ) }) }, ); c.bench_function( "aggregate_query_no_group_by_count_distinct_narrow 15 12", |b| { b.iter(|| { query( ctx.clone(), "SELECT COUNT(DISTINCT u64_narrow) \ FROM t", ) }) }, ); c.bench_function("aggregate_query_group_by 15 12", |b| { b.iter(|| { query( ctx.clone(), "SELECT utf8, MIN(f64), AVG(f64), COUNT(f64) \ FROM t GROUP BY utf8", ) }) }); c.bench_function("aggregate_query_group_by_with_filter 15 12", |b| { b.iter(|| { query( ctx.clone(), "SELECT utf8, MIN(f64), AVG(f64), COUNT(f64) \ FROM t \ WHERE f32 > 10 AND f32 < 20 GROUP BY utf8", ) }) }); } criterion_group!(benches, criterion_benchmark); criterion_main!(benches);
33.121076
107
0.52762
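The benchmark above wires create_context and query through criterion; the sketch below reuses those same helpers to run a single query standalone. The function name and the partition/row/batch sizes are hypothetical, chosen so that array_len / batch_size / partitions_len stays a positive whole number of batches.

fn run_once() -> Result<()> {
    // 2 partitions, 8192 rows, batches of 1024 -> 4 batches per partition.
    let ctx = create_context(2, 8192, 1024)?;
    query(ctx, "SELECT utf8, MIN(f64), AVG(f64), COUNT(f64) FROM t GROUP BY utf8");
    Ok(())
}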
61b8b2775f227f42517dbebf6ad21d39b1d7d46c
1,299
//! A crate for an incrementally verifiable ledger system
#![cfg_attr(not(feature = "std"), no_std)]
#![deny(unused_import_braces, unused_qualifications, trivial_casts)]
#![deny(trivial_numeric_casts, private_in_public, variant_size_differences)]
#![deny(stable_features, unreachable_pub, non_shorthand_field_patterns)]
#![deny(unused_attributes, unused_mut)]
#![deny(missing_docs)]
#![deny(unused_imports)]
#![deny(renamed_and_removed_lints, unused_allocation)]
#![deny(unused_comparisons, bare_trait_objects, unused_must_use, const_err)]
#![forbid(unsafe_code)]
#![allow(clippy::op_ref, clippy::type_complexity, clippy::too_many_arguments)]

/// building blocks (here mt)
pub mod building_blocks;
/// compilers
pub mod compiler;
/// the incrementally verifiable LS constructions
pub mod ivls;
/// the ledger system model
pub mod ledger_system;

#[macro_use]
extern crate derivative;

pub(crate) use ark_std::{boxed::Box, vec::Vec};

pub(crate) use ark_ff::{Field, PrimeField, ToBytes};
pub(crate) use ark_relations::r1cs::SynthesisError;
pub(crate) use ark_std::rand::RngCore;
pub(crate) use ark_std::{
    borrow::Borrow,
    marker::{PhantomData, Sized},
};

/// wrapped error types
pub type Error = Box<dyn ark_std::error::Error>;

/// prelude for common gadgets
pub mod gadgets;
31.682927
78
0.763664
e8fadfee44c901e243cf216584a15e156ff17cdc
5,345
#![feature(test)]
#![allow(clippy::integer_arithmetic)]

extern crate solana_core;
extern crate test;

use {
    crossbeam_channel::unbounded,
    log::*,
    rand::{thread_rng, Rng},
    solana_core::{sigverify::TransactionSigVerifier, sigverify_stage::SigVerifyStage},
    solana_perf::{packet::to_packet_batches, packet::PacketBatch, test_tx::test_tx},
    solana_sdk::{
        hash::Hash,
        signature::{Keypair, Signer},
        system_transaction,
        timing::duration_as_ms,
    },
    std::time::{Duration, Instant},
    test::Bencher,
};

fn run_bench_packet_discard(num_ips: usize, bencher: &mut Bencher) {
    solana_logger::setup();
    let len = 30 * 1000;
    let chunk_size = 1024;
    let tx = test_tx();
    let mut batches = to_packet_batches(&vec![tx; len], chunk_size);

    let mut total = 0;

    let ips: Vec<_> = (0..num_ips)
        .into_iter()
        .map(|_| {
            let mut addr = [0u16; 8];
            thread_rng().fill(&mut addr);
            std::net::IpAddr::from(addr)
        })
        .collect();

    for batch in batches.iter_mut() {
        total += batch.packets.len();
        for p in batch.packets.iter_mut() {
            let ip_index = thread_rng().gen_range(0, ips.len());
            p.meta.addr = ips[ip_index];
        }
    }
    info!("total packets: {}", total);

    bencher.iter(move || {
        SigVerifyStage::discard_excess_packets(&mut batches, 10_000);
        let mut num_packets = 0;
        for batch in batches.iter_mut() {
            for p in batch.packets.iter_mut() {
                if !p.meta.discard() {
                    num_packets += 1;
                }
                p.meta.set_discard(false);
            }
        }
        assert_eq!(num_packets, 10_000);
    });
}

#[bench]
fn bench_packet_discard_many_senders(bencher: &mut Bencher) {
    run_bench_packet_discard(1000, bencher);
}

#[bench]
fn bench_packet_discard_single_sender(bencher: &mut Bencher) {
    run_bench_packet_discard(1, bencher);
}

#[bench]
fn bench_packet_discard_mixed_senders(bencher: &mut Bencher) {
    const SIZE: usize = 30 * 1000;
    const CHUNK_SIZE: usize = 1024;
    fn new_rand_addr<R: Rng>(rng: &mut R) -> std::net::IpAddr {
        let mut addr = [0u16; 8];
        rng.fill(&mut addr);
        std::net::IpAddr::from(addr)
    }
    let mut rng = thread_rng();
    let mut batches = to_packet_batches(&vec![test_tx(); SIZE], CHUNK_SIZE);
    let spam_addr = new_rand_addr(&mut rng);
    for batch in batches.iter_mut() {
        for packet in batch.packets.iter_mut() {
            // One spam address, ~1000 unique addresses.
            packet.meta.addr = if rng.gen_ratio(1, 30) {
                new_rand_addr(&mut rng)
            } else {
                spam_addr
            }
        }
    }
    bencher.iter(move || {
        SigVerifyStage::discard_excess_packets(&mut batches, 10_000);
        let mut num_packets = 0;
        for batch in batches.iter_mut() {
            for packet in batch.packets.iter_mut() {
                if !packet.meta.discard() {
                    num_packets += 1;
                }
                packet.meta.set_discard(false);
            }
        }
        assert_eq!(num_packets, 10_000);
    });
}

fn gen_batches(use_same_tx: bool) -> Vec<PacketBatch> {
    let len = 4096;
    let chunk_size = 1024;
    if use_same_tx {
        let tx = test_tx();
        to_packet_batches(&vec![tx; len], chunk_size)
    } else {
        let from_keypair = Keypair::new();
        let to_keypair = Keypair::new();
        let txs: Vec<_> = (0..len)
            .map(|_| {
                let amount = thread_rng().gen();
                system_transaction::transfer(
                    &from_keypair,
                    &to_keypair.pubkey(),
                    amount,
                    Hash::default(),
                )
            })
            .collect();
        to_packet_batches(&txs, chunk_size)
    }
}

#[bench]
fn bench_sigverify_stage(bencher: &mut Bencher) {
    solana_logger::setup();
    trace!("start");
    let (packet_s, packet_r) = unbounded();
    let (verified_s, verified_r) = unbounded();
    let verifier = TransactionSigVerifier::default();
    let stage = SigVerifyStage::new(packet_r, verified_s, verifier);

    let use_same_tx = true;
    bencher.iter(move || {
        let now = Instant::now();
        let mut batches = gen_batches(use_same_tx);
        trace!(
            "starting... generation took: {} ms batches: {}",
            duration_as_ms(&now.elapsed()),
            batches.len()
        );

        let mut sent_len = 0;
        for _ in 0..batches.len() {
            if let Some(batch) = batches.pop() {
                sent_len += batch.packets.len();
                packet_s.send(batch).unwrap();
            }
        }
        let mut received = 0;
        trace!("sent: {}", sent_len);
        loop {
            if let Ok(mut verifieds) = verified_r.recv_timeout(Duration::from_millis(10)) {
                while let Some(v) = verifieds.pop() {
                    received += v.packets.len();
                    batches.push(v);
                }
                if use_same_tx || received >= sent_len {
                    break;
                }
            }
        }
        trace!("received: {}", received);
    });
    stage.join().unwrap();
}
29.860335
91
0.539383
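The discard benchmarks above all build 30_000 packets in chunks of 1024. Assuming to_packet_batches chunks the slice plainly (full chunks plus one remainder), the layout works out as in this sketch; the function name is illustrative.

fn batch_layout() {
    let (len, chunk_size) = (30 * 1000, 1024);
    let batches = (len + chunk_size - 1) / chunk_size; // ceiling division
    let last = len - (batches - 1) * chunk_size;
    // 29 full batches of 1024 packets plus a final batch of 304.
    assert_eq!((batches, last), (30, 304));
}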
01f2c00f651952ca770ab92b31312cd288fc61d2
8,408
//! Panic handlers and soft reboots for the kernel use core::fmt::Write; use core::panic::PanicInfo; use core::sync::atomic::{AtomicPtr, AtomicBool, Ordering}; use crate::acpi::{self, ApicState}; use crate::apic::Apic; use serial::SerialPort; use page_table::PhysAddr; /// Holds a pointer to a pending panic. When a non-core-0 core panics, it will /// place its `PanicInfo` pointer into here, NMI the core 0, and then halt /// forever. static PANIC_PENDING: AtomicPtr<PanicInfo> = AtomicPtr::new(core::ptr::null_mut()); /// Tracks if we're currently panicing on the BSP (used to prevent recursing /// into panics on subsequent NMIs by other panicing cores) static PANICING: AtomicBool = AtomicBool::new(false); /// Records if a soft reboot has been requested. If it has been, we will /// soft reboot as soon as we can. static SOFT_REBOOT_REQUESTED: AtomicBool = AtomicBool::new(false); /// Get a reference to the current panicing state #[inline] pub unsafe fn panicing() -> &'static AtomicBool { &PANICING } /// Attempt a soft reboot by checking to see if there is a command on the /// serial port to soft reboot. pub unsafe fn attempt_soft_reboot() { // Only allow soft reboot attempts from the BSP if core!().id != 0 { return; } // Attempt to get a byte from the serial port let byte = core!().boot_args.serial.lock().as_mut().unwrap().read_byte(); // Check if we got a halt request from the serial port if let Some(b'H') = byte { panic!("Halt requested from timer"); } // Check if we got a 'Z' from the serial port. if let Some(b'Z') = byte { // Request a soft reboot SOFT_REBOOT_REQUESTED.store(true, Ordering::SeqCst); // Force a panic panic!("Soft reboot requested from timer"); } } /// Disable all cores on the system, making sure they check in when they stop pub unsafe fn disable_all_cores(apic: &mut Apic) { // Make sure we're on the BSP assert!(core!().id == 0, "Disable all cores only allowed on BSP"); // Only do this if we have a valid APIC initialized if let Some(our_apic_id) = core!().apic_id() { // Send an NMI to all cores, waiting for it to respond for apic_id in 0..acpi::MAX_CORES as u32 { // Don't NMI ourself if apic_id == our_apic_id { continue; } let state = acpi::core_state(apic_id); if state == ApicState::Online { loop { // Send this core an NMI to cause it to halt if acpi::core_state(apic_id) == ApicState::Halted { break; } apic.ipi(apic_id, (1 << 14) | (4 << 8)); crate::time::sleep(1_000); } // INIT the core apic.ipi(apic_id, 0x4500); crate::time::sleep(1_000); } } } } /// INIT all processors, shutdown the kernel, download a new kernel, and boot /// into it without resetting the actual CPU. pub unsafe fn soft_reboot(apic: &mut Apic) -> ! { // Get access to the soft reboot address as well as the trampoline page // table. 
let soft_reboot = core!().boot_args.soft_reboot_addr_ref() .load(Ordering::SeqCst); let trampoline_cr3 = PhysAddr(core!().boot_args.trampoline_page_table_ref() .load(Ordering::SeqCst)); // Compute the virtual address of the soft reboot entry point based // on the physical address let vaddr = boot_args::KERNEL_PHYS_WINDOW_BASE + soft_reboot; // Disable all other cores disable_all_cores(apic); // Destroy all devices which are handled by drivers crate::pci::destroy_devices(); // Reset the APIC state apic.reset(); // VMXOFF if we're in VMX root operation let vmxon_lock = core!().vmxon_region().shatter(); if (*vmxon_lock).is_some() { // Disable VMX root operation llvm_asm!("vmxoff" :::: "intel", "volatile"); } // Convert the soft reboot virtual address into a function pointer that // takes one `PhysAddr` argument, which is the trampoline cr3 let soft_reboot = *(&vaddr as *const u64 as *const extern fn(PhysAddr) -> !); // Perform the soft reboot! soft_reboot(trampoline_cr3); } /// Panic implementation for the kernel #[panic_handler] pub fn panic(info: &PanicInfo) -> ! { // Disable interrupts, we're never coming back from this point. unsafe { core!().disable_interrupts(); } if core!().id == 0 { // If we had a panic on the BSP, we handle it quite uniquely. We'll // shut down all other processors by sending them NMIs and waiting for // them to check into a halted state. PANICING.store(true, Ordering::SeqCst); let our_info: *const PanicInfo = info; let other_info: *const PanicInfo = PANIC_PENDING.load(Ordering::SeqCst); // Create our emergency serial port. We disabled all other cores so // we re-initialize the serial port to make sure it's in a sane state. let serial = unsafe { SerialPort::new( (boot_args::KERNEL_PHYS_WINDOW_BASE + 0x400) as *const u16) }; /// Structure for holding the emergency serial port which is /// reinitialized and prepared for exclusive access during this panic. pub struct EmergencySerial(SerialPort); impl core::fmt::Write for EmergencySerial { fn write_str(&mut self, st: &str) -> core::fmt::Result { self.0.write(st.as_bytes()); Ok(()) } } // Wrap up the serial driver in our writer let mut eserial = EmergencySerial(serial); // Create some space, in case we're splicing an existing line let _ = write!(eserial, "\n\n"); // Print information about the panic(s) for &(message, info) in &[ ("Panic reported by other core", other_info), ("Local panic", our_info), ] { // Skip potentially null info if info.is_null() { continue; } // Get Rust access to the panic info let info: &PanicInfo = unsafe { &*info }; let _ = write!(eserial, "=== PANIC | {} =============\n", message); if let Some(loc) = info.location() { let _ = write!(eserial, "At {}:{}:{}\n", loc.file(), loc.line(), loc.column()); } if let Some(msg) = info.message() { let _ = write!(eserial, "{}\n", msg); } } let apic = unsafe { // Forcibly get access to the current APIC. This is likely safe in // almost every situation as the APIC is not very stateful. let apic = &mut *core!().apic().shatter(); let apic = apic.as_mut().unwrap(); // Disable all other cores, waiting for them to check-in notifying // us that they've gone into a permanent halt state. 
disable_all_cores(apic); apic }; // Wait for a soft reboot to be requested while SOFT_REBOOT_REQUESTED.load(Ordering::SeqCst) != true { if eserial.0.read_byte() == Some(b'Z') { SOFT_REBOOT_REQUESTED.store(true, Ordering::SeqCst); } } // Start a soft reboot let _ = write!(eserial, "Starting soft reboot...\n"); unsafe { soft_reboot(apic); } } else { // Check if the BSP is already panicing, if it is not, report our // panic to it via an NMI if PANICING.compare_and_swap(false, true, Ordering::SeqCst) == false { // Save the panic info for this core PANIC_PENDING.store(info as *const _ as *mut _, Ordering::SeqCst); unsafe { // Forcibly get access to the current APIC. This is likely safe // in almost every situation as the APIC is not very stateful. let apic = &mut *core!().apic().shatter(); let apic = apic.as_mut().unwrap(); // Notify the BSP that we paniced by sending it an NMI apic.ipi(0, (1 << 14) | (4 << 8)); } } cpu::halt(); } }
35.627119
79
0.58254
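The serial command protocol in attempt_soft_reboot above ('H' halts, 'Z' requests a soft reboot, anything else is ignored) restated as a freestanding dispatch so the mapping is explicit; the function name and return strings are placeholders.

// Serial bytes recognized by attempt_soft_reboot, mapped explicitly.
fn dispatch(byte: Option<u8>) -> &'static str {
    match byte {
        Some(b'H') => "halt requested",
        Some(b'Z') => "soft reboot requested",
        _ => "no command",
    }
}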
e54befd85086414a7c53c724788e6c2fdb4b06cd
5,300
use rlua::{Lua, Nil, Result, Table, Value};

#[test]
fn test_set_get() {
    Lua::new().context(|lua| {
        let globals = lua.globals();
        globals.set("foo", "bar").unwrap();
        globals.set("baz", "baf").unwrap();
        assert_eq!(globals.get::<_, String>("foo").unwrap(), "bar");
        assert_eq!(globals.get::<_, String>("baz").unwrap(), "baf");
    });
}

#[test]
fn test_table() {
    Lua::new().context(|lua| {
        let globals = lua.globals();

        globals.set("table", lua.create_table().unwrap()).unwrap();
        let table1: Table = globals.get("table").unwrap();
        let table2: Table = globals.get("table").unwrap();

        table1.set("foo", "bar").unwrap();
        table2.set("baz", "baf").unwrap();

        assert_eq!(table2.get::<_, String>("foo").unwrap(), "bar");
        assert_eq!(table1.get::<_, String>("baz").unwrap(), "baf");

        lua.load(
            r#"
                table1 = {1, 2, 3, 4, 5}
                table2 = {}
                table3 = {1, 2, nil, 4, 5}
            "#,
        )
        .exec()
        .unwrap();

        let table1 = globals.get::<_, Table>("table1").unwrap();
        let table2 = globals.get::<_, Table>("table2").unwrap();
        let table3 = globals.get::<_, Table>("table3").unwrap();

        assert_eq!(table1.len().unwrap(), 5);
        assert_eq!(
            table1
                .clone()
                .pairs()
                .collect::<Result<Vec<(i64, i64)>>>()
                .unwrap(),
            vec![(1, 1), (2, 2), (3, 3), (4, 4), (5, 5)]
        );
        assert_eq!(
            table1
                .clone()
                .sequence_values()
                .collect::<Result<Vec<i64>>>()
                .unwrap(),
            vec![1, 2, 3, 4, 5]
        );

        assert_eq!(table2.len().unwrap(), 0);
        assert_eq!(
            table2
                .clone()
                .pairs()
                .collect::<Result<Vec<(i64, i64)>>>()
                .unwrap(),
            vec![]
        );
        assert_eq!(
            table2
                .sequence_values()
                .collect::<Result<Vec<i64>>>()
                .unwrap(),
            vec![]
        );

        // sequence_values should only iterate until the first border
        assert_eq!(
            table3
                .sequence_values()
                .collect::<Result<Vec<i64>>>()
                .unwrap(),
            vec![1, 2]
        );

        globals
            .set(
                "table4",
                lua.create_sequence_from(vec![1, 2, 3, 4, 5]).unwrap(),
            )
            .unwrap();
        let table4 = globals.get::<_, Table>("table4").unwrap();
        assert_eq!(
            table4.pairs().collect::<Result<Vec<(i64, i64)>>>().unwrap(),
            vec![(1, 1), (2, 2), (3, 3), (4, 4), (5, 5)]
        );
    });
}

#[test]
fn test_table_scope() {
    Lua::new().context(|lua| {
        let globals = lua.globals();
        lua.load(
            r#"
                touter = {
                    tin = {1, 2, 3}
                }
            "#,
        )
        .exec()
        .unwrap();

        // Make sure that table gets do not borrow the table, but instead just borrow lua.
        let tin;
        {
            let touter = globals.get::<_, Table>("touter").unwrap();
            tin = touter.get::<_, Table>("tin").unwrap();
        }

        assert_eq!(tin.get::<_, i64>(1).unwrap(), 1);
        assert_eq!(tin.get::<_, i64>(2).unwrap(), 2);
        assert_eq!(tin.get::<_, i64>(3).unwrap(), 3);
    });
}

#[test]
fn test_metatable() {
    Lua::new().context(|lua| {
        let table = lua.create_table().unwrap();
        let metatable = lua.create_table().unwrap();

        metatable
            .set(
                "__index",
                lua.create_function(|_, ()| Ok("index_value")).unwrap(),
            )
            .unwrap();
        table.set_metatable(Some(metatable));
        assert_eq!(table.get::<_, String>("any_key").unwrap(), "index_value");

        match table.raw_get::<_, Value>("any_key").unwrap() {
            Nil => {}
            _ => panic!(),
        }

        table.set_metatable(None);
        match table.get::<_, Value>("any_key").unwrap() {
            Nil => {}
            _ => panic!(),
        };
    });
}

#[test]
fn test_table_error() {
    Lua::new().context(|lua| {
        let globals = lua.globals();
        lua.load(
            r#"
                table = {}
                setmetatable(table, {
                    __index = function()
                        error("lua error")
                    end,
                    __newindex = function()
                        error("lua error")
                    end,
                    __len = function()
                        error("lua error")
                    end
                })
            "#,
        )
        .exec()
        .unwrap();

        let bad_table: Table = globals.get("table").unwrap();
        assert!(bad_table.set(1, 1).is_err());
        assert!(bad_table.get::<_, i32>(1).is_err());
        assert!(bad_table.len().is_err());
        assert!(bad_table.raw_set(1, 1).is_ok());
        assert!(bad_table.raw_get::<_, i32>(1).is_ok());
        assert_eq!(bad_table.raw_len(), 1);
    });
}
28.648649
90
0.425283
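A further test in the same style, combining the metamethod-bypassing raw accessors with plain set/get on one table; it uses only APIs already exercised above, and the test name is illustrative.

#[test]
fn test_raw_and_plain_access_sketch() {
    Lua::new().context(|lua| {
        let t = lua.create_table().unwrap();
        t.set("n", 7).unwrap();
        assert_eq!(t.get::<_, i64>("n").unwrap(), 7);
        // raw_set/raw_get bypass metamethods, as test_table_error relies on.
        t.raw_set("m", 8).unwrap();
        assert_eq!(t.raw_get::<_, i64>("m").unwrap(), 8);
    });
}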
de6365865d14a74458c37269516f3b5cbf8b2137
2,278
// Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::fmt;

use crate::{
    math::{self, Mat},
    shapes::{
        paint::{BlendMode, Color32, StrokeCap, StrokeJoin},
        CommandPath, FillRule,
    },
};

#[derive(Clone, Debug, Default)]
pub struct RenderPaint {
    pub fill_rule: FillRule,
    pub is_clipped: bool,
    pub color: PaintColor,
    pub style: Style,
    pub blend_mode: BlendMode,
}

#[derive(Clone, Debug)]
pub enum PaintColor {
    Solid(Color32),
    Gradient(Gradient),
}

impl Default for PaintColor {
    fn default() -> Self {
        Self::Solid(Color32::default())
    }
}

#[derive(Clone, Copy, Debug)]
pub enum Style {
    Fill,
    Stroke(StrokeStyle),
}

#[derive(Clone, Copy, Debug)]
pub struct StrokeStyle {
    pub thickness: f32,
    pub cap: StrokeCap,
    pub join: StrokeJoin,
}

impl Default for Style {
    fn default() -> Self {
        Self::Fill
    }
}

#[derive(Clone, Debug)]
pub struct Gradient {
    pub r#type: GradientType,
    pub start: math::Vec,
    pub end: math::Vec,
    pub stops: Vec<(Color32, f32)>,
}

#[derive(Debug)]
pub struct GradientBuilder {
    gradient: Gradient,
}

impl GradientBuilder {
    pub fn new(r#type: GradientType) -> Self {
        Self {
            gradient: Gradient {
                r#type,
                start: math::Vec::default(),
                end: math::Vec::default(),
                stops: Vec::new(),
            },
        }
    }

    pub fn start(&mut self, start: math::Vec) -> &mut Self {
        self.gradient.start = start;
        self
    }

    pub fn end(&mut self, end: math::Vec) -> &mut Self {
        self.gradient.end = end;
        self
    }

    pub fn push_stop(&mut self, color: Color32, position: f32) -> &mut Self {
        self.gradient.stops.push((color, position));
        self
    }

    pub fn build(self) -> Gradient {
        self.gradient
    }
}

#[derive(Clone, Copy, Debug)]
pub enum GradientType {
    Linear,
    Radial,
}

pub trait Renderer: fmt::Debug {
    fn draw(&mut self, path: &CommandPath, transform: Mat, paint: &RenderPaint);
    fn clip(&mut self, path: &CommandPath, transform: Mat, layers: usize);
}
20.709091
80
0.59043
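A sketch of assembling a two-stop linear gradient with GradientBuilder. Every constructor used here (math::Vec::default, Color32::default) already appears in the file above; only the function name is illustrative. Note that the setters take and return &mut Self while build takes self, so the builder is bound to a local first.

fn example_gradient() -> Gradient {
    let mut builder = GradientBuilder::new(GradientType::Linear);
    builder
        .start(math::Vec::default())
        .end(math::Vec::default())
        .push_stop(Color32::default(), 0.0)
        .push_stop(Color32::default(), 1.0);
    builder.build()
}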
5047caf2d421e5850348cd9141a87f4a5491737d
5,530
// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Mining service, gets a block to mine, and based on mining configuration //! chooses a version of the cuckoo miner to mine the block and produce a valid //! header with its proof-of-work. Any valid mined blocks are submitted to the //! network. use chrono::prelude::Utc; use std::sync::Arc; use crate::chain; use crate::common::types::StratumServerConfig; use crate::core::core::hash::{Hash, Hashed}; use crate::core::core::{Block, BlockHeader}; use crate::core::global; use crate::mining::mine_block; use crate::util::StopState; use crate::ServerTxPool; use grin_chain::SyncState; use std::thread; use std::time::Duration; pub struct Miner { config: StratumServerConfig, chain: Arc<chain::Chain>, tx_pool: ServerTxPool, stop_state: Arc<StopState>, sync_state: Arc<SyncState>, // Just to hold the port we're on, so this miner can be identified // while watching debug output debug_output_id: String, } impl Miner { // Creates a new Miner. Needs references to the chain state and its /// storage. pub fn new( config: StratumServerConfig, chain: Arc<chain::Chain>, tx_pool: ServerTxPool, stop_state: Arc<StopState>, sync_state: Arc<SyncState>, ) -> Miner { Miner { config, chain, tx_pool, debug_output_id: String::from("none"), stop_state, sync_state, } } /// Keeping this optional so setting in a separate function /// instead of in the new function pub fn set_debug_output_id(&mut self, debug_output_id: String) { self.debug_output_id = debug_output_id; } /// The inner part of mining loop for the internal miner /// kept around mostly for automated testing purposes fn inner_mining_loop( &self, b: &mut Block, head: &BlockHeader, attempt_time_per_block: u32, latest_hash: &mut Hash, ) -> bool { // look for a pow for at most 2 sec on the same block (to give a chance to new // transactions) and as long as the head hasn't changed let deadline = Utc::now().timestamp() + attempt_time_per_block as i64; debug!( "(Server ID: {}) Mining Cuckoo{} for max {}s on {} @ {} [{}].", self.debug_output_id, global::min_edge_bits(), attempt_time_per_block, b.header.total_difficulty(), b.header.height, latest_hash ); let mut iter_count = 0; while head.hash() == *latest_hash && Utc::now().timestamp() < deadline { let mut ctx = global::create_pow_context::<u32>( head.height, global::min_edge_bits(), global::proofsize(), 10, ) .unwrap(); ctx.set_header_nonce(b.header.pre_pow(), None, true) .unwrap(); if let Ok(proofs) = ctx.find_cycles() { b.header.pow.proof = proofs[0].clone(); let proof_diff = b.header.pow.to_difficulty(b.header.height); if proof_diff >= (b.header.total_difficulty() - head.total_difficulty()) { return true; } } b.header.pow.nonce += 1; *latest_hash = self.chain.head().unwrap().last_block_h; iter_count += 1; } debug!( "(Server ID: {}) No solution found after {} iterations, continuing...", self.debug_output_id, iter_count ); false } /// Starts the mining loop, building a new block on top of the existing /// chain anytime required and 
looking for PoW solution. pub fn run_loop(&self, wallet_listener_url: Option<String>) { info!( "(Server ID: {}) Starting test miner loop.", self.debug_output_id ); // iteration, we keep the returned derivation to provide it back when // nothing has changed. We only want to create a new key_id for each new block. let mut key_id = None; loop { if self.stop_state.is_stopped() { break; } while self.sync_state.is_syncing() { thread::sleep(Duration::from_secs(5)); } trace!("in miner loop. key_id: {:?}", key_id); // get the latest chain state and build a block on top of it let head = self.chain.head_header().unwrap(); let mut latest_hash = self.chain.head().unwrap().last_block_h; let (mut b, block_fees) = mine_block::get_block( &self.chain, &self.tx_pool, key_id.clone(), wallet_listener_url.clone(), ); let sol = self.inner_mining_loop( &mut b, &head, self.config.attempt_time_per_block, &mut latest_hash, ); // we found a solution, push our block through the chain processing pipeline if sol { info!( "(Server ID: {}) Found valid proof of work, adding block {} (prev_root {}).", self.debug_output_id, b.hash(), b.header.prev_root, ); let res = self.chain.process_block(b, chain::Options::MINE); if let Err(e) = res { error!( "(Server ID: {}) Error validating mined block: {:?}", self.debug_output_id, e ); } trace!("resetting key_id in miner to None"); key_id = None; } else { debug!( "setting pubkey in miner to pubkey from block_fees - {:?}", block_fees ); key_id = block_fees.key_id(); } } info!("(Server ID: {}) test miner exit.", self.debug_output_id); } }
28.214286
82
0.674503
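inner_mining_loop above bounds its search by a wall-clock deadline and an iteration counter. The sketch below restates that pattern with std::time instead of chrono timestamps, and a stand-in modulus test instead of a real proof-of-work check; none of it is the miner's actual logic.

use std::time::{Duration, Instant};

// Time-boxed search: stop at the deadline, return the first "solution".
fn bounded_search(attempt_time: Duration) -> Option<u64> {
    let deadline = Instant::now() + attempt_time;
    let mut nonce = 0u64;
    while Instant::now() < deadline {
        if nonce > 0 && nonce % 1_000_003 == 0 {
            return Some(nonce); // stand-in for a valid proof-of-work
        }
        nonce += 1;
    }
    None
}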
7a15775ca96f4a558145ea46422521b78cfd1b8c
13,313
/* Copyright 2020 Adobe All Rights Reserved. NOTICE: Adobe permits you to use, modify, and distribute this file in accordance with the terms of the Adobe license agreement accompanying it. */ use chrono::{DateTime, Local}; use hyper::{http::request::Parts, Body, Method, Response as HResponse}; use serde_json::Value; use std::collections::HashMap; use url::Url; #[derive(Default, Debug, Clone)] /// The data values found in a COPS request. /// /// Some of these are held in headers, some in URL parameters, and /// some in the body. Where they go and which are required depend /// on whether the request is for activation or deactivation. /// /// We add our own timestamp so we can sort by it, even if the request /// does not have a device timestamp attached. pub struct Request { pub kind: Kind, pub api_key: String, pub request_id: String, pub session_id: String, pub package_id: String, pub asnp_id: String, pub device_id: String, pub device_date: String, pub is_vdi: bool, pub is_virtual: bool, pub os_name: String, pub os_version: String, pub os_user_id: String, pub is_domain_user: bool, pub app_id: String, pub app_version: String, pub ngl_version: String, pub timestamp: String, } impl Request { /// Create a COPS request from a network request received by the proxy. pub fn from_network(parts: &Parts, body: &[u8]) -> Result<Request, BadRequest> { match parts.uri.path() { ACTIVATION_ENDPOINT => { if parts.method == Method::POST { Request::from_activation(parts, body) } else { Err(BadRequest::from("Activation method must be POST")) } } DEACTIVATION_ENDPOINT => { if parts.method == Method::DELETE { Request::from_deactivation(parts) } else { Err(BadRequest::from("Deactivation method must be DELETE")) } } path => { let message = format!("Unknown endpoint path: {}", path); Err(BadRequest::from(&message)) } } } /// Create a network request which submits this COPS request to the given server. 
pub fn to_network(&self, scheme: &str, host: &str) -> hyper::Request<Body> { match self.kind { Kind::Activation => self.to_activation(scheme, host), Kind::Deactivation => self.to_deactivation(scheme, host), } } fn from_activation(parts: &Parts, body: &[u8]) -> Result<Request, BadRequest> { let mut req = Request { kind: Kind::Activation, timestamp: current_timestamp(), ..Default::default() }; req.update_from_headers(parts)?; let map: HashMap<String, Value> = serde_json::from_slice(body).unwrap_or_default(); if map.is_empty() { return Err(BadRequest::from("Malformed activation request body")); } if let Some(package_id) = map["npdId"].as_str() { req.package_id = package_id.to_string(); } else { return Err(BadRequest::from("Missing npdId field in request.")); } if let Some(asnp_id) = map["asnpTemplateId"].as_str() { req.asnp_id = asnp_id.to_string(); } else { return Err(BadRequest::from("Missing asnpTemplateId field in request.")); } if map.get("appDetails").is_none() { return Err(BadRequest::from("Missing appDetails object in request.")); } let app_map: HashMap<String, Value> = serde_json::from_value(map["appDetails"].clone()).unwrap_or_default(); if let Some(app_id) = app_map["nglAppId"].as_str() { req.app_id = app_id.to_string(); } else { return Err(BadRequest::from("Missing nglAppId field in request.")); } if let Some(app_version) = app_map["nglAppVersion"].as_str() { req.app_version = app_version.to_string(); } else { return Err(BadRequest::from("Missing nglAppVersion field in request.")); } if let Some(ngl_version) = app_map["nglLibVersion"].as_str() { req.ngl_version = ngl_version.to_string(); } else { return Err(BadRequest::from("Missing nglLibVersion field in request.")); } if map.get("deviceDetails").is_none() { return Err(BadRequest::from("Missing deviceDetails object in request.")); } let device_map: HashMap<String, Value> = serde_json::from_value(map["deviceDetails"].clone()).unwrap_or_default(); if let Some(device_date) = device_map["currentDate"].as_str() { req.device_date = device_date.to_string(); } else { return Err(BadRequest::from("Missing currentDate field in request.")); } if let Some(device_id) = device_map["deviceId"].as_str() { req.device_id = device_id.to_string(); } else { return Err(BadRequest::from("Missing deviceId field in request.")); } if let Some(os_user_id) = device_map["osUserId"].as_str() { req.os_user_id = os_user_id.to_string(); } else { return Err(BadRequest::from("Missing osUserId field in request.")); } if let Some(os_name) = device_map["osName"].as_str() { req.os_name = os_name.to_string(); } else { return Err(BadRequest::from("Missing osName field in request.")); } if let Some(os_version) = device_map["osVersion"].as_str() { req.os_version = os_version.to_string(); } else { return Err(BadRequest::from("Missing osVersion field in request.")); } if let Some(is_vdi) = device_map["enableVdiMarkerExists"].as_bool() { req.is_vdi = is_vdi; } else { req.is_vdi = false; } if let Some(is_domain_user) = device_map["isOsUserAccountInDomain"].as_bool() { req.is_domain_user = is_domain_user; } else { req.is_domain_user = false; } if let Some(is_virtual) = device_map["isVirtualEnvironment"].as_bool() { req.is_virtual = is_virtual; } else { req.is_virtual = false; } Ok(req) } fn from_deactivation(parts: &Parts) -> Result<Request, BadRequest> { let mut req = Request { kind: Kind::Deactivation, timestamp: current_timestamp(), ..Default::default() }; req.update_from_headers(parts)?; let request_url = format!("http://placeholder{}", &parts.uri.to_string()); let pairs: 
HashMap<String, String> = Url::parse(&request_url) .expect("Bad deactivation query string") .query_pairs() .map(|(k, v)| (k.to_string(), v.to_string())) .collect(); if let Some(npd_id) = pairs.get("npdId") { req.package_id = npd_id.clone(); } else { return Err(BadRequest::from("Missing 'npdId' parameter")); } if let Some(device_id) = pairs.get("deviceId") { req.device_id = device_id.clone() } else { return Err(BadRequest::from("Missing 'deviceId' parameter")); } if let Some(os_user_id) = pairs.get("osUserId") { req.os_user_id = os_user_id.clone() } else { return Err(BadRequest::from("Missing 'osUserId' parameter")); } if let Some(is_vdi) = pairs.get("enableVdiMarkerExists") { req.is_vdi = is_vdi.eq_ignore_ascii_case("true") } else { req.is_vdi = false } Ok(req) } /// Convert a COPS activation request to its network form. fn to_activation(&self, scheme: &str, host: &str) -> hyper::Request<Body> { let body = serde_json::json!({ "npdId" : &self.package_id, "asnpTemplateId" : &self.asnp_id, "appDetails" : { "nglAppId" : &self.app_id, "nglAppVersion" : &self.app_version, "nglLibVersion" : &self.ngl_version }, "deviceDetails" : { "currentDate" : &self.device_date, "deviceId" : &self.device_id, "enableVdiMarkerExists" : &self.is_vdi, "isOsUserAccountInDomain" : &self.is_domain_user, "isVirtualEnvironment" : &self.is_virtual, "osName" : &self.os_name, "osUserId" : &self.os_user_id, "osVersion" : &self.os_version } }); let builder = hyper::Request::builder() .method("POST") .uri(format!("{}://{}{}", scheme, host, ACTIVATION_ENDPOINT).as_str()) .header("host", host) .header("x-api-key", &self.api_key) .header("x-session-id", &self.session_id) .header("x-request-id", &self.request_id) .header("content-type", "application/json") .header("accept", "application/json") .header("user-agent", agent()); builder .body(Body::from(body.to_string())) .expect("Error building activation request body") } /// Convert a COPS deactivation request to its network form. 
fn to_deactivation(&self, scheme: &str, host: &str) -> hyper::Request<Body> { let uri = format!( "{}://{}{}?npdId={}&deviceId={}&osUserId={}&enableVdiMarkerExists={}", scheme, host, DEACTIVATION_ENDPOINT, &self.package_id, &self.device_id, &self.os_user_id, self.is_vdi, ); let builder = hyper::Request::builder() .method("DELETE") .uri(uri) .header("host", host) .header("x-api-key", &self.api_key) .header("x-request-id", &self.request_id) .header("accept", "application/json") .header("user-agent", agent()); builder.body(Body::empty()).expect("Error building deactivation request body") } /// update a request with info from network headers fn update_from_headers(&mut self, parts: &Parts) -> Result<&mut Request, BadRequest> { for (k, v) in parts.headers.iter() { if let Ok(val) = v.to_str() { match k.as_str() { "x-api-key" => self.api_key = val.to_string(), "x-request-id" => self.request_id = val.to_string(), "x-session-id" => self.session_id = val.to_string(), _ => (), } } } match self.kind { Kind::Activation => { if self.api_key.is_empty() || self.request_id.is_empty() || self.session_id.is_empty() { return Err(BadRequest::from("Missing required header field")); } } Kind::Deactivation => { if self.api_key.is_empty() || self.request_id.is_empty() { return Err(BadRequest::from("Missing required header field")); } } } Ok(self) } } pub struct Response { pub kind: Kind, pub request_id: String, pub body: Vec<u8>, pub timestamp: String, } impl Response { pub fn from_network(request: &Request, body: &[u8]) -> Response { Response { kind: request.kind.clone(), request_id: request.request_id.clone(), body: Vec::from(body), timestamp: current_timestamp(), } } pub fn to_network(&self) -> HResponse<Body> { HResponse::builder() .status(200) .header("server", agent()) .header("x-request-id", self.request_id.clone()) .header("content-type", "application/json;charset=UTF-8") .body(Body::from(self.body.clone())) .unwrap() } } const ACTIVATION_ENDPOINT: &str = "/asnp/frl_connected/values/v2"; const DEACTIVATION_ENDPOINT: &str = "/asnp/frl_connected/v1"; #[derive(Debug, Clone)] pub enum Kind { Activation, Deactivation, } impl std::fmt::Display for Kind { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Kind::Activation => "Activation".fmt(f), Kind::Deactivation => "Deactivation".fmt(f), } } } impl Default for Kind { fn default() -> Self { Kind::Activation } } #[derive(Debug, Clone)] pub struct BadRequest { pub reason: String, } impl BadRequest { pub fn from(why: &str) -> BadRequest { BadRequest { reason: why.to_string() } } } pub fn agent() -> String { format!( "FRL-Online-Proxy/{} ({}/{})", env!("CARGO_PKG_VERSION"), std::env::consts::OS, sys_info::os_release().as_deref().unwrap_or("Unknown") ) } pub fn current_timestamp() -> String { let now: DateTime<Local> = Local::now(); now.format("%Y-%m-%dT%H:%M:%S%.3f%z").to_string() }
35.981081
90
0.555097
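Request::from_activation above requires every field it reads to be present; the HashMap indexing panics on a missing key, so even the "optional" boolean flags must appear in the body. Reconstructed from those checks, a body it accepts looks like this sketch; all values are placeholders and the function name is illustrative.

fn example_activation_body() -> serde_json::Value {
    serde_json::json!({
        "npdId": "placeholder-package-id",
        "asnpTemplateId": "placeholder-asnp-id",
        "appDetails": {
            "nglAppId": "placeholder-app-id",
            "nglAppVersion": "1.0",
            "nglLibVersion": "1.0"
        },
        "deviceDetails": {
            "currentDate": "2020-01-01",
            "deviceId": "placeholder-device-id",
            "osUserId": "placeholder-os-user-id",
            "osName": "MAC",
            "osVersion": "10.15",
            "enableVdiMarkerExists": false,
            "isOsUserAccountInDomain": false,
            "isVirtualEnvironment": false
        }
    })
}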
fcb56f7fa262f8025f5ec3dce44017b58c6409f1
513
use drogue_cloud_console_backend::{run, Config};
use drogue_cloud_service_common::{endpoints::create_endpoint_source, main};

#[drogue_cloud_service_api::webapp::main]
async fn main() -> anyhow::Result<()> {
    main!({
        // the endpoint source we choose
        let endpoint_source = create_endpoint_source()?;

        log::info!("Using endpoint source: {:#?}", endpoint_source);
        let endpoints = endpoint_source.eval_endpoints().await?;

        run(Config::from_env()?, endpoints).await
    });
}
34.2
75
0.680312
619e80fbc068dfce693226d9191a17a475e06087
11,098
use crate::id::RequestIdGenerator; use crate::meta::{BeginRequestRec, EndRequestRec, Header, Output, OutputMap, ParamPairs, RequestType, Role}; use crate::params::Params; use crate::{ErrorKind, Result as ClientResult}; use bufstream::BufStream; use log::debug; use std::collections::HashMap; use std::io::{self, Read, Write}; #[cfg(feature = "futures")] use futures::io::{AsyncRead, AsyncWrite, AsyncWriteExt}; /// Client for handling communication between fastcgi server. pub struct Client<S: Read + Write + Send + Sync> { keep_alive: bool, stream: Box<S>, outputs: OutputMap, } impl<S: Read + Write + Send + Sync> Client<BufStream<S>> { /// Construct a `Client` Object with stream (such as `std::net::TcpStream` or `std::os::unix::net::UnixStream`, /// with buffered read/write for stream. pub fn new(stream: S, keep_alive: bool) -> Self { Self { keep_alive, stream: Box::new(BufStream::new(stream)), outputs: HashMap::new(), } } } impl<S: Read + Write + Send + Sync> Client<S> { /// Construct a `Client` Object with stream (such as `std::net::TcpStream` or `std::os::unix::net::UnixStream`, /// without buffered read/write for stream. pub fn new_without_buffered(stream: S, keep_alive: bool) -> Self { Self { keep_alive, stream: Box::new(stream), outputs: HashMap::new(), } } /// Send request and receive response from fastcgi server. /// - `params` fastcgi params. /// - `body` always the http post or put body. /// /// return the output of fastcgi stdout and stderr. pub fn do_request<'a>(&mut self, params: &Params<'a>, body: &mut dyn Read) -> ClientResult<&mut Output> { let id = RequestIdGenerator.generate(); self.handle_request(id, params, body)?; self.handle_response(id)?; Ok(self.outputs.get_mut(&id).ok_or_else(|| ErrorKind::RequestIdNotFound(id))?) } fn handle_request<'a>(&mut self, id: u16, params: &Params<'a>, body: &mut dyn Read) -> ClientResult<()> { let write_stream = &mut self.stream; debug!("[id = {}] Start handle request.", id); let begin_request_rec = BeginRequestRec::new(id, Role::Responder, self.keep_alive)?; debug!("[id = {}] Send to stream: {:?}.", id, &begin_request_rec); begin_request_rec.write_to_stream(write_stream)?; let param_pairs = ParamPairs::new(params); debug!("[id = {}] Params will be sent: {:?}.", id, &param_pairs); Header::write_to_stream_batches( RequestType::Params, id, write_stream, &mut &param_pairs.to_content()?[..], Some(|header| { debug!("[id = {}] Send to stream for Params: {:?}.", id, &header); header }), )?; Header::write_to_stream_batches( RequestType::Params, id, write_stream, &mut io::empty(), Some(|header| { debug!("[id = {}] Send to stream for Params: {:?}.", id, &header); header }), )?; Header::write_to_stream_batches( RequestType::Stdin, id, write_stream, body, Some(|header| { debug!("[id = {}] Send to stream for Stdin: {:?}.", id, &header); header }), )?; Header::write_to_stream_batches( RequestType::Stdin, id, write_stream, &mut io::empty(), Some(|header| { debug!("[id = {}] Send to stream for Stdin: {:?}.", id, &header); header }), )?; write_stream.flush()?; Ok(()) } fn handle_response(&mut self, id: u16) -> ClientResult<()> { self.init_output(id); let global_end_request_rec: Option<EndRequestRec>; loop { let read_stream = &mut self.stream; let header = Header::new_from_stream(read_stream)?; debug!("[id = {}] Receive from stream: {:?}.", id, &header); if header.request_id != id { return Err(ErrorKind::ResponseNotFound(id).into()); } match header.r#type { RequestType::Stdout => { let content = header.read_content_from_stream(read_stream)?; 
self.get_output_mut(id)?.set_stdout(content) } RequestType::Stderr => { let content = header.read_content_from_stream(read_stream)?; self.get_output_mut(id)?.set_stderr(content) } RequestType::EndRequest => { let end_request_rec = EndRequestRec::from_header(&header, read_stream)?; debug!("[id = {}] Receive from stream: {:?}.", id, &end_request_rec); global_end_request_rec = Some(end_request_rec); break; } r#type => return Err(ErrorKind::UnknownRequestType(r#type).into()), } } match global_end_request_rec { Some(end_request_rec) => end_request_rec .end_request .protocol_status .convert_to_client_result(end_request_rec.end_request.app_status), None => unreachable!(), } } fn init_output(&mut self, id: u16) { self.outputs.insert(id, Default::default()); } fn get_output_mut(&mut self, id: u16) -> ClientResult<&mut Output> { self.outputs.get_mut(&id).ok_or_else(|| ErrorKind::RequestIdNotFound(id).into()) } } #[cfg(feature = "futures")] #[cfg_attr(docsrs, doc(cfg(feature = "futures")))] /// Async client for handling communication between fastcgi server. pub struct AsyncClient<S: AsyncRead + AsyncWrite + Send + Sync + Unpin> { keep_alive: bool, stream: Box<S>, outputs: OutputMap, } #[cfg(feature = "futures")] impl<S: AsyncRead + AsyncWrite + Send + Sync + Unpin> AsyncClient<S> { /// Construct a `AsyncClient` Object with stream (such as `async_std::net::TcpStream` or `async_std::os::unix::net::UnixStream`, /// with buffered read/write for stream. pub fn new(stream: S, keep_alive: bool) -> Self { Self { keep_alive, stream: Box::new(stream), outputs: HashMap::new(), } } /// Send request and receive response from fastcgi server. /// - `params` fastcgi params. /// - `body` always the http post or put body. /// /// return the output of fastcgi stdout and stderr. pub async fn do_request<'a>(&mut self, params: &Params<'a>, body: &mut (dyn AsyncRead + Unpin)) -> ClientResult<&mut Output> { let id = RequestIdGenerator.generate(); self.handle_request(id, params, body).await?; self.handle_response(id).await?; Ok(self.outputs.get_mut(&id).ok_or_else(|| ErrorKind::RequestIdNotFound(id))?) 
} async fn handle_request<'a>(&mut self, id: u16, params: &Params<'a>, body: &mut (dyn AsyncRead + Unpin)) -> ClientResult<()> { let write_stream = &mut self.stream; debug!("[id = {}] Start handle request.", id); let begin_request_rec = BeginRequestRec::new(id, Role::Responder, self.keep_alive)?; debug!("[id = {}] Send to stream: {:?}.", id, &begin_request_rec); begin_request_rec.async_write_to_stream(write_stream).await?; let param_pairs = ParamPairs::new(params); debug!("[id = {}] Params will be sent: {:?}.", id, &param_pairs); Header::async_write_to_stream_batches( RequestType::Params, id, write_stream, &mut &param_pairs.to_content()?[..], Some(|header| { debug!("[id = {}] Send to stream for Params: {:?}.", id, &header); header }), ) .await?; Header::async_write_to_stream_batches( RequestType::Params, id, write_stream, &mut futures::io::empty(), Some(|header| { debug!("[id = {}] Send to stream for Params: {:?}.", id, &header); header }), ) .await?; Header::async_write_to_stream_batches( RequestType::Stdin, id, write_stream, body, Some(|header| { debug!("[id = {}] Send to stream for Stdin: {:?}.", id, &header); header }), ) .await?; Header::async_write_to_stream_batches( RequestType::Stdin, id, write_stream, &mut futures::io::empty(), Some(|header| { debug!("[id = {}] Send to stream for Stdin: {:?}.", id, &header); header }), ) .await?; write_stream.flush().await?; Ok(()) } async fn handle_response(&mut self, id: u16) -> ClientResult<()> { self.init_output(id); let global_end_request_rec: Option<EndRequestRec>; loop { let read_stream = &mut self.stream; let header = Header::new_from_async_stream(read_stream).await?; debug!("[id = {}] Receive from stream: {:?}.", id, &header); if header.request_id != id { return Err(ErrorKind::ResponseNotFound(id).into()); } match header.r#type { RequestType::Stdout => { let content = header.read_content_from_async_stream(read_stream).await?; self.get_output_mut(id)?.set_stdout(content) } RequestType::Stderr => { let content = header.read_content_from_async_stream(read_stream).await?; self.get_output_mut(id)?.set_stderr(content) } RequestType::EndRequest => { let end_request_rec = EndRequestRec::from_async_header(&header, read_stream).await?; debug!("[id = {}] Receive from stream: {:?}.", id, &end_request_rec); global_end_request_rec = Some(end_request_rec); break; } r#type => return Err(ErrorKind::UnknownRequestType(r#type).into()), } } match global_end_request_rec { Some(end_request_rec) => end_request_rec .end_request .protocol_status .convert_to_client_result(end_request_rec.end_request.app_status), None => unreachable!(), } } fn init_output(&mut self, id: u16) { self.outputs.insert(id, Default::default()); } fn get_output_mut(&mut self, id: u16) -> ClientResult<&mut Output> { self.outputs.get_mut(&id).ok_or_else(|| ErrorKind::RequestIdNotFound(id).into()) } }
35.120253
132
0.54253
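A sketch of driving Client::do_request against a FastCGI backend. The address is a placeholder, the request body is empty, and Params construction is left to the caller because its API is not shown in this file; the function name is illustrative.

use std::io;
use std::net::TcpStream;

fn example_request(params: &Params) {
    // Client::new wraps the stream in a BufStream, per the impl above.
    let stream = TcpStream::connect("127.0.0.1:9000").expect("connect to FastCGI backend");
    let mut client = Client::new(stream, false);
    let _output = client
        .do_request(params, &mut io::empty())
        .expect("FastCGI request failed");
}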
7a8a19cd246a627a37b017281ca96452f89e5d71
11,474
use crate::*; use graphics_server::api::*; use xous_ipc::{String, Buffer}; use num_traits::*; use core::fmt::Write; #[derive(Debug, Copy, Clone, num_derive::FromPrimitive, num_derive::ToPrimitive)] pub enum TextEntryVisibility { /// text is fully visible Visible = 0, /// only last chars are shown of text entry, the rest obscured with * LastChars = 1, /// all chars hidden as * Hidden = 2, } #[derive(Copy, Clone)] pub struct TextEntry { pub is_password: bool, pub visibility: TextEntryVisibility, pub action_conn: xous::CID, pub action_opcode: u32, pub action_payload: TextEntryPayload, // validator borrows the text entry payload, and returns an error message if something didn't go well. // validator takes as ragument the current action_payload, and the current action_opcode pub validator: Option<fn(TextEntryPayload, u32) -> Option<xous_ipc::String::<512>> >, } impl ActionApi for TextEntry { fn set_action_opcode(&mut self, op: u32) {self.action_opcode = op} fn is_password(&self) -> bool { self.is_password } /// The total canvas height is computed with this API call /// The canvas height is not dynamically adjustable for modals. fn height(&self, glyph_height: i16, margin: i16) -> i16 { /* ------------------- | **** | <-- glyph_height + 2*margin ------------------- ← 👁️ 🕶️ * → <-- glyph_height + 2 * margin top/bottom auto-closes on enter */ if self.is_password { glyph_height + 2*margin + glyph_height + 2*margin + 8 // 8 pixels extra margin because the emoji glyphs are oversized } else { glyph_height + 2*margin } } fn redraw(&self, at_height: i16, modal: &Modal) { let color = if self.is_password { PixelColor::Light } else { PixelColor::Dark }; // draw the currently entered text let mut tv = TextView::new( modal.canvas, TextBounds::BoundingBox(Rectangle::new( Point::new(modal.margin, at_height), Point::new(modal.canvas_width - modal.margin, at_height + modal.line_height)) )); tv.ellipsis = true; tv.invert = self.is_password; tv.style = modal.style; tv.margin = Point::new(0, 0); tv.draw_border = false; tv.insertion = Some(self.action_payload.0.len() as i32); tv.text.clear(); // make sure this is blank let payload_chars = self.action_payload.0.as_str().unwrap().chars().count(); // TODO: condense the "above 20" chars length path a bit -- written out "the dumb way" just to reason out the logic a bit match self.visibility { TextEntryVisibility::Visible => { log::trace!("action payload: {}", self.action_payload.0.as_str().unwrap()); if payload_chars < 20 { write!(tv.text, "{}", self.action_payload.0.as_str().unwrap()).unwrap(); } else { write!(tv.text, "...{}", &self.action_payload.0.as_str().unwrap()[payload_chars-18..]).unwrap(); } modal.gam.post_textview(&mut tv).expect("couldn't post textview"); }, TextEntryVisibility::Hidden => { if payload_chars < 20 { for _char in self.action_payload.0.as_str().unwrap().chars() { tv.text.push('*').expect("text field too long"); } } else { // just render a pure dummy string tv.text.push('.').unwrap(); tv.text.push('.').unwrap(); tv.text.push('.').unwrap(); for _ in 0..18 { tv.text.push('*').expect("text field too long"); } } modal.gam.post_textview(&mut tv).expect("couldn't post textview"); }, TextEntryVisibility::LastChars => { if payload_chars < 20 { let hide_to = if self.action_payload.0.as_str().unwrap().chars().count() >= 2 { self.action_payload.0.as_str().unwrap().chars().count() - 2 } else { 0 }; for (index, ch) in self.action_payload.0.as_str().unwrap().chars().enumerate() { if index < hide_to { tv.text.push('*').expect("text field too long"); } else { 
tv.text.push(ch).expect("text field too long"); } } } else { tv.text.push('.').unwrap(); tv.text.push('.').unwrap(); tv.text.push('.').unwrap(); let hide_to = if self.action_payload.0.as_str().unwrap().chars().count() >= 2 { self.action_payload.0.as_str().unwrap().chars().count() - 2 } else { 0 }; for (index, ch) in self.action_payload.0.as_str().unwrap()[payload_chars-18..].chars().enumerate() { if index + payload_chars-18 < hide_to { tv.text.push('*').expect("text field too long"); } else { tv.text.push(ch).expect("text field too long"); } } } modal.gam.post_textview(&mut tv).expect("couldn't post textview"); } } if self.is_password { // draw the visibility selection area // "<👀🤫✴️>" coded explicitly. Pasting unicode into vscode yields extra cruft that we can't parse (e.g. skin tones and color mods). let prompt = "\u{2b05} \u{1f440}\u{1f576}\u{26d4} \u{27a1}"; let select_index = match self.visibility { TextEntryVisibility::Visible => 2, TextEntryVisibility::LastChars => 3, TextEntryVisibility::Hidden => 4, }; let spacing = 38; // fixed width spacing for the array let emoji_width = 36; // center the prompt nicely, if possible let left_edge = if modal.canvas_width > prompt.chars().count() as i16 * spacing { (modal.canvas_width - prompt.chars().count() as i16 * spacing) / 2 } else { 0 }; for (i, ch) in prompt.chars().enumerate() { let mut tv = TextView::new( modal.canvas, TextBounds::BoundingBox(Rectangle::new( Point::new(left_edge + i as i16 * spacing, at_height + modal.line_height + modal.margin * 4), Point::new(left_edge + i as i16 * spacing + emoji_width, at_height + modal.line_height + 34 + modal.margin * 4)) )); tv.style = GlyphStyle::Regular; tv.margin = Point::new(0, 0); tv.draw_border = false; if i == select_index { tv.invert = !self.is_password; } else { tv.invert = self.is_password; } tv.text.clear(); write!(tv.text, "{}", ch).unwrap(); log::trace!("tv.text: {} : {}/{}", i, tv.text, ch); modal.gam.post_textview(&mut tv).expect("couldn't post textview"); } } // draw a line for where text gets entered (don't use a box, fitting could be awkward) modal.gam.draw_line(modal.canvas, Line::new_with_style( Point::new(modal.margin, at_height + modal.line_height + 4), Point::new(modal.canvas_width - modal.margin, at_height + modal.line_height + 4), DrawStyle::new(color, color, 1)) ).expect("couldn't draw entry line"); } fn key_action(&mut self, k: char) -> (Option<xous_ipc::String::<512>>, bool) { log::trace!("key_action: {}", k); match k { '←' => { if self.visibility as u32 > 0 { match FromPrimitive::from_u32(self.visibility as u32 - 1) { Some(new_visibility) => { log::trace!("new visibility: {:?}", new_visibility); self.visibility = new_visibility; }, _ => { panic!("internal error: an TextEntryVisibility did not resolve correctly"); } } } }, '→' => { if (self.visibility as u32) < (TextEntryVisibility::Hidden as u32) { match FromPrimitive::from_u32(self.visibility as u32 + 1) { Some(new_visibility) => { log::trace!("new visibility: {:?}", new_visibility); self.visibility = new_visibility }, _ => { panic!("internal error: an TextEntryVisibility did not resolve correctly"); } } } }, '∴' | '\u{d}' => { if let Some(validator) = self.validator { if let Some(err_msg) = validator(self.action_payload, self.action_opcode) { self.action_payload.0.clear(); // reset the input field return (Some(err_msg), false); } } let buf = Buffer::into_buf(self.action_payload).expect("couldn't convert message to payload"); buf.send(self.action_conn, self.action_opcode).map(|_| ()).expect("couldn't send action 
message"); self.action_payload.volatile_clear(); // ensure the local copy of text is zero'd out return (None, true) } '↑' | '↓' => { // ignore these navigation keys } '\u{0}' => { // ignore null messages } '\u{8}' => { // backspace // coded in a conservative manner to avoid temporary allocations that can leave the plaintext on the stack if self.action_payload.0.len() > 0 { // don't backspace if we have no string. let mut temp_str = String::<256>::from_str(self.action_payload.0.as_str().unwrap()); let cur_len = temp_str.as_str().unwrap().chars().count(); let mut c_iter = temp_str.as_str().unwrap().chars(); self.action_payload.0.clear(); for _ in 0..cur_len-1 { self.action_payload.0.push(c_iter.next().unwrap()).unwrap(); } temp_str.volatile_clear(); } } _ => { // text entry self.action_payload.0.push(k).expect("ran out of space storing password"); log::trace!("****update payload: {}", self.action_payload.0); } } (None, false) } }
45.713147
142
0.487537
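A sketch of populating TextEntry's public fields, all taken from the struct definition above. The connection id and payload are caller-supplied placeholders, the opcode is a dummy that set_action_opcode overwrites later, and the function name is illustrative.

fn example_entry(conn: xous::CID, payload: TextEntryPayload) -> TextEntry {
    TextEntry {
        is_password: true,
        visibility: TextEntryVisibility::LastChars,
        action_conn: conn,
        action_opcode: 0, // placeholder; set_action_opcode overwrites it
        action_payload: payload,
        validator: None,
    }
}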
5de1f334fc583baa0d283c3b73ad38d0c5afa2fe
2,105
use algebra::mnt4_753::Parameters;
use algebra::mnt6_753::Fr as MNT6Fr;
use algebra_core::curves::models::mnt4::MNT4Parameters;
use r1cs_core::SynthesisError;
use r1cs_std::mnt4_753::{G1Gadget, G2Gadget};
use r1cs_std::prelude::Boolean;

use crate::gadgets::y_to_bit::YToBitGadget as Y2BG;

/// A gadget that takes an elliptic curve point as input and outputs a single bit representing the
/// "sign" of the y-coordinate. It is meant to aid with serialization.
/// It was originally part of the Celo light client library. (https://github.com/celo-org/bls-zexe)
pub type YToBitGadget = Y2BG<<Parameters as MNT4Parameters>::G1Parameters, MNT6Fr>;

impl YToBitGadget {
    /// Outputs a boolean representing the relation:
    /// y > half
    /// where half means the half point of the modulus of the underlying field. So, half = (p-1)/2.
    pub fn y_to_bit_g1<CS: r1cs_core::ConstraintSystem<MNT6Fr>>(
        mut cs: CS,
        point: &G1Gadget,
    ) -> Result<Boolean, SynthesisError> {
        let y_bit = Self::is_greater_half(&mut cs.ns(|| "calculate y bit"), &point.y)?;

        Ok(y_bit)
    }

    /// Outputs a boolean representing the relation:
    /// (y_c1 > half) || (y_c1 == 0 && y_c0 > half)
    /// where half means the half point of the modulus of the underlying field. So, half = (p-1)/2.
    pub fn y_to_bit_g2<CS: r1cs_core::ConstraintSystem<MNT6Fr>>(
        mut cs: CS,
        point: &G2Gadget,
    ) -> Result<Boolean, SynthesisError> {
        // Calculate the required inputs to the formula.
        let y_c1_bit = Self::is_greater_half(&mut cs.ns(|| "calculate y_c1_bit"), &point.y.c1)?;
        let y_c0_bit = Self::is_greater_half(&mut cs.ns(|| "calculate y_c0 bit"), &point.y.c0)?;
        let y_c1_eq_bit = Self::is_equal_zero(&mut cs.ns(|| "calculate y_c1_eq_bit"), &point.y.c1)?;

        // Calculate the following formula:
        // (y_c1 > half) || (y_c1 == 0 && y_c0 > half)
        let cond0 = y_c1_bit;
        let cond1 = Boolean::and(cs.ns(|| "y_c1_eq_bit && y_c0_bit"), &y_c1_eq_bit, &y_c0_bit)?;
        let y_bit = Boolean::or(cs.ns(|| "cond0 || cond1"), &cond0, &cond1)?;

        Ok(y_bit)
    }
}
44.787234
130
0.666033
91d90be619366133ff7ebda1f5def690e94474a4
4,091
//! # AML
//!
//! Code to parse and execute ACPI Machine Language tables.

use std::collections::HashMap;

use syscall::io::{Io, Pio};

use crate::acpi::{AcpiContext, AmlContainingTable, Sdt, SdtHeader};

#[macro_use]
mod parsermacros;

mod namespace;
mod termlist;
mod namespacemodifier;
mod pkglength;
mod namestring;
mod namedobj;
mod dataobj;
mod type1opcode;
mod type2opcode;
mod parser;

use self::parser::AmlExecutionContext;
use self::termlist::parse_term_list;

pub use self::namespace::AmlValue;

#[derive(Debug)]
pub enum AmlError {
    AmlParseError(&'static str),
    AmlInvalidOpCode,
    AmlValueError,
    AmlDeferredLoad,
    AmlFatalError(u8, u16, AmlValue),
    AmlHardFatal
}

pub fn parse_aml_table(acpi_ctx: &AcpiContext, sdt: impl AmlContainingTable) -> Result<Vec<String>, AmlError> {
    parse_aml_with_scope(acpi_ctx, sdt, "\\".to_owned())
}

pub fn parse_aml_with_scope(acpi_ctx: &AcpiContext, sdt: impl AmlContainingTable, scope: String) -> Result<Vec<String>, AmlError> {
    let data = sdt.aml();
    let mut ctx = AmlExecutionContext::new(acpi_ctx, scope);

    parse_term_list(data, &mut ctx)?;

    Ok(ctx.namespace_delta)
}

fn init_aml_table(acpi_ctx: &AcpiContext, sdt: impl AmlContainingTable) {
    match parse_aml_table(acpi_ctx, &sdt) {
        Ok(_) => log::debug!("Table {} parsed successfully", sdt.header().signature()),
        Err(AmlError::AmlParseError(e)) => log::error!("Table {} got parse error: {}", sdt.header().signature(), e),
        Err(AmlError::AmlInvalidOpCode) => log::error!("Table {} got invalid opcode", sdt.header().signature()),
        Err(AmlError::AmlValueError) => log::error!("For table {}: type constraints or value bounds not met", sdt.header().signature()),
        Err(AmlError::AmlDeferredLoad) => log::error!("For table {}: deferred load reached top level", sdt.header().signature()),
        Err(AmlError::AmlFatalError(ty, code, val)) => {
            log::error!("Fatal error occurred for table {}: type={}, code={}, val={:?}", sdt.header().signature(), ty, code, val);
            return;
        },
        Err(AmlError::AmlHardFatal) => {
            log::error!("Hard fatal error occurred for table {}", sdt.header().signature());
            return;
        }
    }
}

pub fn init_namespace(context: &AcpiContext) {
    let dsdt = context.dsdt().expect("could not find any DSDT");

    log::debug!("Found DSDT.");
    init_aml_table(context, dsdt);

    let ssdts = context.ssdts();

    for ssdt in ssdts {
        log::debug!("Found SSDT.");
        init_aml_table(context, ssdt);
    }
}

pub fn set_global_s_state(context: &AcpiContext, state: u8) {
    if state != 5 {
        return;
    }
    let fadt = match context.fadt() {
        Some(fadt) => fadt,
        None => {
            log::error!("Cannot set global S-state due to missing FADT.");
            return;
        }
    };

    let port = fadt.pm1a_control_block as u16;
    let mut val = 1 << 13; // SLP_EN (bit 13 of the PM1a control register)

    let namespace_guard = context.namespace();

    let namespace = match &*namespace_guard {
        Some(namespace) => namespace,
        None => {
            log::error!("Cannot set global S-state due to missing ACPI namespace");
            return;
        }
    };

    let s5 = match namespace.get("\\_S5") {
        Some(s5) => s5,
        None => {
            log::error!("Cannot set global S-state due to missing \\_S5");
            return;
        }
    };
    let p = match s5.get_as_package() {
        Ok(package) => package,
        Err(error) => {
            log::error!("Cannot set global S-state due to \\_S5 not being a package: {:?}", error);
            return;
        }
    };

    let slp_typa = p[0].get_as_integer(context).expect("SLP_TYPa is not an integer");
    let slp_typb = p[1].get_as_integer(context).expect("SLP_TYPb is not an integer");

    log::info!("Shutdown SLP_TYPa {:X}, SLP_TYPb {:X}", slp_typa, slp_typb);
    val |= slp_typa as u16;

    log::info!("Shutdown with ACPI outw(0x{:X}, 0x{:X})", port, val);
    Pio::<u16>::new(port).write(val);

    loop {
core::hint::spin_loop(); } }
30.080882
136
0.616231
6a4e7ba7a5ad4edfefc7f657c9ea2ca123b6b87c
322
use crate::{Context, Handle, View};

/// A basic element with no interactivity.
pub struct Element {}

impl Element {
    pub fn new(cx: &mut Context) -> Handle<Self> {
        Self {}.build(cx)
    }
}

impl View for Element {
    fn element(&self) -> Option<String> {
        Some("element".to_string())
    }
}
16.947368
50
0.583851
727e634ef92db2051817ae03911fb73cbf9a6029
31,203
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! Code related to match expressions. These are sufficiently complex
//! to warrant their own module and submodules. :) This main module
//! includes the high-level algorithm; the submodules contain the
//! details.

use build::{BlockAnd, BlockAndExtension, Builder};
use rustc_data_structures::fnv::FnvHashMap;
use rustc_data_structures::bitvec::BitVector;
use rustc::middle::const_val::ConstVal;
use rustc::ty::{AdtDef, Ty};
use rustc::mir::*;
use hair::*;
use syntax::ast::{Name, NodeId};
use syntax_pos::Span;

// helper functions, broken out by category:
mod simplify;
mod test;
mod util;

impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
    pub fn match_expr(&mut self,
                      destination: &Lvalue<'tcx>,
                      span: Span,
                      mut block: BasicBlock,
                      discriminant: ExprRef<'tcx>,
                      arms: Vec<Arm<'tcx>>)
                      -> BlockAnd<()> {
        let discriminant_lvalue = unpack!(block = self.as_lvalue(block, discriminant));

        let mut arm_blocks = ArmBlocks {
            blocks: arms.iter()
                        .map(|_| self.cfg.start_new_block())
                        .collect(),
        };

        // Get the arm bodies and their scopes, while declaring bindings.
        let arm_bodies: Vec<_> = arms.iter().map(|arm| {
            let body = self.hir.mirror(arm.body.clone());
            let scope = self.declare_bindings(None, body.span, &arm.patterns[0]);
            (body, scope.unwrap_or(self.visibility_scope))
        }).collect();

        // assemble a list of candidates: there is one candidate per
        // pattern, which means there may be more than one candidate
        // *per arm*. These candidates are kept sorted such that the
        // highest priority candidate comes first in the list.
        // (i.e. same order as in source)
        let candidates: Vec<_> = arms.iter()
                                     .enumerate()
                                     .flat_map(|(arm_index, arm)| {
                                         arm.patterns.iter()
                                                     .map(move |pat| (arm_index, pat, arm.guard.clone()))
                                     })
                                     .map(|(arm_index, pattern, guard)| {
                                         Candidate {
                                             span: pattern.span,
                                             match_pairs: vec![MatchPair::new(discriminant_lvalue.clone(), pattern)],
                                             bindings: vec![],
                                             guard: guard,
                                             arm_index: arm_index,
                                         }
                                     })
                                     .collect();

        // this will generate code to test discriminant_lvalue and
        // branch to the appropriate arm block
        let otherwise = self.match_candidates(span, &mut arm_blocks, candidates, block);

        if !otherwise.is_empty() {
            // All matches are exhaustive. However, because some matches
            // only have exponentially-large exhaustive decision trees, we
            // sometimes generate an inexhaustive decision tree.
            //
            // In that case, the inexhaustive tips of the decision tree
            // can't be reached - terminate them with an `unreachable`.
            let source_info = self.source_info(span);

            let mut otherwise = otherwise;
            otherwise.sort();
            otherwise.dedup(); // variant switches can introduce duplicate target blocks
            for block in otherwise {
                self.cfg.terminate(block, source_info, TerminatorKind::Unreachable);
            }
        }

        // all the arm blocks will rejoin here
        let end_block = self.cfg.start_new_block();

        let outer_source_info = self.source_info(span);
        for (arm_index, (body, visibility_scope)) in arm_bodies.into_iter().enumerate() {
            let mut arm_block = arm_blocks.blocks[arm_index];
            // Re-enter the visibility scope we created the bindings in.
self.visibility_scope = visibility_scope; unpack!(arm_block = self.into(destination, arm_block, body)); self.cfg.terminate(arm_block, outer_source_info, TerminatorKind::Goto { target: end_block }); } self.visibility_scope = outer_source_info.scope; end_block.unit() } pub fn expr_into_pattern(&mut self, mut block: BasicBlock, irrefutable_pat: Pattern<'tcx>, initializer: ExprRef<'tcx>) -> BlockAnd<()> { // optimize the case of `let x = ...` match *irrefutable_pat.kind { PatternKind::Binding { mode: BindingMode::ByValue, var, subpattern: None, .. } => { self.storage_live_for_bindings(block, &irrefutable_pat); let lvalue = Lvalue::Local(self.var_indices[&var]); return self.into(&lvalue, block, initializer); } _ => {} } let lvalue = unpack!(block = self.as_lvalue(block, initializer)); self.lvalue_into_pattern(block, irrefutable_pat, &lvalue) } pub fn lvalue_into_pattern(&mut self, mut block: BasicBlock, irrefutable_pat: Pattern<'tcx>, initializer: &Lvalue<'tcx>) -> BlockAnd<()> { // create a dummy candidate let mut candidate = Candidate { span: irrefutable_pat.span, match_pairs: vec![MatchPair::new(initializer.clone(), &irrefutable_pat)], bindings: vec![], guard: None, arm_index: 0, // since we don't call `match_candidates`, this field is unused }; // Simplify the candidate. Since the pattern is irrefutable, this should // always convert all match-pairs into bindings. unpack!(block = self.simplify_candidate(block, &mut candidate)); if !candidate.match_pairs.is_empty() { span_bug!(candidate.match_pairs[0].pattern.span, "match pairs {:?} remaining after simplifying \ irrefutable pattern", candidate.match_pairs); } // now apply the bindings, which will also declare the variables self.bind_matched_candidate(block, candidate.bindings); block.unit() } /// Declares the bindings of the given pattern and returns the visibility scope /// for the bindings in this patterns, if such a scope had to be created. /// NOTE: Declaring the bindings should always be done in their drop scope. pub fn declare_bindings(&mut self, mut var_scope: Option<VisibilityScope>, scope_span: Span, pattern: &Pattern<'tcx>) -> Option<VisibilityScope> { match *pattern.kind { PatternKind::Binding { mutability, name, mode: _, var, ty, ref subpattern } => { if var_scope.is_none() { var_scope = Some(self.new_visibility_scope(scope_span)); } let source_info = SourceInfo { span: pattern.span, scope: var_scope.unwrap() }; self.declare_binding(source_info, mutability, name, var, ty); if let Some(subpattern) = subpattern.as_ref() { var_scope = self.declare_bindings(var_scope, scope_span, subpattern); } } PatternKind::Array { ref prefix, ref slice, ref suffix } | PatternKind::Slice { ref prefix, ref slice, ref suffix } => { for subpattern in prefix.iter().chain(slice).chain(suffix) { var_scope = self.declare_bindings(var_scope, scope_span, subpattern); } } PatternKind::Constant { .. } | PatternKind::Range { .. } | PatternKind::Wild => { } PatternKind::Deref { ref subpattern } => { var_scope = self.declare_bindings(var_scope, scope_span, subpattern); } PatternKind::Leaf { ref subpatterns } | PatternKind::Variant { ref subpatterns, .. } => { for subpattern in subpatterns { var_scope = self.declare_bindings(var_scope, scope_span, &subpattern.pattern); } } } var_scope } /// Emit `StorageLive` for every binding in the pattern. pub fn storage_live_for_bindings(&mut self, block: BasicBlock, pattern: &Pattern<'tcx>) { match *pattern.kind { PatternKind::Binding { var, ref subpattern, .. 
} => { let lvalue = Lvalue::Local(self.var_indices[&var]); let source_info = self.source_info(pattern.span); self.cfg.push(block, Statement { source_info: source_info, kind: StatementKind::StorageLive(lvalue) }); if let Some(subpattern) = subpattern.as_ref() { self.storage_live_for_bindings(block, subpattern); } } PatternKind::Array { ref prefix, ref slice, ref suffix } | PatternKind::Slice { ref prefix, ref slice, ref suffix } => { for subpattern in prefix.iter().chain(slice).chain(suffix) { self.storage_live_for_bindings(block, subpattern); } } PatternKind::Constant { .. } | PatternKind::Range { .. } | PatternKind::Wild => { } PatternKind::Deref { ref subpattern } => { self.storage_live_for_bindings(block, subpattern); } PatternKind::Leaf { ref subpatterns } | PatternKind::Variant { ref subpatterns, .. } => { for subpattern in subpatterns { self.storage_live_for_bindings(block, &subpattern.pattern); } } } } } /// List of blocks for each arm (and potentially other metadata in the /// future). struct ArmBlocks { blocks: Vec<BasicBlock>, } #[derive(Clone, Debug)] pub struct Candidate<'pat, 'tcx:'pat> { // span of the original pattern that gave rise to this candidate span: Span, // all of these must be satisfied... match_pairs: Vec<MatchPair<'pat, 'tcx>>, // ...these bindings established... bindings: Vec<Binding<'tcx>>, // ...and the guard must be evaluated... guard: Option<ExprRef<'tcx>>, // ...and then we branch to arm with this index. arm_index: usize, } #[derive(Clone, Debug)] struct Binding<'tcx> { span: Span, source: Lvalue<'tcx>, name: Name, var_id: NodeId, var_ty: Ty<'tcx>, mutability: Mutability, binding_mode: BindingMode<'tcx>, } #[derive(Clone, Debug)] pub struct MatchPair<'pat, 'tcx:'pat> { // this lvalue... lvalue: Lvalue<'tcx>, // ... must match this pattern. pattern: &'pat Pattern<'tcx>, // HACK(eddyb) This is used to toggle whether a Slice pattern // has had its length checked. This is only necessary because // the "rest" part of the pattern right now has type &[T] and // as such, it requires an Rvalue::Slice to be generated. // See RFC 495 / issue #23121 for the eventual (proper) solution. slice_len_checked: bool } #[derive(Clone, Debug, PartialEq)] enum TestKind<'tcx> { // test the branches of enum Switch { adt_def: AdtDef<'tcx>, variants: BitVector, }, // test the branches of enum SwitchInt { switch_ty: Ty<'tcx>, options: Vec<ConstVal>, indices: FnvHashMap<ConstVal, usize>, }, // test for equality Eq { value: ConstVal, ty: Ty<'tcx>, }, // test whether the value falls within an inclusive range Range { lo: Literal<'tcx>, hi: Literal<'tcx>, ty: Ty<'tcx>, }, // test length of the slice is equal to len Len { len: u64, op: BinOp, }, } #[derive(Debug)] pub struct Test<'tcx> { span: Span, kind: TestKind<'tcx>, } /////////////////////////////////////////////////////////////////////////// // Main matching algorithm impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { /// The main match algorithm. It begins with a set of candidates /// `candidates` and has the job of generating code to determine /// which of these candidates, if any, is the correct one. The /// candidates are sorted such that the first item in the list /// has the highest priority. When a candidate is found to match /// the value, we will generate a branch to the appropriate /// block found in `arm_blocks`. /// /// The return value is a list of "otherwise" blocks. These are /// points in execution where we found that *NONE* of the /// candidates apply. 
In principle, this means that the input /// list was not exhaustive, though at present we sometimes are /// not smart enough to recognize all exhaustive inputs. /// /// It might be surprising that the input can be inexhaustive. /// Indeed, initially, it is not, because all matches are /// exhaustive in Rust. But during processing we sometimes divide /// up the list of candidates and recurse with a non-exhaustive /// list. This is important to keep the size of the generated code /// under control. See `test_candidates` for more details. fn match_candidates<'pat>(&mut self, span: Span, arm_blocks: &mut ArmBlocks, mut candidates: Vec<Candidate<'pat, 'tcx>>, mut block: BasicBlock) -> Vec<BasicBlock> { debug!("matched_candidate(span={:?}, block={:?}, candidates={:?})", span, block, candidates); // Start by simplifying candidates. Once this process is // complete, all the match pairs which remain require some // form of test, whether it be a switch or pattern comparison. for candidate in &mut candidates { unpack!(block = self.simplify_candidate(block, candidate)); } // The candidates are sorted by priority. Check to see // whether the higher priority candidates (and hence at // the front of the vec) have satisfied all their match // pairs. let fully_matched = candidates.iter().take_while(|c| c.match_pairs.is_empty()).count(); debug!("match_candidates: {:?} candidates fully matched", fully_matched); let mut unmatched_candidates = candidates.split_off(fully_matched); for candidate in candidates { // If so, apply any bindings, test the guard (if any), and // branch to the arm. if let Some(b) = self.bind_and_guard_matched_candidate(block, arm_blocks, candidate) { block = b; } else { // if None is returned, then any remaining candidates // are unreachable (at least not through this path). return vec![]; } } // If there are no candidates that still need testing, we're done. // Since all matches are exhaustive, execution should never reach this point. if unmatched_candidates.is_empty() { return vec![block]; } // Test candidates where possible. let (otherwise, tested_candidates) = self.test_candidates(span, arm_blocks, &unmatched_candidates, block); // If the target candidates were exhaustive, then we are done. if otherwise.is_empty() { return vec![]; } // If all candidates were sorted into `target_candidates` somewhere, then // the initial set was inexhaustive. let untested_candidates = unmatched_candidates.split_off(tested_candidates); if untested_candidates.len() == 0 { return otherwise; } // Otherwise, let's process those remaining candidates. let join_block = self.join_otherwise_blocks(span, otherwise); self.match_candidates(span, arm_blocks, untested_candidates, join_block) } fn join_otherwise_blocks(&mut self, span: Span, mut otherwise: Vec<BasicBlock>) -> BasicBlock { let source_info = self.source_info(span); otherwise.sort(); otherwise.dedup(); // variant switches can introduce duplicate target blocks if otherwise.len() == 1 { otherwise[0] } else { let join_block = self.cfg.start_new_block(); for block in otherwise { self.cfg.terminate(block, source_info, TerminatorKind::Goto { target: join_block }); } join_block } } /// This is the most subtle part of the matching algorithm. At /// this point, the input candidates have been fully simplified, /// and so we know that all remaining match-pairs require some /// sort of test. To decide what test to do, we take the highest /// priority candidate (last one in the list) and extract the /// first match-pair from the list. 
From this we decide what kind /// of test is needed using `test`, defined in the `test` module. /// /// *Note:* taking the first match pair is somewhat arbitrary, and /// we might do better here by choosing more carefully what to /// test. /// /// For example, consider the following possible match-pairs: /// /// 1. `x @ Some(P)` -- we will do a `Switch` to decide what variant `x` has /// 2. `x @ 22` -- we will do a `SwitchInt` /// 3. `x @ 3..5` -- we will do a range test /// 4. etc. /// /// Once we know what sort of test we are going to perform, this /// test may also help us with other candidates. So we walk over /// the candidates (from high to low priority) and check. This /// gives us, for each outcome of the test, a transformed list of /// candidates. For example, if we are testing the current /// variant of `x.0`, and we have a candidate `{x.0 @ Some(v), x.1 /// @ 22}`, then we would have a resulting candidate of `{(x.0 as /// Some).0 @ v, x.1 @ 22}`. Note that the first match-pair is now /// simpler (and, in fact, irrefutable). /// /// But there may also be candidates that the test just doesn't /// apply to. The classical example involves wildcards: /// /// ```rust,ignore /// match (x, y, z) { /// (true, _, true) => true, // (0) /// (_, true, _) => true, // (1) /// (false, false, _) => false, // (2) /// (true, _, false) => false, // (3) /// } /// ``` /// /// In that case, after we test on `x`, there are 2 overlapping candidate /// sets: /// /// - If the outcome is that `x` is true, candidates 0, 1, and 3 /// - If the outcome is that `x` is false, candidates 1 and 2 /// /// Here, the traditional "decision tree" method would generate 2 /// separate code-paths for the 2 separate cases. /// /// In some cases, this duplication can create an exponential amount of /// code. This is most easily seen by noticing that this method terminates /// with precisely the reachable arms being reachable - but that problem /// is trivially NP-complete: /// /// ```rust /// match (var0, var1, var2, var3, ..) { /// (true, _, _, false, true, ...) => false, /// (_, true, true, false, _, ...) => false, /// (false, _, false, false, _, ...) => false, /// ... /// _ => true /// } /// ``` /// /// Here the last arm is reachable only if there is an assignment to /// the variables that does not match any of the literals. Therefore, /// compilation would take an exponential amount of time in some cases. /// /// That kind of exponential worst-case might not occur in practice, but /// our simplistic treatment of constants and guards would make it occur /// in very common situations - for example #29740: /// /// ```rust /// match x { /// "foo" if foo_guard => ..., /// "bar" if bar_guard => ..., /// "baz" if baz_guard => ..., /// ... /// } /// ``` /// /// Here we first test the match-pair `x @ "foo"`, which is an `Eq` test. /// /// It might seem that we would end up with 2 disjoint candidate /// sets, consisting of the first candidate or the other 3, but our /// algorithm doesn't reason about "foo" being distinct from the other /// constants; it considers the latter arms to potentially match after /// both outcomes, which obviously leads to an exponential amount /// of tests. /// /// To avoid these kinds of problems, our algorithm tries to ensure /// the amount of generated tests is linear. When we do a k-way test, /// we return an additional "unmatched" set alongside the obvious `k` /// sets. 
When we encounter a candidate that would be present in more /// than one of the sets, we put it and all candidates below it into the /// "unmatched" set. This ensures these `k+1` sets are disjoint. /// /// After we perform our test, we branch into the appropriate candidate /// set and recurse with `match_candidates`. These sub-matches are /// obviously inexhaustive - as we discarded our otherwise set - so /// we set their continuation to do `match_candidates` on the /// "unmatched" set (which is again inexhaustive). /// /// If you apply this to the above test, you basically wind up /// with an if-else-if chain, testing each candidate in turn, /// which is precisely what we want. /// /// In addition to avoiding exponential-time blowups, this algorithm /// also has nice property that each guard and arm is only generated /// once. fn test_candidates<'pat>(&mut self, span: Span, arm_blocks: &mut ArmBlocks, candidates: &[Candidate<'pat, 'tcx>], block: BasicBlock) -> (Vec<BasicBlock>, usize) { // extract the match-pair from the highest priority candidate let match_pair = &candidates.first().unwrap().match_pairs[0]; let mut test = self.test(match_pair); // most of the time, the test to perform is simply a function // of the main candidate; but for a test like SwitchInt, we // may want to add cases based on the candidates that are // available match test.kind { TestKind::SwitchInt { switch_ty, ref mut options, ref mut indices } => { for candidate in candidates.iter() { if !self.add_cases_to_switch(&match_pair.lvalue, candidate, switch_ty, options, indices) { break; } } } TestKind::Switch { adt_def: _, ref mut variants} => { for candidate in candidates.iter() { if !self.add_variants_to_switch(&match_pair.lvalue, candidate, variants) { break; } } } _ => { } } // perform the test, branching to one of N blocks. For each of // those N possible outcomes, create a (initially empty) // vector of candidates. Those are the candidates that still // apply if the test has that particular outcome. debug!("match_candidates: test={:?} match_pair={:?}", test, match_pair); let target_blocks = self.perform_test(block, &match_pair.lvalue, &test); let mut target_candidates: Vec<_> = (0..target_blocks.len()).map(|_| vec![]).collect(); // Sort the candidates into the appropriate vector in // `target_candidates`. Note that at some point we may // encounter a candidate where the test is not relevant; at // that point, we stop sorting. let tested_candidates = candidates.iter() .take_while(|c| self.sort_candidate(&match_pair.lvalue, &test, c, &mut target_candidates)) .count(); assert!(tested_candidates > 0); // at least the last candidate ought to be tested debug!("tested_candidates: {}", tested_candidates); debug!("untested_candidates: {}", candidates.len() - tested_candidates); // For each outcome of test, process the candidates that still // apply. Collect a list of blocks where control flow will // branch if one of the `target_candidate` sets is not // exhaustive. let otherwise: Vec<_> = target_blocks.into_iter() .zip(target_candidates) .flat_map(|(target_block, target_candidates)| { self.match_candidates(span, arm_blocks, target_candidates, target_block) }) .collect(); (otherwise, tested_candidates) } /// Initializes each of the bindings from the candidate by /// moving/copying/ref'ing the source as appropriate. Tests the /// guard, if any, and then branches to the arm. Returns the block /// for the case where the guard fails. /// /// Note: we check earlier that if there is a guard, there cannot /// be move bindings. 
This isn't really important for the /// self-consistency of this fn, but the reason for it should be /// clear: after we've done the assignments, if there were move /// bindings, further tests would be a use-after-move (which would /// in turn be detected by the borrowck code that runs on the /// MIR). fn bind_and_guard_matched_candidate<'pat>(&mut self, mut block: BasicBlock, arm_blocks: &mut ArmBlocks, candidate: Candidate<'pat, 'tcx>) -> Option<BasicBlock> { debug!("bind_and_guard_matched_candidate(block={:?}, candidate={:?})", block, candidate); debug_assert!(candidate.match_pairs.is_empty()); self.bind_matched_candidate(block, candidate.bindings); let arm_block = arm_blocks.blocks[candidate.arm_index]; if let Some(guard) = candidate.guard { // the block to branch to if the guard fails; if there is no // guard, this block is simply unreachable let guard = self.hir.mirror(guard); let source_info = self.source_info(guard.span); let cond = unpack!(block = self.as_operand(block, guard)); let otherwise = self.cfg.start_new_block(); self.cfg.terminate(block, source_info, TerminatorKind::If { cond: cond, targets: (arm_block, otherwise)}); Some(otherwise) } else { let source_info = self.source_info(candidate.span); self.cfg.terminate(block, source_info, TerminatorKind::Goto { target: arm_block }); None } } fn bind_matched_candidate(&mut self, block: BasicBlock, bindings: Vec<Binding<'tcx>>) { debug!("bind_matched_candidate(block={:?}, bindings={:?})", block, bindings); // Assign each of the bindings. This may trigger moves out of the candidate. for binding in bindings { // Find the variable for the `var_id` being bound. It // should have been created by a previous call to // `declare_bindings`. let var_index = self.var_indices[&binding.var_id]; let rvalue = match binding.binding_mode { BindingMode::ByValue => Rvalue::Use(Operand::Consume(binding.source)), BindingMode::ByRef(region, borrow_kind) => Rvalue::Ref(region, borrow_kind, binding.source), }; let source_info = self.source_info(binding.span); self.cfg.push(block, Statement { source_info: source_info, kind: StatementKind::StorageLive(Lvalue::Local(var_index)) }); self.cfg.push_assign(block, source_info, &Lvalue::Local(var_index), rvalue); } } fn declare_binding(&mut self, source_info: SourceInfo, mutability: Mutability, name: Name, var_id: NodeId, var_ty: Ty<'tcx>) -> Local { debug!("declare_binding(var_id={:?}, name={:?}, var_ty={:?}, source_info={:?})", var_id, name, var_ty, source_info); let var = self.local_decls.push(LocalDecl::<'tcx> { mutability: mutability, ty: var_ty.clone(), name: Some(name), source_info: Some(source_info), }); let extent = self.extent_of_innermost_scope(); self.schedule_drop(source_info.span, extent, &Lvalue::Local(var), var_ty); self.var_indices.insert(var_id, var); debug!("declare_binding: var={:?}", var); var } }
42.109312
98
0.552158
89a4becba85bbe9392671f6cbdcbbb9e98077a58
6,351
mod brin; mod gin; mod gist; mod spgist; use crate::{common::*, with_header, Provider}; #[test] fn hash_index() { let dml = indoc! {r#" model A { id Int @id a Int @@index([a], type: Hash) } "#}; let schema = with_header(dml, Provider::Postgres, &["extendedIndexes"]); let schema = parse(&schema); schema.assert_has_model("A").assert_has_index(IndexDefinition { name: None, db_name: Some("A_a_idx".to_string()), fields: vec![IndexField::new_in_model("a")], tpe: IndexType::Normal, defined_on_field: false, algorithm: Some(IndexAlgorithm::Hash), clustered: None, }); } #[test] fn hash_index_disallows_ops() { let dml = indoc! {r#" model A { id Int @id a Int @@index([a(ops: Int4MinMaxOps)], type: Hash) } "#}; let dml = with_header(dml, Provider::Postgres, &["extendedIndexes"]); let error = datamodel::parse_schema(&dml).map(drop).unwrap_err(); let expectation = expect![[r#" error: Error parsing attribute "@@index": The given operator class `Int4MinMaxOps` is not supported with the `Hash` index type. --> schema.prisma:15  |  14 |  15 |  @@index([a(ops: Int4MinMaxOps)], type: Hash)  |  "#]]; expectation.assert_eq(&error) } #[test] fn btree_index_disallows_ops() { let dml = indoc! {r#" model A { id Int @id a Int @@index([a(ops: Int4MinMaxOps)], type: BTree) } "#}; let dml = with_header(dml, Provider::Postgres, &["extendedIndexes"]); let error = datamodel::parse_schema(&dml).map(drop).unwrap_err(); let expectation = expect![[r#" error: Error parsing attribute "@@index": The given operator class `Int4MinMaxOps` is not supported with the `BTree` index type. --> schema.prisma:15  |  14 |  15 |  @@index([a(ops: Int4MinMaxOps)], type: BTree)  |  "#]]; expectation.assert_eq(&error) } #[test] fn unique_sort_order() { let dml = indoc! {r#" model A { id String @unique(sort: Desc) } "#}; let schema = with_header(dml, Provider::Postgres, &["extendedIndexes"]); assert!(datamodel::parse_schema(&schema).is_ok()); } #[test] fn compound_unique_sort_order() { let dml = indoc! {r#" model A { a String b String @@unique([a(sort: Desc), b(sort: Asc)]) } "#}; let schema = with_header(dml, Provider::Postgres, &["extendedIndexes"]); assert!(datamodel::parse_schema(&schema).is_ok()); } #[test] fn index_sort_order() { let dml = indoc! {r#" model A { id Int @id a String @@index([a(sort: Desc)]) } "#}; let schema = with_header(dml, Provider::Postgres, &["extendedIndexes"]); assert!(datamodel::parse_schema(&schema).is_ok()); } #[test] fn disallows_unique_length_prefix() { let dml = indoc! {r#" model A { id String @unique(length: 30) @test.VarChar(255) } "#}; let dml = with_header(dml, Provider::Postgres, &["extendedIndexes"]); let error = datamodel::parse_schema(&dml).map(drop).unwrap_err(); let expectation = expect![[r#" error: Error parsing attribute "@unique": The length argument is not supported in an index definition with the current connector --> schema.prisma:12  |  11 | model A { 12 |  id String @unique(length: 30) @test.VarChar(255)  |  "#]]; expectation.assert_eq(&error) } #[test] fn disallows_compound_unique_length_prefix() { let dml = indoc! 
{r#" model A { a String b String @@unique([a(length: 10), b(length: 30)]) } "#}; let dml = with_header(dml, Provider::Postgres, &["extendedIndexes"]); let error = datamodel::parse_schema(&dml).map(drop).unwrap_err(); let expectation = expect![[r#" error: Error parsing attribute "@unique": The length argument is not supported in an index definition with the current connector --> schema.prisma:14  |  13 |  b String 14 |  @@unique([a(length: 10), b(length: 30)])  |  "#]]; expectation.assert_eq(&error) } #[test] fn disallows_index_length_prefix() { let dml = indoc! {r#" model A { id Int @id a String @@index([a(length: 10)]) } "#}; let dml = with_header(dml, Provider::Postgres, &["extendedIndexes"]); let error = datamodel::parse_schema(&dml).map(drop).unwrap_err(); let expectation = expect![[r#" error: Error parsing attribute "@index": The length argument is not supported in an index definition with the current connector --> schema.prisma:15  |  14 |  15 |  @@index([a(length: 10)])  |  "#]]; expectation.assert_eq(&error) } #[test] fn operator_classes_not_allowed_with_unique() { let dml = indoc! {r#" model A { id Int @id a String b String @@unique([a(ops: raw("foo")), b(ops: raw("bar"))]) } "#}; let dml = with_header(dml, Provider::Postgres, &["extendedIndexes"]); let error = datamodel::parse_schema(&dml).map(drop).unwrap_err(); let expectation = expect![[r#" error: Error parsing attribute "@unique": Operator classes can only be defined to fields in an @@index attribute. --> schema.prisma:16  |  15 |  16 |  @@unique([a(ops: raw("foo")), b(ops: raw("bar"))])  |  "#]]; expectation.assert_eq(&error) }
28.10177
155
0.553614
bfbfc4dea7a1d295acc6b56ad4d895e2eaff06cb
7,698
#[doc = r" Value read from the register"] pub struct R { bits: u32, } #[doc = r" Value to write to the register"] pub struct W { bits: u32, } impl super::Ccmr2Input { #[doc = r" Modifies the contents of the register"] #[inline(always)] pub fn modify<F>(&self, f: F) where for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W, { let bits = self.register.get(); let r = R { bits: bits }; let mut w = W { bits: bits }; f(&r, &mut w); self.register.set(w.bits); } #[doc = r" Reads the contents of the register"] #[inline(always)] pub fn read(&self) -> R { R { bits: self.register.get(), } } #[doc = r" Writes to the register"] #[inline(always)] pub fn write<F>(&self, f: F) where F: FnOnce(&mut W) -> &mut W, { let mut w = W::reset_value(); f(&mut w); self.register.set(w.bits); } } #[doc = r" Value of the field"] pub struct Ic4fR { bits: u8, } impl Ic4fR { #[doc = r" Value of the field as raw bits"] #[inline(always)] pub fn bits(&self) -> u8 { self.bits } } #[doc = r" Value of the field"] pub struct Ic4pscR { bits: u8, } impl Ic4pscR { #[doc = r" Value of the field as raw bits"] #[inline(always)] pub fn bits(&self) -> u8 { self.bits } } #[doc = r" Value of the field"] pub struct Cc4sR { bits: u8, } impl Cc4sR { #[doc = r" Value of the field as raw bits"] #[inline(always)] pub fn bits(&self) -> u8 { self.bits } } #[doc = r" Value of the field"] pub struct Ic3fR { bits: u8, } impl Ic3fR { #[doc = r" Value of the field as raw bits"] #[inline(always)] pub fn bits(&self) -> u8 { self.bits } } #[doc = r" Value of the field"] pub struct Ic3pscR { bits: u8, } impl Ic3pscR { #[doc = r" Value of the field as raw bits"] #[inline(always)] pub fn bits(&self) -> u8 { self.bits } } #[doc = r" Value of the field"] pub struct Cc3sR { bits: u8, } impl Cc3sR { #[doc = r" Value of the field as raw bits"] #[inline(always)] pub fn bits(&self) -> u8 { self.bits } } #[doc = r" Proxy"] pub struct _Ic4fW<'a> { w: &'a mut W, } impl<'a> _Ic4fW<'a> { #[doc = r" Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, bits: u8) -> &'a mut W { const MASK: u8 = 15; const OFFSET: u8 = 12; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((bits & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _Ic4pscW<'a> { w: &'a mut W, } impl<'a> _Ic4pscW<'a> { #[doc = r" Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, bits: u8) -> &'a mut W { const MASK: u8 = 3; const OFFSET: u8 = 10; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((bits & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _Cc4sW<'a> { w: &'a mut W, } impl<'a> _Cc4sW<'a> { #[doc = r" Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, bits: u8) -> &'a mut W { const MASK: u8 = 3; const OFFSET: u8 = 8; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((bits & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _Ic3fW<'a> { w: &'a mut W, } impl<'a> _Ic3fW<'a> { #[doc = r" Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, bits: u8) -> &'a mut W { const MASK: u8 = 15; const OFFSET: u8 = 4; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((bits & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _Ic3pscW<'a> { w: &'a mut W, } impl<'a> _Ic3pscW<'a> { #[doc = r" Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, bits: u8) -> &'a mut W { const MASK: u8 = 3; const OFFSET: u8 = 2; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((bits & MASK) as u32) << OFFSET; self.w } } #[doc = 
r" Proxy"] pub struct _Cc3sW<'a> { w: &'a mut W, } impl<'a> _Cc3sW<'a> { #[doc = r" Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, bits: u8) -> &'a mut W { const MASK: u8 = 3; const OFFSET: u8 = 0; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((bits & MASK) as u32) << OFFSET; self.w } } impl R { #[doc = r" Value of the register as raw bits"] #[inline(always)] pub fn bits(&self) -> u32 { self.bits } #[doc = "Bits 12:15 - Input capture 4 filter"] #[inline(always)] pub fn ic4f(&self) -> Ic4fR { let bits = { const MASK: u8 = 15; const OFFSET: u8 = 12; ((self.bits >> OFFSET) & MASK as u32) as u8 }; Ic4fR { bits } } #[doc = "Bits 10:11 - Input capture 4 prescaler"] #[inline(always)] pub fn ic4psc(&self) -> Ic4pscR { let bits = { const MASK: u8 = 3; const OFFSET: u8 = 10; ((self.bits >> OFFSET) & MASK as u32) as u8 }; Ic4pscR { bits } } #[doc = "Bits 8:9 - Capture/Compare 4 selection"] #[inline(always)] pub fn cc4s(&self) -> Cc4sR { let bits = { const MASK: u8 = 3; const OFFSET: u8 = 8; ((self.bits >> OFFSET) & MASK as u32) as u8 }; Cc4sR { bits } } #[doc = "Bits 4:7 - Input capture 3 filter"] #[inline(always)] pub fn ic3f(&self) -> Ic3fR { let bits = { const MASK: u8 = 15; const OFFSET: u8 = 4; ((self.bits >> OFFSET) & MASK as u32) as u8 }; Ic3fR { bits } } #[doc = "Bits 2:3 - Input capture 3 prescaler"] #[inline(always)] pub fn ic3psc(&self) -> Ic3pscR { let bits = { const MASK: u8 = 3; const OFFSET: u8 = 2; ((self.bits >> OFFSET) & MASK as u32) as u8 }; Ic3pscR { bits } } #[doc = "Bits 0:1 - Capture/compare 3 selection"] #[inline(always)] pub fn cc3s(&self) -> Cc3sR { let bits = { const MASK: u8 = 3; const OFFSET: u8 = 0; ((self.bits >> OFFSET) & MASK as u32) as u8 }; Cc3sR { bits } } } impl W { #[doc = r" Reset value of the register"] #[inline(always)] pub fn reset_value() -> W { W { bits: 0 } } #[doc = r" Writes raw bits to the register"] #[inline(always)] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.bits = bits; self } #[doc = "Bits 12:15 - Input capture 4 filter"] #[inline(always)] pub fn ic4f(&mut self) -> _Ic4fW { _Ic4fW { w: self } } #[doc = "Bits 10:11 - Input capture 4 prescaler"] #[inline(always)] pub fn ic4psc(&mut self) -> _Ic4pscW { _Ic4pscW { w: self } } #[doc = "Bits 8:9 - Capture/Compare 4 selection"] #[inline(always)] pub fn cc4s(&mut self) -> _Cc4sW { _Cc4sW { w: self } } #[doc = "Bits 4:7 - Input capture 3 filter"] #[inline(always)] pub fn ic3f(&mut self) -> _Ic3fW { _Ic3fW { w: self } } #[doc = "Bits 2:3 - Input capture 3 prescaler"] #[inline(always)] pub fn ic3psc(&mut self) -> _Ic3pscW { _Ic3pscW { w: self } } #[doc = "Bits 0:1 - Capture/compare 3 selection"] #[inline(always)] pub fn cc3s(&mut self) -> _Cc3sW { _Cc3sW { w: self } } }
25.156863
59
0.497792
758ac7b0730ace80ff570db43a19f67a271f27dd
2,693
use bytes::BytesMut; use http::{HeaderMap, Method}; use httparse::ParserConfig; use crate::body::DecodedLength; use crate::proto::{BodyLength, MessageHead}; pub(crate) use self::conn::Conn; pub(crate) use self::decode::Decoder; pub(crate) use self::dispatch::Dispatcher; pub(crate) use self::encode::{EncodedBuf, Encoder}; //TODO: move out of h1::io pub(crate) use self::io::MINIMUM_MAX_BUFFER_SIZE; mod conn; mod decode; pub(crate) mod dispatch; mod encode; mod io; mod role; cfg_client! { pub(crate) type ClientTransaction = role::Client; } cfg_server! { pub(crate) type ServerTransaction = role::Server; } pub(crate) trait Http1Transaction { type Incoming; type Outgoing: Default; const LOG: &'static str; fn parse(bytes: &mut BytesMut, ctx: ParseContext<'_>) -> ParseResult<Self::Incoming>; fn encode(enc: Encode<'_, Self::Outgoing>, dst: &mut Vec<u8>) -> crate::Result<Encoder>; fn on_error(err: &crate::Error) -> Option<MessageHead<Self::Outgoing>>; fn is_client() -> bool { !Self::is_server() } fn is_server() -> bool { !Self::is_client() } fn should_error_on_parse_eof() -> bool { Self::is_client() } fn should_read_first() -> bool { Self::is_server() } fn update_date() {} } /// Result newtype for Http1Transaction::parse. pub(crate) type ParseResult<T> = Result<Option<ParsedMessage<T>>, crate::error::Parse>; #[derive(Debug)] pub(crate) struct ParsedMessage<T> { head: MessageHead<T>, decode: DecodedLength, expect_continue: bool, keep_alive: bool, wants_upgrade: bool, } pub(crate) struct ParseContext<'a> { cached_headers: &'a mut Option<HeaderMap>, req_method: &'a mut Option<Method>, h1_parser_config: ParserConfig, preserve_header_case: bool, h09_responses: bool, #[cfg(feature = "ffi")] on_informational: &'a mut Option<crate::ffi::OnInformational>, #[cfg(feature = "ffi")] raw_headers: bool, } /// Passed to Http1Transaction::encode pub(crate) struct Encode<'a, T> { head: &'a mut MessageHead<T>, body: Option<BodyLength>, #[cfg(feature = "server")] keep_alive: bool, req_method: &'a mut Option<Method>, title_case_headers: bool, } /// Extra flags that a request "wants", like expect-continue or upgrades. #[derive(Clone, Copy, Debug)] struct Wants(u8); impl Wants { const EMPTY: Wants = Wants(0b00); const EXPECT: Wants = Wants(0b01); const UPGRADE: Wants = Wants(0b10); #[must_use] fn add(self, other: Wants) -> Wants { Wants(self.0 | other.0) } fn contains(&self, other: Wants) -> bool { (self.0 & other.0) == other.0 } }
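// A quick sketch of the `Wants` bit-set semantics above (illustrative only;
// the type is crate-private, so a check like this would live inside this
// module, e.g. as a unit test):
#[cfg(test)]
mod wants_tests {
    use super::Wants;

    #[test]
    fn add_and_contains() {
        let w = Wants::EMPTY.add(Wants::EXPECT);
        assert!(w.contains(Wants::EXPECT));
        assert!(!w.contains(Wants::UPGRADE));
        // EMPTY is contained in every value, since x & 0 == 0
        assert!(w.contains(Wants::EMPTY));
        let both = w.add(Wants::UPGRADE);
        assert!(both.contains(Wants::EXPECT) && both.contains(Wants::UPGRADE));
    }
}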
24.261261
92
0.652804
165107df81a063fd5fc7f3b21cc74ee63e2e5b48
332
mod config { pub trait ConfigType: Sized { fn doc_hint() -> String; } } #[allow(dead_code)] #[allow(unused_imports)] mod tests { use rustfmt_config_proc_macro::config_type; #[config_type] enum Bar { Foo, Bar, #[doc_hint = "foo_bar"] FooBar, FooFoo(i32), } }
15.809524
47
0.548193
503fa48dfbf6fee347cd044880cb6538eb5b33e4
1,003
use beatrix::{ beatrix_macro::MongoModel, bson::{doc, oid::ObjectId, DateTime as UtcDateTime}, mongo::MongoModel, mongodb::options::FindOptions, }; use futures::stream::StreamExt; use serde::{self, Deserialize, Serialize}; use crate::{db::get_db, error::ServiceError, user::User}; #[derive(MongoModel, Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] pub struct Group { #[serde(rename = "_id")] pub id: Option<ObjectId>, pub name: String, pub permissions: Vec<String>, pub features: Vec<String>, pub created_by_id: ObjectId, pub updated_at: UtcDateTime, } impl Group { pub async fn users(&self, options: Option<FindOptions>) -> Result<Vec<User>, ServiceError> { Ok(User::filter( get_db().await?, Some(doc!("groups": &self.id.clone().unwrap())), options, ) .await? .collect::<Vec<_>>() .await .into_iter() .map(|v| v.unwrap()) .collect()) } }
25.717949
96
0.601196
0311783398f65233ec6fc72e6c4f8a86a219cbdd
910
use crate::checkers::{
    Checkers,
    Tile,
    Player,
    Piece,
};

use Tile::*;
use Player::*;
use Piece::*;

// TODO: Exercise to create a score function for checkers
fn piece_value(piece: Piece) -> i64 {
    match piece {
        Pawn => 1,
        King => 2,
    }
}

// Positional scoring stub, intentionally left unimplemented for the exercise.
fn tile_value(_tile: Tile, _player: Player) {
}

fn piece_count(checkers: &Checkers) -> (i64, i64) {
    // piece_value returns i64, so the totals are accumulated and returned as i64
    let mut white = 0;
    let mut black = 0;
    for &tile in checkers.tiles.iter() {
        if let Occupied(player, piece) = tile {
            let value = piece_value(piece);
            match player {
                Black => black += value,
                White => white += value,
            }
        }
    }
    (white, black)
}

fn player_score(checkers: &Checkers, player: Player) -> f64 {
    let (white, black) = piece_count(checkers);
    match player {
        White => (white - black) as f64,
        Black => (black - white) as f64,
    }
}

// TODO: Exercise to complete chess implementation
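// One way the `tile_value` exercise could be finished (a sketch, assuming
// `Tile` has an `Occupied(Player, Piece)` variant as used in `piece_count`;
// matching on tuples avoids requiring `Player: PartialEq`):
fn tile_value_sketch(tile: Tile, player: Player) -> i64 {
    match (tile, player) {
        // own material counts for us, opposing material against us
        (Occupied(White, piece), White) | (Occupied(Black, piece), Black) => piece_value(piece),
        (Occupied(_, piece), _) => -piece_value(piece),
        // empty tiles contribute nothing
        _ => 0,
    }
}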
18.571429
63
0.596703
7a3aa81a0047cb5e414580fe9487c735572661f2
3,104
/// Errors that this crate throws.
#[derive(Debug)]
pub enum Error {
    /// Error related to rusb
    RusbError(rusb::Error),
    /// For text printing, the replaced sequence could not be found
    CP437Error(String),
    /// Error regarding image treatment
    ImageError(image::ImageError),
    /// This means no bulk endpoint could be found
    NoBulkEndpoint,
    /// No replacement string for an instruction was found
    NoReplacementFound(String),
    /// PrintData should've been supplied.
    NoPrintData,
    /// The specified font does not seem to be supported by the printer profile
    UnsupportedFont,
    /// At least one font needs to be available in the profile
    NoFontFound,
    /// Indicates that a builder method was called on the wrong printer connection
    UnsupportedForPrinterConnection,
    PrinterError(String),
    WrongMarkdown,
    NoTables,
    NoTableFound(String),
    NoWidth,
    NoQrContent(String),
    NoQrContents,
    Encoding,
    IoError(std::io::Error),
}

impl std::fmt::Display for Error {
    fn fmt(&self, formatter: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> {
        let content = match self {
            Error::RusbError(e) => format!("rusb error: {}", e),
            Error::CP437Error(detail) => format!("CP437 error: {}", detail),
            Error::ImageError(e) => format!("Image error: {}", e),
            Error::NoBulkEndpoint => "No bulk endpoint could be found".to_string(),
            Error::NoReplacementFound(replacement) => {
                format!("Could not find replacement for tag {{{}}}", replacement)
            }
            Error::NoPrintData => "Print data must be supplied for this instruction".to_string(),
            Error::UnsupportedFont => {
                "The specified font does not seem to be supported by the printer profile"
                    .to_string()
            }
            Error::NoFontFound => "No Font was found in the profile".to_string(),
            Error::UnsupportedForPrinterConnection => {
                "The called method does not work with the current printer connection".to_string()
            }
            Error::PrinterError(detail) => format!("An error occurred while printing, {}", detail),
            Error::WrongMarkdown => "Incorrect markdown structure".to_string(),
            Error::NoTables => {
                "Not a single table was found in the PrintData structure".to_string()
            }
            Error::NoTableFound(table) => format!("No table was found for id {{{}}}", table),
            Error::NoWidth => "No width was found for the selected font".to_string(),
            Error::NoQrContent(name) => format!("Could not find qr code content for \"{}\"", name),
            Error::NoQrContents => "Could not find qr contents".to_string(),
            Error::Encoding => {
                "An unsupported utf-8 character was found when converting to cp437".to_string()
            }
            Error::IoError(err) => format!("I/O Error: {}", err),
        };
        write!(formatter, "{}", content)
    }
}

impl std::error::Error for Error {}
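// From conversions are a natural companion to an error enum like this, so the
// `?` operator can lift library errors automatically. A sketch, assuming the
// crate does not already define these elsewhere (if it does, they would be
// redundant):
impl From<rusb::Error> for Error {
    fn from(e: rusb::Error) -> Self {
        Error::RusbError(e)
    }
}

impl From<image::ImageError> for Error {
    fn from(e: image::ImageError) -> Self {
        Error::ImageError(e)
    }
}

impl From<std::io::Error> for Error {
    fn from(e: std::io::Error) -> Self {
        Error::IoError(e)
    }
}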
43.71831
99
0.609214
112cba3efda84a400dbe3dab5814167f19a5dc49
42,900
//! Check the validity invariant of a given value, and tell the user //! where in the value it got violated. //! In const context, this goes even further and tries to approximate const safety. //! That's useful because it means other passes (e.g. promotion) can rely on `const`s //! to be const-safe. use std::convert::TryFrom; use std::fmt::Write; use std::num::NonZeroUsize; use std::ops::RangeInclusive; use rustc_data_structures::fx::FxHashSet; use rustc_hir as hir; use rustc_middle::mir::interpret::InterpError; use rustc_middle::ty; use rustc_middle::ty::layout::TyAndLayout; use rustc_span::symbol::{sym, Symbol}; use rustc_target::abi::{Abi, LayoutOf, Scalar as ScalarAbi, Size, VariantIdx, Variants}; use std::hash::Hash; use super::{ alloc_range, CheckInAllocMsg, GlobalAlloc, InterpCx, InterpResult, MPlaceTy, Machine, MemPlaceMeta, OpTy, Scalar, ScalarMaybeUninit, ValueVisitor, }; macro_rules! throw_validation_failure { ($where:expr, { $( $what_fmt:expr ),+ } $( expected { $( $expected_fmt:expr ),+ } )?) => {{ let mut msg = String::new(); msg.push_str("encountered "); write!(&mut msg, $($what_fmt),+).unwrap(); $( msg.push_str(", but expected "); write!(&mut msg, $($expected_fmt),+).unwrap(); )? let path = rustc_middle::ty::print::with_no_trimmed_paths(|| { let where_ = &$where; if !where_.is_empty() { let mut path = String::new(); write_path(&mut path, where_); Some(path) } else { None } }); throw_ub!(ValidationFailure { path, msg }) }}; } /// If $e throws an error matching the pattern, throw a validation failure. /// Other errors are passed back to the caller, unchanged -- and if they reach the root of /// the visitor, we make sure only validation errors and `InvalidProgram` errors are left. /// This lets you use the patterns as a kind of validation list, asserting which errors /// can possibly happen: /// /// ``` /// let v = try_validation!(some_fn(), some_path, { /// Foo | Bar | Baz => { "some failure" }, /// }); /// ``` /// /// An additional expected parameter can also be added to the failure message: /// /// ``` /// let v = try_validation!(some_fn(), some_path, { /// Foo | Bar | Baz => { "some failure" } expected { "something that wasn't a failure" }, /// }); /// ``` /// /// An additional nicety is that both parameters actually take format args, so you can just write /// the format string in directly: /// /// ``` /// let v = try_validation!(some_fn(), some_path, { /// Foo | Bar | Baz => { "{:?}", some_failure } expected { "{}", expected_value }, /// }); /// ``` /// macro_rules! try_validation { ($e:expr, $where:expr, $( $( $p:pat )|+ => { $( $what_fmt:expr ),+ } $( expected { $( $expected_fmt:expr ),+ } )? ),+ $(,)? ) => {{ match $e { Ok(x) => x, // We catch the error and turn it into a validation failure. We are okay with // allocation here as this can only slow down builds that fail anyway. Err(e) => match e.kind() { $( $($p)|+ => throw_validation_failure!( $where, { $( $what_fmt ),+ } $( expected { $( $expected_fmt ),+ } )? ) ),+, #[allow(unreachable_patterns)] _ => Err::<!, _>(e)?, } } }}; } /// We want to show a nice path to the invalid field for diagnostics, /// but avoid string operations in the happy case where no error happens. /// So we track a `Vec<PathElem>` where `PathElem` contains all the data we /// need to later print something for the user. 
#[derive(Copy, Clone, Debug)] pub enum PathElem { Field(Symbol), Variant(Symbol), GeneratorState(VariantIdx), CapturedVar(Symbol), ArrayElem(usize), TupleElem(usize), Deref, EnumTag, GeneratorTag, DynDowncast, } /// Extra things to check for during validation of CTFE results. pub enum CtfeValidationMode { /// Regular validation, nothing special happening. Regular, /// Validation of a `const`. /// `inner` says if this is an inner, indirect allocation (as opposed to the top-level const /// allocation). Being an inner allocation makes a difference because the top-level allocation /// of a `const` is copied for each use, but the inner allocations are implicitly shared. /// `allow_static_ptrs` says if pointers to statics are permitted (which is the case for promoteds in statics). Const { inner: bool, allow_static_ptrs: bool }, } /// State for tracking recursive validation of references pub struct RefTracking<T, PATH = ()> { pub seen: FxHashSet<T>, pub todo: Vec<(T, PATH)>, } impl<T: Copy + Eq + Hash + std::fmt::Debug, PATH: Default> RefTracking<T, PATH> { pub fn empty() -> Self { RefTracking { seen: FxHashSet::default(), todo: vec![] } } pub fn new(op: T) -> Self { let mut ref_tracking_for_consts = RefTracking { seen: FxHashSet::default(), todo: vec![(op, PATH::default())] }; ref_tracking_for_consts.seen.insert(op); ref_tracking_for_consts } pub fn track(&mut self, op: T, path: impl FnOnce() -> PATH) { if self.seen.insert(op) { trace!("Recursing below ptr {:#?}", op); let path = path(); // Remember to come back to this later. self.todo.push((op, path)); } } } /// Format a path fn write_path(out: &mut String, path: &[PathElem]) { use self::PathElem::*; for elem in path.iter() { match elem { Field(name) => write!(out, ".{}", name), EnumTag => write!(out, ".<enum-tag>"), Variant(name) => write!(out, ".<enum-variant({})>", name), GeneratorTag => write!(out, ".<generator-tag>"), GeneratorState(idx) => write!(out, ".<generator-state({})>", idx.index()), CapturedVar(name) => write!(out, ".<captured-var({})>", name), TupleElem(idx) => write!(out, ".{}", idx), ArrayElem(idx) => write!(out, "[{}]", idx), // `.<deref>` does not match Rust syntax, but it is more readable for long paths -- and // some of the other items here also are not Rust syntax. Actually we can't // even use the usual syntax because we are just showing the projections, // not the root. Deref => write!(out, ".<deref>"), DynDowncast => write!(out, ".<dyn-downcast>"), } .unwrap() } } // Test if a range that wraps at overflow contains `test` fn wrapping_range_contains(r: &RangeInclusive<u128>, test: u128) -> bool { let (lo, hi) = r.clone().into_inner(); if lo > hi { // Wrapped (..=hi).contains(&test) || (lo..).contains(&test) } else { // Normal r.contains(&test) } } // Formats such that a sentence like "expected something {}" to mean // "expected something <in the given range>" makes sense. 
fn wrapping_range_format(r: &RangeInclusive<u128>, max_hi: u128) -> String { let (lo, hi) = r.clone().into_inner(); assert!(hi <= max_hi); if lo > hi { format!("less or equal to {}, or greater or equal to {}", hi, lo) } else if lo == hi { format!("equal to {}", lo) } else if lo == 0 { assert!(hi < max_hi, "should not be printing if the range covers everything"); format!("less or equal to {}", hi) } else if hi == max_hi { assert!(lo > 0, "should not be printing if the range covers everything"); format!("greater or equal to {}", lo) } else { format!("in the range {:?}", r) } } struct ValidityVisitor<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> { /// The `path` may be pushed to, but the part that is present when a function /// starts must not be changed! `visit_fields` and `visit_array` rely on /// this stack discipline. path: Vec<PathElem>, ref_tracking: Option<&'rt mut RefTracking<MPlaceTy<'tcx, M::PointerTag>, Vec<PathElem>>>, /// `None` indicates this is not validating for CTFE (but for runtime). ctfe_mode: Option<CtfeValidationMode>, ecx: &'rt InterpCx<'mir, 'tcx, M>, } impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, 'tcx, M> { fn aggregate_field_path_elem(&mut self, layout: TyAndLayout<'tcx>, field: usize) -> PathElem { // First, check if we are projecting to a variant. match layout.variants { Variants::Multiple { tag_field, .. } => { if tag_field == field { return match layout.ty.kind() { ty::Adt(def, ..) if def.is_enum() => PathElem::EnumTag, ty::Generator(..) => PathElem::GeneratorTag, _ => bug!("non-variant type {:?}", layout.ty), }; } } Variants::Single { .. } => {} } // Now we know we are projecting to a field, so figure out which one. match layout.ty.kind() { // generators and closures. ty::Closure(def_id, _) | ty::Generator(def_id, _, _) => { let mut name = None; // FIXME this should be more descriptive i.e. CapturePlace instead of CapturedVar // https://github.com/rust-lang/project-rfc-2229/issues/46 if let Some(local_def_id) = def_id.as_local() { let tables = self.ecx.tcx.typeck(local_def_id); if let Some(captured_place) = tables.closure_min_captures_flattened(*def_id).nth(field) { // Sometimes the index is beyond the number of upvars (seen // for a generator). let var_hir_id = captured_place.get_root_variable(); let node = self.ecx.tcx.hir().get(var_hir_id); if let hir::Node::Binding(pat) = node { if let hir::PatKind::Binding(_, _, ident, _) = pat.kind { name = Some(ident.name); } } } } PathElem::CapturedVar(name.unwrap_or_else(|| { // Fall back to showing the field index. sym::integer(field) })) } // tuples ty::Tuple(_) => PathElem::TupleElem(field), // enums ty::Adt(def, ..) if def.is_enum() => { // we might be projecting *to* a variant, or to a field *in* a variant. match layout.variants { Variants::Single { index } => { // Inside a variant PathElem::Field(def.variants[index].fields[field].ident.name) } Variants::Multiple { .. } => bug!("we handled variants above"), } } // other ADTs ty::Adt(def, _) => PathElem::Field(def.non_enum_variant().fields[field].ident.name), // arrays/slices ty::Array(..) | ty::Slice(..) => PathElem::ArrayElem(field), // dyn traits ty::Dynamic(..) 
=> PathElem::DynDowncast, // nothing else has an aggregate layout _ => bug!("aggregate_field_path_elem: got non-aggregate type {:?}", layout.ty), } } fn with_elem<R>( &mut self, elem: PathElem, f: impl FnOnce(&mut Self) -> InterpResult<'tcx, R>, ) -> InterpResult<'tcx, R> { // Remember the old state let path_len = self.path.len(); // Record new element self.path.push(elem); // Perform operation let r = f(self)?; // Undo changes self.path.truncate(path_len); // Done Ok(r) } fn check_wide_ptr_meta( &mut self, meta: MemPlaceMeta<M::PointerTag>, pointee: TyAndLayout<'tcx>, ) -> InterpResult<'tcx> { let tail = self.ecx.tcx.struct_tail_erasing_lifetimes(pointee.ty, self.ecx.param_env); match tail.kind() { ty::Dynamic(..) => { let vtable = meta.unwrap_meta(); // Direct call to `check_ptr_access_align` checks alignment even on CTFE machines. try_validation!( self.ecx.memory.check_ptr_access_align( vtable, 3 * self.ecx.tcx.data_layout.pointer_size, // drop, size, align self.ecx.tcx.data_layout.pointer_align.abi, CheckInAllocMsg::InboundsTest, // will anyway be replaced by validity message ), self.path, err_ub!(DanglingIntPointer(..)) | err_ub!(PointerUseAfterFree(..)) | err_unsup!(ReadBytesAsPointer) => { "dangling vtable pointer in wide pointer" }, err_ub!(AlignmentCheckFailed { .. }) => { "unaligned vtable pointer in wide pointer" }, err_ub!(PointerOutOfBounds { .. }) => { "too small vtable" }, ); try_validation!( self.ecx.read_drop_type_from_vtable(vtable), self.path, err_ub!(DanglingIntPointer(..)) | err_ub!(InvalidFunctionPointer(..)) | err_unsup!(ReadBytesAsPointer) => { "invalid drop function pointer in vtable (not pointing to a function)" }, err_ub!(InvalidVtableDropFn(..)) => { "invalid drop function pointer in vtable (function has incompatible signature)" }, ); try_validation!( self.ecx.read_size_and_align_from_vtable(vtable), self.path, err_ub!(InvalidVtableSize) => { "invalid vtable: size is bigger than largest supported object" }, err_ub!(InvalidVtableAlignment(msg)) => { "invalid vtable: alignment {}", msg }, err_unsup!(ReadPointerAsBytes) => { "invalid size or align in vtable" }, ); // FIXME: More checks for the vtable. } ty::Slice(..) | ty::Str => { let _len = try_validation!( meta.unwrap_meta().to_machine_usize(self.ecx), self.path, err_unsup!(ReadPointerAsBytes) => { "non-integer slice length in wide pointer" }, ); // We do not check that `len * elem_size <= isize::MAX`: // that is only required for references, and there it falls out of the // "dereferenceable" check performed by Stacked Borrows. } ty::Foreign(..) => { // Unsized, but not wide. } _ => bug!("Unexpected unsized type tail: {:?}", tail), } Ok(()) } /// Check a reference or `Box`. fn check_safe_pointer( &mut self, value: &OpTy<'tcx, M::PointerTag>, kind: &str, ) -> InterpResult<'tcx> { let value = try_validation!( self.ecx.read_immediate(value), self.path, err_unsup!(ReadPointerAsBytes) => { "part of a pointer" } expected { "a proper pointer or integer value" }, ); // Handle wide pointers. // Check metadata early, for better diagnostics let place = try_validation!( self.ecx.ref_to_mplace(&value), self.path, err_ub!(InvalidUninitBytes(None)) => { "uninitialized {}", kind }, ); if place.layout.is_unsized() { self.check_wide_ptr_meta(place.meta, place.layout)?; } // Make sure this is dereferenceable and all. 
let size_and_align = try_validation!( self.ecx.size_and_align_of_mplace(&place), self.path, err_ub!(InvalidMeta(msg)) => { "invalid {} metadata: {}", kind, msg }, ); let (size, align) = size_and_align // for the purpose of validity, consider foreign types to have // alignment and size determined by the layout (size will be 0, // alignment should take attributes into account). .unwrap_or_else(|| (place.layout.size, place.layout.align.abi)); // Direct call to `check_ptr_access_align` checks alignment even on CTFE machines. try_validation!( self.ecx.memory.check_ptr_access_align( place.ptr, size, align, CheckInAllocMsg::InboundsTest, // will anyway be replaced by validity message ), self.path, err_ub!(AlignmentCheckFailed { required, has }) => { "an unaligned {} (required {} byte alignment but found {})", kind, required.bytes(), has.bytes() }, err_ub!(DanglingIntPointer(0, _)) => { "a null {}", kind }, err_ub!(DanglingIntPointer(i, _)) => { "a dangling {} (address 0x{:x} is unallocated)", kind, i }, err_ub!(PointerOutOfBounds { .. }) => { "a dangling {} (going beyond the bounds of its allocation)", kind }, err_unsup!(ReadBytesAsPointer) => { "a dangling {} (created from integer)", kind }, // This cannot happen during const-eval (because interning already detects // dangling pointers), but it can happen in Miri. err_ub!(PointerUseAfterFree(..)) => { "a dangling {} (use-after-free)", kind }, ); // Recursive checking if let Some(ref mut ref_tracking) = self.ref_tracking { // Proceed recursively even for ZST, no reason to skip them! // `!` is a ZST and we want to validate it. // Normalize before handing `place` to tracking because that will // check for duplicates. let place = if size.bytes() > 0 { self.ecx.force_mplace_ptr(place).expect("we already bounds-checked") } else { place }; // Skip validation entirely for some external statics if let Scalar::Ptr(ptr) = place.ptr { // not a ZST let alloc_kind = self.ecx.tcx.get_global_alloc(ptr.alloc_id); if let Some(GlobalAlloc::Static(did)) = alloc_kind { assert!(!self.ecx.tcx.is_thread_local_static(did)); assert!(self.ecx.tcx.is_static(did)); if matches!( self.ctfe_mode, Some(CtfeValidationMode::Const { allow_static_ptrs: false, .. }) ) { // See const_eval::machine::MemoryExtra::can_access_statics for why // this check is so important. // This check is reachable when the const just referenced the static, // but never read it (so we never entered `before_access_global`). throw_validation_failure!(self.path, { "a {} pointing to a static variable", kind } ); } // We skip checking other statics. These statics must be sound by // themselves, and the only way to get broken statics here is by using // unsafe code. // The reasons we don't check other statics is twofold. For one, in all // sound cases, the static was already validated on its own, and second, we // trigger cycle errors if we try to compute the value of the other static // and that static refers back to us. // We might miss const-invalid data, // but things are still sound otherwise (in particular re: consts // referring to statics). return Ok(()); } } let path = &self.path; ref_tracking.track(place, || { // We need to clone the path anyway, make sure it gets created // with enough space for the additional `Deref`. 
let mut new_path = Vec::with_capacity(path.len() + 1); new_path.clone_from(path); new_path.push(PathElem::Deref); new_path }); } Ok(()) } fn read_scalar( &self, op: &OpTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx, ScalarMaybeUninit<M::PointerTag>> { Ok(try_validation!( self.ecx.read_scalar(op), self.path, err_unsup!(ReadPointerAsBytes) => { "(potentially part of) a pointer" } expected { "plain (non-pointer) bytes" }, )) } /// Check if this is a value of primitive type, and if yes check the validity of the value /// at that type. Return `true` if the type is indeed primitive. fn try_visit_primitive( &mut self, value: &OpTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx, bool> { // Go over all the primitive types let ty = value.layout.ty; match ty.kind() { ty::Bool => { let value = self.read_scalar(value)?; try_validation!( value.to_bool(), self.path, err_ub!(InvalidBool(..)) | err_ub!(InvalidUninitBytes(None)) => { "{}", value } expected { "a boolean" }, ); Ok(true) } ty::Char => { let value = self.read_scalar(value)?; try_validation!( value.to_char(), self.path, err_ub!(InvalidChar(..)) | err_ub!(InvalidUninitBytes(None)) => { "{}", value } expected { "a valid unicode scalar value (in `0..=0x10FFFF` but not in `0xD800..=0xDFFF`)" }, ); Ok(true) } ty::Float(_) | ty::Int(_) | ty::Uint(_) => { let value = self.read_scalar(value)?; // NOTE: Keep this in sync with the array optimization for int/float // types below! if self.ctfe_mode.is_some() { // Integers/floats in CTFE: Must be scalar bits, pointers are dangerous let is_bits = value.check_init().map_or(false, |v| v.is_bits()); if !is_bits { throw_validation_failure!(self.path, { "{}", value } expected { "initialized plain (non-pointer) bytes" } ) } } else { // At run-time, for now, we accept *anything* for these types, including // uninit. We should fix that, but let's start low. } Ok(true) } ty::RawPtr(..) => { // We are conservative with uninit for integers, but try to // actually enforce the strict rules for raw pointers (mostly because // that lets us re-use `ref_to_mplace`). let place = try_validation!( self.ecx.read_immediate(value).and_then(|ref i| self.ecx.ref_to_mplace(i)), self.path, err_ub!(InvalidUninitBytes(None)) => { "uninitialized raw pointer" }, err_unsup!(ReadPointerAsBytes) => { "part of a pointer" } expected { "a proper pointer or integer value" }, ); if place.layout.is_unsized() { self.check_wide_ptr_meta(place.meta, place.layout)?; } Ok(true) } ty::Ref(_, ty, mutbl) => { if matches!(self.ctfe_mode, Some(CtfeValidationMode::Const { .. })) && *mutbl == hir::Mutability::Mut { // A mutable reference inside a const? That does not seem right (except if it is // a ZST). let layout = self.ecx.layout_of(ty)?; if !layout.is_zst() { throw_validation_failure!(self.path, { "mutable reference in a `const`" }); } } self.check_safe_pointer(value, "reference")?; Ok(true) } ty::Adt(def, ..) if def.is_box() => { self.check_safe_pointer(value, "box")?; Ok(true) } ty::FnPtr(_sig) => { let value = try_validation!( self.ecx.read_immediate(value), self.path, err_unsup!(ReadPointerAsBytes) => { "part of a pointer" } expected { "a proper pointer or integer value" }, ); // Make sure we print a `ScalarMaybeUninit` (and not an `ImmTy`) in the error // message below. 
let value = value.to_scalar_or_uninit(); let _fn = try_validation!( value.check_init().and_then(|ptr| self.ecx.memory.get_fn(ptr)), self.path, err_ub!(DanglingIntPointer(..)) | err_ub!(InvalidFunctionPointer(..)) | err_ub!(InvalidUninitBytes(None)) | err_unsup!(ReadBytesAsPointer) => { "{}", value } expected { "a function pointer" }, ); // FIXME: Check if the signature matches Ok(true) } ty::Never => throw_validation_failure!(self.path, { "a value of the never type `!`" }), ty::Foreign(..) | ty::FnDef(..) => { // Nothing to check. Ok(true) } // The above should be all the primitive types. The rest is compound, we // check them by visiting their fields/variants. ty::Adt(..) | ty::Tuple(..) | ty::Array(..) | ty::Slice(..) | ty::Str | ty::Dynamic(..) | ty::Closure(..) | ty::Generator(..) => Ok(false), // Some types only occur during typechecking, they have no layout. // We should not see them here and we could not check them anyway. ty::Error(_) | ty::Infer(..) | ty::Placeholder(..) | ty::Bound(..) | ty::Param(..) | ty::Opaque(..) | ty::Projection(..) | ty::GeneratorWitness(..) => bug!("Encountered invalid type {:?}", ty), } } fn visit_scalar( &mut self, op: &OpTy<'tcx, M::PointerTag>, scalar_layout: &ScalarAbi, ) -> InterpResult<'tcx> { let value = self.read_scalar(op)?; let valid_range = &scalar_layout.valid_range; let (lo, hi) = valid_range.clone().into_inner(); // Determine the allowed range // `max_hi` is as big as the size fits let max_hi = u128::MAX >> (128 - op.layout.size.bits()); assert!(hi <= max_hi); // We could also write `(hi + 1) % (max_hi + 1) == lo` but `max_hi + 1` overflows for `u128` if (lo == 0 && hi == max_hi) || (hi + 1 == lo) { // Nothing to check return Ok(()); } // At least one value is excluded. Get the bits. let value = try_validation!( value.check_init(), self.path, err_ub!(InvalidUninitBytes(None)) => { "{}", value } expected { "something {}", wrapping_range_format(valid_range, max_hi) }, ); let bits = match value.to_bits_or_ptr(op.layout.size, self.ecx) { Err(ptr) => { if lo == 1 && hi == max_hi { // Only null is the niche. So make sure the ptr is NOT null. if self.ecx.memory.ptr_may_be_null(ptr) { throw_validation_failure!(self.path, { "a potentially null pointer" } expected { "something that cannot possibly fail to be {}", wrapping_range_format(valid_range, max_hi) } ) } return Ok(()); } else { // Conservatively, we reject, because the pointer *could* have a bad // value. throw_validation_failure!(self.path, { "a pointer" } expected { "something that cannot possibly fail to be {}", wrapping_range_format(valid_range, max_hi) } ) } } Ok(data) => data, }; // Now compare. This is slightly subtle because this is a special "wrap-around" range. 
if wrapping_range_contains(&valid_range, bits) { Ok(()) } else { throw_validation_failure!(self.path, { "{}", bits } expected { "something {}", wrapping_range_format(valid_range, max_hi) } ) } } } impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M> for ValidityVisitor<'rt, 'mir, 'tcx, M> { type V = OpTy<'tcx, M::PointerTag>; #[inline(always)] fn ecx(&self) -> &InterpCx<'mir, 'tcx, M> { &self.ecx } fn read_discriminant( &mut self, op: &OpTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx, VariantIdx> { self.with_elem(PathElem::EnumTag, move |this| { Ok(try_validation!( this.ecx.read_discriminant(op), this.path, err_ub!(InvalidTag(val)) => { "{}", val } expected { "a valid enum tag" }, err_ub!(InvalidUninitBytes(None)) => { "uninitialized bytes" } expected { "a valid enum tag" }, err_unsup!(ReadPointerAsBytes) => { "a pointer" } expected { "a valid enum tag" }, ) .1) }) } #[inline] fn visit_field( &mut self, old_op: &OpTy<'tcx, M::PointerTag>, field: usize, new_op: &OpTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx> { let elem = self.aggregate_field_path_elem(old_op.layout, field); self.with_elem(elem, move |this| this.visit_value(new_op)) } #[inline] fn visit_variant( &mut self, old_op: &OpTy<'tcx, M::PointerTag>, variant_id: VariantIdx, new_op: &OpTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx> { let name = match old_op.layout.ty.kind() { ty::Adt(adt, _) => PathElem::Variant(adt.variants[variant_id].ident.name), // Generators also have variants ty::Generator(..) => PathElem::GeneratorState(variant_id), _ => bug!("Unexpected type with variant: {:?}", old_op.layout.ty), }; self.with_elem(name, move |this| this.visit_value(new_op)) } #[inline(always)] fn visit_union( &mut self, _op: &OpTy<'tcx, M::PointerTag>, _fields: NonZeroUsize, ) -> InterpResult<'tcx> { Ok(()) } #[inline] fn visit_value(&mut self, op: &OpTy<'tcx, M::PointerTag>) -> InterpResult<'tcx> { trace!("visit_value: {:?}, {:?}", *op, op.layout); // Check primitive types -- the leafs of our recursive descend. if self.try_visit_primitive(op)? { return Ok(()); } // Sanity check: `builtin_deref` does not know any pointers that are not primitive. assert!(op.layout.ty.builtin_deref(true).is_none()); // Special check preventing `UnsafeCell` in the inner part of constants if let Some(def) = op.layout.ty.ty_adt_def() { if matches!(self.ctfe_mode, Some(CtfeValidationMode::Const { inner: true, .. })) && Some(def.did) == self.ecx.tcx.lang_items().unsafe_cell_type() { throw_validation_failure!(self.path, { "`UnsafeCell` in a `const`" }); } } // Recursively walk the value at its type. self.walk_value(op)?; // *After* all of this, check the ABI. We need to check the ABI to handle // types like `NonNull` where the `Scalar` info is more restrictive than what // the fields say (`rustc_layout_scalar_valid_range_start`). // But in most cases, this will just propagate what the fields say, // and then we want the error to point at the field -- so, first recurse, // then check ABI. // // FIXME: We could avoid some redundant checks here. For newtypes wrapping // scalars, we do the same check on every "level" (e.g., first we check // MyNewtype and then the scalar in there). match op.layout.abi { Abi::Uninhabited => { throw_validation_failure!(self.path, { "a value of uninhabited type {:?}", op.layout.ty } ); } Abi::Scalar(ref scalar_layout) => { self.visit_scalar(op, scalar_layout)?; } Abi::ScalarPair { .. } | Abi::Vector { .. 
} => { // These have fields that we already visited above, so we already checked // all their scalar-level restrictions. // There is also no equivalent to `rustc_layout_scalar_valid_range_start` // that would make skipping them here an issue. } Abi::Aggregate { .. } => { // Nothing to do. } } Ok(()) } fn visit_aggregate( &mut self, op: &OpTy<'tcx, M::PointerTag>, fields: impl Iterator<Item = InterpResult<'tcx, Self::V>>, ) -> InterpResult<'tcx> { match op.layout.ty.kind() { ty::Str => { let mplace = op.assert_mem_place(self.ecx); // strings are never immediate let len = mplace.len(self.ecx)?; try_validation!( self.ecx.memory.read_bytes(mplace.ptr, Size::from_bytes(len)), self.path, err_ub!(InvalidUninitBytes(..)) => { "uninitialized data in `str`" }, err_unsup!(ReadPointerAsBytes) => { "a pointer in `str`" }, ); } ty::Array(tys, ..) | ty::Slice(tys) // This optimization applies for types that can hold arbitrary bytes (such as // integer and floating point types) or for structs or tuples with no fields. // FIXME(wesleywiser) This logic could be extended further to arbitrary structs // or tuples made up of integer/floating point types or inhabited ZSTs with no // padding. if matches!(tys.kind(), ty::Int(..) | ty::Uint(..) | ty::Float(..)) => { // Optimized handling for arrays of integer/float type. // Arrays cannot be immediate, slices are never immediate. let mplace = op.assert_mem_place(self.ecx); // This is the length of the array/slice. let len = mplace.len(self.ecx)?; // This is the element type size. let layout = self.ecx.layout_of(tys)?; // This is the size in bytes of the whole array. (This checks for overflow.) let size = layout.size * len; // Optimization: we just check the entire range at once. // NOTE: Keep this in sync with the handling of integer and float // types above, in `visit_primitive`. // In run-time mode, we accept pointers in here. This is actually more // permissive than a per-element check would be, e.g., we accept // an &[u8] that contains a pointer even though bytewise checking would // reject it. However, that's good: We don't inherently want // to reject those pointers, we just do not have the machinery to // talk about parts of a pointer. // We also accept uninit, for consistency with the slow path. let alloc = match self.ecx.memory.get(mplace.ptr, size, mplace.align)? { Some(a) => a, None => { // Size 0, nothing more to check. return Ok(()); } }; match alloc.check_bytes( alloc_range(Size::ZERO, size), /*allow_uninit_and_ptr*/ self.ctfe_mode.is_none(), ) { // In the happy case, we needn't check anything else. Ok(()) => {} // Some error happened, try to provide a more detailed description. Err(err) => { // For some errors we might be able to provide extra information. // (This custom logic does not fit the `try_validation!` macro.) match err.kind() { err_ub!(InvalidUninitBytes(Some((_alloc_id, access)))) => { // Some byte was uninitialized, determine which // element that byte belongs to so we can // provide an index. let i = usize::try_from( access.uninit_offset.bytes() / layout.size.bytes(), ) .unwrap(); self.path.push(PathElem::ArrayElem(i)); throw_validation_failure!(self.path, { "uninitialized bytes" }) } err_unsup!(ReadPointerAsBytes) => { throw_validation_failure!(self.path, { "a pointer" } expected { "plain (non-pointer) bytes" }) } // Propagate upwards (that will also check for unexpected errors). _ => return Err(err), } } } } // Fast path for arrays and slices of ZSTs. 
We only need to check a single ZST element // of an array and not all of them, because there's only a single value of a specific // ZST type, so either validation fails for all elements or none. ty::Array(tys, ..) | ty::Slice(tys) if self.ecx.layout_of(tys)?.is_zst() => { // Validate just the first element (if any). self.walk_aggregate(op, fields.take(1))? } _ => { self.walk_aggregate(op, fields)? // default handler } } Ok(()) } } impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { fn validate_operand_internal( &self, op: &OpTy<'tcx, M::PointerTag>, path: Vec<PathElem>, ref_tracking: Option<&mut RefTracking<MPlaceTy<'tcx, M::PointerTag>, Vec<PathElem>>>, ctfe_mode: Option<CtfeValidationMode>, ) -> InterpResult<'tcx> { trace!("validate_operand_internal: {:?}, {:?}", *op, op.layout.ty); // Construct a visitor let mut visitor = ValidityVisitor { path, ref_tracking, ctfe_mode, ecx: self }; // Try to cast to ptr *once* instead of all the time. let op = self.force_op_ptr(&op).unwrap_or(*op); // Run it. match visitor.visit_value(&op) { Ok(()) => Ok(()), // Pass through validation failures. Err(err) if matches!(err.kind(), err_ub!(ValidationFailure { .. })) => Err(err), // Also pass through InvalidProgram, those just indicate that we could not // validate and each caller will know best what to do with them. Err(err) if matches!(err.kind(), InterpError::InvalidProgram(_)) => Err(err), // Avoid other errors as those do not show *where* in the value the issue lies. Err(err) => { err.print_backtrace(); bug!("Unexpected error during validation: {}", err); } } } /// This function checks the data at `op` to be const-valid. /// `op` is assumed to cover valid memory if it is an indirect operand. /// It will error if the bits at the destination do not match the ones described by the layout. /// /// `ref_tracking` is used to record references that we encounter so that they /// can be checked recursively by an outside driving loop. /// /// `constant` controls whether this must satisfy the rules for constants: /// - no pointers to statics. /// - no `UnsafeCell` or non-ZST `&mut`. #[inline(always)] pub fn const_validate_operand( &self, op: &OpTy<'tcx, M::PointerTag>, path: Vec<PathElem>, ref_tracking: &mut RefTracking<MPlaceTy<'tcx, M::PointerTag>, Vec<PathElem>>, ctfe_mode: CtfeValidationMode, ) -> InterpResult<'tcx> { self.validate_operand_internal(op, path, Some(ref_tracking), Some(ctfe_mode)) } /// This function checks the data at `op` to be runtime-valid. /// `op` is assumed to cover valid memory if it is an indirect operand. /// It will error if the bits at the destination do not match the ones described by the layout. #[inline(always)] pub fn validate_operand(&self, op: &OpTy<'tcx, M::PointerTag>) -> InterpResult<'tcx> { self.validate_operand_internal(op, vec![], None, None) } }
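// A minimal, self-contained sketch of the wrap-around valid-range check that
// `visit_scalar` relies on above (`wrapping_range_contains`); the module and
// test names here are illustrative, only the arithmetic mirrors the real check.
#[cfg(test)]
mod wrapping_range_sketch {
    use std::ops::RangeInclusive;

    /// A range `lo..=hi` with `lo > hi` wraps around the end of the scalar's
    /// domain, so membership means `bits >= lo || bits <= hi`.
    fn wrapping_range_contains(r: &RangeInclusive<u128>, bits: u128) -> bool {
        if r.start() > r.end() {
            bits >= *r.start() || bits <= *r.end()
        } else {
            r.contains(&bits)
        }
    }

    #[test]
    fn nonzero_u8_style_niche() {
        // A `NonZeroU8`-like scalar has valid range 1..=255: only zero is excluded.
        let r = 1..=255u128;
        assert!(wrapping_range_contains(&r, 1));
        assert!(!wrapping_range_contains(&r, 0));
    }

    #[test]
    fn wrap_around_range() {
        // 250..=5 (for a one-byte scalar) accepts both ends but not the middle.
        let r = 250..=5u128;
        assert!(wrapping_range_contains(&r, 252));
        assert!(wrapping_range_contains(&r, 3));
        assert!(!wrapping_range_contains(&r, 100));
    }
}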
43.289606
133
0.515361
6a9247ce4d571aa5289b27b539e3deb49f76df55
1,928
#[doc = "Register `IC_CLR_STOP_DET` reader"] pub struct R(crate::R<IC_CLR_STOP_DET_SPEC>); impl core::ops::Deref for R { type Target = crate::R<IC_CLR_STOP_DET_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl From<crate::R<IC_CLR_STOP_DET_SPEC>> for R { #[inline(always)] fn from(reader: crate::R<IC_CLR_STOP_DET_SPEC>) -> Self { R(reader) } } #[doc = "Field `CLR_STOP_DET` reader - Read this register to clear the STOP_DET interrupt (bit 9) of the IC_RAW_INTR_STAT register. Reset value: 0x0"] pub struct CLR_STOP_DET_R(crate::FieldReader<bool, bool>); impl CLR_STOP_DET_R { #[inline(always)] pub(crate) fn new(bits: bool) -> Self { CLR_STOP_DET_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for CLR_STOP_DET_R { type Target = crate::FieldReader<bool, bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl R { #[doc = "Bit 0 - Read this register to clear the STOP_DET interrupt (bit 9) of the IC_RAW_INTR_STAT register. Reset value: 0x0"] #[inline(always)] pub fn clr_stop_det(&self) -> CLR_STOP_DET_R { CLR_STOP_DET_R::new((self.bits & 0x01) != 0) } } #[doc = "Clear STOP_DET Interrupt Register This register you can [`read`](crate::generic::Reg::read). See [API](https://docs.rs/svd2rust/#read--modify--write-api). For information about available fields see [ic_clr_stop_det](index.html) module"] pub struct IC_CLR_STOP_DET_SPEC; impl crate::RegisterSpec for IC_CLR_STOP_DET_SPEC { type Ux = u32; } #[doc = "`read()` method returns [ic_clr_stop_det::R](R) reader structure"] impl crate::Readable for IC_CLR_STOP_DET_SPEC { type Reader = R; } #[doc = "`reset()` method sets IC_CLR_STOP_DET to value 0"] impl crate::Resettable for IC_CLR_STOP_DET_SPEC { #[inline(always)] fn reset_value() -> Self::Ux { 0 } }
31.096774
133
0.664938
d9684d9f331fb1a6613c3b84c7b36a5d3e91ee19
5,161
// Copyright 2018 sqlparser-rs contributors. All rights reserved.
// Copyright Materialize, Inc. All rights reserved.
//
// This file is derived from the sqlparser-rs project, available at
// https://github.com/andygrove/sqlparser-rs. It was incorporated
// directly into Materialize on December 21, 2019.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License in the LICENSE file at the
// root of this repository, or online at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::fmt;
use std::str::FromStr;

use ore::ascii::UncasedStr;

use crate::ast::Ident;

// The `Keyword` type and the keyword constants are automatically generated from
// the list in keywords.txt by the crate's build script.
//
// We go to the trouble of code generation primarily to create a "perfect hash
// function" at compile time via the phf crate, which enables very fast,
// case-insensitive keyword parsing. From there it's easy to generate a few
// more convenience functions and accessors.
//
// If the only keywords were `Insert` and `Select`, we'd generate the following
// code:
//
//     pub enum Keyword {
//         Insert,
//         Select,
//     }
//
//     pub const INSERT: Keyword = Keyword::Insert;
//     pub const SELECT: Keyword = Keyword::Select;
//
//     impl Keyword {
//         pub fn as_str(&self) -> &'static str {
//             match self {
//                 Keyword::Insert => "INSERT",
//                 Keyword::Select => "SELECT",
//             }
//         }
//     }
//
//     static KEYWORDS: phf::Map<&'static UncasedStr, Keyword> = { /* ... */ };

include!(concat!(env!("OUT_DIR"), "/keywords.rs"));

impl Keyword {
    pub fn into_ident(self) -> Ident {
        Ident::new(self.as_str().to_lowercase())
    }

    /// Reports whether this keyword requires quoting when used as an
    /// identifier in any context.
    ///
    /// The only exception to the rule is when the keyword follows `AS` in a
    /// column or table alias.
    pub fn is_reserved(self) -> bool {
        matches!(
            self,
            // Keywords that can appear at the top-level of a SELECT statement.
            WITH | SELECT | FROM | WHERE | GROUP | HAVING | ORDER | LIMIT | OFFSET | FETCH | OPTION |
            // Set operations.
            UNION | EXCEPT | INTERSECT
        )
    }

    /// Reports whether this keyword requires quoting when used as a table
    /// alias.
    ///
    /// Note that this rule only applies when the table alias is "bare";
    /// i.e., when the table alias is not preceded by `AS`.
    ///
    /// Ensures that `FROM <table_name> <table_alias>` can be parsed
    /// unambiguously.
    pub fn is_reserved_in_table_alias(self) -> bool {
        matches!(
            self,
            // These keywords are ambiguous when used as a table alias, as they
            // conflict with the syntax for joins.
            ON | JOIN | INNER | CROSS | FULL | LEFT | RIGHT | NATURAL | USING |
            // `OUTER` is not strictly ambiguous, but it prevents `a OUTER JOIN
            // b` from parsing as `a AS outer JOIN b`, instead producing a nice
            // syntax error.
            OUTER
        ) || self.is_reserved()
    }

    /// Reports whether this keyword requires quoting when used as a column
    /// alias.
    ///
    /// Note that this rule only applies when the column alias is "bare";
    /// i.e., when the column alias is not preceded by `AS`.
/// /// Ensures that `SELECT <column_name> <column_alias>` can be parsed /// unambiguously. pub fn is_reserved_in_column_alias(self) -> bool { matches!( self, // These timelike keywords conflict with interval timeframe // suffixes. They are not strictly ambiguous, but marking them // reserved prevents e.g. `SELECT pg_catalog.interval '1' year` from // parsing as `SELECT pg_catalog.interval '1' AS YEAR`. YEAR | MONTH | DAY | HOUR | MINUTE | SECOND ) || self.is_reserved() } /// Reports whether a keyword is considered reserved in any context: /// either in table aliases, column aliases, or in all contexts. pub fn is_sometimes_reserved(self) -> bool { self.is_reserved() || self.is_reserved_in_table_alias() || self.is_reserved_in_column_alias() } } impl FromStr for Keyword { type Err = (); fn from_str(s: &str) -> Result<Keyword, ()> { match KEYWORDS.get(UncasedStr::new(s)) { Some(kw) => Ok(*kw), None => Err(()), } } } impl fmt::Display for Keyword { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.write_str(self.as_str()) } }
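// A minimal usage sketch of the lookup and reservedness predicates above,
// assuming `SELECT` and `JOIN` appear in keywords.txt (the module and test
// names are illustrative):
#[cfg(test)]
mod keyword_sketch {
    use super::*;

    #[test]
    fn lookup_is_case_insensitive() {
        let kw: Keyword = "select".parse().expect("SELECT should be a keyword");
        assert_eq!(kw.as_str(), "SELECT");
        assert!(kw.is_reserved());
    }

    #[test]
    fn join_is_only_reserved_as_a_bare_table_alias() {
        let kw: Keyword = "join".parse().expect("JOIN should be a keyword");
        assert!(!kw.is_reserved());
        assert!(kw.is_reserved_in_table_alias());
    }
}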
35.349315
101
0.617128
6aea130e0fd999df42c8c83f59e58655cafc16f0
26,208
//! Traits for transforming bits of IR.

use crate::cast::Cast;
use crate::*;
use chalk_engine::context::Context;
use chalk_engine::{DelayedLiteral, ExClause, FlounderedSubgoal, Literal};
use std::fmt::Debug;
use std::sync::Arc;

pub mod shift;
mod subst;

pub use self::subst::Subst;

/// A "folder" is a transformer that can be used to make a copy of
/// some term -- that is, some bit of IR, such as a `Goal` -- with
/// certain changes applied. The idea is that it contains methods that
/// let you swap types/lifetimes for new types/lifetimes; meanwhile,
/// each bit of IR implements the `Fold` trait which, given a
/// `Folder`, will reconstruct itself, invoking the folder's methods
/// to transform each of the types/lifetimes embedded within.
///
/// # Usage patterns
///
/// ## Substituting for free variables
///
/// Most of the time, though, we are not interested in adjusting
/// arbitrary types/lifetimes, but rather just free variables (even
/// more often, just free existential variables) that appear within
/// the term.
///
/// For this reason, the `Folder` trait extends several other traits
/// that contain methods that are invoked when just those particular
/// kinds of types/lifetimes are encountered.
///
/// In particular, folders can intercept references to free variables
/// (either existentially or universally quantified) and replace them
/// with other types/lifetimes as appropriate.
///
/// To create a folder `F`, one never implements `Folder` directly, but instead
/// implements one of each of these three sub-traits:
///
/// - `FreeVarFolder` -- folds `BoundVar` instances that appear free
///   in the term being folded (use `DefaultFreeVarFolder` to
///   ignore/forbid these altogether)
/// - `InferenceFolder` -- folds existential `InferenceVar` instances
///   that appear in the term being folded (use
///   `DefaultInferenceFolder` to ignore/forbid these altogether)
/// - `PlaceholderFolder` -- folds universal `Placeholder` instances
///   that appear in the term being folded (use
///   `DefaultPlaceholderFolder` to ignore/forbid these altogether)
///
/// To **apply** a folder, use the `Fold::fold_with` method, like so
///
/// ```rust,ignore
/// let x = x.fold_with(&mut folder, 0);
/// ```
pub trait Folder: FreeVarFolder + InferenceFolder + PlaceholderFolder + TypeFolder {
    /// Returns a "dynamic" version of this trait. There is no
    /// **particular** reason to require this, except that I didn't
    /// feel like making `super_fold_ty` generic for no reason.
    fn to_dyn(&mut self) -> &mut dyn Folder;
}

pub trait TypeFolder {
    fn fold_ty(&mut self, ty: &Ty, binders: usize) -> Fallible<Ty>;

    fn fold_lifetime(&mut self, lifetime: &Lifetime, binders: usize) -> Fallible<Lifetime>;
}

impl<T> Folder for T
where
    T: FreeVarFolder + InferenceFolder + PlaceholderFolder + TypeFolder,
{
    fn to_dyn(&mut self) -> &mut dyn Folder {
        self
    }
}

/// A convenience trait that indicates that this folder doesn't take
/// any action on types in particular, but just recursively folds
/// their contents (note that free variables that are encountered in
/// that process may still be substituted). The vast majority of
/// folders implement this trait.
pub trait DefaultTypeFolder {}

impl<T> TypeFolder for T
where
    T: FreeVarFolder + InferenceFolder + PlaceholderFolder + DefaultTypeFolder,
{
    fn fold_ty(&mut self, ty: &Ty, binders: usize) -> Fallible<Ty> {
        super_fold_ty(self.to_dyn(), ty, binders)
    }

    fn fold_lifetime(&mut self, lifetime: &Lifetime, binders: usize) -> Fallible<Lifetime> {
        super_fold_lifetime(self.to_dyn(), lifetime, binders)
    }
}

/// The methods for folding **free variables**. These are `BoundVar`
/// instances where the binder is not something we folded over. This
/// is used when you are instantiating previously bound things with some
/// replacement.
pub trait FreeVarFolder {
    /// Invoked for `Ty::BoundVar` instances that are not bound within the type being folded
    /// over:
    ///
    /// - `depth` is the depth of the `Ty::BoundVar`; this has been adjusted to account for binders
    ///   in scope.
    /// - `binders` is the number of binders in scope.
    ///
    /// This should return a type suitable for a context with `binders` in scope.
    fn fold_free_var_ty(&mut self, depth: usize, binders: usize) -> Fallible<Ty>;

    /// As `fold_free_var_ty`, but for lifetimes.
    fn fold_free_var_lifetime(&mut self, depth: usize, binders: usize) -> Fallible<Lifetime>;
}

/// A convenience trait. If you implement this, you get an
/// implementation of `FreeVarFolder` for free that simply ignores
/// free values (that is, it replaces them with themselves).
///
/// You can make it panic if a free variable is found by overriding
/// `forbid` to return true.
pub trait DefaultFreeVarFolder {
    fn forbid() -> bool {
        false
    }
}

impl<T: DefaultFreeVarFolder> FreeVarFolder for T {
    fn fold_free_var_ty(&mut self, depth: usize, binders: usize) -> Fallible<Ty> {
        if T::forbid() {
            panic!("unexpected free variable with depth `{:?}`", depth)
        } else {
            Ok(Ty::BoundVar(depth + binders))
        }
    }

    fn fold_free_var_lifetime(&mut self, depth: usize, binders: usize) -> Fallible<Lifetime> {
        if T::forbid() {
            panic!("unexpected free variable with depth `{:?}`", depth)
        } else {
            Ok(Lifetime::BoundVar(depth + binders))
        }
    }
}

pub trait PlaceholderFolder {
    /// Invoked for each occurrence of a placeholder type; these are
    /// used when we instantiate binders universally. Returns a type
    /// to use instead, which should be suitably shifted to account
    /// for `binders`.
    ///
    /// - `universe` is the universe of the `TypeName::ForAll` that was found
    /// - `binders` is the number of binders in scope
    fn fold_free_placeholder_ty(
        &mut self,
        universe: PlaceholderIndex,
        binders: usize,
    ) -> Fallible<Ty>;

    /// As with `fold_free_placeholder_ty`, but for lifetimes.
    fn fold_free_placeholder_lifetime(
        &mut self,
        universe: PlaceholderIndex,
        binders: usize,
    ) -> Fallible<Lifetime>;
}

/// A convenience trait. If you implement this, you get an
/// implementation of `PlaceholderFolder` for free that simply ignores
/// placeholder values (that is, it replaces them with themselves).
///
/// You can make it panic if a free variable is found by overriding
/// `forbid` to return true.
pub trait DefaultPlaceholderFolder {
    fn forbid() -> bool {
        false
    }
}

impl<T: DefaultPlaceholderFolder> PlaceholderFolder for T {
    fn fold_free_placeholder_ty(
        &mut self,
        universe: PlaceholderIndex,
        _binders: usize,
    ) -> Fallible<Ty> {
        if T::forbid() {
            panic!("unexpected placeholder type `{:?}`", universe)
        } else {
            Ok(universe.to_ty())
        }
    }

    fn fold_free_placeholder_lifetime(
        &mut self,
        universe: PlaceholderIndex,
        _binders: usize,
    ) -> Fallible<Lifetime> {
        if T::forbid() {
            panic!("unexpected placeholder lifetime `{:?}`", universe)
        } else {
            Ok(universe.to_lifetime())
        }
    }
}

pub trait InferenceFolder {
    /// Invoked for each occurrence of an inference type; these are
    /// used when we instantiate binders universally. Returns a type
    /// to use instead, which should be suitably shifted to account
    /// for `binders`.
    ///
    /// - `var` is the inference variable that was found
    /// - `binders` is the number of binders in scope
    fn fold_inference_ty(&mut self, var: InferenceVar, binders: usize) -> Fallible<Ty>;

    /// As with `fold_inference_ty`, but for lifetimes.
    fn fold_inference_lifetime(&mut self, var: InferenceVar, binders: usize) -> Fallible<Lifetime>;
}

/// A convenience trait. If you implement this, you get an
/// implementation of `InferenceFolder` for free that simply ignores
/// inference values (that is, it replaces them with themselves).
///
/// You can make it panic if a free variable is found by overriding
/// `forbid` to return true.
pub trait DefaultInferenceFolder {
    fn forbid() -> bool {
        false
    }
}

impl<T: DefaultInferenceFolder> InferenceFolder for T {
    fn fold_inference_ty(&mut self, var: InferenceVar, _binders: usize) -> Fallible<Ty> {
        if T::forbid() {
            panic!("unexpected inference type `{:?}`", var)
        } else {
            Ok(var.to_ty())
        }
    }

    fn fold_inference_lifetime(
        &mut self,
        var: InferenceVar,
        _binders: usize,
    ) -> Fallible<Lifetime> {
        if T::forbid() {
            panic!("unexpected inference lifetime `'{:?}`", var)
        } else {
            Ok(var.to_lifetime())
        }
    }
}

/// Applies the given folder to a value.
pub trait Fold: Debug {
    /// The type of value that will be produced once folding is done.
    /// Typically this is `Self`, unless `Self` contains borrowed
    /// values, in which case owned values are produced (for example,
    /// one can fold over a `&T` value where `T: Fold`, in which case
    /// you get back a `T`, not a `&T`).
    type Result: Fold;

    /// Apply the given folder `folder` to `self`; `binders` is the
    /// number of binders that are in scope when beginning the
    /// fold. Typically `binders` starts as 0, but is adjusted when
    /// we encounter `Binders<T>` in the IR or other similar
    /// constructs.
    fn fold_with(&self, folder: &mut dyn Folder, binders: usize) -> Fallible<Self::Result>;
}

impl<'a, T: Fold> Fold for &'a T {
    type Result = T::Result;
    fn fold_with(&self, folder: &mut dyn Folder, binders: usize) -> Fallible<Self::Result> {
        (**self).fold_with(folder, binders)
    }
}

impl<T: Fold> Fold for Vec<T> {
    type Result = Vec<T::Result>;
    fn fold_with(&self, folder: &mut dyn Folder, binders: usize) -> Fallible<Self::Result> {
        self.iter().map(|e| e.fold_with(folder, binders)).collect()
    }
}

impl<T: Fold> Fold for Box<T> {
    type Result = Box<T::Result>;
    fn fold_with(&self, folder: &mut dyn Folder, binders: usize) -> Fallible<Self::Result> {
        Ok(Box::new((**self).fold_with(folder, binders)?))
    }
}

impl<T: Fold> Fold for Arc<T> {
    type Result = Arc<T::Result>;
    fn fold_with(&self, folder: &mut dyn Folder, binders: usize) -> Fallible<Self::Result> {
        Ok(Arc::new((**self).fold_with(folder, binders)?))
    }
}

macro_rules!
tuple_fold { ($($n:ident),*) => { impl<$($n: Fold,)*> Fold for ($($n,)*) { type Result = ($($n::Result,)*); fn fold_with(&self, folder: &mut dyn Folder, binders: usize) -> Fallible<Self::Result> { #[allow(non_snake_case)] let &($(ref $n),*) = self; Ok(($($n.fold_with(folder, binders)?,)*)) } } } } tuple_fold!(A, B); tuple_fold!(A, B, C); tuple_fold!(A, B, C, D); tuple_fold!(A, B, C, D, E); impl<T: Fold> Fold for Option<T> { type Result = Option<T::Result>; fn fold_with(&self, folder: &mut dyn Folder, binders: usize) -> Fallible<Self::Result> { match self { None => Ok(None), Some(e) => Ok(Some(e.fold_with(folder, binders)?)), } } } impl Fold for Ty { type Result = Self; fn fold_with(&self, folder: &mut dyn Folder, binders: usize) -> Fallible<Self::Result> { folder.fold_ty(self, binders) } } pub fn super_fold_ty(folder: &mut dyn Folder, ty: &Ty, binders: usize) -> Fallible<Ty> { match *ty { Ty::BoundVar(depth) => { if depth >= binders { folder.fold_free_var_ty(depth - binders, binders) } else { Ok(Ty::BoundVar(depth)) } } Ty::InferenceVar(var) => folder.fold_inference_ty(var, binders), Ty::Apply(ref apply) => { let ApplicationTy { name, ref parameters, } = *apply; match name { TypeName::Placeholder(ui) => { assert!( parameters.is_empty(), "type {:?} with parameters {:?}", ty, parameters ); folder.fold_free_placeholder_ty(ui, binders) } TypeName::TypeKindId(_) | TypeName::AssociatedType(_) => { let parameters = parameters.fold_with(folder, binders)?; Ok(ApplicationTy { name, parameters }.cast()) } } } Ty::Projection(ref proj) => Ok(Ty::Projection(proj.fold_with(folder, binders)?)), Ty::ForAll(ref quantified_ty) => Ok(Ty::ForAll(quantified_ty.fold_with(folder, binders)?)), } } impl Fold for QuantifiedTy { type Result = Self; fn fold_with(&self, folder: &mut dyn Folder, binders: usize) -> Fallible<Self::Result> { let QuantifiedTy { num_binders, ref ty, } = *self; Ok(QuantifiedTy { num_binders, ty: ty.fold_with(folder, binders + num_binders)?, }) } } impl<T> Fold for Binders<T> where T: Fold, { type Result = Binders<T::Result>; fn fold_with(&self, folder: &mut dyn Folder, binders: usize) -> Fallible<Self::Result> { let Binders { binders: ref self_binders, value: ref self_value, } = *self; let value = self_value.fold_with(folder, binders + self_binders.len())?; Ok(Binders { binders: self_binders.clone(), value: value, }) } } impl<T> Fold for Canonical<T> where T: Fold, { type Result = Canonical<T::Result>; fn fold_with(&self, folder: &mut dyn Folder, binders: usize) -> Fallible<Self::Result> { let Canonical { binders: ref self_binders, value: ref self_value, } = *self; let value = self_value.fold_with(folder, binders + self_binders.len())?; Ok(Canonical { binders: self_binders.clone(), value: value, }) } } impl Fold for Lifetime { type Result = Self; fn fold_with(&self, folder: &mut dyn Folder, binders: usize) -> Fallible<Self::Result> { folder.fold_lifetime(self, binders) } } pub fn super_fold_lifetime( folder: &mut dyn Folder, lifetime: &Lifetime, binders: usize, ) -> Fallible<Lifetime> { match *lifetime { Lifetime::BoundVar(depth) => { if depth >= binders { folder.fold_free_var_lifetime(depth - binders, binders) } else { Ok(Lifetime::BoundVar(depth)) } } Lifetime::InferenceVar(var) => folder.fold_inference_lifetime(var, binders), Lifetime::Placeholder(universe) => folder.fold_free_placeholder_lifetime(universe, binders), } } impl Fold for Substitution { type Result = Substitution; fn fold_with(&self, folder: &mut dyn Folder, binders: usize) -> Fallible<Self::Result> { let parameters = 
self.parameters.fold_with(folder, binders)?; Ok(Substitution { parameters }) } } impl Fold for Parameter { type Result = Parameter; fn fold_with(&self, folder: &mut dyn Folder, binders: usize) -> Fallible<Self::Result> { let inner = self.0.fold_with(folder, binders)?; Ok(Parameter(inner)) } } #[macro_export] macro_rules! copy_fold { ($t:ty) => { impl $crate::fold::Fold for $t { type Result = Self; fn fold_with( &self, _folder: &mut dyn ($crate::fold::Folder), _binders: usize, ) -> ::chalk_engine::fallible::Fallible<Self::Result> { Ok(*self) } } }; } copy_fold!(Identifier); copy_fold!(UniverseIndex); copy_fold!(ItemId); copy_fold!(ImplId); copy_fold!(StructId); copy_fold!(TraitId); copy_fold!(TypeId); copy_fold!(TypeKindId); copy_fold!(usize); copy_fold!(QuantifierKind); copy_fold!(chalk_engine::TableIndex); copy_fold!(chalk_engine::TimeStamp); // copy_fold!(TypeName); -- intentionally omitted! This is folded via `fold_ap` copy_fold!(()); #[macro_export] macro_rules! enum_fold { ($s:ident [$($n:ident),*] { $($variant:ident($($name:ident),*)),* } $($w:tt)*) => { impl<$($n),*> $crate::fold::Fold for $s<$($n),*> $($w)* { type Result = $s<$($n :: Result),*>; fn fold_with(&self, folder: &mut dyn ($crate::fold::Folder), binders: usize) -> ::chalk_engine::fallible::Fallible<Self::Result> { match self { $( $s::$variant( $($name),* ) => { Ok($s::$variant( $($name.fold_with(folder, binders)?),* )) } )* } } } }; // Hacky variant for use in slg::context::implementation ($s:ty { $p:ident :: { $($variant:ident($($name:ident),*)),* } }) => { impl $crate::fold::Fold for $s { type Result = $s; fn fold_with(&self, folder: &mut dyn ($crate::fold::Folder), binders: usize) -> ::chalk_engine::fallible::Fallible<Self::Result> { match self { $( $p::$variant( $($name),* ) => { Ok($p::$variant( $($name.fold_with(folder, binders)?),* )) } )* } } } } } enum_fold!(ParameterKind[T,L] { Ty(a), Lifetime(a) } where T: Fold, L: Fold); enum_fold!(WhereClause[] { Implemented(a), ProjectionEq(a) }); enum_fold!(WellFormed[] { Trait(a), Ty(a) }); enum_fold!(FromEnv[] { Trait(a), Ty(a) }); enum_fold!(DomainGoal[] { Holds(a), WellFormed(a), FromEnv(a), Normalize(a), InScope(a), IsLocal(a), IsUpstream(a), IsFullyVisible(a), LocalImplAllowed(a), Compatible(a), DownstreamType(a) }); enum_fold!(LeafGoal[] { EqGoal(a), DomainGoal(a) }); enum_fold!(Constraint[] { LifetimeEq(a, b) }); enum_fold!(Goal[] { Quantified(qkind, subgoal), Implies(wc, subgoal), And(g1, g2), Not(g), Leaf(wc), CannotProve(a) }); enum_fold!(ProgramClause[] { Implies(a), ForAll(a) }); #[macro_export] macro_rules! struct_fold { ($s:ident $([$($tt_args:tt)*])* { $($name:ident),* $(,)* } $($w:tt)*) => { struct_fold! { @parse_tt_args($($($tt_args)*)*) struct_name($s) parameters() self_args() result_args() field_names($($name),*) where_clauses($($w)*) } }; ( @parse_tt_args() struct_name($s:ident) parameters($($parameters:tt)*) self_args($($self_args:tt)*) result_args($($result_args:tt)*) field_names($($field_names:tt)*) where_clauses($($where_clauses:tt)*) ) => { struct_fold! { @parsed_tt_args struct_name($s) parameters($($parameters)*) self_ty($s < $($self_args)* >) result_ty($s < $($result_args)* >) field_names($($field_names)*) where_clauses($($where_clauses)*) } }; ( @parse_tt_args(, $($input:tt)*) struct_name($s:ident) parameters($($parameters:tt)*) self_args($($self_args:tt)*) result_args($($result_args:tt)*) field_names($($field_names:tt)*) where_clauses($($where_clauses:tt)*) ) => { struct_fold! 
{ @parse_tt_args($($input)*) struct_name($s) parameters($($parameters)*,) self_args($($self_args)*,) result_args($($result_args)*,) field_names($($field_names)*) where_clauses($($where_clauses)*) } }; ( @parse_tt_args(- $n:ident $($input:tt)*) struct_name($s:ident) parameters($($parameters:tt)*) self_args($($self_args:tt)*) result_args($($result_args:tt)*) field_names($($field_names:tt)*) where_clauses($($where_clauses:tt)*) ) => { struct_fold! { @parse_tt_args($($input)*) struct_name($s) parameters($($parameters)* $n) self_args($($self_args)* $n) result_args($($result_args)* $n) field_names($($field_names)*) where_clauses($($where_clauses)*) } }; ( @parse_tt_args($n:ident $($input:tt)*) struct_name($s:ident) parameters($($parameters:tt)*) self_args($($self_args:tt)*) result_args($($result_args:tt)*) field_names($($field_names:tt)*) where_clauses($($where_clauses:tt)*) ) => { struct_fold! { @parse_tt_args($($input)*) struct_name($s) parameters($($parameters)* $n) self_args($($self_args)* $n) result_args($($result_args)* $n :: Result) field_names($($field_names)*) where_clauses($($where_clauses)*) } }; ( @parsed_tt_args struct_name($s:ident) parameters($($parameters:tt)*) self_ty($self_ty:ty) result_ty($result_ty:ty) field_names($($field_name:ident),*) where_clauses($($where_clauses:tt)*) ) => { impl<$($parameters)*> $crate::fold::Fold for $self_ty $($where_clauses)* { type Result = $result_ty; fn fold_with(&self, folder: &mut dyn ($crate::fold::Folder), binders: usize) -> ::chalk_engine::fallible::Fallible<Self::Result> { Ok($s { $($field_name: self.$field_name.fold_with(folder, binders)?),* }) } } }; } struct_fold!(ProjectionTy { associated_ty_id, parameters, }); struct_fold!(TraitRef { trait_id, parameters, }); struct_fold!(Normalize { projection, ty }); struct_fold!(ProjectionEq { projection, ty }); struct_fold!(Environment { clauses }); struct_fold!(InEnvironment[F] { environment, goal } where F: Fold<Result = F>); struct_fold!(EqGoal { a, b }); struct_fold!(ProgramClauseImplication { consequence, conditions, }); struct_fold!(ConstrainedSubst { subst, /* NB: The `is_trivial` routine relies on the fact that `subst` is folded first. 
*/ constraints, }); // struct_fold!(ApplicationTy { name, parameters }); -- intentionally omitted, folded through Ty impl<C: Context> Fold for ExClause<C> where C: Context, C::Substitution: Fold<Result = C::Substitution>, C::RegionConstraint: Fold<Result = C::RegionConstraint>, C::CanonicalConstrainedSubst: Fold<Result = C::CanonicalConstrainedSubst>, C::GoalInEnvironment: Fold<Result = C::GoalInEnvironment>, { type Result = ExClause<C>; fn fold_with(&self, folder: &mut dyn Folder, binders: usize) -> Fallible<Self::Result> { let ExClause { subst, delayed_literals, constraints, subgoals, current_time, floundered_subgoals, } = self; Ok(ExClause { subst: subst.fold_with(folder, binders)?, delayed_literals: delayed_literals.fold_with(folder, binders)?, constraints: constraints.fold_with(folder, binders)?, subgoals: subgoals.fold_with(folder, binders)?, current_time: current_time.fold_with(folder, binders)?, floundered_subgoals: floundered_subgoals.fold_with(folder, binders)?, }) } } impl<C: Context> Fold for FlounderedSubgoal<C> where C: Context, C::Substitution: Fold<Result = C::Substitution>, C::RegionConstraint: Fold<Result = C::RegionConstraint>, C::CanonicalConstrainedSubst: Fold<Result = C::CanonicalConstrainedSubst>, C::GoalInEnvironment: Fold<Result = C::GoalInEnvironment>, { type Result = FlounderedSubgoal<C>; fn fold_with(&self, folder: &mut dyn Folder, binders: usize) -> Fallible<Self::Result> { let FlounderedSubgoal { floundered_literal, floundered_time, } = self; Ok(FlounderedSubgoal { floundered_literal: floundered_literal.fold_with(folder, binders)?, floundered_time: floundered_time.fold_with(folder, binders)?, }) } } impl<C: Context> Fold for DelayedLiteral<C> where C: Context, C::CanonicalConstrainedSubst: Fold<Result = C::CanonicalConstrainedSubst>, { type Result = DelayedLiteral<C>; fn fold_with(&self, folder: &mut dyn Folder, binders: usize) -> Fallible<Self::Result> { match self { DelayedLiteral::CannotProve(()) => Ok(DelayedLiteral::CannotProve(())), DelayedLiteral::Negative(table_index) => Ok(DelayedLiteral::Negative( table_index.fold_with(folder, binders)?, )), DelayedLiteral::Positive(table_index, subst) => Ok(DelayedLiteral::Positive( table_index.fold_with(folder, binders)?, subst.fold_with(folder, binders)?, )), } } } impl<C: Context> Fold for Literal<C> where C: Context, C::GoalInEnvironment: Fold<Result = C::GoalInEnvironment>, { type Result = Literal<C>; fn fold_with(&self, folder: &mut dyn Folder, binders: usize) -> Fallible<Self::Result> { match self { Literal::Positive(goal) => Ok(Literal::Positive(goal.fold_with(folder, binders)?)), Literal::Negative(goal) => Ok(Literal::Negative(goal.fold_with(folder, binders)?)), } } }
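// A minimal sketch of how the convenience traits above compose (the type name
// is illustrative): opting into all four `Default*` traits yields a no-op
// folder through the blanket impls, so `fold_with` simply rebuilds the term.
struct IdentityFolder;

impl DefaultFreeVarFolder for IdentityFolder {}
impl DefaultInferenceFolder for IdentityFolder {}
impl DefaultPlaceholderFolder for IdentityFolder {}
impl DefaultTypeFolder for IdentityFolder {}

// With those impls in place, the blanket `TypeFolder` and `Folder` impls
// above apply, and any `T: Fold` can be copied with:
//
//     let copy = term.fold_with(&mut IdentityFolder, 0)?;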
33.773196
100
0.58257
e8c3c9c194a385da555177696ef2dad80c35b7fe
793
// build-pass #![allow(dead_code)] // pretty-expanded FIXME #23616 mod a { pub enum Enum<T> { A(T), } pub trait X { fn dummy(&self) { } } impl X for isize {} pub struct Z<'a>(Enum<&'a (dyn X + 'a)>); fn foo() { let x: isize = 42; let z = Z(Enum::A(&x as &dyn X)); let _ = z; } } mod b { trait X { fn dummy(&self) { } } impl X for isize {} struct Y<'a>{ x:Option<&'a (dyn X + 'a)>, } fn bar() { let x: isize = 42; let _y = Y { x: Some(&x as &dyn X) }; } } mod c { pub trait X { fn f(&self); } impl X for isize { fn f(&self) {} } pub struct Z<'a>(Option<&'a (dyn X + 'a)>); fn main() { let x: isize = 42; let z = Z(Some(&x as &dyn X)); let _ = z; } } pub fn main() {}
18.880952
80
0.448928
f56d5478a68dfb9c2e1e31fecfc9247f8ff38ab3
6,302
// Copyright 2019 The Grin Developers // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Test sender transaction with no change output #[macro_use] extern crate log; extern crate grin_wallet_controller as wallet; extern crate grin_wallet_impls as impls; use grin_wallet_util::grin_core as core; use grin_wallet_libwallet as libwallet; use impls::test_framework::{self, LocalWalletClient}; use libwallet::{InitTxArgs, IssueInvoiceTxArgs, Slate}; use std::sync::atomic::Ordering; use std::thread; use std::time::Duration; #[macro_use] mod common; use common::{clean_output_dir, create_wallet_proxy, setup}; fn no_change_test_impl(test_dir: &'static str) -> Result<(), libwallet::Error> { let mut wallet_proxy = create_wallet_proxy(test_dir); let chain = wallet_proxy.chain.clone(); let stopper = wallet_proxy.running.clone(); create_wallet_and_add!( client1, wallet1, mask1_i, test_dir, "wallet1", None, &mut wallet_proxy, false ); let mask1 = (&mask1_i).as_ref(); create_wallet_and_add!( client2, wallet2, mask2_i, test_dir, "wallet2", None, &mut wallet_proxy, false ); let mask2 = (&mask2_i).as_ref(); // Set the wallet proxy listener running thread::spawn(move || { if let Err(e) = wallet_proxy.run() { error!("Wallet Proxy error: {}", e); } }); // few values to keep things shorter let reward = core::consensus::REWARD1; // Mine into wallet 1 let _ = test_framework::award_blocks_to_wallet(&chain, wallet1.clone(), mask1, 4, false); let fee = core::libtx::tx_fee(1, 1, 1); // send a single block's worth of transactions with minimal strategy let mut slate = Slate::blank(2, false); let mut stored_excess = None; wallet::controller::owner_single_use(Some(wallet1.clone()), mask1, None, |api, m| { let args = InitTxArgs { src_acct_name: None, amount: reward - fee, minimum_confirmations: 2, max_outputs: 500, num_change_outputs: 1, selection_strategy_is_use_all: false, ..Default::default() }; slate = api.init_send_tx(m, args)?; slate = client1.send_tx_slate_direct("wallet2", &slate)?; api.tx_lock_outputs(m, &slate)?; slate = api.finalize_tx(m, &slate)?; println!("Posted Slate: {:?}", slate); stored_excess = Some(slate.tx.as_ref().unwrap().body.kernels[0].excess); api.post_tx(m, &slate, false)?; Ok(()) })?; // ensure stored excess is correct in both wallets // Wallet 1 calculated the excess with the full slate // Wallet 2 only had the excess provided by // wallet 1 // Refresh and check transaction log for wallet 1 wallet::controller::owner_single_use(Some(wallet1.clone()), mask1, None, |api, m| { let (refreshed, txs) = api.retrieve_txs(m, true, None, Some(slate.id))?; assert!(refreshed); let tx = txs[0].clone(); println!("SIMPLE SEND - SENDING WALLET"); println!("{:?}", tx); println!(); assert!(tx.confirmed); assert_eq!(stored_excess, tx.kernel_excess); Ok(()) })?; // Refresh and check transaction log for wallet 2 wallet::controller::owner_single_use(Some(wallet2.clone()), mask2, None, |api, m| { let (refreshed, txs) = api.retrieve_txs(m, true, None, Some(slate.id))?; assert!(refreshed); let tx = txs[0].clone(); 
println!("SIMPLE SEND - RECEIVING WALLET"); println!("{:?}", tx); println!(); assert!(tx.confirmed); assert_eq!(stored_excess, tx.kernel_excess); Ok(()) })?; // ensure invoice TX works as well with no change wallet::controller::owner_single_use(Some(wallet2.clone()), mask2, None, |api, m| { // Wallet 2 inititates an invoice transaction, requesting payment let args = IssueInvoiceTxArgs { amount: reward - fee, ..Default::default() }; slate = api.issue_invoice_tx(m, args)?; Ok(()) })?; wallet::controller::owner_single_use(Some(wallet1.clone()), mask1, None, |api, m| { // Wallet 1 receives the invoice transaction let args = InitTxArgs { src_acct_name: None, amount: slate.amount, minimum_confirmations: 2, max_outputs: 500, num_change_outputs: 1, selection_strategy_is_use_all: false, ..Default::default() }; slate = api.process_invoice_tx(m, &slate, args)?; api.tx_lock_outputs(m, &slate)?; Ok(()) })?; // wallet 2 finalizes and posts wallet::controller::foreign_single_use(wallet2.clone(), mask2_i.clone(), |api| { // Wallet 2 receives the invoice transaction slate = api.finalize_tx(&slate, false)?; Ok(()) })?; wallet::controller::owner_single_use(Some(wallet2.clone()), mask1, None, |api, m| { println!("Invoice Posted TX: {}", slate); stored_excess = Some(slate.tx.as_ref().unwrap().body.kernels[0].excess); api.post_tx(m, &slate, false)?; Ok(()) })?; // check wallet 2's version wallet::controller::owner_single_use(Some(wallet2.clone()), mask2, None, |api, m| { let (refreshed, txs) = api.retrieve_txs(m, true, None, Some(slate.id))?; assert!(refreshed); for tx in txs { stored_excess = tx.kernel_excess; println!("Wallet 2: {:?}", tx); println!(); assert!(tx.confirmed); assert_eq!(stored_excess, tx.kernel_excess); } Ok(()) })?; // Refresh and check transaction log for wallet 1 wallet::controller::owner_single_use(Some(wallet1.clone()), mask1, None, |api, m| { let (refreshed, txs) = api.retrieve_txs(m, true, None, Some(slate.id))?; assert!(refreshed); for tx in txs { println!("Wallet 1: {:?}", tx); println!(); assert_eq!(stored_excess, tx.kernel_excess); assert!(tx.confirmed); } Ok(()) })?; // let logging finish stopper.store(false, Ordering::Relaxed); thread::sleep(Duration::from_millis(200)); Ok(()) } #[test] fn no_change() { let test_dir = "test_output/no_change"; setup(test_dir); if let Err(e) = no_change_test_impl(test_dir) { panic!("Libwallet Error: {} - {}", e, e.backtrace().unwrap()); } clean_output_dir(test_dir); }
29.586854
98
0.687877
48c43afa87b8f8b77fb24a45db245fb4a088b493
4,200
use ggez::*; trait MyDrawTrait { // https://doc.rust-lang.org/error-index.html#method-has-no-receiver fn new(ctx: &mut Context, xpos: f32, ypos: f32) -> Self where Self: Sized; fn draw(&self, ctx: &mut Context) -> GameResult; fn move_location(&mut self, xinc: f32, yinc: f32); } trait MyCollideTrait: MyDrawTrait { fn hit_box(&self) -> graphics::Rect; // not sure if this is right fn collision<T>(&self, other: &T) -> bool where T: MyCollideTrait; } struct Object { shape: graphics::Mesh, hit_box: graphics::Rect, x: f32, y: f32, } impl MyDrawTrait for Object { fn new(ctx: &mut Context, xpos: f32, ypos: f32) -> Object { // radius of circle let r = 50f32; // create hit box let hb = graphics::Rect::new(0.0, 0.0, r * 2.0, r * 2.0); // create mesh let circle = graphics::MeshBuilder::new() .circle( graphics::DrawMode::fill(), nalgebra::Point2::new(r, r), r, 1.0, graphics::WHITE, ) .rectangle(graphics::DrawMode::stroke(1.0), hb.clone(), graphics::WHITE) .build(ctx) .unwrap(); // return new object Object { shape: circle, hit_box: hb, x: xpos, y: ypos, } } fn draw(&self, ctx: &mut Context) -> GameResult { let dp = graphics::DrawParam::default().dest(nalgebra::Point2::new(self.x, self.y)); graphics::draw(ctx, &self.shape, dp) } fn move_location(&mut self, xinc: f32, yinc: f32) { self.x += xinc; self.y += yinc; } } impl MyCollideTrait for Object { fn hit_box(&self) -> graphics::Rect { let mut r = self.hit_box.clone(); r.x = self.x; r.y = self.y; r } fn collision<T>(&self, other: &T) -> bool where T: MyCollideTrait, { self.hit_box().overlaps(&other.hit_box()) } } struct State { player: Object, // should really be Vec<Box<MyCollideTrait>> // but that makes this example harder walls: Vec<Object>, } impl State { fn new(ctx: &mut Context) -> State { let p = Object::new(ctx, 0.0, 0.0); let mut v = Vec::new(); v.push(Object::new(ctx, 350.0, 150.0)); v.push(Object::new(ctx, 350.0, 250.0)); v.push(Object::new(ctx, 350.0, 350.0)); State { player: p, walls: v, } } } impl event::EventHandler for State { fn update(&mut self, ctx: &mut Context) -> GameResult { let mut xmov = 0f32; let mut ymov = 0f32; if input::keyboard::is_key_pressed(ctx, event::KeyCode::Right) { xmov += 5.0; } if input::keyboard::is_key_pressed(ctx, event::KeyCode::Left) { xmov += -5.0; } if input::keyboard::is_key_pressed(ctx, event::KeyCode::Up) { ymov += -5.0; } if input::keyboard::is_key_pressed(ctx, event::KeyCode::Down) { ymov += 5.0; } self.player.move_location(xmov, ymov); for wall in &self.walls { if self.player.collision(wall) { self.player.move_location(-xmov, -ymov); } } //println!("{:?}", self.player.hit_box()); Ok(()) } fn draw(&mut self, ctx: &mut Context) -> GameResult { graphics::clear(ctx, graphics::BLACK); for wall in &self.walls { wall.draw(ctx)?; } self.player.draw(ctx)?; graphics::present(ctx)?; timer::yield_now(); Ok(()) } } fn main() { // create context let (ctx, event_loop) = &mut ContextBuilder::new("collisions", "people") .window_setup(conf::WindowSetup::default().title("Collision Detection")) .build() .unwrap(); // create state and game loop let state = &mut State::new(ctx); // run loop match event::run(ctx, event_loop, state) { Ok(_) => println!("Clean loop exit"), Err(e) => println!("Error loop exit {}", e), }; println!("Goodbye!"); }
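// For reference, a minimal sketch of the axis-aligned overlap test that
// `graphics::Rect::overlaps` performs for `collision` above (assuming ggez's
// `Rect { x, y, w, h }` layout; the function name is illustrative):
fn rects_overlap(a: &graphics::Rect, b: &graphics::Rect) -> bool {
    // Two rectangles overlap when their extents intersect on both axes.
    a.x < b.x + b.w && b.x < a.x + a.w && a.y < b.y + b.h && b.y < a.y + a.h
}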
27.272727
92
0.520238
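The comments in the collision example above leave two things open: whether the generic `collision<T>` method is the right shape ("not sure if this is right"), and the wish to store `walls` as `Vec<Box<MyCollideTrait>>`. One way to get there, sketched under the assumption that the same ggez `graphics::Rect` API is in scope and with hypothetical names: replacing the generic parameter with `&dyn` makes the trait object-safe, which is exactly what boxed trait objects require.

// Sketch only: an object-safe variant of the collision trait from the file above.
trait Collide {
    fn hit_box(&self) -> graphics::Rect;
    // `&dyn Collide` instead of a generic `<T>` keeps the trait object-safe,
    // and a default body reuses `Rect::overlaps` just like the original.
    fn collision(&self, other: &dyn Collide) -> bool {
        self.hit_box().overlaps(&other.hit_box())
    }
}

// A heterogeneous wall list is now allowed:
struct Walls {
    walls: Vec<Box<dyn Collide>>,
}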
48b8f5186cf1be3a658e6a74c7871493d823d94a
1,205
// This file is part of mlnx-ofed. It is subject to the license terms in the COPYRIGHT file found in the top-level directory of this distribution and at https://raw.githubusercontent.com/lemonrock/mlnx-ofed/master/COPYRIGHT. No part of mlnx-ofed, including this file, may be copied, modified, propagated, or distributed except according to the terms contained in the COPYRIGHT file. // Copyright © 2016 The developers of mlnx-ofed. See the COPYRIGHT file in the top-level directory of this distribution and at https://raw.githubusercontent.com/lemonrock/mlnx-ofed/master/COPYRIGHT. #[repr(C)] pub struct ibv_flow_spec__bindgen_ty_1 { pub hdr: __BindgenUnionField<ibv_flow_spec__bindgen_ty_1__bindgen_ty_1>, pub eth: __BindgenUnionField<ibv_flow_spec_eth>, pub ipv4: __BindgenUnionField<ibv_flow_spec_ipv4>, pub tcp_udp: __BindgenUnionField<ibv_flow_spec_tcp_udp>, pub bindgen_union_field: [u32; 10usize], } impl Default for ibv_flow_spec__bindgen_ty_1 { #[inline(always)] fn default() -> Self { unsafe { zeroed() } } } impl Debug for ibv_flow_spec__bindgen_ty_1 { #[inline(always)] fn fmt(&self, f: &mut Formatter) -> Result { write!(f, "ibv_flow_spec__bindgen_ty_1 {{ union }}") } }
37.65625
382
0.777593
39b4c77e099833fceeed0881db71d02da90593ba
140
pub use self::cx::FunctionContextArgExt; pub use self::eh::EventHandlerExt; pub use self::object::JSObjectExt; mod cx; mod eh; mod object;
17.5
40
0.757143
6a3f0fbd4eebd8d8322b8244bc10682c3fe21605
279
//! <https://www.codewars.com/kata/569b5cec755dd3534d00000f/train/rust> pub fn new_avg(arr: &[f64], newavg: f64) -> Option<i32> { match newavg * (arr.len() + 1) as f64 - arr.iter().sum::<f64>() { n if n > 0. => Some((n + 0.99999) as i32), _ => None, } }
31
71
0.551971
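A worked check of the formula above: with the kata's usual example the array sums to 91, so the required donation is 92 · 8 − 91 = 645. A small self-contained test sketch (input values taken from the kata description):

#[test]
fn new_avg_worked_example() {
    let arr = [14.0, 30.0, 5.0, 7.0, 9.0, 11.0, 15.0]; // sum = 91
    assert_eq!(new_avg(&arr, 92.0), Some(645)); // 92 * 8 - 91 = 645
    // A target at or below the current average would need a non-positive
    // donation, so the function returns None.
    assert_eq!(new_avg(&arr, 2.0), None);
}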
ef1257298698d89173049b614c1006c3559cd326
1,705
use hyper::{body::Body, Client}; use opentelemetry::{ global, sdk::export::trace::stdout, sdk::trace as sdktrace, trace::{TraceContextExt, Tracer}, Context, KeyValue, }; use opentelemetry_aws::XrayPropagator; use opentelemetry_http::HeaderInjector; fn init_tracer() -> sdktrace::Tracer { global::set_text_map_propagator(XrayPropagator::new()); // Install stdout exporter pipeline to be able to retrieve the collected spans. // For the demonstration, use `Sampler::AlwaysOn` sampler to sample all traces. In a production // application, use `Sampler::ParentBased` or `Sampler::TraceIdRatioBased` with a desired ratio. stdout::new_pipeline() .with_trace_config( sdktrace::config() .with_sampler(sdktrace::Sampler::AlwaysOn) .with_id_generator(sdktrace::XrayIdGenerator::default()), ) .install_simple() } #[tokio::main] async fn main() -> std::result::Result<(), Box<dyn std::error::Error + Send + Sync + 'static>> { let _tracer = init_tracer(); let client = Client::new(); let span = global::tracer("example/client").start("say hello"); let cx = Context::current_with_span(span); let mut req = hyper::Request::builder().uri("http://127.0.0.1:3000"); global::get_text_map_propagator(|propagator| { propagator.inject_context(&cx, &mut HeaderInjector(req.headers_mut().unwrap())); println!("Headers: {:?}", req.headers_ref()); }); let res = client.request(req.body(Body::from("Hallo!"))?).await?; cx.span().add_event( "Got response!".to_string(), vec![KeyValue::new("status", res.status().to_string())], ); Ok(()) }
32.788462
100
0.646334
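The tracing example above covers only the client half: it injects the X-Ray context into outgoing headers. The server half runs the same propagator in reverse; a minimal sketch, assuming the matching `opentelemetry_http::HeaderExtractor` wrapper and the propagator installed as in `init_tracer`:

use opentelemetry::{global, Context};
use opentelemetry_http::HeaderExtractor;

// Recover the Context that the client above injected into the request headers.
fn extract_context(req: &hyper::Request<hyper::Body>) -> Context {
    global::get_text_map_propagator(|propagator| {
        propagator.extract(&HeaderExtractor(req.headers()))
    })
}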
d96b3f6f224cfb35c10c0d6ef99052bdcd875e2e
861
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // FIXME(#13725) windows needs fixing. // ignore-win32 // ignore-stage1 #![feature(phase)] extern crate regex; #[phase(plugin)] extern crate regex_macros; #[deny(unused_variable)] #[deny(dead_code)] // Tests to make sure that extraneous dead code warnings aren't emitted from // the code generated by regex!. fn main() { let fubar = regex!("abc"); //~ ERROR unused variable: `fubar` }
29.689655
76
0.723577
11a9407980e448659d9eece15d4d150be9e18348
243
#![feature(thread_local, unsafe_destructor)] #![crate_name = "hamt"] #![crate_type = "lib"] #![license = "MIT"] #![experimental] //! A Concurrent, persistent, wait-free, non-blocking, hash map array trie. pub mod hamt; pub mod hp; mod bits;
20.25
75
0.683128
fcc9bfd4dd991c41c11fe63d394b341e8b7c28d2
3,288
#[doc = "Register `PRO_LEDC_INT_MAP` reader"] pub struct R(crate::R<PRO_LEDC_INT_MAP_SPEC>); impl core::ops::Deref for R { type Target = crate::R<PRO_LEDC_INT_MAP_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl From<crate::R<PRO_LEDC_INT_MAP_SPEC>> for R { #[inline(always)] fn from(reader: crate::R<PRO_LEDC_INT_MAP_SPEC>) -> Self { R(reader) } } #[doc = "Register `PRO_LEDC_INT_MAP` writer"] pub struct W(crate::W<PRO_LEDC_INT_MAP_SPEC>); impl core::ops::Deref for W { type Target = crate::W<PRO_LEDC_INT_MAP_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl core::ops::DerefMut for W { #[inline(always)] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl From<crate::W<PRO_LEDC_INT_MAP_SPEC>> for W { #[inline(always)] fn from(writer: crate::W<PRO_LEDC_INT_MAP_SPEC>) -> Self { W(writer) } } #[doc = "Field `PRO_LEDC_INT_MAP` reader - "] pub struct PRO_LEDC_INT_MAP_R(crate::FieldReader<u8, u8>); impl PRO_LEDC_INT_MAP_R { #[inline(always)] pub(crate) fn new(bits: u8) -> Self { PRO_LEDC_INT_MAP_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for PRO_LEDC_INT_MAP_R { type Target = crate::FieldReader<u8, u8>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `PRO_LEDC_INT_MAP` writer - "] pub struct PRO_LEDC_INT_MAP_W<'a> { w: &'a mut W, } impl<'a> PRO_LEDC_INT_MAP_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !0x1f) | (value as u32 & 0x1f); self.w } } impl R { #[doc = "Bits 0:4"] #[inline(always)] pub fn pro_ledc_int_map(&self) -> PRO_LEDC_INT_MAP_R { PRO_LEDC_INT_MAP_R::new((self.bits & 0x1f) as u8) } } impl W { #[doc = "Bits 0:4"] #[inline(always)] pub fn pro_ledc_int_map(&mut self) -> PRO_LEDC_INT_MAP_W { PRO_LEDC_INT_MAP_W { w: self } } #[doc = "Writes raw bits to the register."] #[inline(always)] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.0.bits(bits); self } } #[doc = "\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [pro_ledc_int_map](index.html) module"] pub struct PRO_LEDC_INT_MAP_SPEC; impl crate::RegisterSpec for PRO_LEDC_INT_MAP_SPEC { type Ux = u32; } #[doc = "`read()` method returns [pro_ledc_int_map::R](R) reader structure"] impl crate::Readable for PRO_LEDC_INT_MAP_SPEC { type Reader = R; } #[doc = "`write(|w| ..)` method takes [pro_ledc_int_map::W](W) writer structure"] impl crate::Writable for PRO_LEDC_INT_MAP_SPEC { type Writer = W; } #[doc = "`reset()` method sets PRO_LEDC_INT_MAP to value 0x10"] impl crate::Resettable for PRO_LEDC_INT_MAP_SPEC { #[inline(always)] fn reset_value() -> Self::Ux { 0x10 } }
31.615385
397
0.631995
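The register file above is svd2rust output, so it is normally driven through the generated `read`/`write`/`modify` API rather than constructed by hand. A sketch of the usual pattern — `Dport` is a hypothetical peripheral type owning this register, and the interrupt line number is illustrative:

// Not compilable stand-alone: `Dport` stands in for the real peripheral block.
fn route_ledc_interrupt(dport: &Dport) {
    // Map the LEDC interrupt to CPU interrupt line 5; `bits` is unsafe
    // because the 5-bit field is written unchecked.
    dport
        .pro_ledc_int_map
        .write(|w| unsafe { w.pro_ledc_int_map().bits(5) });
    // Read it back through the generated reader.
    let line = dport.pro_ledc_int_map.read().pro_ledc_int_map().bits();
    assert_eq!(line, 5);
}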
1833228b2a9525fbd9e467077ba98bfc184d1fd4
476
use serde::{ Deserialize, Serialize, }; use ts_rs::TS; #[derive(Debug, Clone, Copy, Deserialize, Serialize, PartialEq, TS)] #[ts(export, export_to = "ui/src/server_types/Availability.ts")] #[serde(rename_all = "camelCase")] pub enum Availability { Unavailable, GlitchPossible, Possible, GlitchAgahnim, Agahnim, GlitchAvailable, Available, } impl Default for Availability { fn default() -> Availability { Availability::Unavailable } }
20.695652
68
0.686975
28d19cea868f5aca280656b49af17861845ee6bb
11,018
#[doc = "Register `PLL_PERI_PAT0_CTRL` reader"] pub struct R(crate::R<PLL_PERI_PAT0_CTRL_SPEC>); impl core::ops::Deref for R { type Target = crate::R<PLL_PERI_PAT0_CTRL_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl From<crate::R<PLL_PERI_PAT0_CTRL_SPEC>> for R { #[inline(always)] fn from(reader: crate::R<PLL_PERI_PAT0_CTRL_SPEC>) -> Self { R(reader) } } #[doc = "Register `PLL_PERI_PAT0_CTRL` writer"] pub struct W(crate::W<PLL_PERI_PAT0_CTRL_SPEC>); impl core::ops::Deref for W { type Target = crate::W<PLL_PERI_PAT0_CTRL_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl core::ops::DerefMut for W { #[inline(always)] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl From<crate::W<PLL_PERI_PAT0_CTRL_SPEC>> for W { #[inline(always)] fn from(writer: crate::W<PLL_PERI_PAT0_CTRL_SPEC>) -> Self { W(writer) } } #[doc = "Field `SIG_DELT_PAT_EN` reader - Sigma-Delta Pattern Enable"] pub type SIG_DELT_PAT_EN_R = crate::BitReader<bool>; #[doc = "Field `SIG_DELT_PAT_EN` writer - Sigma-Delta Pattern Enable"] pub type SIG_DELT_PAT_EN_W<'a> = crate::BitWriter<'a, u32, PLL_PERI_PAT0_CTRL_SPEC, bool, 31>; #[doc = "Spread Frequency Mode\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] #[repr(u8)] pub enum SPR_FREQ_MODE_A { #[doc = "0: `0`"] DC0 = 0, #[doc = "1: `1`"] DC1 = 1, #[doc = "2: `10`"] TRIANGULAR_1 = 2, #[doc = "3: `11`"] TRIANGULAR_N = 3, } impl From<SPR_FREQ_MODE_A> for u8 { #[inline(always)] fn from(variant: SPR_FREQ_MODE_A) -> Self { variant as _ } } #[doc = "Field `SPR_FREQ_MODE` reader - Spread Frequency Mode"] pub type SPR_FREQ_MODE_R = crate::FieldReader<u8, SPR_FREQ_MODE_A>; impl SPR_FREQ_MODE_R { #[doc = "Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> SPR_FREQ_MODE_A { match self.bits { 0 => SPR_FREQ_MODE_A::DC0, 1 => SPR_FREQ_MODE_A::DC1, 2 => SPR_FREQ_MODE_A::TRIANGULAR_1, 3 => SPR_FREQ_MODE_A::TRIANGULAR_N, _ => unreachable!(), } } #[doc = "Checks if the value of the field is `DC0`"] #[inline(always)] pub fn is_dc0(&self) -> bool { *self == SPR_FREQ_MODE_A::DC0 } #[doc = "Checks if the value of the field is `DC1`"] #[inline(always)] pub fn is_dc1(&self) -> bool { *self == SPR_FREQ_MODE_A::DC1 } #[doc = "Checks if the value of the field is `TRIANGULAR_1`"] #[inline(always)] pub fn is_triangular_1(&self) -> bool { *self == SPR_FREQ_MODE_A::TRIANGULAR_1 } #[doc = "Checks if the value of the field is `TRIANGULAR_N`"] #[inline(always)] pub fn is_triangular_n(&self) -> bool { *self == SPR_FREQ_MODE_A::TRIANGULAR_N } } #[doc = "Field `SPR_FREQ_MODE` writer - Spread Frequency Mode"] pub type SPR_FREQ_MODE_W<'a> = crate::FieldWriterSafe<'a, u32, PLL_PERI_PAT0_CTRL_SPEC, u8, SPR_FREQ_MODE_A, 2, 29>; impl<'a> SPR_FREQ_MODE_W<'a> { #[doc = "`0`"] #[inline(always)] pub fn dc0(self) -> &'a mut W { self.variant(SPR_FREQ_MODE_A::DC0) } #[doc = "`1`"] #[inline(always)] pub fn dc1(self) -> &'a mut W { self.variant(SPR_FREQ_MODE_A::DC1) } #[doc = "`10`"] #[inline(always)] pub fn triangular_1(self) -> &'a mut W { self.variant(SPR_FREQ_MODE_A::TRIANGULAR_1) } #[doc = "`11`"] #[inline(always)] pub fn triangular_n(self) -> &'a mut W { self.variant(SPR_FREQ_MODE_A::TRIANGULAR_N) } } #[doc = "Field `WAVE_STEP` reader - Wave Step"] pub type WAVE_STEP_R = crate::FieldReader<u16, u16>; #[doc = "Field `WAVE_STEP` writer - Wave Step"] pub type WAVE_STEP_W<'a> = crate::FieldWriter<'a, u32, PLL_PERI_PAT0_CTRL_SPEC, u16, u16, 9, 20>; #[doc = "SDM Clock Select\n\nValue on reset: 0"] #[derive(Clone, Copy, 
Debug, PartialEq)] pub enum SDM_CLK_SEL_A { #[doc = "0: `0`"] F_24_M = 0, #[doc = "1: `1`"] F_12_M = 1, } impl From<SDM_CLK_SEL_A> for bool { #[inline(always)] fn from(variant: SDM_CLK_SEL_A) -> Self { variant as u8 != 0 } } #[doc = "Field `SDM_CLK_SEL` reader - SDM Clock Select"] pub type SDM_CLK_SEL_R = crate::BitReader<SDM_CLK_SEL_A>; impl SDM_CLK_SEL_R { #[doc = "Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> SDM_CLK_SEL_A { match self.bits { false => SDM_CLK_SEL_A::F_24_M, true => SDM_CLK_SEL_A::F_12_M, } } #[doc = "Checks if the value of the field is `F_24_M`"] #[inline(always)] pub fn is_f_24_m(&self) -> bool { *self == SDM_CLK_SEL_A::F_24_M } #[doc = "Checks if the value of the field is `F_12_M`"] #[inline(always)] pub fn is_f_12_m(&self) -> bool { *self == SDM_CLK_SEL_A::F_12_M } } #[doc = "Field `SDM_CLK_SEL` writer - SDM Clock Select"] pub type SDM_CLK_SEL_W<'a> = crate::BitWriter<'a, u32, PLL_PERI_PAT0_CTRL_SPEC, SDM_CLK_SEL_A, 19>; impl<'a> SDM_CLK_SEL_W<'a> { #[doc = "`0`"] #[inline(always)] pub fn f_24_m(self) -> &'a mut W { self.variant(SDM_CLK_SEL_A::F_24_M) } #[doc = "`1`"] #[inline(always)] pub fn f_12_m(self) -> &'a mut W { self.variant(SDM_CLK_SEL_A::F_12_M) } } #[doc = "Frequency\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] #[repr(u8)] pub enum FREQ_A { #[doc = "0: `0`"] F_31_5_K = 0, #[doc = "1: `1`"] F_32_K = 1, #[doc = "2: `10`"] F_32_5_K = 2, #[doc = "3: `11`"] F_33_K = 3, } impl From<FREQ_A> for u8 { #[inline(always)] fn from(variant: FREQ_A) -> Self { variant as _ } } #[doc = "Field `FREQ` reader - Frequency"] pub type FREQ_R = crate::FieldReader<u8, FREQ_A>; impl FREQ_R { #[doc = "Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> FREQ_A { match self.bits { 0 => FREQ_A::F_31_5_K, 1 => FREQ_A::F_32_K, 2 => FREQ_A::F_32_5_K, 3 => FREQ_A::F_33_K, _ => unreachable!(), } } #[doc = "Checks if the value of the field is `F_31_5_K`"] #[inline(always)] pub fn is_f_31_5_k(&self) -> bool { *self == FREQ_A::F_31_5_K } #[doc = "Checks if the value of the field is `F_32_K`"] #[inline(always)] pub fn is_f_32_k(&self) -> bool { *self == FREQ_A::F_32_K } #[doc = "Checks if the value of the field is `F_32_5_K`"] #[inline(always)] pub fn is_f_32_5_k(&self) -> bool { *self == FREQ_A::F_32_5_K } #[doc = "Checks if the value of the field is `F_33_K`"] #[inline(always)] pub fn is_f_33_k(&self) -> bool { *self == FREQ_A::F_33_K } } #[doc = "Field `FREQ` writer - Frequency"] pub type FREQ_W<'a> = crate::FieldWriterSafe<'a, u32, PLL_PERI_PAT0_CTRL_SPEC, u8, FREQ_A, 2, 17>; impl<'a> FREQ_W<'a> { #[doc = "`0`"] #[inline(always)] pub fn f_31_5_k(self) -> &'a mut W { self.variant(FREQ_A::F_31_5_K) } #[doc = "`1`"] #[inline(always)] pub fn f_32_k(self) -> &'a mut W { self.variant(FREQ_A::F_32_K) } #[doc = "`10`"] #[inline(always)] pub fn f_32_5_k(self) -> &'a mut W { self.variant(FREQ_A::F_32_5_K) } #[doc = "`11`"] #[inline(always)] pub fn f_33_k(self) -> &'a mut W { self.variant(FREQ_A::F_33_K) } } #[doc = "Field `WAVE_BOT` reader - Wave Bottom"] pub type WAVE_BOT_R = crate::FieldReader<u32, u32>; #[doc = "Field `WAVE_BOT` writer - Wave Bottom"] pub type WAVE_BOT_W<'a> = crate::FieldWriter<'a, u32, PLL_PERI_PAT0_CTRL_SPEC, u32, u32, 17, 0>; impl R { #[doc = "Bit 31 - Sigma-Delta Pattern Enable"] #[inline(always)] pub fn sig_delt_pat_en(&self) -> SIG_DELT_PAT_EN_R { SIG_DELT_PAT_EN_R::new(((self.bits >> 31) & 1) != 0) } #[doc = "Bits 29:30 - Spread Frequency Mode"] #[inline(always)] pub fn spr_freq_mode(&self) -> 
SPR_FREQ_MODE_R { SPR_FREQ_MODE_R::new(((self.bits >> 29) & 3) as u8) } #[doc = "Bits 20:28 - Wave Step"] #[inline(always)] pub fn wave_step(&self) -> WAVE_STEP_R { WAVE_STEP_R::new(((self.bits >> 20) & 0x01ff) as u16) } #[doc = "Bit 19 - SDM Clock Select"] #[inline(always)] pub fn sdm_clk_sel(&self) -> SDM_CLK_SEL_R { SDM_CLK_SEL_R::new(((self.bits >> 19) & 1) != 0) } #[doc = "Bits 17:18 - Frequency"] #[inline(always)] pub fn freq(&self) -> FREQ_R { FREQ_R::new(((self.bits >> 17) & 3) as u8) } #[doc = "Bits 0:16 - Wave Bottom"] #[inline(always)] pub fn wave_bot(&self) -> WAVE_BOT_R { WAVE_BOT_R::new((self.bits & 0x0001_ffff) as u32) } } impl W { #[doc = "Bit 31 - Sigma-Delta Pattern Enable"] #[inline(always)] pub fn sig_delt_pat_en(&mut self) -> SIG_DELT_PAT_EN_W { SIG_DELT_PAT_EN_W::new(self) } #[doc = "Bits 29:30 - Spread Frequency Mode"] #[inline(always)] pub fn spr_freq_mode(&mut self) -> SPR_FREQ_MODE_W { SPR_FREQ_MODE_W::new(self) } #[doc = "Bits 20:28 - Wave Step"] #[inline(always)] pub fn wave_step(&mut self) -> WAVE_STEP_W { WAVE_STEP_W::new(self) } #[doc = "Bit 19 - SDM Clock Select"] #[inline(always)] pub fn sdm_clk_sel(&mut self) -> SDM_CLK_SEL_W { SDM_CLK_SEL_W::new(self) } #[doc = "Bits 17:18 - Frequency"] #[inline(always)] pub fn freq(&mut self) -> FREQ_W { FREQ_W::new(self) } #[doc = "Bits 0:16 - Wave Bottom"] #[inline(always)] pub fn wave_bot(&mut self) -> WAVE_BOT_W { WAVE_BOT_W::new(self) } #[doc = "Writes raw bits to the register."] #[inline(always)] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.0.bits(bits); self } } #[doc = "PLL_PERI Pattern0 Control Register\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [pll_peri_pat0_ctrl](index.html) module"] pub struct PLL_PERI_PAT0_CTRL_SPEC; impl crate::RegisterSpec for PLL_PERI_PAT0_CTRL_SPEC { type Ux = u32; } #[doc = "`read()` method returns [pll_peri_pat0_ctrl::R](R) reader structure"] impl crate::Readable for PLL_PERI_PAT0_CTRL_SPEC { type Reader = R; } #[doc = "`write(|w| ..)` method takes [pll_peri_pat0_ctrl::W](W) writer structure"] impl crate::Writable for PLL_PERI_PAT0_CTRL_SPEC { type Writer = W; } #[doc = "`reset()` method sets PLL_PERI_PAT0_CTRL to value 0"] impl crate::Resettable for PLL_PERI_PAT0_CTRL_SPEC { #[inline(always)] fn reset_value() -> Self::Ux { 0 } }
31.843931
433
0.594209
f9b70360aa14626b13130abe1a584321150963d7
4,739
// Kaleidoscope: RGB command-line wallet utility
// Written in 2019-2020 by
//     Dr. Maxim Orlovsky <[email protected]>
//     Alekos Filini <[email protected]>
//
// To the extent possible under law, the author(s) have dedicated all
// copyright and related and neighboring rights to this software to
// the public domain worldwide. This software is distributed without
// any warranty.
//
// You should have received a copy of the MIT License
// along with this software.
// If not, see <https://opensource.org/licenses/MIT>.

use clap::Clap;
use regex::Regex;
use serde::{Deserialize, Serialize};
use std::io;
use std::path::PathBuf;

use bitcoin::hashes::hex::FromHex;
use bitcoin::TxIn;
use lnpbp::bitcoin;
use lnpbp::bp;
use lnpbp::rgb::prelude::*;

use crate::fungible::Outcoins;
use crate::util::SealSpec;
use lnpbp::strict_encoding::{Error, StrictDecode};

#[derive(Clap, Clone, PartialEq, Serialize, Deserialize, Debug, Display)]
#[display_from(Debug)]
pub struct Issue {
    /// Limit for the total supply; ignored if the asset can't be inflated
    #[clap(short, long)]
    pub supply: Option<f32>,

    /// Enables secondary issuance/inflation; takes UTXO seal definition
    /// as its value
    #[clap(short, long, requires("supply"))]
    pub inflatable: Option<SealSpec>,

    /// Precision, i.e. number of digits reserved for fractional part
    #[clap(short, long, default_value = "0")]
    pub precision: u8,

    /// Dust limit for asset transfers; defaults to no limit
    #[clap(short = "D", long)]
    pub dust_limit: Option<Amount>,

    /// Filename to export asset genesis to;
    /// saves into data dir if not provided
    #[clap(short, long)]
    pub output: Option<PathBuf>,

    /// Asset ticker
    #[clap(validator=ticker_validator)]
    pub ticker: String,

    /// Asset title
    pub title: String,

    /// Asset description
    #[clap(short, long)]
    pub description: Option<String>,

    /// Asset allocation, in form of <amount>@<txid>:<vout>
    #[clap(required = true)]
    pub allocate: Vec<Outcoins>,
}

impl StrictDecode for Issue {
    type Error = Error;

    fn strict_decode<D: io::Read>(d: D) -> Result<Self, Self::Error> {
        unimplemented!()
    }
}

#[derive(Clap, Clone, PartialEq, Debug, Display)]
#[display_from(Debug)]
pub struct Transfer {
    /// Use custom commitment output for generated witness transaction
    #[clap(long)]
    pub commit_txout: Option<Output>,

    /// Adds output(s) to generated witness transaction
    #[clap(long)]
    pub txout: Vec<Output>,

    /// Adds input(s) to generated witness transaction
    #[clap(long)]
    pub txin: Vec<Input>,

    /// Allocates other assets to custom outputs
    #[clap(short, long)]
    pub allocate: Vec<Outcoins>,

    /// Saves witness transaction to a file instead of publishing it
    #[clap(short, long)]
    pub transaction: Option<PathBuf>,

    /// Saves proof data to a file instead of sending it to the remote party
    #[clap(short, long)]
    pub proof: Option<PathBuf>,

    /// Amount
    pub amount: Amount,

    /// Assets
    #[clap(parse(try_from_str=ContractId::from_hex))]
    pub contract_id: ContractId,

    /// Receiver
    #[clap(parse(try_from_str=bp::blind::OutpointHash::from_hex))]
    pub receiver: bp::blind::OutpointHash,
    // / Invoice to pay
    //pub invoice: fungible::Invoice,
}

fn ticker_validator(name: &str) -> Result<(), String> {
    let re = Regex::new(r"^[A-Z]{3,8}$").expect("Regex parse failure");
    if !re.is_match(&name) {
        Err(
            "Ticker name must be between 3 and 8 chars, contain no spaces and \
             consist only of capital letters"
                .to_string(),
        )
    } else {
        Ok(())
    }
}

// Helper data structures

mod helpers {
    use super::*;
    use core::str::FromStr;

    /// Defines information required to generate bitcoin transaction output from
    /// command-line argument
#[derive(Clone, PartialEq, Debug, Display)] #[display_from(Debug)] pub struct Output { pub amount: bitcoin::Amount, pub lock: bp::LockScript, } impl FromStr for Output { type Err = String; fn from_str(s: &str) -> Result<Self, Self::Err> { unimplemented!() } } /// Defines information required to generate bitcoin transaction input from /// command-line argument #[derive(Clone, PartialEq, Debug, Display)] #[display_from(Debug)] pub struct Input { pub txin: TxIn, pub unlock: bp::LockScript, } impl FromStr for Input { type Err = String; fn from_str(s: &str) -> Result<Self, Self::Err> { unimplemented!() } } } pub use helpers::*;
27.08
80
0.639797
79226a4541c5c082a37e5c1cd977c6218257b974
1,035
use serenity::model::user; use std::sync::Arc; use super::DiscordService; use crate::services::{User, UserId}; pub struct DiscordUser { user: user::User, service: Arc<DiscordService>, name: String, nick: String, avatar: Option<String>, } impl DiscordUser { pub fn new(user: user::User, service: Arc<DiscordService>) -> DiscordUser { DiscordUser { name: format!("{}#{:04}", user.name, user.discriminator), nick: user.name.clone(), avatar: user.avatar_url(), user, service, } } } impl User<DiscordService> for DiscordUser { fn id(&self) -> UserId { UserId::Discord(self.user.id.0) } fn name(&self) -> &str { &self.name } fn nick(&self) -> &str { &self.nick } fn avatar(&self) -> &Option<String> { &self.avatar } fn bot(&self) -> Option<bool> { Some(self.user.bot) } fn service(&self) -> &Arc<DiscordService> { &self.service } }
19.903846
79
0.54686
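`DiscordUser::new` above builds the display tag with `{:04}`, which zero-pads the discriminator to Discord's `name#0007` convention; a quick self-contained check of that format string (example values are illustrative):

#[test]
fn discriminator_is_zero_padded() {
    assert_eq!(format!("{}#{:04}", "gort", 7u16), "gort#0007");
    assert_eq!(format!("{}#{:04}", "gort", 1234u16), "gort#1234");
}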
d6e2920f9a6ba31e1cebd591e7865f0727b710b8
3,585
use super::error::CompileError; use crate::{ast::*, debug::SourceInformation}; use std::{collections::HashMap, sync::Arc}; pub struct GlobalNameValidator {} impl GlobalNameValidator { pub fn new() -> Self { Self {} } pub fn validate(&self, module: &Module) -> Result<(), CompileError> { let mut names = HashMap::<&str, Arc<SourceInformation>>::new(); for declaration in module.import_foreigns() { if let Some(source_information) = names.get(declaration.name()) { return Err(CompileError::DuplicateNames( source_information.clone(), declaration.source_information().clone(), )); } names.insert(declaration.name(), declaration.source_information().clone()); } for definition in module.definitions() { if let Some(source_information) = names.get(definition.name()) { return Err(CompileError::DuplicateNames( source_information.clone(), definition.source_information().clone(), )); } names.insert(definition.name(), definition.source_information().clone()); } Ok(()) } } #[cfg(test)] mod tests { use super::*; use crate::{path::ModulePath, types}; #[test] fn validate_duplicate_names() { assert_eq!( GlobalNameValidator::new().validate(&Module::from_definitions(vec![ VariableDefinition::new( "x", Number::new(42.0, SourceInformation::dummy()), types::Number::new(SourceInformation::dummy()), SourceInformation::dummy(), ) .into(), VariableDefinition::new( "x", Number::new(42.0, SourceInformation::dummy()), types::Number::new(SourceInformation::dummy()), SourceInformation::dummy(), ) .into(), ])), Err(CompileError::DuplicateNames( SourceInformation::dummy().into(), SourceInformation::dummy().into() )) ); } #[test] fn validate_duplicate_names_with_import_foreigns() { assert_eq!( GlobalNameValidator::new().validate(&Module::new( ModulePath::dummy(), Export::new(Default::default()), ExportForeign::new(Default::default()), vec![], vec![ImportForeign::new( "foo", "foo", CallingConvention::Native, types::Function::new( types::Number::new(SourceInformation::dummy()), types::Number::new(SourceInformation::dummy()), SourceInformation::dummy() ), SourceInformation::dummy() )], vec![], vec![VariableDefinition::new( "foo", Number::new(42.0, SourceInformation::dummy()), types::Number::new(SourceInformation::dummy()), SourceInformation::dummy(), ) .into()] )), Err(CompileError::DuplicateNames( SourceInformation::dummy().into(), SourceInformation::dummy().into() )) ); } }
33.504673
87
0.48424
d9205e26342b183ed3c3ffb2a07d6c3a837745e9
908
use piston::input::Key; use entities::Entity; #[derive(Clone, Copy, Eq, PartialEq)] pub enum InputType { Pressed, Released } pub struct PlayerInput { up_key: Key, down_key: Key, velocity: f64 } impl PlayerInput { pub fn new(up_key: Key, down_key: Key, velocity: f64) -> PlayerInput { PlayerInput { up_key: up_key, down_key: down_key, velocity: velocity } } pub fn update(&self, entity: &mut Entity, key: Key, dir: InputType) { if dir == InputType::Released && (key == self.up_key || key == self.down_key) { entity.velocity = 0.0; } else if key == self.up_key { entity.velocity = self.velocity; entity.orientation = -90.0; } else if key == self.down_key { entity.velocity = self.velocity; entity.orientation = 90.0; } } }
22.7
87
0.559471
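A usage sketch for `PlayerInput` above. The only assumption made about `entities::Entity` is what `update` already relies on: public `velocity` and `orientation` fields; the key bindings and speed are illustrative.

// Sketch: wiring one paddle to W/S and forwarding key events to it.
fn handle_key(entity: &mut Entity, key: Key, dir: InputType) {
    let input = PlayerInput::new(Key::W, Key::S, 120.0);
    // Pressing W sets velocity = 120.0 and orientation = -90.0 (up);
    // releasing either mapped key zeroes the velocity again.
    input.update(entity, key, dir);
}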
1d2e00375cf47e580074e53304dbcb72020b8409
3,452
// Copyright 2021. The Tari Project // // Redistribution and use in source and binary forms, with or without modification, are permitted provided that the // following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following // disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the // following disclaimer in the documentation and/or other materials provided with the distribution. // // 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote // products derived from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, // INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. use std::time::Duration; use tari_core::{consensus::NetworkConsensus, transactions::CryptoFactories}; use tari_p2p::{auto_update::AutoUpdateConfig, initialization::P2pConfig}; use crate::{ base_node_service::config::BaseNodeServiceConfig, output_manager_service::config::OutputManagerServiceConfig, transaction_service::config::TransactionServiceConfig, }; pub const KEY_MANAGER_COMMS_SECRET_KEY_BRANCH_KEY: &str = "comms"; #[derive(Clone)] pub struct WalletConfig { pub comms_config: P2pConfig, pub factories: CryptoFactories, pub transaction_service_config: Option<TransactionServiceConfig>, pub output_manager_service_config: Option<OutputManagerServiceConfig>, pub buffer_size: usize, pub rate_limit: usize, pub network: NetworkConsensus, pub base_node_service_config: BaseNodeServiceConfig, pub updater_config: Option<AutoUpdateConfig>, pub autoupdate_check_interval: Option<Duration>, } impl WalletConfig { pub fn new( comms_config: P2pConfig, factories: CryptoFactories, transaction_service_config: Option<TransactionServiceConfig>, output_manager_service_config: Option<OutputManagerServiceConfig>, network: NetworkConsensus, base_node_service_config: Option<BaseNodeServiceConfig>, buffer_size: Option<usize>, rate_limit: Option<usize>, updater_config: Option<AutoUpdateConfig>, autoupdate_check_interval: Option<Duration>, ) -> Self { Self { comms_config, factories, transaction_service_config, output_manager_service_config, buffer_size: buffer_size.unwrap_or(1500), rate_limit: rate_limit.unwrap_or(50), network, base_node_service_config: base_node_service_config.unwrap_or_default(), updater_config, autoupdate_check_interval, } } }
44.831169
118
0.749421
396861af2f9cdfdb3a91581f37d16179d991efeb
3,096
use proc_macro2::{Group, Ident, Literal, Span, TokenStream}; #[derive(Debug)] pub struct Grammar { pub doc: Option<TokenStream>, pub visibility: Option<TokenStream>, pub name: Ident, pub ty_params: Option<Vec<TokenStream>>, pub args: Vec<(Ident, TokenStream)>, pub items: Vec<Item>, pub input_type: TokenStream, } impl Grammar { pub fn iter_rules(&self) -> impl Iterator<Item = &Rule> { self.items.iter().filter_map(|item| match item { Item::Rule(r) => Some(r), _ => None, }) } } #[derive(Debug)] pub enum Item { Use(TokenStream), Rule(Rule), } #[derive(Debug)] pub struct Rule { pub span: Span, pub name: Ident, pub ty_params: Option<Vec<TokenStream>>, pub params: Vec<RuleParam>, pub expr: SpannedExpr, pub ret_type: Option<TokenStream>, pub doc: Option<TokenStream>, pub visibility: Option<TokenStream>, pub cached: bool, } #[derive(Debug)] pub struct RuleParam { pub name: Ident, pub ty: RuleParamTy, } #[derive(Debug)] pub enum RuleParamTy { Rust(TokenStream), Rule(TokenStream), } #[derive(Debug, Clone)] pub struct TaggedExpr { pub name: Option<Ident>, pub expr: SpannedExpr, } #[derive(Debug, Clone)] pub struct SpannedExpr { pub span: Span, pub expr: Expr, } #[derive(Debug, Clone)] pub enum Expr { LiteralExpr(Literal), PatternExpr(Group), RuleExpr(Ident, Vec<RuleArg>), MethodExpr(Ident, TokenStream), ChoiceExpr(Vec<SpannedExpr>), OptionalExpr(Box<SpannedExpr>), Repeat { inner: Box<SpannedExpr>, bound: BoundedRepeat, sep: Option<Box<SpannedExpr>> }, PosAssertExpr(Box<SpannedExpr>), NegAssertExpr(Box<SpannedExpr>), ActionExpr(Vec<TaggedExpr>, Option<Group>), MatchStrExpr(Box<SpannedExpr>), PositionExpr, QuietExpr(Box<SpannedExpr>), FailExpr(Literal), PrecedenceExpr { levels: Vec<PrecedenceLevel>, }, MarkerExpr(bool), } impl Expr { pub fn at(self, sp: Span) -> SpannedExpr { SpannedExpr { expr: self, span:sp } } } #[derive(Debug, Clone)] pub enum RuleArg { Rust(TokenStream), Peg(SpannedExpr), } #[derive(Debug, Clone)] pub struct PrecedenceLevel { pub operators: Vec<PrecedenceOperator>, } #[derive(Debug, Clone)] pub struct PrecedenceOperator { pub span: Span, pub elements: Vec<TaggedExpr>, pub action: Group, } #[derive(Debug, Clone)] pub enum BoundedRepeat { None, Plus, Exact(TokenStream), Both(Option<TokenStream>, Option<TokenStream>), } impl BoundedRepeat { pub fn has_lower_bound(&self) -> bool { match self { BoundedRepeat::None | BoundedRepeat::Both(None, _) => false, BoundedRepeat::Plus | BoundedRepeat::Exact(_) | BoundedRepeat::Both(Some(_), _) => true } } pub fn has_upper_bound(&self) -> bool { match self { BoundedRepeat::None | BoundedRepeat::Plus | BoundedRepeat::Both(_, None) => false, BoundedRepeat::Exact(_) | BoundedRepeat::Both(_, Some(_)) => true } } }
23.104478
99
0.636628
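For orientation, each `BoundedRepeat` variant in the AST above corresponds to one repetition form in the grammar's surface syntax (spellings as in rust-peg; treat the exact notation as an assumption):

// Illustrative mapping only — not part of the AST itself.
fn repeat_syntax(bound: &BoundedRepeat) -> &'static str {
    match bound {
        BoundedRepeat::None => "e*           (zero or more)",
        BoundedRepeat::Plus => "e+           (one or more)",
        BoundedRepeat::Exact(_) => "e*<n>        (exactly n)",
        BoundedRepeat::Both(_, _) => "e*<min, max> (bounded range)",
    }
}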
758a894d266925c1b4af89969d19fb058f7393f9
3,637
pub fn run() -> String {
    let title = "Day 2: I Was Told There Would Be No Math";
    let parsed_input = parse_input(get_input());
    format!(
        "{}\n{}\nPart one: {}\nPart two: {}",
        title,
        "=".repeat(title.len()),
        solve_part_one(&parsed_input),
        solve_part_two(&parsed_input),
    )
}

fn get_input() -> &'static str {
    let input: &'static str = include_str!("../../input/2015-02.txt");
    input
}

#[derive(PartialEq, Eq, Debug)]
struct Present {
    length: u32,
    width: u32,
    height: u32,
    ordered: [u32; 3],
}

impl Present {
    fn new(length: u32, width: u32, height: u32) -> Present {
        let mut ordered = [length, width, height];
        ordered.sort();
        Present {
            length,
            width,
            height,
            ordered,
        }
    }

    fn surface_area(&self) -> u32 {
        (2 * self.length * self.width)
            + (2 * self.width * self.height)
            + (2 * self.height * self.length)
    }

    fn slack(&self) -> u32 {
        self.ordered[0] * self.ordered[1]
    }

    fn total_paper(&self) -> u32 {
        self.surface_area() + self.slack()
    }

    fn volume(&self) -> u32 {
        self.length * self.width * self.height
    }

    fn smallest_perimeter(&self) -> u32 {
        self.ordered[0] * 2 + self.ordered[1] * 2
    }

    fn total_ribbon(&self) -> u32 {
        self.volume() + self.smallest_perimeter()
    }
}

fn parse_input(input: &str) -> Vec<Present> {
    input
        .lines()
        .map(|line: &str| {
            let split: Vec<u32> = line
                .split('x')
                .map(|num_str| num_str.parse::<u32>().expect("Parsing present failed"))
                .collect();
            assert!(split.len() == 3);
            Present::new(split[0], split[1], split[2])
        })
        .collect()
}

fn solve_part_one(input: &Vec<Present>) -> u32 {
    input.iter().map(Present::total_paper).sum()
}

fn solve_part_two(input: &Vec<Present>) -> u32 {
    input.iter().map(Present::total_ribbon).sum()
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_parse() {
        assert_eq!(
            parse_input("1x2x3\n6x5x4\n"),
            vec![Present::new(1, 2, 3), Present::new(6, 5, 4),]
        );
    }

    #[test]
    fn test_surface_area() {
        assert_eq!(Present::new(2, 3, 4).surface_area(), 52);
        assert_eq!(Present::new(1, 1, 10).surface_area(), 42);
    }

    #[test]
    fn test_slack() {
        assert_eq!(Present::new(2, 3, 4).slack(), 6);
        assert_eq!(Present::new(1, 1, 10).slack(), 1);
    }

    #[test]
    fn test_total_paper() {
        assert_eq!(Present::new(2, 3, 4).total_paper(), 58);
        assert_eq!(Present::new(1, 1, 10).total_paper(), 43);
    }

    #[test]
    fn test_known_part_one_result() {
        let parsed = parse_input(get_input());
        assert_eq!(solve_part_one(&parsed), 1588178);
    }

    #[test]
    fn test_volume() {
        assert_eq!(Present::new(2, 3, 4).volume(), 24);
        assert_eq!(Present::new(1, 1, 10).volume(), 10);
    }

    #[test]
    fn test_smallest_perimeter() {
        assert_eq!(Present::new(2, 3, 4).smallest_perimeter(), 10);
        assert_eq!(Present::new(1, 1, 10).smallest_perimeter(), 4);
    }

    #[test]
    fn test_total_ribbon() {
        assert_eq!(Present::new(2, 3, 4).total_ribbon(), 34);
        assert_eq!(Present::new(1, 1, 10).total_ribbon(), 14);
    }

    #[test]
    fn test_known_part_two_result() {
        let parsed = parse_input(get_input());
        assert_eq!(solve_part_two(&parsed), 3783758);
    }
}
24.574324
87
0.531757
fe907ac3ade7252a8a15809df472492d3fb811c5
390
use generic; use interpreter::{consts, Interpreter, Module, ObjectToken}; pub fn register_bool_type(interpreter: &mut Interpreter, module: &mut Module) { generic::create_type_for::<bool, _>(interpreter, module, "Bool", |_, _, ty| { generic::impl_display_for::<bool>(ty); }); } define_core_creator!{create_bool, bool, "Bool"} define_into_native!{from_object, bool, "Bool"}
32.5
81
0.710256
6afd75a695083f5fb4de9280fcabae83cb838798
2,061
use std::io::stdout;
use std::time::Duration;

use std::io::Stdout;

use crossterm::{
    event::{poll, read, Event, KeyCode},
    terminal::{self},
    Result,
};

mod line_buffer;

mod engine;
use engine::{print_message, Engine};

// this fn is totally ripped off from crossterm's examples
// it's really a diagnostic routine to see if crossterm is
// even seeing the events. if you press a key and no events
// are printed, there's a good chance your terminal is eating
// those events.
fn print_events(stdout: &mut Stdout) -> Result<()> {
    loop {
        // Wait up to 5s for another event
        if poll(Duration::from_millis(5_000))? {
            // It's guaranteed that read() won't block if `poll` returns `Ok(true)`
            let event = read()?;

            // just reuse the print_message fn to show events
            print_message(stdout, &format!("Event::{:?}", event))?;

            // hit the Esc key to get out
            if event == Event::Key(KeyCode::Esc.into()) {
                break;
            }
        } else {
            // Timeout expired, no event for 5s
            print_message(stdout, "Waiting for you to type...")?;
        }
    }

    Ok(())
}

fn main() -> Result<()> {
    let mut stdout = stdout();
    terminal::enable_raw_mode()?;

    // quick command-line parameter handling
    let args: Vec<String> = std::env::args().collect();
    // if -k is passed, show the events
    if args.len() > 1 && args[1] == "-k" {
        print_message(&mut stdout, "Ready to print events:")?;
        print_events(&mut stdout)?;
        terminal::disable_raw_mode()?;
        println!();
        return Ok(());
    };

    let mut engine = Engine::new();

    loop {
        if let Ok(buffer) = engine.read_line(&mut stdout) {
            if buffer.trim() == "exit" {
                break;
            }
            if !buffer.trim().is_empty() {
                print_message(&mut stdout, &format!("Our buffer: {}", buffer))?;
            }
        }
    }

    terminal::disable_raw_mode()?;
    println!();
    Ok(())
}
26.423077
82
0.549733
1c8763902ba9e0151eaa0f905d6fc39e5f1784ca
737
/* * OpenAPI Petstore * * This is a sample server Petstore server. For this sample, you can use the api key `special-key` to test the authorization filters. * * The version of the OpenAPI document: 1.0.0 * * Generated by: https://openapi-generator.tech */ /// Category : A category for a pet #[derive(Debug, PartialEq, Serialize, Deserialize)] pub struct Category { #[serde(rename = "id", skip_serializing_if = "Option::is_none")] pub id: Option<i64>, #[serde(rename = "name", skip_serializing_if = "Option::is_none")] pub name: Option<String>, } impl Category { /// A category for a pet pub fn new() -> Category { Category { id: None, name: None, } } }
22.333333
133
0.622795
fecc44b66e363a481edaceedebede3c7a6dd4109
695
use xactor::*; /// Define `Ping` message #[message(result = "usize")] struct Ping(usize); /// Actor struct MyActor { count: usize, } /// Declare actor and its context impl Actor for MyActor {} /// Handler for `Ping` message #[async_trait::async_trait] impl Handler<Ping> for MyActor { async fn handle(&mut self, _ctx: &mut Context<Self>, msg: Ping) -> usize { self.count += msg.0; self.count } } #[xactor::main] async fn main() -> Result<()> { // start new actor let addr = MyActor { count: 10 }.start().await?; // send message and get future for result let res = addr.call(Ping(10)).await?; println!("RESULT: {}", res == 20); Ok(()) }
19.857143
78
0.602878
3a1b797eeb46107fe7108ef7c47f4f04328fea3b
16,404
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! C definitions used by libnative that don't belong in liblibc #![allow(bad_style, dead_code, overflowing_literals)] use libc; pub use self::GET_FILEEX_INFO_LEVELS::*; pub use self::FILE_INFO_BY_HANDLE_CLASS::*; pub use libc::consts::os::extra::{ FILE_ATTRIBUTE_READONLY, FILE_ATTRIBUTE_DIRECTORY, WSAPROTOCOL_LEN, }; pub use libc::types::os::arch::extra::{GROUP, GUID, WSAPROTOCOLCHAIN}; pub const WSADESCRIPTION_LEN: usize = 256; pub const WSASYS_STATUS_LEN: usize = 128; pub const FIONBIO: libc::c_long = 0x8004667e; pub const FD_SETSIZE: usize = 64; pub const MSG_DONTWAIT: libc::c_int = 0; pub const ERROR_ILLEGAL_CHARACTER: libc::c_int = 582; pub const ENABLE_ECHO_INPUT: libc::DWORD = 0x4; pub const ENABLE_EXTENDED_FLAGS: libc::DWORD = 0x80; pub const ENABLE_INSERT_MODE: libc::DWORD = 0x20; pub const ENABLE_LINE_INPUT: libc::DWORD = 0x2; pub const ENABLE_PROCESSED_INPUT: libc::DWORD = 0x1; pub const ENABLE_QUICK_EDIT_MODE: libc::DWORD = 0x40; pub const WSA_INVALID_EVENT: WSAEVENT = 0 as WSAEVENT; pub const FD_ACCEPT: libc::c_long = 0x08; pub const FD_MAX_EVENTS: usize = 10; pub const WSA_INFINITE: libc::DWORD = libc::INFINITE; pub const WSA_WAIT_TIMEOUT: libc::DWORD = libc::consts::os::extra::WAIT_TIMEOUT; pub const WSA_WAIT_EVENT_0: libc::DWORD = libc::consts::os::extra::WAIT_OBJECT_0; pub const WSA_WAIT_FAILED: libc::DWORD = libc::consts::os::extra::WAIT_FAILED; pub const WSAESHUTDOWN: libc::c_int = 10058; pub const ERROR_NO_MORE_FILES: libc::DWORD = 18; pub const TOKEN_READ: libc::DWORD = 0x20008; #[repr(C)] #[cfg(target_arch = "x86")] pub struct WSADATA { pub wVersion: libc::WORD, pub wHighVersion: libc::WORD, pub szDescription: [u8; WSADESCRIPTION_LEN + 1], pub szSystemStatus: [u8; WSASYS_STATUS_LEN + 1], pub iMaxSockets: u16, pub iMaxUdpDg: u16, pub lpVendorInfo: *mut u8, } #[repr(C)] #[cfg(target_arch = "x86_64")] pub struct WSADATA { pub wVersion: libc::WORD, pub wHighVersion: libc::WORD, pub iMaxSockets: u16, pub iMaxUdpDg: u16, pub lpVendorInfo: *mut u8, pub szDescription: [u8; WSADESCRIPTION_LEN + 1], pub szSystemStatus: [u8; WSASYS_STATUS_LEN + 1], } pub type LPWSADATA = *mut WSADATA; #[repr(C)] pub struct WSANETWORKEVENTS { pub lNetworkEvents: libc::c_long, pub iErrorCode: [libc::c_int; FD_MAX_EVENTS], } pub type LPWSANETWORKEVENTS = *mut WSANETWORKEVENTS; pub type WSAEVENT = libc::HANDLE; #[repr(C)] #[derive(Copy)] pub struct WSAPROTOCOL_INFO { pub dwServiceFlags1: libc::DWORD, pub dwServiceFlags2: libc::DWORD, pub dwServiceFlags3: libc::DWORD, pub dwServiceFlags4: libc::DWORD, pub dwProviderFlags: libc::DWORD, pub ProviderId: GUID, pub dwCatalogEntryId: libc::DWORD, pub ProtocolChain: WSAPROTOCOLCHAIN, pub iVersion: libc::c_int, pub iAddressFamily: libc::c_int, pub iMaxSockAddr: libc::c_int, pub iMinSockAddr: libc::c_int, pub iSocketType: libc::c_int, pub iProtocol: libc::c_int, pub iProtocolMaxOffset: libc::c_int, pub iNetworkByteOrder: libc::c_int, pub iSecurityScheme: libc::c_int, pub dwMessageSize: libc::DWORD, pub dwProviderReserved: libc::DWORD, pub szProtocol: [u16; (WSAPROTOCOL_LEN as usize) + 1us], } pub type 
LPWSAPROTOCOL_INFO = *mut WSAPROTOCOL_INFO; #[repr(C)] pub struct fd_set { fd_count: libc::c_uint, fd_array: [libc::SOCKET; FD_SETSIZE], } pub fn fd_set(set: &mut fd_set, s: libc::SOCKET) { set.fd_array[set.fd_count as usize] = s; set.fd_count += 1; } pub type SHORT = libc::c_short; #[repr(C)] pub struct COORD { pub X: SHORT, pub Y: SHORT, } #[repr(C)] pub struct SMALL_RECT { pub Left: SHORT, pub Top: SHORT, pub Right: SHORT, pub Bottom: SHORT, } #[repr(C)] pub struct CONSOLE_SCREEN_BUFFER_INFO { pub dwSize: COORD, pub dwCursorPosition: COORD, pub wAttributes: libc::WORD, pub srWindow: SMALL_RECT, pub dwMaximumWindowSize: COORD, } pub type PCONSOLE_SCREEN_BUFFER_INFO = *mut CONSOLE_SCREEN_BUFFER_INFO; #[repr(C)] pub struct WIN32_FILE_ATTRIBUTE_DATA { pub dwFileAttributes: libc::DWORD, pub ftCreationTime: libc::FILETIME, pub ftLastAccessTime: libc::FILETIME, pub ftLastWriteTime: libc::FILETIME, pub nFileSizeHigh: libc::DWORD, pub nFileSizeLow: libc::DWORD, } #[repr(C)] pub struct BY_HANDLE_FILE_INFORMATION { pub dwFileAttributes: libc::DWORD, pub ftCreationTime: libc::FILETIME, pub ftLastAccessTime: libc::FILETIME, pub ftLastWriteTime: libc::FILETIME, pub dwVolumeSerialNumber: libc::DWORD, pub nFileSizeHigh: libc::DWORD, pub nFileSizeLow: libc::DWORD, pub nNumberOfLinks: libc::DWORD, pub nFileIndexHigh: libc::DWORD, pub nFileIndexLow: libc::DWORD, } pub type LPBY_HANDLE_FILE_INFORMATION = *mut BY_HANDLE_FILE_INFORMATION; #[repr(C)] pub enum GET_FILEEX_INFO_LEVELS { GetFileExInfoStandard, GetFileExMaxInfoLevel } #[repr(C)] pub enum FILE_INFO_BY_HANDLE_CLASS { FileBasicInfo = 0, FileStandardInfo = 1, FileNameInfo = 2, FileRenameInfo = 3, FileDispositionInfo = 4, FileAllocationInfo = 5, FileEndOfFileInfo = 6, FileStreamInfo = 7, FileCompressionInfo = 8, FileAttributeTagInfo = 9, FileIdBothDirectoryInfo = 10, // 0xA FileIdBothDirectoryRestartInfo = 11, // 0xB FileIoPriorityHintInfo = 12, // 0xC FileRemoteProtocolInfo = 13, // 0xD FileFullDirectoryInfo = 14, // 0xE FileFullDirectoryRestartInfo = 15, // 0xF FileStorageInfo = 16, // 0x10 FileAlignmentInfo = 17, // 0x11 FileIdInfo = 18, // 0x12 FileIdExtdDirectoryInfo = 19, // 0x13 FileIdExtdDirectoryRestartInfo = 20, // 0x14 MaximumFileInfoByHandlesClass } #[repr(C)] pub struct FILE_END_OF_FILE_INFO { pub EndOfFile: libc::LARGE_INTEGER, } #[link(name = "ws2_32")] extern "system" { pub fn WSAStartup(wVersionRequested: libc::WORD, lpWSAData: LPWSADATA) -> libc::c_int; pub fn WSACleanup() -> libc::c_int; pub fn WSAGetLastError() -> libc::c_int; pub fn WSACloseEvent(hEvent: WSAEVENT) -> libc::BOOL; pub fn WSACreateEvent() -> WSAEVENT; pub fn WSAEventSelect(s: libc::SOCKET, hEventObject: WSAEVENT, lNetworkEvents: libc::c_long) -> libc::c_int; pub fn WSASetEvent(hEvent: WSAEVENT) -> libc::BOOL; pub fn WSAWaitForMultipleEvents(cEvents: libc::DWORD, lphEvents: *const WSAEVENT, fWaitAll: libc::BOOL, dwTimeout: libc::DWORD, fAltertable: libc::BOOL) -> libc::DWORD; pub fn WSAEnumNetworkEvents(s: libc::SOCKET, hEventObject: WSAEVENT, lpNetworkEvents: LPWSANETWORKEVENTS) -> libc::c_int; pub fn WSADuplicateSocketW(s: libc::SOCKET, dwProcessId: libc::DWORD, lpProtocolInfo: LPWSAPROTOCOL_INFO) -> libc::c_int; pub fn GetCurrentProcessId() -> libc::DWORD; pub fn WSASocketW(af: libc::c_int, kind: libc::c_int, protocol: libc::c_int, lpProtocolInfo: LPWSAPROTOCOL_INFO, g: GROUP, dwFlags: libc::DWORD) -> libc::SOCKET; pub fn ioctlsocket(s: libc::SOCKET, cmd: libc::c_long, argp: *mut libc::c_ulong) -> libc::c_int; pub fn select(nfds: libc::c_int, readfds: *mut fd_set, 
writefds: *mut fd_set, exceptfds: *mut fd_set, timeout: *mut libc::timeval) -> libc::c_int; pub fn getsockopt(sockfd: libc::SOCKET, level: libc::c_int, optname: libc::c_int, optval: *mut libc::c_char, optlen: *mut libc::c_int) -> libc::c_int; pub fn SetEvent(hEvent: libc::HANDLE) -> libc::BOOL; pub fn WaitForMultipleObjects(nCount: libc::DWORD, lpHandles: *const libc::HANDLE, bWaitAll: libc::BOOL, dwMilliseconds: libc::DWORD) -> libc::DWORD; pub fn CancelIo(hFile: libc::HANDLE) -> libc::BOOL; pub fn CancelIoEx(hFile: libc::HANDLE, lpOverlapped: libc::LPOVERLAPPED) -> libc::BOOL; } pub mod compat { use prelude::v1::*; use ffi::CString; use libc::types::os::arch::extra::{LPCWSTR, HMODULE, LPCSTR, LPVOID}; use sync::atomic::{AtomicUsize, Ordering}; extern "system" { fn GetModuleHandleW(lpModuleName: LPCWSTR) -> HMODULE; fn GetProcAddress(hModule: HMODULE, lpProcName: LPCSTR) -> LPVOID; } fn store_func(ptr: &AtomicUsize, module: &str, symbol: &str, fallback: usize) -> usize { let mut module: Vec<u16> = module.utf16_units().collect(); module.push(0); let symbol = CString::from_slice(symbol.as_bytes()); let func = unsafe { let handle = GetModuleHandleW(module.as_ptr()); GetProcAddress(handle, symbol.as_ptr()) as usize }; let value = if func == 0 {fallback} else {func}; ptr.store(value, Ordering::SeqCst); value } /// Macro for creating a compatibility fallback for a Windows function /// /// # Example /// ``` /// compat_fn!(adll32::SomeFunctionW(_arg: LPCWSTR) { /// // Fallback implementation /// }) /// ``` /// /// Note that arguments unused by the fallback implementation should not be /// called `_` as they are used to be passed to the real function if /// available. macro_rules! compat_fn { ($module:ident::$symbol:ident($($argname:ident: $argtype:ty),*) -> $rettype:ty { $fallback:expr }) => ( #[inline(always)] pub unsafe fn $symbol($($argname: $argtype),*) -> $rettype { use sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering}; use mem; static PTR: AtomicUsize = ATOMIC_USIZE_INIT; fn load() -> usize { ::sys::c::compat::store_func(&PTR, stringify!($module), stringify!($symbol), fallback as usize) } extern "system" fn fallback($($argname: $argtype),*) -> $rettype { $fallback } let addr = match PTR.load(Ordering::SeqCst) { 0 => load(), n => n, }; let f: extern "system" fn($($argtype),*) -> $rettype = mem::transmute(addr); f($($argname),*) } ) } /// Compatibility layer for functions in `kernel32.dll` /// /// Latest versions of Windows this is needed for: /// /// * `CreateSymbolicLinkW`: Windows XP, Windows Server 2003 /// * `GetFinalPathNameByHandleW`: Windows XP, Windows Server 2003 pub mod kernel32 { use libc::c_uint; use libc::types::os::arch::extra::{DWORD, LPCWSTR, BOOLEAN, HANDLE}; use libc::consts::os::extra::ERROR_CALL_NOT_IMPLEMENTED; use sys::c::SetLastError; compat_fn! { kernel32::CreateSymbolicLinkW(_lpSymlinkFileName: LPCWSTR, _lpTargetFileName: LPCWSTR, _dwFlags: DWORD) -> BOOLEAN { unsafe { SetLastError(ERROR_CALL_NOT_IMPLEMENTED as DWORD); 0 } } } compat_fn! { kernel32::GetFinalPathNameByHandleW(_hFile: HANDLE, _lpszFilePath: LPCWSTR, _cchFilePath: DWORD, _dwFlags: DWORD) -> DWORD { unsafe { SetLastError(ERROR_CALL_NOT_IMPLEMENTED as DWORD); 0 } } } compat_fn! 
{ kernel32::SetThreadErrorMode(_dwNewMode: DWORD, _lpOldMode: *mut DWORD) -> c_uint { unsafe { SetLastError(ERROR_CALL_NOT_IMPLEMENTED as DWORD); 0 } } } } } extern "system" { // FIXME - pInputControl should be PCONSOLE_READCONSOLE_CONTROL pub fn ReadConsoleW(hConsoleInput: libc::HANDLE, lpBuffer: libc::LPVOID, nNumberOfCharsToRead: libc::DWORD, lpNumberOfCharsRead: libc::LPDWORD, pInputControl: libc::LPVOID) -> libc::BOOL; pub fn WriteConsoleW(hConsoleOutput: libc::HANDLE, lpBuffer: libc::types::os::arch::extra::LPCVOID, nNumberOfCharsToWrite: libc::DWORD, lpNumberOfCharsWritten: libc::LPDWORD, lpReserved: libc::LPVOID) -> libc::BOOL; pub fn GetConsoleMode(hConsoleHandle: libc::HANDLE, lpMode: libc::LPDWORD) -> libc::BOOL; pub fn SetConsoleMode(hConsoleHandle: libc::HANDLE, lpMode: libc::DWORD) -> libc::BOOL; pub fn GetConsoleScreenBufferInfo( hConsoleOutput: libc::HANDLE, lpConsoleScreenBufferInfo: PCONSOLE_SCREEN_BUFFER_INFO, ) -> libc::BOOL; pub fn GetFileAttributesExW(lpFileName: libc::LPCWSTR, fInfoLevelId: GET_FILEEX_INFO_LEVELS, lpFileInformation: libc::LPVOID) -> libc::BOOL; pub fn RemoveDirectoryW(lpPathName: libc::LPCWSTR) -> libc::BOOL; pub fn SetFileAttributesW(lpFileName: libc::LPCWSTR, dwFileAttributes: libc::DWORD) -> libc::BOOL; pub fn GetFileAttributesW(lpFileName: libc::LPCWSTR) -> libc::DWORD; pub fn GetFileInformationByHandle(hFile: libc::HANDLE, lpFileInformation: LPBY_HANDLE_FILE_INFORMATION) -> libc::BOOL; pub fn SetLastError(dwErrCode: libc::DWORD); pub fn GetCommandLineW() -> *mut libc::LPCWSTR; pub fn LocalFree(ptr: *mut libc::c_void); pub fn CommandLineToArgvW(lpCmdLine: *mut libc::LPCWSTR, pNumArgs: *mut libc::c_int) -> *mut *mut u16; pub fn SetFileTime(hFile: libc::HANDLE, lpCreationTime: *const libc::FILETIME, lpLastAccessTime: *const libc::FILETIME, lpLastWriteTime: *const libc::FILETIME) -> libc::BOOL; pub fn SetFileInformationByHandle(hFile: libc::HANDLE, FileInformationClass: FILE_INFO_BY_HANDLE_CLASS, lpFileInformation: libc::LPVOID, dwBufferSize: libc::DWORD) -> libc::BOOL; pub fn GetTempPathW(nBufferLength: libc::DWORD, lpBuffer: libc::LPCWSTR) -> libc::DWORD; pub fn OpenProcessToken(ProcessHandle: libc::HANDLE, DesiredAccess: libc::DWORD, TokenHandle: *mut libc::HANDLE) -> libc::BOOL; pub fn GetCurrentProcess() -> libc::HANDLE; } #[link(name = "userenv")] extern "system" { pub fn GetUserProfileDirectoryW(hToken: libc::HANDLE, lpProfileDir: libc::LPCWSTR, lpcchSize: *mut libc::DWORD) -> libc::BOOL; }
37.452055
95
0.587052
d54eaa91fa473c8c9a7f05df67d5f9e75a991c11
8,022
// Copyright 2019 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use super::interrupter::{Error as InterrupterError, Interrupter};
use super::xhci_backend_device::{BackendType, XhciBackendDevice};
use super::xhci_regs::{
    XhciRegs, MAX_PORTS, PORTSC_CONNECT_STATUS_CHANGE, PORTSC_CURRENT_CONNECT_STATUS,
    PORTSC_PORT_ENABLED, PORTSC_PORT_ENABLED_DISABLED_CHANGE, USB2_PORTS_END, USB2_PORTS_START,
    USB3_PORTS_END, USB3_PORTS_START, USB_STS_PORT_CHANGE_DETECT,
};
use crate::register_space::Register;
use std::fmt::{self, Display};
use std::sync::{Arc, MutexGuard};
use sync::Mutex;

#[derive(Debug)]
pub enum Error {
    AllPortsAttached,
    AlreadyDetached(u8),
    Attach {
        port_id: u8,
        reason: InterrupterError,
    },
    Detach {
        port_id: u8,
        reason: InterrupterError,
    },
    NoSuchDevice {
        bus: u8,
        addr: u8,
        vid: u16,
        pid: u16,
    },
    NoSuchPort(u8),
}

type Result<T> = std::result::Result<T, Error>;

impl Display for Error {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        use self::Error::*;

        match self {
            AllPortsAttached => write!(f, "all suitable ports already attached"),
            AlreadyDetached(port_id) => write!(f, "device already detached from port {}", port_id),
            Attach { port_id, reason } => {
                write!(f, "failed to attach device to port {}: {}", port_id, reason)
            }
            Detach { port_id, reason } => write!(
                f,
                "failed to detach device from port {}: {}",
                port_id, reason
            ),
            NoSuchDevice {
                bus,
                addr,
                vid,
                pid,
            } => write!(
                f,
                "device {}:{}:{:04x}:{:04x} is not attached",
                bus, addr, vid, pid
            ),
            NoSuchPort(port_id) => write!(f, "port {} does not exist", port_id),
        }
    }
}

/// A port on the usb hub. It could have a device connected to it.
pub struct UsbPort {
    ty: BackendType,
    port_id: u8,
    portsc: Register<u32>,
    usbsts: Register<u32>,
    interrupter: Arc<Mutex<Interrupter>>,
    backend_device: Mutex<Option<Box<dyn XhciBackendDevice>>>,
}

impl UsbPort {
    /// Create a new usb port that has nothing connected to it.
    pub fn new(
        ty: BackendType,
        port_id: u8,
        portsc: Register<u32>,
        usbsts: Register<u32>,
        interrupter: Arc<Mutex<Interrupter>>,
    ) -> UsbPort {
        UsbPort {
            ty,
            port_id,
            portsc,
            usbsts,
            interrupter,
            backend_device: Mutex::new(None),
        }
    }

    fn port_id(&self) -> u8 {
        self.port_id
    }

    /// Detach the currently connected backend. Returns an error when there is no backend
    /// connected.
    pub fn detach(&self) -> Result<()> {
        let mut locked = self.backend_device.lock();
        if locked.is_none() {
            return Err(Error::AlreadyDetached(self.port_id));
        }
        usb_debug!("device detached from port {}", self.port_id);
        *locked = None;
        self.send_device_disconnected_event()
            .map_err(|reason| Error::Detach {
                port_id: self.port_id,
                reason,
            })
    }

    /// Get the currently connected backend.
    pub fn get_backend_device(&self) -> MutexGuard<Option<Box<dyn XhciBackendDevice>>> {
        self.backend_device.lock()
    }

    fn is_attached(&self) -> bool {
        self.backend_device.lock().is_some()
    }

    fn reset(&self) -> std::result::Result<(), InterrupterError> {
        if self.is_attached() {
            self.send_device_connected_event()?;
        }
        Ok(())
    }

    fn attach(
        &self,
        device: Box<dyn XhciBackendDevice>,
    ) -> std::result::Result<(), InterrupterError> {
        usb_debug!("A backend is connected to port {}", self.port_id);
        let mut locked = self.backend_device.lock();
        assert!(locked.is_none());
        *locked = Some(device);
        self.send_device_connected_event()
    }

    /// Inform the guest kernel that a device is connected to this port. It combines the first few
    /// steps of the USB device initialization process in xHCI spec 4.3.
    pub fn send_device_connected_event(&self) -> std::result::Result<(), InterrupterError> {
        // xHCI spec 4.3.
        self.portsc.set_bits(
            PORTSC_CURRENT_CONNECT_STATUS
                | PORTSC_PORT_ENABLED
                | PORTSC_CONNECT_STATUS_CHANGE
                | PORTSC_PORT_ENABLED_DISABLED_CHANGE,
        );
        self.usbsts.set_bits(USB_STS_PORT_CHANGE_DETECT);
        self.interrupter
            .lock()
            .send_port_status_change_trb(self.port_id)
    }

    /// Inform the guest kernel that the device has been detached.
    pub fn send_device_disconnected_event(&self) -> std::result::Result<(), InterrupterError> {
        // xHCI spec 4.3.
        self.portsc
            .set_bits(PORTSC_CONNECT_STATUS_CHANGE | PORTSC_PORT_ENABLED_DISABLED_CHANGE);
        self.portsc.clear_bits(PORTSC_CURRENT_CONNECT_STATUS);
        self.usbsts.set_bits(USB_STS_PORT_CHANGE_DETECT);
        self.interrupter
            .lock()
            .send_port_status_change_trb(self.port_id)
    }
}

/// UsbHub is a set of usb ports.
pub struct UsbHub {
    ports: Vec<Arc<UsbPort>>,
}

impl UsbHub {
    /// Create a usb hub with no devices attached.
    pub fn new(regs: &XhciRegs, interrupter: Arc<Mutex<Interrupter>>) -> UsbHub {
        let mut ports = Vec::new();
        // Each port should have a portsc register.
        assert_eq!(MAX_PORTS as usize, regs.portsc.len());
        for i in USB2_PORTS_START..USB2_PORTS_END {
            ports.push(Arc::new(UsbPort::new(
                BackendType::Usb2,
                i + 1,
                regs.portsc[i as usize].clone(),
                regs.usbsts.clone(),
                interrupter.clone(),
            )));
        }
        for i in USB3_PORTS_START..USB3_PORTS_END {
            ports.push(Arc::new(UsbPort::new(
                BackendType::Usb3,
                i + 1,
                regs.portsc[i as usize].clone(),
                regs.usbsts.clone(),
                interrupter.clone(),
            )));
        }
        UsbHub { ports }
    }

    /// Reset all ports.
    pub fn reset(&self) -> Result<()> {
        usb_debug!("resetting usb hub");
        for p in &self.ports {
            p.reset().map_err(|reason| Error::Detach {
                port_id: p.port_id(),
                reason,
            })?;
        }
        Ok(())
    }

    /// Get a specific port of the hub.
    pub fn get_port(&self, port_id: u8) -> Option<Arc<UsbPort>> {
        if port_id == 0 || port_id > MAX_PORTS {
            return None;
        }
        let port_index = (port_id - 1) as usize;
        Some(self.ports.get(port_index)?.clone())
    }

    /// Connect a backend to the next empty port.
    pub fn connect_backend(&self, backend: Box<dyn XhciBackendDevice>) -> Result<u8> {
        usb_debug!("Trying to connect backend to hub");
        for port in &self.ports {
            if port.is_attached() {
                continue;
            }
            if port.ty != backend.get_backend_type() {
                continue;
            }
            let port_id = port.port_id();
            port.attach(backend)
                .map_err(|reason| Error::Attach { port_id, reason })?;
            return Ok(port_id);
        }
        Err(Error::AllPortsAttached)
    }

    /// Disconnect the device from a port. Returns an error if the port id is not valid or the
    /// device could not be disconnected.
    pub fn disconnect_port(&self, port_id: u8) -> Result<()> {
        match self.get_port(port_id) {
            Some(port) => port.detach(),
            None => Err(Error::NoSuchPort(port_id)),
        }
    }
}
31.335938
99
0.563077
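The hub above keeps a fixed pool of typed port slots, each guarding an Option<Box<dyn Trait>> behind a mutex, and attaches a device to the first empty slot whose type matches. A minimal std-only sketch of that pattern; the Backend, Port, and Hub names here are illustrative, not the crosvm types:

use std::sync::Mutex;

#[derive(Debug, Clone, Copy, PartialEq)]
enum PortType {
    Usb2,
    Usb3,
}

trait Backend: Send {
    fn port_type(&self) -> PortType;
}

struct Port {
    ty: PortType,
    port_id: u8,
    slot: Mutex<Option<Box<dyn Backend>>>,
}

struct Hub {
    ports: Vec<Port>,
}

impl Hub {
    fn new(usb2: u8, usb3: u8) -> Hub {
        let mut ports = Vec::new();
        // Port ids are 1-based, mirroring the xHCI convention above.
        for i in 0..usb2 {
            ports.push(Port { ty: PortType::Usb2, port_id: i + 1, slot: Mutex::new(None) });
        }
        for i in 0..usb3 {
            ports.push(Port { ty: PortType::Usb3, port_id: usb2 + i + 1, slot: Mutex::new(None) });
        }
        Hub { ports }
    }

    /// Attach to the first empty port whose type matches, or fail if all are busy.
    fn connect_backend(&self, backend: Box<dyn Backend>) -> Result<u8, &'static str> {
        for port in &self.ports {
            let mut slot = port.slot.lock().unwrap();
            if slot.is_some() || port.ty != backend.port_type() {
                continue;
            }
            *slot = Some(backend);
            return Ok(port.port_id);
        }
        Err("all suitable ports already attached")
    }
}

struct Dummy(PortType);
impl Backend for Dummy {
    fn port_type(&self) -> PortType {
        self.0
    }
}

fn main() {
    let hub = Hub::new(2, 2);
    // USB3 devices skip past the USB2 ports and land on port 3 first.
    assert_eq!(hub.connect_backend(Box::new(Dummy(PortType::Usb3))), Ok(3));
    assert_eq!(hub.connect_backend(Box::new(Dummy(PortType::Usb2))), Ok(1));
}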
4b291360b53421e5c3f1b96b3b85dfd908c5922c
1,390
use crate::core::{Cell, InternalVM, VMInstruction}; use crate::core::{RuntimeError, VirtualMachine}; use crate::gadgets; use algebra::Field; use r1cs_core::ConstraintSystem; use zinc_bytecode::instructions::Gt; impl<F, CS> VMInstruction<F, CS> for Gt where F: Field, CS: ConstraintSystem<F>, { fn execute(&self, vm: &mut VirtualMachine<F, CS>) -> Result<(), RuntimeError> { let right = vm.pop()?.value()?; let left = vm.pop()?.value()?; let cs = vm.constraint_system(); let gt = gadgets::gt(cs.ns(|| "gt"), &left, &right)?; vm.push(Cell::Value(gt)) } } #[cfg(test)] mod test { use super::*; use crate::instructions::testing_utils::{TestingError, VMTestRunner}; use zinc_bytecode::scalar::IntegerType; use zinc_bytecode::*; #[test] fn test_gt() -> Result<(), TestingError> { VMTestRunner::new() .add(PushConst::new(2.into(), IntegerType::I8.into())) .add(PushConst::new(1.into(), IntegerType::I8.into())) .add(Gt) .add(PushConst::new(2.into(), IntegerType::I8.into())) .add(PushConst::new(2.into(), IntegerType::I8.into())) .add(Gt) .add(PushConst::new(1.into(), IntegerType::I8.into())) .add(PushConst::new(2.into(), IntegerType::I8.into())) .add(Gt) .test(&[0, 0, 1]) } }
30.217391
83
0.57482
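The Gt instruction above follows the usual stack-machine convention for binary operators: the right operand is popped first, then the left, and one result is pushed. A std-only sketch of that pop order; the Vm type and i64 stack are illustrative, not the zinc VM API:

#[derive(Debug)]
enum VmError {
    StackUnderflow,
}

struct Vm {
    stack: Vec<i64>,
}

impl Vm {
    fn pop(&mut self) -> Result<i64, VmError> {
        self.stack.pop().ok_or(VmError::StackUnderflow)
    }

    /// `gt` pops right first, then left, and pushes `left > right` as 0/1,
    /// matching the operand order in the instruction above.
    fn gt(&mut self) -> Result<(), VmError> {
        let right = self.pop()?;
        let left = self.pop()?;
        self.stack.push((left > right) as i64);
        Ok(())
    }
}

fn main() -> Result<(), VmError> {
    let mut vm = Vm { stack: vec![2, 1] }; // push 2, then push 1
    vm.gt()?; // computes 2 > 1
    assert_eq!(vm.stack, vec![1]);
    Ok(())
}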
8fdf07af4650d6f6b2586831d66d073d5dcbf674
6,126
/* Copyright 2021 Google LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ use crate::component_visibles; use crate::error::compile_error; use crate::graph::{ComponentSections, Graph}; use crate::manifest::{MultibindingMapKey, TypeRoot}; use crate::nodes::node::{DependencyData, Node}; use crate::type_data::TypeData; use proc_macro2::{Ident, TokenStream}; use quote::quote; use std::any::{Any, TypeId}; use std::collections::HashMap; use std::iter::Extend; #[derive(Debug, Clone)] pub struct MapNode { pub type_: TypeData, pub bindings: HashMap<MultibindingMapKey, TypeData>, } impl MapNode { pub fn new( map_key: &MultibindingMapKey, value_type: &TypeData, ) -> Result<Box<MapNode>, TokenStream> { Ok(Box::new(MapNode { type_: map_type(&key_type(&map_key)?, value_type)?, bindings: HashMap::new(), })) } pub fn with_key_type( map_key: &TypeData, value_type: &TypeData, ) -> Result<Box<MapNode>, TokenStream> { Ok(Box::new(MapNode { type_: map_type(&map_key, value_type)?, bindings: HashMap::new(), })) } pub fn add_binding( &mut self, map_key: &MultibindingMapKey, value_type: &TypeData, ) -> &mut Self { self.bindings.insert(map_key.clone(), value_type.clone()); self } } fn key_type(map_key: &MultibindingMapKey) -> Result<TypeData, TokenStream> { Ok(match map_key { MultibindingMapKey::String(_) => string_type(), MultibindingMapKey::I32(_) => i32_type(), MultibindingMapKey::Enum(ref enum_type, _) => enum_type.clone(), _ => return compile_error("unable to handle key"), }) } fn map_type(key_type: &TypeData, value_type: &TypeData) -> Result<TypeData, TokenStream> { let mut map_type = TypeData::new(); map_type.root = TypeRoot::GLOBAL; map_type.path = "std::collections::HashMap".to_string(); map_type.args.push(key_type.clone()); map_type.args.push(value_type.clone()); map_type.qualifier = value_type.qualifier.clone(); Ok(map_type) } fn string_type() -> TypeData { let mut string_type = TypeData::new(); string_type.root = TypeRoot::GLOBAL; string_type.path = "std::string::String".to_string(); string_type } fn i32_type() -> TypeData { let mut string_type = TypeData::new(); string_type.root = TypeRoot::PRIMITIVE; string_type.path = "i32".to_string(); string_type } impl Node for MapNode { fn get_name(&self) -> String { return format!("{} (multibinding)", self.type_.readable()); } fn generate_implementation(&self, graph: &Graph) -> Result<ComponentSections, TokenStream> { let name_ident = self.get_identifier(); let provides_type = component_visibles::visible_type(graph.manifest, &self.type_).syn_type(); let mut into_maps = quote! {}; for binding in &self.bindings { let key = match binding.0 { MultibindingMapKey::String(ref key) => { quote! { #key.to_owned() } } MultibindingMapKey::I32(key) => { quote! { #key } } MultibindingMapKey::Enum(_, value_type) => { let key = component_visibles::visible_type(graph.manifest, &value_type).syn_type(); quote! { #key } } _ => return compile_error(&format!("unable to handle key {:?}", binding.0)), }; let ident = binding.1.identifier(); into_maps = quote! 
{ #into_maps result.insert(#key, self.#ident()); } } let mut result = ComponentSections::new(); result.add_methods(quote! { #[allow(unused_mut)] #[allow(dead_code)] fn #name_ident(&'_ self) -> #provides_type{ let mut result = HashMap::new(); #into_maps result } }); Ok(result) } fn merge(&self, new_node: &dyn Node) -> Result<Box<dyn Node>, TokenStream> { if new_node.type_id() != TypeId::of::<MapNode>() { return <dyn Node>::duplicated(self, new_node); } let map_node = new_node.as_any().downcast_ref::<MapNode>().unwrap(); for key in map_node.bindings.keys() { if self.bindings.contains_key(key) { return compile_error(&format!( "found duplicated key {:?} for {}, provided by:\n\t{}", key, self.type_.readable(), new_node.get_name() )); } } let mut new_map = self.bindings.clone(); new_map.extend( map_node .bindings .iter() .map(|(k, v)| (k.clone(), v.clone())), ); Ok(Box::new(MapNode { type_: self.type_.clone(), bindings: new_map, })) } fn get_type(&self) -> &TypeData { &self.type_ } fn get_identifier(&self) -> Ident { self.type_.identifier() } fn get_dependencies(&self) -> Vec<DependencyData> { self.bindings .iter() .map(|binding| DependencyData::from_type(binding.1)) .collect() } fn clone_box(&self) -> Box<dyn Node> { Box::new(self.clone()) } fn as_any(&self) -> &dyn Any { self } fn as_mut_any(&mut self) -> &mut dyn Any { self } }
30.78392
97
0.571172
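MapNode::merge checks every incoming key against the existing bindings before extending the map, so a duplicate key is a hard error rather than a silent overwrite. The same check with plain HashMaps, a sketch assuming String keys for brevity:

use std::collections::HashMap;

/// Merge `extra` into a copy of `base`, failing on the first duplicated key,
/// mirroring the duplicate-binding check in `MapNode::merge` above.
fn merge_bindings(
    base: &HashMap<String, String>,
    extra: &HashMap<String, String>,
) -> Result<HashMap<String, String>, String> {
    for key in extra.keys() {
        if base.contains_key(key) {
            return Err(format!("found duplicated key {:?}", key));
        }
    }
    let mut merged = base.clone();
    merged.extend(extra.iter().map(|(k, v)| (k.clone(), v.clone())));
    Ok(merged)
}

fn main() {
    let mut a = HashMap::new();
    a.insert("x".to_string(), "1".to_string());
    let mut b = HashMap::new();
    b.insert("y".to_string(), "2".to_string());
    assert!(merge_bindings(&a, &b).is_ok());
    assert!(merge_bindings(&a, &a).is_err()); // "x" is already bound
}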
611c57d22e515cb7af28c42e102c4d7e623b32c4
1,005
use crate::sys::osdir::OsDir; use crate::wasi::Result; use std::os::unix::prelude::AsRawFd; pub(crate) fn unlink_file(dirfd: &OsDir, path: &str) -> Result<()> { use yanix::file::{unlinkat, AtFlag}; unsafe { unlinkat(dirfd.as_raw_fd(), path, AtFlag::empty())? }; Ok(()) } pub(crate) fn symlink(old_path: &str, new_dirfd: &OsDir, new_path: &str) -> Result<()> { use yanix::file::symlinkat; log::debug!("path_symlink old_path = {:?}", old_path); log::debug!( "path_symlink (new_dirfd, new_path) = ({:?}, {:?})", new_dirfd, new_path ); unsafe { symlinkat(old_path, new_dirfd.as_raw_fd(), new_path)? }; Ok(()) } pub(crate) fn rename( old_dirfd: &OsDir, old_path: &str, new_dirfd: &OsDir, new_path: &str, ) -> Result<()> { use yanix::file::renameat; unsafe { renameat( old_dirfd.as_raw_fd(), old_path, new_dirfd.as_raw_fd(), new_path, )? }; Ok(()) }
23.928571
88
0.558209
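The functions above resolve paths relative to an open directory fd via the `*at` syscalls (unlinkat, symlinkat, renameat), which keeps the operation anchored even if the directory is renamed. std has no direct equivalent, so the sketch below joins paths under a base directory instead; note that plain path joining is not race-free the way the dirfd-relative calls are, and the function names simply echo the ones above:

use std::fs;
use std::io;
use std::path::{Path, PathBuf};

/// Join a path under a base directory. Unlike the `*at` syscalls, this
/// re-resolves the base path on every call, so it is only a sketch.
fn resolve(base: &Path, rel: &str) -> PathBuf {
    base.join(rel)
}

fn unlink_file(base: &Path, rel: &str) -> io::Result<()> {
    fs::remove_file(resolve(base, rel))
}

fn rename(old_base: &Path, old: &str, new_base: &Path, new: &str) -> io::Result<()> {
    fs::rename(resolve(old_base, old), resolve(new_base, new))
}

fn main() -> io::Result<()> {
    let dir = std::env::temp_dir();
    fs::write(resolve(&dir, "a.txt"), b"hello")?;
    rename(&dir, "a.txt", &dir, "b.txt")?;
    unlink_file(&dir, "b.txt")
}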
0a5eb26b602438d9c97238d7e057b2f7e7ed4504
3,026
//! Orientation tracker use crate::{ orientation::Orientation, vector::{Component, Vector, VectorExt}, }; use micromath::generic_array::typenum::U3; // Used for intra-doc-link resolution only #[allow(unused_imports)] use crate::accelerometer::Accelerometer; // Spuriously triggers unused import warnings in cases std is linked #[allow(unused_imports)] use micromath::F32Ext; /// Orientation tracker: computes a device's [`Orientation`] from accelerometer /// readings. pub struct Tracker { /// Threshold at which acceleration due to gravity is registered threshold: f32, /// Last orientation type read from the accelerometer last_orientation: Orientation, } impl Tracker { /// Create a new orientation tracker. /// /// The `threshold` value should be slightly less than the absolute value /// of the reading you get from the accelerometer when the device is lying /// in a position where two of the axes are reading 0 (i.e. getting a /// strong reading from one axis alone). It may require some /// experimentation to properly tune this threshold. /// /// For best results, set the accelerometer's sensitivity higher than ±2G, /// e.g. ±4G or ±8G. This will help reduce noise in the accelerometer data. pub fn new(threshold: impl Into<f32>) -> Self { Self { threshold: threshold.into(), last_orientation: Orientation::Unknown, } } /// Update the tracker's internal state from the given acceleration vector /// (i.e. obtained from [`Accelerometer::acceleration`]), returning a new /// computed orientation value. pub fn update<V, C>(&mut self, acceleration: V) -> Orientation where V: Vector<Axes = U3, Component = C> + VectorExt, C: Component + Into<f32>, { let components = acceleration.to_array(); let x: f32 = components[0].into(); let y: f32 = components[1].into(); let z: f32 = components[2].into(); let result = if x.abs() > self.threshold { // Landscape if x >= 0.0 { Orientation::LandscapeUp } else { Orientation::LandscapeDown } } else if y.abs() > self.threshold { // Portrait if y >= 0.0 { Orientation::PortraitUp } else { Orientation::PortraitDown } } else if z.abs() > self.threshold { // Flat if z >= 0.0 { Orientation::FaceUp } else { Orientation::FaceDown } } else { Orientation::Unknown }; if result != Orientation::Unknown { self.last_orientation = result; } result } /// Get the last known orientation reading for the device. /// /// Use [`Tracker::update`] to obtain a new reading. pub fn orientation(&self) -> Orientation { self.last_orientation } }
31.195876
79
0.593192
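Stripped of the vector generics, the tracker's axis test is a chain of threshold comparisons on the raw components, checked in x, y, z order. A free-standing std-only sketch of that classification, not the crate's API:

#[derive(Debug, PartialEq)]
enum Orientation {
    LandscapeUp,
    LandscapeDown,
    PortraitUp,
    PortraitDown,
    FaceUp,
    FaceDown,
    Unknown,
}

/// Classify an acceleration sample by whichever axis exceeds the threshold,
/// checking x, then y, then z, as `Tracker::update` above does.
fn classify(x: f32, y: f32, z: f32, threshold: f32) -> Orientation {
    if x.abs() > threshold {
        if x >= 0.0 { Orientation::LandscapeUp } else { Orientation::LandscapeDown }
    } else if y.abs() > threshold {
        if y >= 0.0 { Orientation::PortraitUp } else { Orientation::PortraitDown }
    } else if z.abs() > threshold {
        if z >= 0.0 { Orientation::FaceUp } else { Orientation::FaceDown }
    } else {
        Orientation::Unknown
    }
}

fn main() {
    // Lying flat: gravity shows up almost entirely on the z axis.
    assert_eq!(classify(0.1, -0.05, 0.98, 0.75), Orientation::FaceUp);
    // Mid-rotation: no axis dominates, so the reading is inconclusive.
    assert_eq!(classify(0.5, 0.5, 0.5, 0.75), Orientation::Unknown);
}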
39d8c750816c54c04ae59d4118d68fbf2f805fc2
7,481
use super::Tag; use crate::{config::Config, layouts::Layout, Workspace}; use serde::{Deserialize, Serialize}; use std::collections::HashMap; #[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq)] pub enum LayoutMode { Tag, Workspace, } impl Default for LayoutMode { fn default() -> Self { Self::Workspace } } #[derive(Serialize, Deserialize, Debug, Clone)] pub struct LayoutManager { pub mode: LayoutMode, pub layouts: Vec<Layout>, pub layouts_per_workspaces: HashMap<i32, Vec<Layout>>, } impl LayoutManager { pub fn new(config: &impl Config) -> Self { let layouts_per_workspaces = config .workspaces() .unwrap_or_default() .iter() .filter_map(|ws| ws.id.map(|id| (id, ws.layouts.clone()))) .collect(); Self { mode: config.layout_mode(), layouts: config.layouts(), layouts_per_workspaces, } } pub fn new_layout(&self, workspace_id: Option<i32>) -> Layout { *self .layouts(workspace_id) .first() .unwrap_or(&Layout::default()) } pub fn next_layout(&self, workspace: &Workspace) -> Layout { let layouts = self.layouts(workspace.id); let next = match layouts.iter().position(|&x| x == workspace.layout) { Some(index) if index == layouts.len() - 1 => layouts.first(), Some(index) => layouts.get(index + 1), None => None, }; // If no layout was found, return the first in the list, in case of a // SoftReload with a new list that does not include the current layout. *next.unwrap_or_else(|| layouts.first().unwrap_or(&workspace.layout)) } pub fn previous_layout(&self, workspace: &Workspace) -> Layout { let layouts = self.layouts(workspace.id); let next = match layouts.iter().position(|&x| x == workspace.layout) { Some(index) if index == 0 => layouts.last(), Some(index) => layouts.get(index - 1), None => None, }; // If no layout was found, return the first in the list, in case of a // SoftReload with a new list that does not include the current layout. 
*next.unwrap_or_else(|| layouts.first().unwrap_or(&workspace.layout)) } pub fn update_layouts( &self, workspaces: &mut Vec<Workspace>, mut tags: Vec<&mut Tag>, ) -> Option<bool> { for workspace in workspaces { let tag = tags.iter_mut().find(|t| t.id == workspace.tags[0])?; match self.mode { LayoutMode::Workspace => { tag.set_layout(workspace.layout, workspace.main_width_percentage); } LayoutMode::Tag => { workspace.layout = tag.layout; workspace.main_width_percentage = tag.main_width_percentage; } } } Some(true) } fn layouts(&self, workspace_id: Option<i32>) -> &Vec<Layout> { workspace_id .and_then(|id| self.layouts_per_workspaces.get(&id)) .and_then(|layouts| { if layouts.is_empty() { None } else { Some(layouts) } }) .unwrap_or(&self.layouts) } } #[cfg(test)] mod tests { use crate::config::TestConfig; use crate::models::BBox; use super::*; fn layout_manager() -> LayoutManager { let config = TestConfig { layouts: vec![ Layout::Monocle, Layout::EvenVertical, Layout::MainAndHorizontalStack, ], workspaces: Some(vec![ crate::config::Workspace { id: Some(0), layouts: vec![ Layout::CenterMain, Layout::CenterMainBalanced, Layout::MainAndDeck, ], ..Default::default() }, crate::config::Workspace { id: Some(1), ..Default::default() }, crate::config::Workspace { id: Some(2), layouts: vec![], ..Default::default() }, ]), ..Default::default() }; LayoutManager::new(&config) } fn workspace(id: i32, layout: Layout) -> Workspace { Workspace::new( Some(id), BBox { width: 0, height: 0, x: 0, y: 0, }, layout, None, ) } #[test] fn layouts_should_fallback_to_the_global_list() { let layout_manager = layout_manager(); assert_eq!(layout_manager.layouts(Some(1)), &layout_manager.layouts); // layouts = None assert_eq!(layout_manager.layouts(Some(2)), &layout_manager.layouts); // layouts = vec[]! 
assert_eq!(layout_manager.layouts(Some(3)), &layout_manager.layouts); // Non existent id assert_eq!(layout_manager.layouts(None), &layout_manager.layouts); } #[test] fn next_layout_basic() { let layout_manager = layout_manager(); let workspace = workspace(0, Layout::CenterMainBalanced); assert_eq!(layout_manager.next_layout(&workspace), Layout::MainAndDeck); } #[test] fn next_layout_should_cycle() { let layout_manager = layout_manager(); let workspace = workspace(0, Layout::MainAndDeck); assert_eq!(layout_manager.next_layout(&workspace), Layout::CenterMain); } #[test] fn next_layout_fallback_to_global_layouts() { let layout_manager = layout_manager(); let workspace = workspace(1, Layout::EvenVertical); assert_eq!( layout_manager.next_layout(&workspace), Layout::MainAndHorizontalStack ); } #[test] fn next_layout_fallback_to_the_first_layout() { let layout_manager = layout_manager(); let workspace = workspace(0, Layout::Fibonacci); assert_eq!(layout_manager.next_layout(&workspace), Layout::CenterMain); } #[test] fn prev_layout_basic() { let layout_manager = layout_manager(); let workspace = workspace(0, Layout::CenterMainBalanced); assert_eq!( layout_manager.previous_layout(&workspace), Layout::CenterMain ); } #[test] fn prev_layout_should_cycle() { let layout_manager = layout_manager(); let workspace = workspace(0, Layout::CenterMain); assert_eq!( layout_manager.previous_layout(&workspace), Layout::MainAndDeck ); } #[test] fn previous_layout_fallback_to_global_layouts() { let layout_manager = layout_manager(); let workspace = workspace(2, Layout::EvenVertical); assert_eq!(layout_manager.previous_layout(&workspace), Layout::Monocle); } #[test] fn previous_layout_fallback_to_the_first_layout() { let layout_manager = layout_manager(); let workspace = workspace(0, Layout::Fibonacci); assert_eq!( layout_manager.previous_layout(&workspace), Layout::CenterMain ); } }
29.924
97
0.548723
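Both next_layout and previous_layout reduce to one operation: step through a list cyclically, and if the current item is missing from the list (after a SoftReload), fall back to the first entry. That core, as a std-only sketch with &str standing in for the Layout enum:

/// Return the element after `current`, wrapping at the end; if `current` is
/// not in the list (e.g. a reload removed it), fall back to the first
/// element, as `next_layout` above does.
fn next_in_cycle<T: PartialEq + Copy>(items: &[T], current: T) -> Option<T> {
    let next = match items.iter().position(|&x| x == current) {
        Some(i) if i == items.len() - 1 => items.first(),
        Some(i) => items.get(i + 1),
        None => None,
    };
    next.or_else(|| items.first()).copied()
}

fn main() {
    let layouts = ["Monocle", "EvenVertical", "MainAndDeck"];
    assert_eq!(next_in_cycle(&layouts, "EvenVertical"), Some("MainAndDeck"));
    assert_eq!(next_in_cycle(&layouts, "MainAndDeck"), Some("Monocle")); // wraps around
    assert_eq!(next_in_cycle(&layouts, "Fibonacci"), Some("Monocle")); // fallback
}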
90c5454c3c633c0f12ae30f1bd03c17ece617ea0
512
//! `Serializer` trait implementations. //! //! Note that this module has been exported only for the documentation purpose. //! It is not intended that this module is used by users explicitly. pub use self::http_header::HttpHeaderSerializer; pub use self::rpc_request::RpcRequestSerializer; pub use self::rpc_response::RpcResponseSerializer; pub use self::url_path::UrlPathSerializer; pub use self::url_query::UrlQuerySerializer; mod http_header; mod rpc_request; mod rpc_response; mod url_path; mod url_query;
32
79
0.796875
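The file above is a facade module: the submodules stay private and only the serializer types are re-exported at the parent, so callers never name the submodule paths. The same shape in miniature, with hypothetical names:

mod serializers {
    // Submodules stay private; only the serializer types escape.
    mod http_header {
        pub struct HttpHeaderSerializer;
    }
    mod url_query {
        pub struct UrlQuerySerializer;
    }

    pub use self::http_header::HttpHeaderSerializer;
    pub use self::url_query::UrlQuerySerializer;
}

fn main() {
    // Callers import from the facade, not from the hidden submodules.
    let _h = serializers::HttpHeaderSerializer;
    let _q = serializers::UrlQuerySerializer;
}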
0881684def4a097dd54a5eaf42df255fae81c3cc
7,270
#![doc(hidden)] use super::{DefaultFilter, Fetch, IntoIndexableIter, IntoView, View}; use crate::internals::{ iter::indexed::{IndexedIter, TrustedRandomAccess}, permissions::Permissions, query::{ filter::{passthrough::Passthrough, try_component::TryComponentFilter, EntityFilterTuple}, QueryResult, }, storage::{ archetype::{Archetype, ArchetypeIndex}, component::{Component, ComponentTypeId}, next_component_version, ComponentSliceMut, ComponentStorage, Components, }, subworld::ComponentAccess, }; use derivative::Derivative; use std::{any::TypeId, marker::PhantomData}; /// Writes a single entity data component type from a chunk. #[derive(Derivative, Debug, Copy, Clone)] #[derivative(Default(bound = ""))] pub struct TryWrite<T>(PhantomData<*const T>); unsafe impl<T: Send> Send for TryWrite<T> {} unsafe impl<T> Sync for TryWrite<T> {} impl<T: Component> DefaultFilter for TryWrite<T> { type Filter = EntityFilterTuple<TryComponentFilter<T>, Passthrough>; } impl<T: Component> IntoView for TryWrite<T> { type View = Self; } impl<'data, T: Component> View<'data> for TryWrite<T> { type Element = <Self::Fetch as IntoIndexableIter>::Item; type Fetch = Slice<'data, T>; type Iter = TryWriteIter<'data, T>; type Read = [ComponentTypeId; 1]; type Write = [ComponentTypeId; 1]; #[inline] fn validate() {} #[inline] fn validate_access(access: &ComponentAccess) -> bool { access.allows_write(ComponentTypeId::of::<T>()) } #[inline] fn reads_types() -> Self::Read { [ComponentTypeId::of::<T>()] } #[inline] fn writes_types() -> Self::Write { [ComponentTypeId::of::<T>()] } #[inline] fn reads<D: Component>() -> bool { TypeId::of::<T>() == TypeId::of::<D>() } #[inline] fn writes<D: Component>() -> bool { TypeId::of::<T>() == TypeId::of::<D>() } #[inline] fn requires_permissions() -> Permissions<ComponentTypeId> { let mut permissions = Permissions::default(); permissions.push(ComponentTypeId::of::<T>()); permissions } unsafe fn fetch( components: &'data Components, archetypes: &'data [Archetype], query: QueryResult<'data>, ) -> Self::Iter { let components = components.get_downcast::<T>(); let archetype_indexes = query.index().iter(); TryWriteIter { components, archetypes, archetype_indexes, } } } #[doc(hidden)] pub struct TryWriteIter<'a, T: Component> { components: Option<&'a T::Storage>, archetype_indexes: std::slice::Iter<'a, ArchetypeIndex>, archetypes: &'a [Archetype], } impl<'a, T: Component> Iterator for TryWriteIter<'a, T> { type Item = Option<Slice<'a, T>>; #[inline] fn next(&mut self) -> Option<Self::Item> { self.archetype_indexes.next().map(|i| unsafe { let slice = self .components .and_then(|components| components.get_mut(*i)) .map_or_else( || Slice::Empty(self.archetypes[*i].entities().len()), |c| c.into(), ); Some(slice) }) } } #[doc(hidden)] pub enum Slice<'a, T: Component> { Occupied { version: &'a mut u64, components: &'a mut [T], next_version: u64, }, Empty(usize), } impl<'a, T: Component> From<ComponentSliceMut<'a, T>> for Slice<'a, T> { fn from(slice: ComponentSliceMut<'a, T>) -> Self { Slice::Occupied { components: slice.components, version: slice.version, next_version: next_component_version(), } } } impl<'a, T: Component> IntoIndexableIter for Slice<'a, T> { type Item = Option<&'a mut T>; type IntoIter = IndexedIter<Data<'a, T>>; fn into_indexable_iter(self) -> Self::IntoIter { let data = match self { Self::Occupied { components, .. 
} => Data::Occupied(components), Self::Empty(count) => Data::Empty(count), }; IndexedIter::new(data) } } impl<'a, T: Component> IntoIterator for Slice<'a, T> { type Item = <Self as IntoIndexableIter>::Item; type IntoIter = <Self as IntoIndexableIter>::IntoIter; fn into_iter(self) -> Self::IntoIter { self.into_indexable_iter() } } impl<'a, T: Component> Fetch for Slice<'a, T> { type Data = Option<&'a mut [T]>; #[inline] fn into_components(self) -> Self::Data { match self { Self::Occupied { components, .. } => Some(components), Self::Empty(_) => None, } } #[inline] fn find<C: 'static>(&self) -> Option<&[C]> { if TypeId::of::<C>() == TypeId::of::<T>() { // safety: C and T are the same type match self { Self::Occupied { components, .. } => Some(unsafe { std::slice::from_raw_parts(components.as_ptr() as *const C, components.len()) }), Self::Empty(_) => None, } } else { None } } #[inline] fn find_mut<C: 'static>(&mut self) -> Option<&mut [C]> { if TypeId::of::<C>() == TypeId::of::<T>() { // safety: C and T are the same type match self { Self::Occupied { components, .. } => Some(unsafe { std::slice::from_raw_parts_mut( components.as_mut_ptr() as *mut C, components.len(), ) }), Self::Empty(_) => None, } } else { None } } #[inline] fn version<C: Component>(&self) -> Option<u64> { if TypeId::of::<C>() == TypeId::of::<T>() { match self { Self::Occupied { version, .. } => Some(**version), Self::Empty(_) => None, } } else { None } } #[inline] fn accepted(&mut self) { if let Self::Occupied { version, next_version, .. } = self { **version = *next_version } } } #[doc(hidden)] pub enum Data<'a, T: Component> { Occupied(&'a mut [T]), Empty(usize), } unsafe impl<'a, T: Component> TrustedRandomAccess for Data<'a, T> { type Item = Option<&'a mut T>; #[inline] fn len(&self) -> usize { match self { Self::Occupied(slice) => slice.len(), Self::Empty(len) => *len, } } #[inline] unsafe fn get_unchecked(&mut self, i: usize) -> Self::Item { match self { Self::Occupied(slice) => Some(slice.get_unchecked(i)), Self::Empty(_) => None, } } #[inline] fn split_at(self, index: usize) -> (Self, Self) { match self { Self::Occupied(slice) => { let (left, right) = slice.split_at_mut(index); (Self::Occupied(left), Self::Occupied(right)) } Self::Empty(count) => (Self::Empty(index), Self::Empty(count - index)), } } }
27.330827
97
0.530812
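The Slice/Data enums above let one iterator type serve both archetypes that store the component and archetypes that do not: the Occupied arm yields Some(&mut T) per entity, the Empty arm yields a matching run of Nones. A safe, std-only sketch of that shape (no unsafe indexing, unlike the TrustedRandomAccess version):

enum MaybeSlice<'a, T> {
    Occupied(&'a mut [T]),
    Empty(usize),
}

enum MaybeIter<'a, T> {
    Occupied(std::slice::IterMut<'a, T>),
    Empty(usize),
}

impl<'a, T> Iterator for MaybeIter<'a, T> {
    type Item = Option<&'a mut T>;

    fn next(&mut self) -> Option<Self::Item> {
        match self {
            // Components present: hand out a mutable reference per entity.
            MaybeIter::Occupied(it) => it.next().map(Some),
            // No storage in this archetype: yield one None per entity instead.
            MaybeIter::Empty(0) => None,
            MaybeIter::Empty(n) => {
                *n -= 1;
                Some(None)
            }
        }
    }
}

impl<'a, T> IntoIterator for MaybeSlice<'a, T> {
    type Item = Option<&'a mut T>;
    type IntoIter = MaybeIter<'a, T>;

    fn into_iter(self) -> MaybeIter<'a, T> {
        match self {
            MaybeSlice::Occupied(s) => MaybeIter::Occupied(s.iter_mut()),
            MaybeSlice::Empty(n) => MaybeIter::Empty(n),
        }
    }
}

fn main() {
    let mut hp = [10, 20];
    for slot in MaybeSlice::Occupied(&mut hp) {
        if let Some(v) = slot {
            *v += 1; // mutate through the optional view
        }
    }
    assert_eq!(hp, [11, 21]);
    assert_eq!(MaybeSlice::<i32>::Empty(3).into_iter().count(), 3);
}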
1e315fc0caa8470570d37a0e66e9a9634925068a
6,401
// Copyright 2018 The Exonum Team // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use test::Bencher; use std::{net::SocketAddr, thread}; use events::{ network::NetworkConfiguration, tests::{raw_message, ConnectionParams, TestEvents}, }; use node::{state::SharedConnectList, ConnectList, EventsPoolCapacity}; struct BenchConfig { times: usize, len: usize, tcp_nodelay: bool, } fn test_events( cfg: &BenchConfig, listen_address: SocketAddr, connect_list: SharedConnectList, ) -> TestEvents { let network_config = NetworkConfiguration { tcp_nodelay: cfg.tcp_nodelay, ..Default::default() }; TestEvents { listen_address, network_config, events_config: EventsPoolCapacity::default(), connect_list, } } fn bench_network(b: &mut Bencher, addrs: [SocketAddr; 2], cfg: &BenchConfig) { b.iter(|| { let times = cfg.times; let len = cfg.len; let first = addrs[0]; let second = addrs[1]; let mut connect_list = ConnectList::default(); let mut params1 = ConnectionParams::from_address(first); connect_list.add(params1.connect_info.clone()); let first_key = params1.connect_info.public_key; let mut params2 = ConnectionParams::from_address(second); connect_list.add(params2.connect_info.clone()); let second_key = params2.connect_info.public_key; let connect_list = SharedConnectList::from_connect_list(connect_list); let e1 = test_events(cfg, first, connect_list.clone()); let e2 = test_events(cfg, second, connect_list.clone()); let mut t1 = params1.spawn(e1, connect_list.clone()); let mut t2 = params2.spawn(e2, connect_list); t1.connect_with(second_key, params1.connect.clone()); t2.connect_with(first_key, params2.connect.clone()); assert_eq!(t1.wait_for_connect(), params2.connect.clone()); assert_eq!(t2.wait_for_connect(), params1.connect.clone()); let t1 = thread::spawn(move || { for _ in 0..times { let msg = raw_message(len); t1.send_to(second_key, msg); t1.wait_for_message(); } t1 }); let t2 = thread::spawn(move || { for _ in 0..times { let msg = raw_message(len); t2.send_to(first_key, msg); t2.wait_for_message(); } t2 }); let mut t1 = t1.join().unwrap(); let mut t2 = t2.join().unwrap(); t1.disconnect_with(second_key); t2.disconnect_with(first_key); assert_eq!(t1.wait_for_disconnect(), second_key); assert_eq!(t2.wait_for_disconnect(), first_key); drop(t1); drop(t2); }) } #[bench] fn bench_msg_short_100(b: &mut Bencher) { let cfg = BenchConfig { tcp_nodelay: false, len: 100, times: 100, }; let addrs = [ "127.0.0.1:6990".parse().unwrap(), "127.0.0.1:6991".parse().unwrap(), ]; bench_network(b, addrs, &cfg); } #[bench] fn bench_msg_short_1000(b: &mut Bencher) { let cfg = BenchConfig { tcp_nodelay: false, len: 1000, times: 1000, }; let addrs = [ "127.0.0.1:9792".parse().unwrap(), "127.0.0.1:9793".parse().unwrap(), ]; bench_network(b, addrs, &cfg); } #[bench] fn bench_msg_short_10_000(b: &mut Bencher) { let cfg = BenchConfig { tcp_nodelay: false, len: 1000, times: 10_000, }; let addrs = [ "127.0.0.1:9792".parse().unwrap(), "127.0.0.1:9793".parse().unwrap(), ]; bench_network(b, addrs, &cfg); } #[bench] fn 
bench_msg_short_100_nodelay(b: &mut Bencher) { let cfg = BenchConfig { tcp_nodelay: true, len: 100, times: 100, }; let addrs = [ "127.0.0.1:4990".parse().unwrap(), "127.0.0.1:4991".parse().unwrap(), ]; bench_network(b, addrs, &cfg); } #[bench] fn bench_msg_short_1000_nodelay(b: &mut Bencher) { let cfg = BenchConfig { tcp_nodelay: true, len: 100, times: 1000, }; let addrs = [ "127.0.0.1:5990".parse().unwrap(), "127.0.0.1:5991".parse().unwrap(), ]; bench_network(b, addrs, &cfg); } #[bench] fn bench_msg_short_10_000_nodelay(b: &mut Bencher) { let cfg = BenchConfig { tcp_nodelay: true, len: 100, times: 10_000, }; let addrs = [ "127.0.0.1:5990".parse().unwrap(), "127.0.0.1:5991".parse().unwrap(), ]; bench_network(b, addrs, &cfg); } #[bench] fn bench_msg_long_10(b: &mut Bencher) { let cfg = BenchConfig { tcp_nodelay: false, len: 100_000, times: 10, }; let addrs = [ "127.0.0.1:9984".parse().unwrap(), "127.0.0.1:9985".parse().unwrap(), ]; bench_network(b, addrs, &cfg); } #[bench] fn bench_msg_long_100(b: &mut Bencher) { let cfg = BenchConfig { tcp_nodelay: false, len: 100_000, times: 100, }; let addrs = [ "127.0.0.1:9946".parse().unwrap(), "127.0.0.1:9947".parse().unwrap(), ]; bench_network(b, addrs, &cfg); } #[bench] fn bench_msg_long_10_nodelay(b: &mut Bencher) { let cfg = BenchConfig { tcp_nodelay: true, len: 100_000, times: 10, }; let addrs = [ "127.0.0.1:9198".parse().unwrap(), "127.0.0.1:9199".parse().unwrap(), ]; bench_network(b, addrs, &cfg); } #[bench] fn bench_msg_long_100_nodelay(b: &mut Bencher) { let cfg = BenchConfig { tcp_nodelay: true, len: 100_000, times: 100, }; let addrs = [ "127.0.0.1:9198".parse().unwrap(), "127.0.0.1:9199".parse().unwrap(), ]; bench_network(b, addrs, &cfg); }
25.810484
86
0.580378
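Each benchmark iteration above spawns one thread per peer, has each side send and await `times` messages, then joins both threads, so the measured time covers full round trips. The same skeleton with std channels standing in for the TCP connections; this is a sketch of the pattern, not the exonum harness:

use std::sync::mpsc;
use std::thread;
use std::time::Instant;

fn main() {
    let times = 1_000;
    let len = 100;

    // Two unidirectional channels stand in for the two directions of the socket.
    let (a_to_b_tx, a_to_b_rx) = mpsc::channel::<Vec<u8>>();
    let (b_to_a_tx, b_to_a_rx) = mpsc::channel::<Vec<u8>>();

    let start = Instant::now();

    // Each peer sends `times` messages and waits for `times` messages back,
    // mirroring the two spawned threads in the benchmark above.
    let a = thread::spawn(move || {
        for _ in 0..times {
            a_to_b_tx.send(vec![0u8; len]).unwrap();
            b_to_a_rx.recv().unwrap();
        }
    });
    let b = thread::spawn(move || {
        for _ in 0..times {
            b_to_a_tx.send(vec![0u8; len]).unwrap();
            a_to_b_rx.recv().unwrap();
        }
    });

    a.join().unwrap();
    b.join().unwrap();
    println!("{} exchanges of {} bytes took {:?}", times, len, start.elapsed());
}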
03f4e74eb75e2c0b405619ca93482359a90c6cf9
1,131
/* RUST_BACKTRACE=1 RUST_LOG=debug cargo run -p oauth2_client_device_flow_example --bin device_flow_amazon -- 'YOUR_CLIENT_ID' */ use std::{env, error}; use http_api_isahc_client::IsahcClient; use oauth2_amazon::{AmazonProviderWithDevices, AmazonScope}; use oauth2_client::device_authorization_grant::Flow; #[tokio::main] async fn main() -> Result<(), Box<dyn error::Error>> { pretty_env_logger::init(); let client_id = env::args().nth(1).unwrap(); run(client_id).await } async fn run(client_id: String) -> Result<(), Box<dyn error::Error>> { let scopes = vec![AmazonScope::Profile, AmazonScope::PostalCode]; let flow = Flow::new(IsahcClient::new()?, IsahcClient::new()?); let provider = AmazonProviderWithDevices::new(client_id)?; let access_token_body = flow .execute( &provider, scopes, |user_code, verification_uri, _verification_uri_complete| { println!("open [{}] then input [{}]", verification_uri, user_code); }, ) .await?; println!("access_token_body: {:?}", access_token_body); Ok(()) }
28.275
123
0.65252
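Behind Flow::execute, the device authorization grant boils down to: display the user code and verification URI, then poll the token endpoint at an interval until the user approves or the code expires. A std-only skeleton of the polling half; the closure stands in for the real HTTP call, and none of the names below are oauth2_client APIs:

use std::thread;
use std::time::{Duration, Instant};

enum Poll {
    Pending,
    Token(String),
}

/// Poll `check` every `interval` until it yields a token or `expires_in`
/// elapses -- the loop the device flow runs against the token endpoint.
fn poll_for_token(
    mut check: impl FnMut() -> Poll,
    interval: Duration,
    expires_in: Duration,
) -> Result<String, &'static str> {
    let deadline = Instant::now() + expires_in;
    loop {
        match check() {
            Poll::Token(t) => return Ok(t),
            Poll::Pending if Instant::now() >= deadline => return Err("expired_token"),
            Poll::Pending => thread::sleep(interval),
        }
    }
}

fn main() {
    // Simulate a user who approves on the third poll.
    let mut calls = 0;
    let token = poll_for_token(
        || {
            calls += 1;
            if calls < 3 { Poll::Pending } else { Poll::Token("access-token".into()) }
        },
        Duration::from_millis(1),
        Duration::from_secs(5),
    );
    assert_eq!(token.unwrap(), "access-token");
}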
380a776723288c449e9bbb99d71d5e724bb6e77f
33,149
//! Parsers for applying parsers multiple times /// `separated_list0!(I -> IResult<I,T>, I -> IResult<I,O>) => I -> IResult<I, Vec<O>>` /// `separated_list0(sep, X)` returns a `Vec<X>`. /// /// ```rust /// # #[macro_use] extern crate nom; /// # use nom::{Err, error::ErrorKind, Needed, IResult}; /// use nom::multi::separated_list0; /// use nom::bytes::complete::tag; /// /// # fn main() { /// named!(parser<&str, Vec<&str>>, separated_list0!(tag("|"), tag("abc"))); /// /// assert_eq!(parser("abc|abc|abc"), Ok(("", vec!["abc", "abc", "abc"]))); /// assert_eq!(parser("abc123abc"), Ok(("123abc", vec!["abc"]))); /// assert_eq!(parser("abc|def"), Ok(("|def", vec!["abc"]))); /// assert_eq!(parser(""), Ok(("", vec![]))); /// assert_eq!(parser("def|abc"), Ok(("def|abc", vec![]))); /// # } /// ``` #[cfg(feature = "alloc")] #[macro_export(local_inner_macros)] macro_rules! separated_list0( ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => ( separated_list0!($i, |i| $submac!(i, $($args)*), |i| $submac2!(i, $($args2)*)) ); ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => ( separated_list0!($i, |i| $submac!(i, $($args)*), $g); ); ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => ( separated_list0!($i, $f, |i| $submac!(i, $($args)*)); ); ($i:expr, $f:expr, $g:expr) => ( $crate::multi::separated_list0c($i, $f, $g) ); ); /// `separated_list1!(I -> IResult<I,T>, I -> IResult<I,O>) => I -> IResult<I, Vec<O>>` /// `separated_list1(sep, X)` returns a `Vec<X>`. /// /// It will return an error if there is no element in the list. /// ```rust /// # #[macro_use] extern crate nom; /// # use nom::{Err, error::ErrorKind, Needed, IResult}; /// use nom::multi::separated_list1; /// use nom::bytes::complete::tag; /// /// # fn main() { /// named!(parser<&str, Vec<&str>>, separated_list1!(tag("|"), tag("abc"))); /// /// assert_eq!(parser("abc|abc|abc"), Ok(("", vec!["abc", "abc", "abc"]))); /// assert_eq!(parser("abc123abc"), Ok(("123abc", vec!["abc"]))); /// assert_eq!(parser("abc|def"), Ok(("|def", vec!["abc"]))); /// assert_eq!(parser(""), Err(Err::Error(("", ErrorKind::Tag)))); /// assert_eq!(parser("def|abc"), Err(Err::Error(("def|abc", ErrorKind::Tag)))); /// # } /// ``` #[cfg(feature = "alloc")] #[macro_export(local_inner_macros)] macro_rules! separated_list1( ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => ( separated_list1!($i, |i| $submac!(i, $($args)*), |i| $submac2!(i, $($args2)*)) ); ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => ( separated_list1!($i, |i| $submac!(i, $($args)*), $g); ); ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => ( separated_list1!($i, $f, |i| $submac!(i, $($args)*)); ); ($i:expr, $f:expr, $g:expr) => ( $crate::multi::separated_list1c($i, $f, $g) ); ); /// `many0!(I -> IResult<I,O>) => I -> IResult<I, Vec<O>>` /// Applies the parser 0 or more times and returns the list of results in a `Vec`. /// /// The embedded parser may return `Incomplete`. /// /// `many0` will only return `Error` if the embedded parser does not consume any input /// (to avoid infinite loops). /// /// ``` /// # #[macro_use] extern crate nom; /// # fn main() { /// named!(multi<&[u8], Vec<&[u8]> >, many0!( tag!( "abcd" ) ) ); /// /// let a = b"abcdabcdefgh"; /// let b = b"azerty"; /// /// let res = vec![&b"abcd"[..], &b"abcd"[..]]; /// assert_eq!(multi(&a[..]),Ok((&b"efgh"[..], res))); /// assert_eq!(multi(&b[..]),Ok((&b"azerty"[..], Vec::new()))); /// # } /// ``` /// #[cfg(feature = "alloc")] #[macro_export(local_inner_macros)] macro_rules! 
many0( ($i:expr, $submac:ident!( $($args:tt)* )) => ( many0!($i, |i| $submac!(i, $($args)*)) ); ($i:expr, $f:expr) => ( $crate::multi::many0c($i, $f) ); ); /// `many1!(I -> IResult<I,O>) => I -> IResult<I, Vec<O>>` /// Applies the parser 1 or more times and returns the list of results in a `Vec`. /// /// The embedded parser may return `Incomplete`. /// /// ``` /// # #[macro_use] extern crate nom; /// # use nom::Err; /// # use nom::error::ErrorKind; /// # fn main() { /// named!(multi<&[u8], Vec<&[u8]> >, many1!( tag!( "abcd" ) ) ); /// /// let a = b"abcdabcdefgh"; /// let b = b"azerty"; /// /// let res = vec![&b"abcd"[..], &b"abcd"[..]]; /// assert_eq!(multi(&a[..]), Ok((&b"efgh"[..], res))); /// assert_eq!(multi(&b[..]), Err(Err::Error(error_position!(&b[..], ErrorKind::Tag)))); /// # } /// ``` #[cfg(feature = "alloc")] #[macro_export(local_inner_macros)] macro_rules! many1( ($i:expr, $submac:ident!( $($args:tt)* )) => ( many1!($i, |i| $submac!(i, $($args)*)) ); ($i:expr, $f:expr) => ( $crate::multi::many1c($i, $f) ); ); /// `many_till!(I -> IResult<I,O>, I -> IResult<I,P>) => I -> IResult<I, (Vec<O>, P)>` /// Applies the first parser until the second applies. Returns a tuple containing the list /// of results from the first in a Vec and the result of the second. /// /// The first embedded parser may return `Incomplete`. /// /// ``` /// # #[macro_use] extern crate nom; /// # use nom::Err; /// # use nom::error::ErrorKind; /// # fn main() { /// named!(multi<&[u8], (Vec<&[u8]>, &[u8]) >, many_till!( tag!( "abcd" ), tag!( "efgh" ) ) ); /// /// let a = b"abcdabcdefghabcd"; /// let b = b"efghabcd"; /// let c = b"azerty"; /// /// let res_a = (vec![&b"abcd"[..], &b"abcd"[..]], &b"efgh"[..]); /// let res_b: (Vec<&[u8]>, &[u8]) = (Vec::new(), &b"efgh"[..]); /// assert_eq!(multi(&a[..]),Ok((&b"abcd"[..], res_a))); /// assert_eq!(multi(&b[..]),Ok((&b"abcd"[..], res_b))); /// assert_eq!(multi(&c[..]), Err(Err::Error(error_node_position!(&c[..], ErrorKind::ManyTill, /// error_position!(&c[..], ErrorKind::Tag))))); /// # } /// ``` #[cfg(feature = "alloc")] #[macro_export(local_inner_macros)] macro_rules! many_till( ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => ( many_till!($i, |i| $submac!(i, $($args)*), |i| $submac2!(i, $($args2)*)) ); ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => ( many_till!($i, |i| $submac!(i, $($args)*), $g); ); ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => ( many_till!($i, $f, |i| $submac!(i, $($args)*)); ); ($i:expr, $f:expr, $g:expr) => ( $crate::multi::many_tillc($i, $f, $g) ); ); /// `many_m_n!(usize, usize, I -> IResult<I,O>) => I -> IResult<I, Vec<O>>` /// Applies the parser between m and n times (n included) and returns the list of /// results in a `Vec`. /// /// the embedded parser may return Incomplete /// /// ``` /// # #[macro_use] extern crate nom; /// # use nom::Err; /// # use nom::error::ErrorKind; /// # fn main() { /// named!(multi<&[u8], Vec<&[u8]> >, many_m_n!(2, 4, tag!( "abcd" ) ) ); /// /// let a = b"abcdefgh"; /// let b = b"abcdabcdefgh"; /// let c = b"abcdabcdabcdabcdabcdefgh"; /// /// assert_eq!(multi(&a[..]), Err(Err::Error(error_position!(&b"efgh"[..], ErrorKind::Tag)))); /// let res = vec![&b"abcd"[..], &b"abcd"[..]]; /// assert_eq!(multi(&b[..]),Ok((&b"efgh"[..], res))); /// let res2 = vec![&b"abcd"[..], &b"abcd"[..], &b"abcd"[..], &b"abcd"[..]]; /// assert_eq!(multi(&c[..]),Ok((&b"abcdefgh"[..], res2))); /// # } /// ``` #[cfg(feature = "alloc")] #[macro_export(local_inner_macros)] macro_rules! 
many_m_n( ($i:expr, $m:expr, $n: expr, $submac:ident!( $($args:tt)* )) => ( many_m_n!($i, $m, $n, |i| $submac!(i, $($args)*)) ); ($i:expr, $m:expr, $n: expr, $f:expr) => ( $crate::multi::many_m_nc($i, $m, $n, $f) ); ); /// `many0_count!(I -> IResult<I,O>) => I -> IResult<I, usize>` /// Applies the parser 0 or more times and returns the number of times the parser was applied. /// /// `many0_count` will only return `Error` if the embedded parser does not consume any input /// (to avoid infinite loops). /// /// ``` /// #[macro_use] extern crate nom; /// use nom::character::streaming::digit1; /// /// named!(number<&[u8], usize>, many0_count!(pair!(digit1, tag!(",")))); /// /// fn main() { /// assert_eq!(number(&b"123,45,abc"[..]), Ok((&b"abc"[..], 2))); /// } /// ``` /// #[macro_export] macro_rules! many0_count { ($i:expr, $submac:ident!( $($args:tt)* )) => ( $crate::multi::many0_countc($i, |i| $submac!(i, $($args)*)) ); ($i:expr, $f:expr) => ( $crate::multi::many0_countc($i, $f) ); } /// `many1_count!(I -> IResult<I,O>) => I -> IResult<I, usize>` /// Applies the parser 1 or more times and returns the number of times the parser was applied. /// /// ``` /// #[macro_use] extern crate nom; /// use nom::character::streaming::digit1; /// /// named!(number<&[u8], usize>, many1_count!(pair!(digit1, tag!(",")))); /// /// fn main() { /// assert_eq!(number(&b"123,45,abc"[..]), Ok((&b"abc"[..], 2))); /// } /// ``` /// #[macro_export] macro_rules! many1_count { ($i:expr, $submac:ident!( $($args:tt)* )) => ( $crate::multi::many1_countc($i, |i| $submac!(i, $($args)*)) ); ($i:expr, $f:expr) => ( $crate::multi::many1_countc($i, $f) ); } /// `count!(I -> IResult<I,O>, nb) => I -> IResult<I, Vec<O>>` /// Applies the child parser a specified number of times. /// /// ``` /// # #[macro_use] extern crate nom; /// # use nom::Err; /// # use nom::error::ErrorKind; /// # fn main() { /// named!(counter< Vec<&[u8]> >, count!( tag!( "abcd" ), 2 ) ); /// /// let a = b"abcdabcdabcdef"; /// let b = b"abcdefgh"; /// let res = vec![&b"abcd"[..], &b"abcd"[..]]; /// /// assert_eq!(counter(&a[..]),Ok((&b"abcdef"[..], res))); /// assert_eq!(counter(&b[..]), Err(Err::Error(error_position!(&b"efgh"[..], ErrorKind::Tag)))); /// # } /// ``` /// #[cfg(feature = "alloc")] #[macro_export(local_inner_macros)] macro_rules! count( ($i:expr, $submac:ident!( $($args:tt)* ), $count: expr) => ( count!($i, |i| $submac!(i, $($args)*), $count) ); ($i:expr, $f:expr, $count: expr) => ( $crate::multi::count($f, $count)($i) ); ); /// `length_count!(I -> IResult<I, nb>, I -> IResult<I,O>) => I -> IResult<I, Vec<O>>` /// Gets a number from the first parser, then applies the second parser that many times. /// /// ```rust /// # #[macro_use] extern crate nom; /// # use nom::{Err, Needed}; /// # use nom::error::ErrorKind; /// use nom::number::complete::be_u8; /// # fn main() { /// named!(parser<Vec<&[u8]>>, length_count!(be_u8, tag!("abc"))); /// /// assert_eq!(parser(&b"\x02abcabcabc"[..]), Ok(((&b"abc"[..], vec![&b"abc"[..], &b"abc"[..]])))); /// assert_eq!(parser(&b"\x04abcabcabc"[..]), Err(Err::Incomplete(Needed::new(3)))); /// # } /// ``` #[macro_export(local_inner_macros)] #[cfg(feature = "alloc")] macro_rules! 
length_count( ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => ( { use $crate::lib::std::result::Result::*; use $crate::Err; match $submac!($i, $($args)*) { Err(e) => Err(Err::convert(e)), Ok((i, o)) => { match count!(i, $submac2!($($args2)*), o as usize) { Err(e) => Err(Err::convert(e)), Ok((i2, o2)) => Ok((i2, o2)) } } } } ); ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => ( length_count!($i, $submac!($($args)*), call!($g)); ); ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => ( length_count!($i, call!($f), $submac!($($args)*)); ); ($i:expr, $f:expr, $g:expr) => ( length_count!($i, call!($f), call!($g)); ); ); /// `length_data!(I -> IResult<I, nb>) => O` /// /// `length_data` gets a number from the first parser, then takes a subslice of the input /// of that size and returns that subslice. /// /// ```rust /// # #[macro_use] extern crate nom; /// # use nom::{Err, Needed}; /// # use nom::error::ErrorKind; /// use nom::number::complete::be_u8; /// # fn main() { /// named!(parser, length_data!(be_u8)); /// /// assert_eq!(parser(&b"\x06abcabcabc"[..]), Ok((&b"abc"[..], &b"abcabc"[..]))); /// assert_eq!(parser(&b"\x06abc"[..]), Err(Err::Incomplete(Needed::new(6)))); /// # } /// ``` #[macro_export(local_inner_macros)] macro_rules! length_data( ($i:expr, $submac:ident!( $($args:tt)* )) => ({ $crate::multi::length_data(|i| $submac!(i, $($args)*))($i) }); ($i:expr, $f:expr) => ( $crate::multi::length_data($f)($i) ); ); /// `length_value!(I -> IResult<I, nb>, I -> IResult<I,O>) => I -> IResult<I, O>` /// /// Gets a number from the first parser, takes a subslice of the input of that size, /// then applies the second parser on that subslice. If the second parser returns /// `Incomplete`, `length_value` will return an error. /// /// ```rust /// # #[macro_use] extern crate nom; /// # use nom::{Err, Needed}; /// # use nom::error::ErrorKind; /// use nom::number::complete::be_u8; /// use nom::character::complete::alpha0; /// use nom::bytes::complete::tag; /// # fn main() { /// named!(parser, length_value!(be_u8, alpha0)); /// /// assert_eq!(parser(&b"\x06abcabcabc"[..]), Ok((&b"abc"[..], &b"abcabc"[..]))); /// assert_eq!(parser(&b"\x06abc"[..]), Err(Err::Incomplete(Needed::new(6)))); /// # } /// ``` #[macro_export(local_inner_macros)] macro_rules! length_value( ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => ( length_value!($i, |i| $submac!(i, $($args)*), |i| $submac2!(i, $($args2)*)) ); ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => ( length_value!($i, |i| $submac!(i, $($args)*), $g); ); ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => ( length_value!($i, $f, |i| $submac!(i, $($args)*)); ); ($i:expr, $f:expr, $g:expr) => ( $crate::multi::length_valuec($i, $f, $g); ); ); /// `fold_many0!(I -> IResult<I,O>, R, Fn(R, O) -> R) => I -> IResult<I, R>` /// Applies the parser 0 or more times and folds the list of return values. /// /// The embedded parser may return `Incomplete`. /// /// ``` /// # #[macro_use] extern crate nom; /// # fn main() { /// named!(multi<&[u8], Vec<&[u8]> >, /// fold_many0!( tag!( "abcd" ), Vec::new(), |mut acc: Vec<_>, item| { /// acc.push(item); /// acc /// })); /// /// let a = b"abcdabcdefgh"; /// let b = b"azerty"; /// /// let res = vec![&b"abcd"[..], &b"abcd"[..]]; /// assert_eq!(multi(&a[..]),Ok((&b"efgh"[..], res))); /// assert_eq!(multi(&b[..]),Ok((&b"azerty"[..], Vec::new()))); /// # } /// ``` /// 0 or more #[macro_export(local_inner_macros)] macro_rules! 
fold_many0( ($i:expr, $submac:ident!( $($args:tt)* ), $init:expr, $fold_f:expr) => ( fold_many0!($i, |i| $submac!(i, $($args)*), $init, $fold_f) ); ($i:expr, $f:expr, $init:expr, $fold_f:expr) => ( $crate::multi::fold_many0($f, $init, $fold_f)($i) ); ); /// `fold_many1!(I -> IResult<I,O>, R, Fn(R, O) -> R) => I -> IResult<I, R>` /// Applies the parser 1 or more times and folds the list of return values. /// /// The embedded parser may return `Incomplete`. /// /// ``` /// # #[macro_use] extern crate nom; /// # use nom::Err; /// # use nom::error::ErrorKind; /// # fn main() { /// named!(multi<&[u8], Vec<&[u8]> >, /// fold_many1!( tag!( "abcd" ), Vec::new(), |mut acc: Vec<_>, item| { /// acc.push(item); /// acc /// })); /// /// let a = b"abcdabcdefgh"; /// let b = b"azerty"; /// /// let res = vec![&b"abcd"[..], &b"abcd"[..]]; /// assert_eq!(multi(&a[..]),Ok((&b"efgh"[..], res))); /// assert_eq!(multi(&b[..]), Err(Err::Error(error_position!(&b[..], ErrorKind::Many1)))); /// # } /// ``` #[macro_export(local_inner_macros)] macro_rules! fold_many1( ($i:expr, $submac:ident!( $($args:tt)* ), $init:expr, $fold_f:expr) => ( fold_many1!($i, |i| $submac!(i, $($args)*), $init, $fold_f) ); ($i:expr, $f:expr, $init:expr, $fold_f:expr) => ( $crate::multi::fold_many1c($i, $f, $init, $fold_f) ); ($i:expr, $f:expr, $init:expr, $fold_f:expr) => ( fold_many1!($i, call!($f), $init, $fold_f); ); ); /// `fold_many_m_n!(usize, usize, I -> IResult<I,O>, R, Fn(R, O) -> R) => I -> IResult<I, R>` /// Applies the parser between m and n times (n included) and folds the list of return value. /// /// The embedded parser may return `Incomplete`. /// /// ``` /// # #[macro_use] extern crate nom; /// # use nom::Err; /// # use nom::error::ErrorKind; /// # fn main() { /// named!(multi<&[u8], Vec<&[u8]> >, /// fold_many_m_n!(2, 4, tag!( "abcd" ), Vec::new(), |mut acc: Vec<_>, item| { /// acc.push(item); /// acc /// })); /// /// let a = b"abcdefgh"; /// let b = b"abcdabcdefgh"; /// let c = b"abcdabcdabcdabcdabcdefgh"; /// /// assert_eq!(multi(&a[..]), Err(Err::Error(error_position!(&a[..], ErrorKind::ManyMN)))); /// let res = vec![&b"abcd"[..], &b"abcd"[..]]; /// assert_eq!(multi(&b[..]),Ok((&b"efgh"[..], res))); /// let res2 = vec![&b"abcd"[..], &b"abcd"[..], &b"abcd"[..], &b"abcd"[..]]; /// assert_eq!(multi(&c[..]),Ok((&b"abcdefgh"[..], res2))); /// # } /// ``` #[macro_export(local_inner_macros)] macro_rules! fold_many_m_n( ($i:expr, $m:expr, $n:expr, $submac:ident!( $($args:tt)* ), $init:expr, $fold_f:expr) => ( fold_many_m_n!($i, $m, $n, |i| $submac!(i, $($args)*), $init, $fold_f) ); ($i:expr, $m:expr, $n:expr, $f:expr, $init:expr, $fold_f:expr) => ( $crate::multi::fold_many_m_nc($i, $m, $n, $f, $init, $fold_f) ); ); #[cfg(test)] mod tests { use crate::character::streaming::digit1 as digit; use crate::error::ErrorKind; use crate::error::ParseError; use crate::internal::{Err, IResult, Needed}; use crate::lib::std::str::{self, FromStr}; #[cfg(feature = "alloc")] use crate::lib::std::vec::Vec; use crate::number::streaming::{be_u16, be_u8}; // reproduce the tag and take macros, because of module import order macro_rules! tag ( ($i:expr, $inp: expr) => ( { #[inline(always)] fn as_bytes<T: $crate::AsBytes>(b: &T) -> &[u8] { b.as_bytes() } let expected = $inp; let bytes = as_bytes(&expected); tag_bytes!($i,bytes) } ); ); macro_rules! 
tag_bytes ( ($i:expr, $bytes: expr) => ( { use $crate::lib::std::cmp::min; let len = $i.len(); let blen = $bytes.len(); let m = min(len, blen); let reduced = &$i[..m]; let b = &$bytes[..m]; let res: IResult<_,_,_> = if reduced != b { Err($crate::Err::Error($crate::error::make_error($i, $crate::error::ErrorKind::Tag))) } else if m < blen { Err($crate::Err::Incomplete(Needed::new(blen))) } else { Ok((&$i[blen..], reduced)) }; res } ); ); #[test] #[cfg(feature = "alloc")] fn separated_list0() { named!(multi<&[u8],Vec<&[u8]> >, separated_list0!(tag!(","), tag!("abcd"))); named!(multi_empty<&[u8],Vec<&[u8]> >, separated_list0!(tag!(","), tag!(""))); named!(multi_longsep<&[u8],Vec<&[u8]> >, separated_list0!(tag!(".."), tag!("abcd"))); let a = &b"abcdef"[..]; let b = &b"abcd,abcdef"[..]; let c = &b"azerty"[..]; let d = &b",,abc"[..]; let e = &b"abcd,abcd,ef"[..]; let f = &b"abc"[..]; let g = &b"abcd."[..]; let h = &b"abcd,abc"[..]; let res1 = vec![&b"abcd"[..]]; assert_eq!(multi(a), Ok((&b"ef"[..], res1))); let res2 = vec![&b"abcd"[..], &b"abcd"[..]]; assert_eq!(multi(b), Ok((&b"ef"[..], res2))); assert_eq!(multi(c), Ok((&b"azerty"[..], Vec::new()))); assert_eq!( multi_empty(d), Err(Err::Error(error_position!(d, ErrorKind::SeparatedList))) ); //let res3 = vec![&b""[..], &b""[..], &b""[..]]; //assert_eq!(multi_empty(d),Ok((&b"abc"[..], res3))); let res4 = vec![&b"abcd"[..], &b"abcd"[..]]; assert_eq!(multi(e), Ok((&b",ef"[..], res4))); assert_eq!(multi(f), Err(Err::Incomplete(Needed::new(4)))); assert_eq!(multi_longsep(g), Err(Err::Incomplete(Needed::new(2)))); assert_eq!(multi(h), Err(Err::Incomplete(Needed::new(4)))); } #[test] #[cfg(feature = "alloc")] fn separated_list1() { named!(multi<&[u8],Vec<&[u8]> >, separated_list1!(tag!(","), tag!("abcd"))); named!(multi_longsep<&[u8],Vec<&[u8]> >, separated_list1!(tag!(".."), tag!("abcd"))); let a = &b"abcdef"[..]; let b = &b"abcd,abcdef"[..]; let c = &b"azerty"[..]; let d = &b"abcd,abcd,ef"[..]; let f = &b"abc"[..]; let g = &b"abcd."[..]; let h = &b"abcd,abc"[..]; let res1 = vec![&b"abcd"[..]]; assert_eq!(multi(a), Ok((&b"ef"[..], res1))); let res2 = vec![&b"abcd"[..], &b"abcd"[..]]; assert_eq!(multi(b), Ok((&b"ef"[..], res2))); assert_eq!( multi(c), Err(Err::Error(error_position!(c, ErrorKind::Tag))) ); let res3 = vec![&b"abcd"[..], &b"abcd"[..]]; assert_eq!(multi(d), Ok((&b",ef"[..], res3))); assert_eq!(multi(f), Err(Err::Incomplete(Needed::new(4)))); assert_eq!(multi_longsep(g), Err(Err::Incomplete(Needed::new(2)))); assert_eq!(multi(h), Err(Err::Incomplete(Needed::new(4)))); } #[test] #[cfg(feature = "alloc")] fn many0() { named!(tag_abcd, tag!("abcd")); named!(tag_empty, tag!("")); named!( multi<&[u8],Vec<&[u8]> >, many0!(tag_abcd) ); named!( multi_empty<&[u8],Vec<&[u8]> >, many0!(tag_empty) ); assert_eq!(multi(&b"abcdef"[..]), Ok((&b"ef"[..], vec![&b"abcd"[..]]))); assert_eq!( multi(&b"abcdabcdefgh"[..]), Ok((&b"efgh"[..], vec![&b"abcd"[..], &b"abcd"[..]])) ); assert_eq!(multi(&b"azerty"[..]), Ok((&b"azerty"[..], Vec::new()))); assert_eq!(multi(&b"abcdab"[..]), Err(Err::Incomplete(Needed::new(4)))); assert_eq!(multi(&b"abcd"[..]), Err(Err::Incomplete(Needed::new(4)))); assert_eq!(multi(&b""[..]), Err(Err::Incomplete(Needed::new(4)))); assert_eq!( multi_empty(&b"abcdef"[..]), Err(Err::Error(error_position!( &b"abcdef"[..], ErrorKind::Many0 ))) ); } #[cfg(nightly)] use test::Bencher; #[cfg(nightly)] #[bench] fn many0_bench(b: &mut Bencher) { named!(multi<&[u8],Vec<&[u8]> >, many0!(tag!("abcd"))); b.iter(|| 
multi(&b"abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd"[..])); } #[test] #[cfg(feature = "alloc")] fn many1() { named!(multi<&[u8],Vec<&[u8]> >, many1!(tag!("abcd"))); let a = &b"abcdef"[..]; let b = &b"abcdabcdefgh"[..]; let c = &b"azerty"[..]; let d = &b"abcdab"[..]; let res1 = vec![&b"abcd"[..]]; assert_eq!(multi(a), Ok((&b"ef"[..], res1))); let res2 = vec![&b"abcd"[..], &b"abcd"[..]]; assert_eq!(multi(b), Ok((&b"efgh"[..], res2))); assert_eq!( multi(c), Err(Err::Error(error_position!(c, ErrorKind::Tag))) ); assert_eq!(multi(d), Err(Err::Incomplete(Needed::new(4)))); } #[test] #[cfg(feature = "alloc")] fn many_till() { named!(multi<&[u8], (Vec<&[u8]>, &[u8]) >, many_till!( tag!( "abcd" ), tag!( "efgh" ) ) ); let a = b"abcdabcdefghabcd"; let b = b"efghabcd"; let c = b"azerty"; let res_a = (vec![&b"abcd"[..], &b"abcd"[..]], &b"efgh"[..]); let res_b: (Vec<&[u8]>, &[u8]) = (Vec::new(), &b"efgh"[..]); assert_eq!(multi(&a[..]), Ok((&b"abcd"[..], res_a))); assert_eq!(multi(&b[..]), Ok((&b"abcd"[..], res_b))); assert_eq!( multi(&c[..]), Err(Err::Error(error_node_position!( &c[..], ErrorKind::ManyTill, error_position!(&c[..], ErrorKind::Tag) ))) ); } #[test] #[cfg(feature = "std")] fn infinite_many() { fn tst(input: &[u8]) -> IResult<&[u8], &[u8]> { println!("input: {:?}", input); Err(Err::Error(error_position!(input, ErrorKind::Tag))) } // should not go into an infinite loop named!(multi0<&[u8],Vec<&[u8]> >, many0!(tst)); let a = &b"abcdef"[..]; assert_eq!(multi0(a), Ok((a, Vec::new()))); named!(multi1<&[u8],Vec<&[u8]> >, many1!(tst)); let a = &b"abcdef"[..]; assert_eq!( multi1(a), Err(Err::Error(error_position!(a, ErrorKind::Tag))) ); } #[test] #[cfg(feature = "alloc")] fn many_m_n() { named!(multi<&[u8],Vec<&[u8]> >, many_m_n!(2, 4, tag!("Abcd"))); let a = &b"Abcdef"[..]; let b = &b"AbcdAbcdefgh"[..]; let c = &b"AbcdAbcdAbcdAbcdefgh"[..]; let d = &b"AbcdAbcdAbcdAbcdAbcdefgh"[..]; let e = &b"AbcdAb"[..]; assert_eq!( multi(a), Err(Err::Error(error_position!(&b"ef"[..], ErrorKind::Tag))) ); let res1 = vec![&b"Abcd"[..], &b"Abcd"[..]]; assert_eq!(multi(b), Ok((&b"efgh"[..], res1))); let res2 = vec![&b"Abcd"[..], &b"Abcd"[..], &b"Abcd"[..], &b"Abcd"[..]]; assert_eq!(multi(c), Ok((&b"efgh"[..], res2))); let res3 = vec![&b"Abcd"[..], &b"Abcd"[..], &b"Abcd"[..], &b"Abcd"[..]]; assert_eq!(multi(d), Ok((&b"Abcdefgh"[..], res3))); assert_eq!(multi(e), Err(Err::Incomplete(Needed::new(4)))); } #[test] #[cfg(feature = "alloc")] fn count() { const TIMES: usize = 2; named!(tag_abc, tag!("abc")); named!( cnt_2<&[u8], Vec<&[u8]> >, count!(tag_abc, TIMES ) ); assert_eq!( cnt_2(&b"abcabcabcdef"[..]), Ok((&b"abcdef"[..], vec![&b"abc"[..], &b"abc"[..]])) ); assert_eq!(cnt_2(&b"ab"[..]), Err(Err::Incomplete(Needed::new(3)))); assert_eq!(cnt_2(&b"abcab"[..]), Err(Err::Incomplete(Needed::new(3)))); assert_eq!( cnt_2(&b"xxx"[..]), Err(Err::Error(error_position!(&b"xxx"[..], ErrorKind::Tag))) ); assert_eq!( cnt_2(&b"xxxabcabcdef"[..]), Err(Err::Error(error_position!( &b"xxxabcabcdef"[..], ErrorKind::Tag ))) ); assert_eq!( cnt_2(&b"abcxxxabcdef"[..]), Err(Err::Error(error_position!( &b"xxxabcdef"[..], ErrorKind::Tag ))) ); } #[test] #[cfg(feature = "alloc")] fn count_zero() { const TIMES: usize = 0; named!(tag_abc, tag!("abc")); named!( counter_2<&[u8], Vec<&[u8]> >, count!(tag_abc, TIMES ) ); let done = &b"abcabcabcdef"[..]; let parsed_done = Vec::new(); let rest = done; let incomplete_1 = &b"ab"[..]; let parsed_incompl_1 = Vec::new(); let incomplete_2 = &b"abcab"[..]; let parsed_incompl_2 = Vec::new(); let 
error = &b"xxx"[..]; let error_remain = &b"xxx"[..]; let parsed_err = Vec::new(); let error_1 = &b"xxxabcabcdef"[..]; let parsed_err_1 = Vec::new(); let error_1_remain = &b"xxxabcabcdef"[..]; let error_2 = &b"abcxxxabcdef"[..]; let parsed_err_2 = Vec::new(); let error_2_remain = &b"abcxxxabcdef"[..]; assert_eq!(counter_2(done), Ok((rest, parsed_done))); assert_eq!( counter_2(incomplete_1), Ok((incomplete_1, parsed_incompl_1)) ); assert_eq!( counter_2(incomplete_2), Ok((incomplete_2, parsed_incompl_2)) ); assert_eq!(counter_2(error), Ok((error_remain, parsed_err))); assert_eq!(counter_2(error_1), Ok((error_1_remain, parsed_err_1))); assert_eq!(counter_2(error_2), Ok((error_2_remain, parsed_err_2))); } #[derive(Debug, Clone, PartialEq)] pub struct NilError; impl<I> From<(I, ErrorKind)> for NilError { fn from(_: (I, ErrorKind)) -> Self { NilError } } impl<I> ParseError<I> for NilError { fn from_error_kind(_: I, _: ErrorKind) -> NilError { NilError } fn append(_: I, _: ErrorKind, _: NilError) -> NilError { NilError } } named!(pub number<u32>, map_res!( map_res!( digit, str::from_utf8 ), FromStr::from_str )); #[test] #[cfg(feature = "alloc")] fn length_count() { named!(tag_abc, tag!(&b"abc"[..])); named!( cnt<&[u8], Vec<&[u8]> >, length_count!(number, tag_abc) ); assert_eq!( cnt(&b"2abcabcabcdef"[..]), Ok((&b"abcdef"[..], vec![&b"abc"[..], &b"abc"[..]])) ); assert_eq!(cnt(&b"2ab"[..]), Err(Err::Incomplete(Needed::new(3)))); assert_eq!(cnt(&b"3abcab"[..]), Err(Err::Incomplete(Needed::new(3)))); assert_eq!( cnt(&b"xxx"[..]), Err(Err::Error(error_position!(&b"xxx"[..], ErrorKind::Digit))) ); assert_eq!( cnt(&b"2abcxxx"[..]), Err(Err::Error(error_position!(&b"xxx"[..], ErrorKind::Tag))) ); } #[test] fn length_data() { named!( take<&[u8], &[u8]>, length_data!(number) ); assert_eq!( take(&b"6abcabcabcdef"[..]), Ok((&b"abcdef"[..], &b"abcabc"[..])) ); assert_eq!(take(&b"3ab"[..]), Err(Err::Incomplete(Needed::new(3)))); assert_eq!( take(&b"xxx"[..]), Err(Err::Error(error_position!(&b"xxx"[..], ErrorKind::Digit))) ); assert_eq!(take(&b"2abcxxx"[..]), Ok((&b"cxxx"[..], &b"ab"[..]))); } #[test] fn length_value_test() { named!(length_value_1<&[u8], u16 >, length_value!(be_u8, be_u16)); named!(length_value_2<&[u8], (u8, u8) >, length_value!(be_u8, tuple!(be_u8, be_u8))); let i1 = [0, 5, 6]; assert_eq!( length_value_1(&i1), Err(Err::Error(error_position!(&b""[..], ErrorKind::Complete))) ); assert_eq!( length_value_2(&i1), Err(Err::Error(error_position!(&b""[..], ErrorKind::Complete))) ); let i2 = [1, 5, 6, 3]; assert_eq!( length_value_1(&i2), Err(Err::Error(error_position!(&i2[1..2], ErrorKind::Complete))) ); assert_eq!( length_value_2(&i2), Err(Err::Error(error_position!(&i2[1..2], ErrorKind::Complete))) ); let i3 = [2, 5, 6, 3, 4, 5, 7]; assert_eq!(length_value_1(&i3), Ok((&i3[3..], 1286))); assert_eq!(length_value_2(&i3), Ok((&i3[3..], (5, 6)))); let i4 = [3, 5, 6, 3, 4, 5]; assert_eq!(length_value_1(&i4), Ok((&i4[4..], 1286))); assert_eq!(length_value_2(&i4), Ok((&i4[4..], (5, 6)))); } #[test] #[cfg(feature = "alloc")] fn fold_many0() { fn fold_into_vec<T>(mut acc: Vec<T>, item: T) -> Vec<T> { acc.push(item); acc }; named!(tag_abcd, tag!("abcd")); named!(tag_empty, tag!("")); named!( multi<&[u8],Vec<&[u8]> >, fold_many0!(tag_abcd, Vec::new(), fold_into_vec) ); named!( multi_empty<&[u8],Vec<&[u8]> >, fold_many0!(tag_empty, Vec::new(), fold_into_vec) ); assert_eq!(multi(&b"abcdef"[..]), Ok((&b"ef"[..], vec![&b"abcd"[..]]))); assert_eq!( multi(&b"abcdabcdefgh"[..]), Ok((&b"efgh"[..], vec![&b"abcd"[..], 
&b"abcd"[..]])) ); assert_eq!(multi(&b"azerty"[..]), Ok((&b"azerty"[..], Vec::new()))); assert_eq!(multi(&b"abcdab"[..]), Err(Err::Incomplete(Needed::new(4)))); assert_eq!(multi(&b"abcd"[..]), Err(Err::Incomplete(Needed::new(4)))); assert_eq!(multi(&b""[..]), Err(Err::Incomplete(Needed::new(4)))); assert_eq!( multi_empty(&b"abcdef"[..]), Err(Err::Error(error_position!( &b"abcdef"[..], ErrorKind::Many0 ))) ); } #[test] #[cfg(feature = "alloc")] fn fold_many1() { fn fold_into_vec<T>(mut acc: Vec<T>, item: T) -> Vec<T> { acc.push(item); acc }; named!(multi<&[u8],Vec<&[u8]> >, fold_many1!(tag!("abcd"), Vec::new(), fold_into_vec)); let a = &b"abcdef"[..]; let b = &b"abcdabcdefgh"[..]; let c = &b"azerty"[..]; let d = &b"abcdab"[..]; let res1 = vec![&b"abcd"[..]]; assert_eq!(multi(a), Ok((&b"ef"[..], res1))); let res2 = vec![&b"abcd"[..], &b"abcd"[..]]; assert_eq!(multi(b), Ok((&b"efgh"[..], res2))); assert_eq!( multi(c), Err(Err::Error(error_position!(c, ErrorKind::Many1))) ); assert_eq!(multi(d), Err(Err::Incomplete(Needed::new(4)))); } #[test] #[cfg(feature = "alloc")] fn fold_many_m_n() { fn fold_into_vec<T>(mut acc: Vec<T>, item: T) -> Vec<T> { acc.push(item); acc }; named!(multi<&[u8],Vec<&[u8]> >, fold_many_m_n!(2, 4, tag!("Abcd"), Vec::new(), fold_into_vec)); let a = &b"Abcdef"[..]; let b = &b"AbcdAbcdefgh"[..]; let c = &b"AbcdAbcdAbcdAbcdefgh"[..]; let d = &b"AbcdAbcdAbcdAbcdAbcdefgh"[..]; let e = &b"AbcdAb"[..]; assert_eq!( multi(a), Err(Err::Error(error_position!(a, ErrorKind::ManyMN))) ); let res1 = vec![&b"Abcd"[..], &b"Abcd"[..]]; assert_eq!(multi(b), Ok((&b"efgh"[..], res1))); let res2 = vec![&b"Abcd"[..], &b"Abcd"[..], &b"Abcd"[..], &b"Abcd"[..]]; assert_eq!(multi(c), Ok((&b"efgh"[..], res2))); let res3 = vec![&b"Abcd"[..], &b"Abcd"[..], &b"Abcd"[..], &b"Abcd"[..]]; assert_eq!(multi(d), Ok((&b"Abcdefgh"[..], res3))); assert_eq!(multi(e), Err(Err::Incomplete(Needed::new(4)))); } #[test] fn many0_count() { named!( count0_nums(&[u8]) -> usize, many0_count!(pair!(digit, tag!(","))) ); assert_eq!(count0_nums(&b"123,junk"[..]), Ok((&b"junk"[..], 1))); assert_eq!(count0_nums(&b"123,45,junk"[..]), Ok((&b"junk"[..], 2))); assert_eq!( count0_nums(&b"1,2,3,4,5,6,7,8,9,0,junk"[..]), Ok((&b"junk"[..], 10)) ); assert_eq!(count0_nums(&b"hello"[..]), Ok((&b"hello"[..], 0))); } #[test] fn many1_count() { named!( count1_nums(&[u8]) -> usize, many1_count!(pair!(digit, tag!(","))) ); assert_eq!(count1_nums(&b"123,45,junk"[..]), Ok((&b"junk"[..], 2))); assert_eq!( count1_nums(&b"1,2,3,4,5,6,7,8,9,0,junk"[..]), Ok((&b"junk"[..], 10)) ); assert_eq!( count1_nums(&b"hello"[..]), Err(Err::Error(error_position!( &b"hello"[..], ErrorKind::Many1Count ))) ); } }
31.096623
100
0.522127
4bf7671edbdceee47e2bd1786ddf71905176a67a
7,678
use crate::ics02_client::header::{AnyHeader, Header}; use crate::ics02_client::msgs::update_client::MsgUpdateAnyClient; use crate::ics02_client::msgs::ClientMsg; use crate::ics18_relayer::context::Ics18Context; use crate::ics18_relayer::error::Error; use crate::ics24_host::identifier::ClientId; /// Builds a `ClientMsg::UpdateClient` for a client with id `client_id` running on the `dest` /// context, assuming that the latest header on the source context is `src_header`. pub fn build_client_update_datagram<Ctx>( dest: &Ctx, client_id: &ClientId, src_header: AnyHeader, ) -> Result<ClientMsg, Error> where Ctx: Ics18Context, { // Check if client for ibc0 on ibc1 has been updated to latest height: // - query client state on destination chain let dest_client_state = dest .query_client_full_state(client_id) .ok_or_else(|| Error::client_state_not_found(client_id.clone()))?; let dest_client_latest_height = dest_client_state.latest_height(); if src_header.height() == dest_client_latest_height { return Err(Error::client_already_up_to_date( client_id.clone(), src_header.height(), dest_client_latest_height, )); }; if dest_client_latest_height > src_header.height() { return Err(Error::client_at_higher_height( client_id.clone(), src_header.height(), dest_client_latest_height, )); }; // Client on destination chain can be updated. Ok(ClientMsg::UpdateClient(MsgUpdateAnyClient { client_id: client_id.clone(), header: src_header, signer: dest.signer(), })) } #[cfg(test)] mod tests { use crate::ics02_client::client_type::ClientType; use crate::ics02_client::header::Header; use crate::ics18_relayer::context::Ics18Context; use crate::ics18_relayer::utils::build_client_update_datagram; use crate::ics24_host::identifier::{ChainId, ClientId}; use crate::ics26_routing::msgs::Ics26Envelope; use crate::mock::context::MockContext; use crate::mock::host::HostType; use crate::prelude::*; use crate::Height; use test_env_log::test; #[test] /// Serves to test both ICS 26 `dispatch` & `build_client_update_datagram` functions. /// Implements a "ping pong" of client update messages, so that two chains repeatedly /// process a client update message and update their height in succession. fn client_update_ping_pong() { let chain_a_start_height = Height::new(1, 11); let chain_b_start_height = Height::new(1, 20); let client_on_b_for_a_height = Height::new(1, 10); // Should be smaller than `chain_a_start_height` let client_on_a_for_b_height = Height::new(1, 20); // Should be smaller than `chain_b_start_height` let num_iterations = 4; let client_on_a_for_b = ClientId::new(ClientType::Tendermint, 0).unwrap(); let client_on_b_for_a = ClientId::new(ClientType::Mock, 0).unwrap(); // Create two mock contexts, one for each chain. let mut ctx_a = MockContext::new( ChainId::new("mockgaiaA".to_string(), 1), HostType::Mock, 5, chain_a_start_height, ) .with_client_parametrized( &client_on_a_for_b, client_on_a_for_b_height, Some(ClientType::Tendermint), // The target host chain (B) is synthetic TM. Some(client_on_a_for_b_height), ); let mut ctx_b = MockContext::new( ChainId::new("mockgaiaB".to_string(), 1), HostType::SyntheticTendermint, 5, chain_b_start_height, ) .with_client_parametrized( &client_on_b_for_a, client_on_b_for_a_height, Some(ClientType::Mock), // The target host chain is mock. Some(client_on_b_for_a_height), ); for _i in 0..num_iterations { // Update client on chain B to latest height of A. 
// - create the client update message with the latest header from A let a_latest_header = ctx_a.query_latest_header().unwrap(); assert_eq!( a_latest_header.client_type(), ClientType::Mock, "Client type verification in header failed for context A (Mock); got {:?} but expected {:?}", a_latest_header.client_type(), ClientType::Mock ); let client_msg_b_res = build_client_update_datagram(&ctx_b, &client_on_b_for_a, a_latest_header); assert!( client_msg_b_res.is_ok(), "create_client_update failed for context destination {:?}, error: {:?}", ctx_b, client_msg_b_res ); let client_msg_b = client_msg_b_res.unwrap(); // - send the message to B. We bypass ICS18 interface and call directly into // MockContext `recv` method (to avoid additional serialization steps). let dispatch_res_b = ctx_b.deliver(Ics26Envelope::Ics2Msg(client_msg_b)); let validation_res = ctx_b.validate(); assert!( validation_res.is_ok(), "context validation failed with error {:?} for context {:?}", validation_res, ctx_b ); // Check if the update succeeded. assert!( dispatch_res_b.is_ok(), "Dispatch failed for host chain b with error: {:?}", dispatch_res_b ); let client_height_b = ctx_b .query_client_full_state(&client_on_b_for_a) .unwrap() .latest_height(); assert_eq!(client_height_b, ctx_a.query_latest_height()); // Update client on chain B to latest height of B. // - create the client update message with the latest header from B let b_latest_header = ctx_b.query_latest_header().unwrap(); assert_eq!( b_latest_header.client_type(), ClientType::Tendermint, "Client type verification in header failed for context B (TM); got {:?} but expected {:?}", b_latest_header.client_type(), ClientType::Tendermint ); let client_msg_a_res = build_client_update_datagram(&ctx_a, &client_on_a_for_b, b_latest_header); assert!( client_msg_a_res.is_ok(), "create_client_update failed for context destination {:?}, error: {:?}", ctx_a, client_msg_a_res ); let client_msg_a = client_msg_a_res.unwrap(); // - send the message to A let dispatch_res_a = ctx_a.deliver(Ics26Envelope::Ics2Msg(client_msg_a)); let validation_res = ctx_a.validate(); assert!( validation_res.is_ok(), "context validation failed with error {:?} for context {:?}", validation_res, ctx_a ); // Check if the update succeeded. assert!( dispatch_res_a.is_ok(), "Dispatch failed for host chain a with error: {:?}", dispatch_res_a ); let client_height_a = ctx_a .query_client_full_state(&client_on_a_for_b) .unwrap() .latest_height(); assert_eq!(client_height_a, ctx_b.query_latest_height()); } } }
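For readers who want the helper in isolation, here is a hedged single-iteration sketch distilled from the ping-pong test above; ctx_a, ctx_b, and client_on_b_for_a are assumed to be set up exactly as in client_update_ping_pong, and the expect messages are illustrative.

// One leg of the ping-pong: update B's client for A to A's latest height.
let header = ctx_a.query_latest_header().expect("source context has a latest header");
let msg = build_client_update_datagram(&ctx_b, &client_on_b_for_a, header)
    .expect("destination client lags behind the source header");
ctx_b
    .deliver(Ics26Envelope::Ics2Msg(msg))
    .expect("ICS02 update message dispatches cleanly");
// The destination client should now track the source chain's latest height.
assert_eq!(
    ctx_b
        .query_client_full_state(&client_on_b_for_a)
        .unwrap()
        .latest_height(),
    ctx_a.query_latest_height()
);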
38.777778
109
0.598333
fbc9187adddaa399cf79449cae792ff9d7c7937d
4,039
use crate::mnt6_753::{Fq, Fq3, Fq3Parameters, FQ_ONE, FQ_ZERO}; use algebra_core::{ biginteger::BigInteger768 as BigInteger, field_new, fields::fp6_2over3::{Fp6, Fp6Parameters}, }; pub type Fq6 = Fp6<Fq6Parameters>; pub struct Fq6Parameters; impl Fp6Parameters for Fq6Parameters { type Fp3Params = Fq3Parameters; #[rustfmt::skip] const NONRESIDUE: Fq3 = field_new!(Fq3, FQ_ZERO, FQ_ONE, FQ_ZERO); // Coefficients for the Frobenius automorphism. // c1[0] = 1, // c1[1] = 24129022407817241407134263419936114379815707076943508280977368156625538709102831814843582780138963119807143081677569721953561801075623741378629346409604471234573396989178424163772589090105392407118197799904755622897541183052133 // c1[2] = 24129022407817241407134263419936114379815707076943508280977368156625538709102831814843582780138963119807143081677569721953561801075623741378629346409604471234573396989178424163772589090105392407118197799904755622897541183052132 // c1[3] = 41898490967918953402344214791240637128170709919953949071783502921025352812571106773058893763790338921418070971888458477323173057491593855069696241854796396165721416325350064441470418137846398469611935719059908164220784476160000 // c1[4] = 17769468560101711995209951371304522748355002843010440790806134764399814103468274958215310983651375801610927890210888755369611256415970113691066895445191924931148019336171640277697829047741006062493737919155152541323243293107868 // c1[5] = 17769468560101711995209951371304522748355002843010440790806134764399814103468274958215310983651375801610927890210888755369611256415970113691066895445191924931148019336171640277697829047741006062493737919155152541323243293107869 #[rustfmt::skip] const FROBENIUS_COEFF_FP6_C1: &'static [Fq] = &[ FQ_ONE, field_new!(Fq, BigInteger([ 2665418275744511426, 7073776242814464967, 4441331072847607829, 5681016258918493042, 18254896527151449163, 10681724016023285331, 1760041123371930134, 4557299868084578750, 16702481779049799698, 14149724469588165150, 5617650120443517591, 449252806040736, ])), field_new!(Fq, BigInteger([ 7739145380395648640, 1403348385939055902, 11220424057264707228, 4567962295300549271, 5929583493640677751, 17618207486530478833, 16600462137977359741, 16551719371247820635, 12057922785354578416, 13022559182829558162, 13308285686168533250, 313705269181021, ])), field_new!(Fq, BigInteger([ 2265581976117350591, 18442012872391748519, 3807704300793525789, 12280644139289115082, 10655371227771325282, 1346491763263331896, 7477357615964975877, 12570239403004322603, 2180620924574446161, 12129628062772479841, 8853285699251153944, 362282887012814, ])), field_new!(Fq, BigInteger([ 12973180669431253567, 17038664486452692616, 11034024317238370177, 7712681843988565810, 4725787734130647531, 2175028350442404679, 9323639551697167751, 14465264105466053583, 8569442212929419360, 17553812953652473294, 13991744086792172309, 48577617831792, ])), field_new!(Fq, BigInteger([ 7899453564780116353, 4262348269618550065, 4254931332821270779, 8825735807606509581, 17051100767641418943, 13685288953644762793, 12929962610801289759, 2470844602302811697, 13214001206624640642, 234234166701528666, 6301108521067156651, 184125154691507, ])), ]; }
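The file above is all constants, so a worked check helps show how they hang together. This is a hypothetical sanity test, not part of the source: the table carries one coefficient per power of the Frobenius map on the Fp6 = (Fp3)^2 tower, and the zeroth power is the identity, so its coefficient must be one.

#[cfg(test)]
mod frobenius_sketch {
    use super::*;

    #[test]
    fn coeff_table_shape() {
        // Six coefficients for the six Frobenius powers of Fq6.
        let table = <Fq6Parameters as Fp6Parameters>::FROBENIUS_COEFF_FP6_C1;
        assert_eq!(table.len(), 6);
        // The identity map multiplies by one.
        assert_eq!(table[0], FQ_ONE);
    }
}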
40.39
242
0.693241
ebfc8871d87395245dbd7bdbee40838cb1a88db3
58,810
// Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. //! ExecutionContext contains methods for registering data sources and executing queries use crate::optimizer::hash_build_probe_order::HashBuildProbeOrder; use log::debug; use std::fs; use std::path::Path; use std::string::String; use std::sync::Arc; use std::{ collections::{HashMap, HashSet}, sync::Mutex, }; use futures::{StreamExt, TryStreamExt}; use tokio::task::{self, JoinHandle}; use arrow::csv; use crate::datasource::csv::CsvFile; use crate::datasource::parquet::ParquetTable; use crate::datasource::TableProvider; use crate::error::{DataFusionError, Result}; use crate::execution::dataframe_impl::DataFrameImpl; use crate::logical_plan::{ FunctionRegistry, LogicalPlan, LogicalPlanBuilder, ToDFSchema, }; use crate::optimizer::filter_push_down::FilterPushDown; use crate::optimizer::optimizer::OptimizerRule; use crate::optimizer::projection_push_down::ProjectionPushDown; use crate::physical_plan::csv::CsvReadOptions; use crate::physical_plan::planner::DefaultPhysicalPlanner; use crate::physical_plan::udf::ScalarUDF; use crate::physical_plan::ExecutionPlan; use crate::physical_plan::PhysicalPlanner; use crate::sql::{ parser::{DFParser, FileType}, planner::{ContextProvider, SqlToRel}, }; use crate::variable::{VarProvider, VarType}; use crate::{dataframe::DataFrame, physical_plan::udaf::AggregateUDF}; use parquet::arrow::ArrowWriter; use parquet::file::properties::WriterProperties; /// ExecutionContext is the main interface for executing queries with DataFusion. The context /// provides the following functionality: /// /// * Create DataFrame from a CSV or Parquet data source. /// * Register a CSV or Parquet data source as a table that can be referenced from a SQL query. /// * Register a custom data source that can be referenced from a SQL query. /// * Execution a SQL query /// /// The following example demonstrates how to use the context to execute a query against a CSV /// data source using the DataFrame API: /// /// ``` /// use datafusion::prelude::*; /// # use datafusion::error::Result; /// # fn main() -> Result<()> { /// let mut ctx = ExecutionContext::new(); /// let df = ctx.read_csv("tests/example.csv", CsvReadOptions::new())?; /// let df = df.filter(col("a").lt_eq(col("b")))? /// .aggregate(vec![col("a")], vec![min(col("b"))])? 
/// .limit(100)?; /// let results = df.collect(); /// # Ok(()) /// # } /// ``` /// /// The following example demonstrates how to execute the same query using SQL: /// /// ``` /// use datafusion::prelude::*; /// /// # use datafusion::error::Result; /// # fn main() -> Result<()> { /// let mut ctx = ExecutionContext::new(); /// ctx.register_csv("example", "tests/example.csv", CsvReadOptions::new())?; /// let results = ctx.sql("SELECT a, MIN(b) FROM example GROUP BY a LIMIT 100")?; /// # Ok(()) /// # } /// ``` pub struct ExecutionContext { /// Internal state for the context pub state: Arc<Mutex<ExecutionContextState>>, } impl ExecutionContext { /// Create a new execution context using a default configuration. pub fn new() -> Self { Self::with_config(ExecutionConfig::new()) } /// Create a new execution context using the provided configuration pub fn with_config(config: ExecutionConfig) -> Self { Self { state: Arc::new(Mutex::new(ExecutionContextState { datasources: HashMap::new(), scalar_functions: HashMap::new(), var_provider: HashMap::new(), aggregate_functions: HashMap::new(), config, })), } } /// of RecordBatch instances) pub fn sql(&mut self, sql: &str) -> Result<Arc<dyn DataFrame>> { let plan = self.create_logical_plan(sql)?; match plan { LogicalPlan::CreateExternalTable { ref schema, ref name, ref location, ref file_type, ref has_header, } => match file_type { FileType::CSV => { self.register_csv( name, location, CsvReadOptions::new() .schema(&schema.as_ref().to_owned().into()) .has_header(*has_header), )?; let plan = LogicalPlanBuilder::empty(false).build()?; Ok(Arc::new(DataFrameImpl::new(self.state.clone(), &plan))) } FileType::Parquet => { self.register_parquet(name, location)?; let plan = LogicalPlanBuilder::empty(false).build()?; Ok(Arc::new(DataFrameImpl::new(self.state.clone(), &plan))) } _ => Err(DataFusionError::NotImplemented(format!( "Unsupported file type {:?}.", file_type ))), }, plan => Ok(Arc::new(DataFrameImpl::new(self.state.clone(), &plan))), } } /// Creates a logical plan. This function is intended for internal use and should not be /// called directly. pub fn create_logical_plan(&self, sql: &str) -> Result<LogicalPlan> { let statements = DFParser::parse_sql(sql)?; if statements.len() != 1 { return Err(DataFusionError::NotImplemented( "The context currently only supports a single SQL statement".to_string(), )); } // create a query planner let state = self.state.lock().unwrap().clone(); let query_planner = SqlToRel::new(&state); Ok(query_planner.statement_to_plan(&statements[0])?) } /// Register variable pub fn register_variable( &mut self, variable_type: VarType, provider: Arc<dyn VarProvider + Send + Sync>, ) { self.state .lock() .unwrap() .var_provider .insert(variable_type, provider); } /// Register a scalar UDF pub fn register_udf(&mut self, f: ScalarUDF) { self.state .lock() .unwrap() .scalar_functions .insert(f.name.clone(), Arc::new(f)); } /// Register a aggregate UDF pub fn register_udaf(&mut self, f: AggregateUDF) { self.state .lock() .unwrap() .aggregate_functions .insert(f.name.clone(), Arc::new(f)); } /// Creates a DataFrame for reading a CSV data source. pub fn read_csv( &mut self, filename: &str, options: CsvReadOptions, ) -> Result<Arc<dyn DataFrame>> { Ok(Arc::new(DataFrameImpl::new( self.state.clone(), &LogicalPlanBuilder::scan_csv(&filename, options, None)?.build()?, ))) } /// Creates a DataFrame for reading a Parquet data source. 
pub fn read_parquet(&mut self, filename: &str) -> Result<Arc<dyn DataFrame>> { Ok(Arc::new(DataFrameImpl::new( self.state.clone(), &LogicalPlanBuilder::scan_parquet( &filename, None, self.state.lock().unwrap().config.concurrency, )? .build()?, ))) } /// Creates a DataFrame for reading a custom TableProvider pub fn read_table( &mut self, provider: Arc<dyn TableProvider + Send + Sync>, ) -> Result<Arc<dyn DataFrame>> { let schema = provider.schema(); let table_scan = LogicalPlan::TableScan { table_name: "".to_string(), source: provider, projected_schema: schema.to_dfschema_ref()?, projection: None, filters: vec![], }; Ok(Arc::new(DataFrameImpl::new( self.state.clone(), &LogicalPlanBuilder::from(&table_scan).build()?, ))) } /// Register a CSV data source so that it can be referenced from SQL statements /// executed against this context. pub fn register_csv( &mut self, name: &str, filename: &str, options: CsvReadOptions, ) -> Result<()> { self.register_table(name, Box::new(CsvFile::try_new(filename, options)?)); Ok(()) } /// Register a Parquet data source so that it can be referenced from SQL statements /// executed against this context. pub fn register_parquet(&mut self, name: &str, filename: &str) -> Result<()> { let table = ParquetTable::try_new( &filename, self.state.lock().unwrap().config.concurrency, )?; self.register_table(name, Box::new(table)); Ok(()) } /// Register a table using a custom TableProvider so that it can be referenced from SQL /// statements executed against this context. pub fn register_table( &mut self, name: &str, provider: Box<dyn TableProvider + Send + Sync>, ) { self.state .lock() .unwrap() .datasources .insert(name.to_string(), provider.into()); } /// Retrieves a DataFrame representing a table previously registered by calling the /// register_table function. An Err result will be returned if no table has been /// registered with the provided name. pub fn table(&mut self, table_name: &str) -> Result<Arc<dyn DataFrame>> { match self.state.lock().unwrap().datasources.get(table_name) { Some(provider) => { let schema = provider.schema(); let table_scan = LogicalPlan::TableScan { table_name: table_name.to_string(), source: Arc::clone(provider), projected_schema: schema.to_dfschema_ref()?, projection: None, filters: vec![], }; Ok(Arc::new(DataFrameImpl::new( self.state.clone(), &LogicalPlanBuilder::from(&table_scan).build()?, ))) } _ => Err(DataFusionError::Plan(format!( "No table named '{}'", table_name ))), } } /// The set of available tables. Use `table` to get a specific table. 
pub fn tables(&self) -> HashSet<String> { self.state .lock() .unwrap() .datasources .keys() .cloned() .collect() } /// Optimize the logical plan by applying optimizer rules pub fn optimize(&self, plan: &LogicalPlan) -> Result<LogicalPlan> { // Apply standard rewrites and optimizations debug!("Logical plan:\n {:?}", plan); let mut plan = ProjectionPushDown::new().optimize(&plan)?; plan = FilterPushDown::new().optimize(&plan)?; plan = HashBuildProbeOrder::new().optimize(&plan)?; debug!("Optimized logical plan:\n {:?}", plan); self.state .lock() .unwrap() .config .query_planner .rewrite_logical_plan(plan) } /// Create a physical plan from a logical plan pub fn create_physical_plan( &self, logical_plan: &LogicalPlan, ) -> Result<Arc<dyn ExecutionPlan>> { let state = self.state.lock().unwrap(); state .config .query_planner .create_physical_plan(logical_plan, &state) } /// Execute a query and write the results to a partitioned CSV file pub async fn write_csv( &self, plan: Arc<dyn ExecutionPlan>, path: String, ) -> Result<()> { // create directory to contain the CSV files (one per partition) let fs_path = Path::new(&path); match fs::create_dir(fs_path) { Ok(()) => { let mut tasks = vec![]; for i in 0..plan.output_partitioning().partition_count() { let plan = plan.clone(); let filename = format!("part-{}.csv", i); let path = fs_path.join(&filename); let file = fs::File::create(path)?; let mut writer = csv::Writer::new(file); let stream = plan.execute(i).await?; let handle: JoinHandle<Result<()>> = task::spawn(async move { stream .map(|batch| writer.write(&batch?)) .try_collect() .await .map_err(DataFusionError::from) }); tasks.push(handle); } futures::future::join_all(tasks).await; Ok(()) } Err(e) => Err(DataFusionError::Execution(format!( "Could not create directory {}: {:?}", path, e ))), } } /// Execute a query and write the results to a partitioned Parquet file pub async fn write_parquet( &self, plan: Arc<dyn ExecutionPlan>, path: String, writer_properties: Option<WriterProperties>, ) -> Result<()> { // create directory to contain the Parquet files (one per partition) let fs_path = Path::new(&path); match fs::create_dir(fs_path) { Ok(()) => { let mut tasks = vec![]; for i in 0..plan.output_partitioning().partition_count() { let plan = plan.clone(); let filename = format!("part-{}.parquet", i); let path = fs_path.join(&filename); let file = fs::File::create(path)?; let mut writer = ArrowWriter::try_new( file.try_clone().unwrap(), plan.schema(), writer_properties.clone(), )?; let stream = plan.execute(i).await?; let handle: JoinHandle<Result<()>> = task::spawn(async move { stream .map(|batch| writer.write(&batch?)) .try_collect() .await .map_err(DataFusionError::from)?; writer.close().map_err(DataFusionError::from) }); tasks.push(handle); } futures::future::join_all(tasks).await; Ok(()) } Err(e) => Err(DataFusionError::Execution(format!( "Could not create directory {}: {:?}", path, e ))), } } } impl From<Arc<Mutex<ExecutionContextState>>> for ExecutionContext { fn from(state: Arc<Mutex<ExecutionContextState>>) -> Self { ExecutionContext { state } } } impl FunctionRegistry for ExecutionContext { fn udfs(&self) -> HashSet<String> { self.state.lock().unwrap().udfs() } fn udf(&self, name: &str) -> Result<Arc<ScalarUDF>> { self.state.lock().unwrap().udf(name) } fn udaf(&self, name: &str) -> Result<Arc<AggregateUDF>> { self.state.lock().unwrap().udaf(name) } } /// A planner used to add extensions to DataFusion logical and physical plans. 
pub trait QueryPlanner { /// Given a `LogicalPlan`, create a new, modified `LogicalPlan` /// plan. This method is run after built in `OptimizerRule`s. By /// default returns the `plan` unmodified. fn rewrite_logical_plan(&self, plan: LogicalPlan) -> Result<LogicalPlan> { Ok(plan) } /// Given a `LogicalPlan`, create an `ExecutionPlan` suitable for execution fn create_physical_plan( &self, logical_plan: &LogicalPlan, ctx_state: &ExecutionContextState, ) -> Result<Arc<dyn ExecutionPlan>>; } /// The query planner used if no user defined planner is provided struct DefaultQueryPlanner {} impl QueryPlanner for DefaultQueryPlanner { /// Given a `LogicalPlan`, create an `ExecutionPlan` suitable for execution fn create_physical_plan( &self, logical_plan: &LogicalPlan, ctx_state: &ExecutionContextState, ) -> Result<Arc<dyn ExecutionPlan>> { let planner = DefaultPhysicalPlanner::default(); planner.create_physical_plan(logical_plan, ctx_state) } } /// Configuration options for execution context #[derive(Clone)] pub struct ExecutionConfig { /// Number of concurrent threads for query execution. pub concurrency: usize, /// Default batch size when reading data sources pub batch_size: usize, /// Responsible for planning `LogicalPlan`s, and `ExecutionPlan` query_planner: Arc<dyn QueryPlanner + Send + Sync>, } impl ExecutionConfig { /// Create an execution config with default setting pub fn new() -> Self { Self { concurrency: num_cpus::get(), batch_size: 32768, query_planner: Arc::new(DefaultQueryPlanner {}), } } /// Customize max_concurrency pub fn with_concurrency(mut self, n: usize) -> Self { // concurrency must be greater than zero assert!(n > 0); self.concurrency = n; self } /// Customize batch size pub fn with_batch_size(mut self, n: usize) -> Self { // batch size must be greater than zero assert!(n > 0); self.batch_size = n; self } /// Replace the default query planner pub fn with_query_planner( mut self, query_planner: Arc<dyn QueryPlanner + Send + Sync>, ) -> Self { self.query_planner = query_planner; self } } /// Execution context for registering data sources and executing queries #[derive(Clone)] pub struct ExecutionContextState { /// Data sources that are registered with the context pub datasources: HashMap<String, Arc<dyn TableProvider + Send + Sync>>, /// Scalar functions that are registered with the context pub scalar_functions: HashMap<String, Arc<ScalarUDF>>, /// Variable provider that are registered with the context pub var_provider: HashMap<VarType, Arc<dyn VarProvider + Send + Sync>>, /// Aggregate functions registered in the context pub aggregate_functions: HashMap<String, Arc<AggregateUDF>>, /// Context configuration pub config: ExecutionConfig, } impl ContextProvider for ExecutionContextState { fn get_table_provider( &self, name: &str, ) -> Option<Arc<dyn TableProvider + Send + Sync>> { self.datasources.get(name).map(|ds| Arc::clone(ds)) } fn get_function_meta(&self, name: &str) -> Option<Arc<ScalarUDF>> { self.scalar_functions.get(name).cloned() } fn get_aggregate_meta(&self, name: &str) -> Option<Arc<AggregateUDF>> { self.aggregate_functions.get(name).cloned() } } impl FunctionRegistry for ExecutionContextState { fn udfs(&self) -> HashSet<String> { self.scalar_functions.keys().cloned().collect() } fn udf(&self, name: &str) -> Result<Arc<ScalarUDF>> { let result = self.scalar_functions.get(name); result.cloned().ok_or_else(|| { DataFusionError::Plan(format!( "There is no UDF named \"{}\" in the registry", name )) }) } fn udaf(&self, name: &str) -> Result<Arc<AggregateUDF>> { let result 
= self.aggregate_functions.get(name); result.cloned().ok_or_else(|| { DataFusionError::Plan(format!( "There is no UDAF named \"{}\" in the registry", name )) }) } } #[cfg(test)] mod tests { use super::*; use crate::logical_plan::{col, create_udf, sum}; use crate::physical_plan::functions::ScalarFunctionImplementation; use crate::physical_plan::{collect, collect_partitioned}; use crate::test; use crate::variable::VarType; use crate::{ datasource::MemTable, logical_plan::create_udaf, physical_plan::expressions::AvgAccumulator, }; use arrow::array::{ArrayRef, Float64Array, Int32Array, StringArray}; use arrow::compute::add; use arrow::datatypes::*; use arrow::record_batch::RecordBatch; use std::fs::File; use std::thread::{self, JoinHandle}; use std::{io::prelude::*, sync::Mutex}; use tempfile::TempDir; use test::*; #[tokio::test] async fn parallel_projection() -> Result<()> { let partition_count = 4; let results = execute("SELECT c1, c2 FROM test", partition_count).await?; // there should be one batch per partition assert_eq!(results.len(), partition_count); // each batch should contain 2 columns and 10 rows with correct field names for batch in &results { assert_eq!(batch.num_columns(), 2); assert_eq!(batch.num_rows(), 10); assert_eq!(field_names(batch), vec!["c1", "c2"]); } Ok(()) } #[tokio::test] async fn create_variable_expr() -> Result<()> { let tmp_dir = TempDir::new()?; let partition_count = 4; let mut ctx = create_ctx(&tmp_dir, partition_count)?; let variable_provider = test::variable::SystemVar::new(); ctx.register_variable(VarType::System, Arc::new(variable_provider)); let variable_provider = test::variable::UserDefinedVar::new(); ctx.register_variable(VarType::UserDefined, Arc::new(variable_provider)); let provider = test::create_table_dual(); ctx.register_table("dual", provider); let results = plan_and_collect(&mut ctx, "SELECT @@version, @name FROM dual").await?; let batch = &results[0]; assert_eq!(2, batch.num_columns()); assert_eq!(1, batch.num_rows()); assert_eq!(field_names(batch), vec!["@@version", "@name"]); let version = batch .column(0) .as_any() .downcast_ref::<StringArray>() .expect("failed to cast version"); assert_eq!(version.value(0), "system-var-@@version"); let name = batch .column(1) .as_any() .downcast_ref::<StringArray>() .expect("failed to cast name"); assert_eq!(name.value(0), "user-defined-var-@name"); Ok(()) } #[tokio::test] async fn parallel_query_with_filter() -> Result<()> { let tmp_dir = TempDir::new()?; let partition_count = 4; let ctx = create_ctx(&tmp_dir, partition_count)?; let logical_plan = ctx.create_logical_plan("SELECT c1, c2 FROM test WHERE c1 > 0 AND c1 < 3")?; let logical_plan = ctx.optimize(&logical_plan)?; let physical_plan = ctx.create_physical_plan(&logical_plan)?; println!("{:?}", physical_plan); let results = collect_partitioned(physical_plan).await?; assert_eq!(results.len(), partition_count); // there should be a total of 2 batches with 20 rows because the where clause filters // out results from 2 partitions // note that the order of partitions is not deterministic let mut num_batches = 0; let mut num_rows = 0; for partition in &results { for batch in partition { num_batches += 1; num_rows += batch.num_rows(); } } assert_eq!(2, num_batches); assert_eq!(20, num_rows); Ok(()) } #[tokio::test] async fn projection_on_table_scan() -> Result<()> { let tmp_dir = TempDir::new()?; let partition_count = 4; let mut ctx = create_ctx(&tmp_dir, partition_count)?; let table = ctx.table("test")?; let logical_plan = 
LogicalPlanBuilder::from(&table.to_logical_plan()) .project(vec![col("c2")])? .build()?; let optimized_plan = ctx.optimize(&logical_plan)?; match &optimized_plan { LogicalPlan::Projection { input, .. } => match &**input { LogicalPlan::TableScan { source, projected_schema, .. } => { assert_eq!(source.schema().fields().len(), 2); assert_eq!(projected_schema.fields().len(), 1); } _ => panic!("input to projection should be TableScan"), }, _ => panic!("expect optimized_plan to be projection"), } let expected = "Projection: #c2\ \n TableScan: test projection=Some([1])"; assert_eq!(format!("{:?}", optimized_plan), expected); let physical_plan = ctx.create_physical_plan(&optimized_plan)?; assert_eq!(1, physical_plan.schema().fields().len()); assert_eq!("c2", physical_plan.schema().field(0).name().as_str()); let batches = collect(physical_plan).await?; assert_eq!(4, batches.len()); assert_eq!(1, batches[0].num_columns()); assert_eq!(10, batches[0].num_rows()); Ok(()) } #[test] fn preserve_nullability_on_projection() -> Result<()> { let tmp_dir = TempDir::new()?; let ctx = create_ctx(&tmp_dir, 1)?; let schema = ctx .state .lock() .unwrap() .datasources .get("test") .unwrap() .schema(); assert_eq!(schema.field_with_name("c1")?.is_nullable(), false); let plan = LogicalPlanBuilder::scan_empty("", schema.as_ref(), None)? .project(vec![col("c1")])? .build()?; let plan = ctx.optimize(&plan)?; let physical_plan = ctx.create_physical_plan(&Arc::new(plan))?; assert_eq!( physical_plan.schema().field_with_name("c1")?.is_nullable(), false ); Ok(()) } #[tokio::test] async fn projection_on_memory_scan() -> Result<()> { let schema = Schema::new(vec![ Field::new("a", DataType::Int32, false), Field::new("b", DataType::Int32, false), Field::new("c", DataType::Int32, false), ]); let schema = SchemaRef::new(schema); let partitions = vec![vec![RecordBatch::try_new( schema.clone(), vec![ Arc::new(Int32Array::from(vec![1, 10, 10, 100])), Arc::new(Int32Array::from(vec![2, 12, 12, 120])), Arc::new(Int32Array::from(vec![3, 12, 12, 120])), ], )?]]; let plan = LogicalPlanBuilder::scan_memory(partitions, schema, None)? .project(vec![col("b")])? .build()?; assert_fields_eq(&plan, vec!["b"]); let ctx = ExecutionContext::new(); let optimized_plan = ctx.optimize(&plan)?; match &optimized_plan { LogicalPlan::Projection { input, .. } => match &**input { LogicalPlan::TableScan { source, projected_schema, .. 
} => { assert_eq!(source.schema().fields().len(), 3); assert_eq!(projected_schema.fields().len(), 1); } _ => panic!("input to projection should be InMemoryScan"), }, _ => panic!("expect optimized_plan to be projection"), } let expected = "Projection: #b\ \n TableScan: projection=Some([1])"; assert_eq!(format!("{:?}", optimized_plan), expected); let physical_plan = ctx.create_physical_plan(&optimized_plan)?; assert_eq!(1, physical_plan.schema().fields().len()); assert_eq!("b", physical_plan.schema().field(0).name().as_str()); let batches = collect(physical_plan).await?; assert_eq!(1, batches.len()); assert_eq!(1, batches[0].num_columns()); assert_eq!(4, batches[0].num_rows()); Ok(()) } #[tokio::test] async fn sort() -> Result<()> { let results = execute("SELECT c1, c2 FROM test ORDER BY c1 DESC, c2 ASC", 4).await?; assert_eq!(results.len(), 1); let batch = &results[0]; let expected: Vec<&str> = vec![ "3,1", "3,2", "3,3", "3,4", "3,5", "3,6", "3,7", "3,8", "3,9", "3,10", "2,1", "2,2", "2,3", "2,4", "2,5", "2,6", "2,7", "2,8", "2,9", "2,10", "1,1", "1,2", "1,3", "1,4", "1,5", "1,6", "1,7", "1,8", "1,9", "1,10", "0,1", "0,2", "0,3", "0,4", "0,5", "0,6", "0,7", "0,8", "0,9", "0,10", ]; assert_eq!(test::format_batch(batch), expected); Ok(()) } #[tokio::test] async fn aggregate() -> Result<()> { let results = execute("SELECT SUM(c1), SUM(c2) FROM test", 4).await?; assert_eq!(results.len(), 1); let batch = &results[0]; assert_eq!(field_names(batch), vec!["SUM(c1)", "SUM(c2)"]); let expected: Vec<&str> = vec!["60,220"]; let mut rows = test::format_batch(&batch); rows.sort(); assert_eq!(rows, expected); Ok(()) } #[tokio::test] async fn aggregate_avg() -> Result<()> { let results = execute("SELECT AVG(c1), AVG(c2) FROM test", 4).await?; assert_eq!(results.len(), 1); let batch = &results[0]; assert_eq!(field_names(batch), vec!["AVG(c1)", "AVG(c2)"]); let expected: Vec<&str> = vec!["1.5,5.5"]; let mut rows = test::format_batch(&batch); rows.sort(); assert_eq!(rows, expected); Ok(()) } #[tokio::test] async fn aggregate_max() -> Result<()> { let results = execute("SELECT MAX(c1), MAX(c2) FROM test", 4).await?; assert_eq!(results.len(), 1); let batch = &results[0]; assert_eq!(field_names(batch), vec!["MAX(c1)", "MAX(c2)"]); let expected: Vec<&str> = vec!["3,10"]; let mut rows = test::format_batch(&batch); rows.sort(); assert_eq!(rows, expected); Ok(()) } #[tokio::test] async fn aggregate_min() -> Result<()> { let results = execute("SELECT MIN(c1), MIN(c2) FROM test", 4).await?; assert_eq!(results.len(), 1); let batch = &results[0]; assert_eq!(field_names(batch), vec!["MIN(c1)", "MIN(c2)"]); let expected: Vec<&str> = vec!["0,1"]; let mut rows = test::format_batch(&batch); rows.sort(); assert_eq!(rows, expected); Ok(()) } #[tokio::test] async fn aggregate_grouped() -> Result<()> { let results = execute("SELECT c1, SUM(c2) FROM test GROUP BY c1", 4).await?; assert_eq!(results.len(), 1); let batch = &results[0]; assert_eq!(field_names(batch), vec!["c1", "SUM(c2)"]); let expected: Vec<&str> = vec!["0,55", "1,55", "2,55", "3,55"]; let mut rows = test::format_batch(&batch); rows.sort(); assert_eq!(rows, expected); Ok(()) } #[tokio::test] async fn aggregate_grouped_avg() -> Result<()> { let results = execute("SELECT c1, AVG(c2) FROM test GROUP BY c1", 4).await?; assert_eq!(results.len(), 1); let batch = &results[0]; assert_eq!(field_names(batch), vec!["c1", "AVG(c2)"]); let expected: Vec<&str> = vec!["0,5.5", "1,5.5", "2,5.5", "3,5.5"]; let mut rows = test::format_batch(&batch); rows.sort(); assert_eq!(rows, 
expected); Ok(()) } #[tokio::test] async fn aggregate_grouped_empty() -> Result<()> { let results = execute("SELECT c1, AVG(c2) FROM test WHERE c1 = 123 GROUP BY c1", 4).await?; assert_eq!(results.len(), 1); let batch = &results[0]; assert_eq!(field_names(batch), vec!["c1", "AVG(c2)"]); let expected: Vec<&str> = vec![]; let mut rows = test::format_batch(&batch); rows.sort(); assert_eq!(rows, expected); Ok(()) } #[tokio::test] async fn aggregate_grouped_max() -> Result<()> { let results = execute("SELECT c1, MAX(c2) FROM test GROUP BY c1", 4).await?; assert_eq!(results.len(), 1); let batch = &results[0]; assert_eq!(field_names(batch), vec!["c1", "MAX(c2)"]); let expected: Vec<&str> = vec!["0,10", "1,10", "2,10", "3,10"]; let mut rows = test::format_batch(&batch); rows.sort(); assert_eq!(rows, expected); Ok(()) } #[tokio::test] async fn aggregate_grouped_min() -> Result<()> { let results = execute("SELECT c1, MIN(c2) FROM test GROUP BY c1", 4).await?; assert_eq!(results.len(), 1); let batch = &results[0]; assert_eq!(field_names(batch), vec!["c1", "MIN(c2)"]); let expected: Vec<&str> = vec!["0,1", "1,1", "2,1", "3,1"]; let mut rows = test::format_batch(&batch); rows.sort(); assert_eq!(rows, expected); Ok(()) } #[tokio::test] async fn count_basic() -> Result<()> { let results = execute("SELECT COUNT(c1), COUNT(c2) FROM test", 1).await?; assert_eq!(results.len(), 1); let batch = &results[0]; assert_eq!(field_names(batch), vec!["COUNT(c1)", "COUNT(c2)"]); let expected: Vec<&str> = vec!["10,10"]; let mut rows = test::format_batch(&batch); rows.sort(); assert_eq!(rows, expected); Ok(()) } #[tokio::test] async fn count_partitioned() -> Result<()> { let results = execute("SELECT COUNT(c1), COUNT(c2) FROM test", 4).await?; assert_eq!(results.len(), 1); let batch = &results[0]; assert_eq!(field_names(batch), vec!["COUNT(c1)", "COUNT(c2)"]); let expected: Vec<&str> = vec!["40,40"]; let mut rows = test::format_batch(&batch); rows.sort(); assert_eq!(rows, expected); Ok(()) } #[tokio::test] async fn count_aggregated() -> Result<()> { let results = execute("SELECT c1, COUNT(c2) FROM test GROUP BY c1", 4).await?; assert_eq!(results.len(), 1); let batch = &results[0]; assert_eq!(field_names(batch), vec!["c1", "COUNT(c2)"]); let expected = vec!["0,10", "1,10", "2,10", "3,10"]; let mut rows = test::format_batch(&batch); rows.sort(); assert_eq!(rows, expected); Ok(()) } #[tokio::test] async fn group_by_date_trunc() -> Result<()> { let tmp_dir = TempDir::new()?; let mut ctx = ExecutionContext::new(); let schema = Arc::new(Schema::new(vec![ Field::new("c2", DataType::UInt64, false), Field::new( "t1", DataType::Timestamp(TimeUnit::Microsecond, None), false, ), ])); // generate a partitioned file for partition in 0..4 { let filename = format!("partition-{}.{}", partition, "csv"); let file_path = tmp_dir.path().join(&filename); let mut file = File::create(file_path)?; // generate some data for i in 0..10 { let data = format!("{},2020-12-{}T00:00:00.000\n", i, i + 10); file.write_all(data.as_bytes())?; } } ctx.register_csv( "test", tmp_dir.path().to_str().unwrap(), CsvReadOptions::new().schema(&schema).has_header(false), )?; let results = plan_and_collect( &mut ctx, "SELECT date_trunc('week', t1) as week, SUM(c2) FROM test GROUP BY date_trunc('week', t1)" ).await?; assert_eq!(results.len(), 1); let batch = &results[0]; assert_eq!(field_names(batch), vec!["week", "SUM(c2)"]); let expected: Vec<&str> = vec!["2020-12-07T00:00:00,24", "2020-12-14T00:00:00,156"]; let mut rows = test::format_batch(&batch); rows.sort(); 
assert_eq!(rows, expected); Ok(()) } async fn run_count_distinct_integers_aggregated_scenario( partitions: Vec<Vec<(&str, u64)>>, ) -> Result<Vec<RecordBatch>> { let tmp_dir = TempDir::new()?; let mut ctx = ExecutionContext::new(); let schema = Arc::new(Schema::new(vec![ Field::new("c_group", DataType::Utf8, false), Field::new("c_int8", DataType::Int8, false), Field::new("c_int16", DataType::Int16, false), Field::new("c_int32", DataType::Int32, false), Field::new("c_int64", DataType::Int64, false), Field::new("c_uint8", DataType::UInt8, false), Field::new("c_uint16", DataType::UInt16, false), Field::new("c_uint32", DataType::UInt32, false), Field::new("c_uint64", DataType::UInt64, false), ])); for (i, partition) in partitions.iter().enumerate() { let filename = format!("partition-{}.csv", i); let file_path = tmp_dir.path().join(&filename); let mut file = File::create(file_path)?; for row in partition { let row_str = format!( "{},{}\n", row.0, // Populate values for each of the integer fields in the // schema. (0..8) .map(|_| { row.1.to_string() }) .collect::<Vec<_>>() .join(","), ); file.write_all(row_str.as_bytes())?; } } ctx.register_csv( "test", tmp_dir.path().to_str().unwrap(), CsvReadOptions::new().schema(&schema).has_header(false), )?; let results = plan_and_collect( &mut ctx, " SELECT c_group, COUNT(c_uint64), COUNT(DISTINCT c_int8), COUNT(DISTINCT c_int16), COUNT(DISTINCT c_int32), COUNT(DISTINCT c_int64), COUNT(DISTINCT c_uint8), COUNT(DISTINCT c_uint16), COUNT(DISTINCT c_uint32), COUNT(DISTINCT c_uint64) FROM test GROUP BY c_group ", ) .await?; Ok(results) } #[tokio::test] async fn count_distinct_integers_aggregated_single_partition() -> Result<()> { let partitions = vec![ // The first member of each tuple will be the value for the // `c_group` column, and the second member will be the value for // each of the int/uint fields. vec![ ("a", 1), ("a", 1), ("a", 2), ("b", 9), ("c", 9), ("c", 10), ("c", 9), ], ]; let results = run_count_distinct_integers_aggregated_scenario(partitions).await?; assert_eq!(results.len(), 1); let batch = &results[0]; assert_eq!(batch.num_rows(), 3); assert_eq!(batch.num_columns(), 10); let mut result = test::format_batch(&batch); result.sort_unstable(); assert_eq!( result, vec![ "a,3,2,2,2,2,2,2,2,2", "b,1,1,1,1,1,1,1,1,1", "c,3,2,2,2,2,2,2,2,2", ], ); Ok(()) } #[tokio::test] async fn count_distinct_integers_aggregated_multiple_partitions() -> Result<()> { let partitions = vec![ // The first member of each tuple will be the value for the // `c_group` column, and the second member will be the value for // each of the int/uint fields. vec![("a", 1), ("a", 1), ("a", 2), ("b", 9), ("c", 9)], vec![("a", 1), ("a", 3), ("b", 8), ("b", 9), ("b", 10), ("b", 11)], ]; let results = run_count_distinct_integers_aggregated_scenario(partitions).await?; assert_eq!(results.len(), 1); let batch = &results[0]; assert_eq!(batch.num_rows(), 3); assert_eq!(batch.num_columns(), 10); let mut result = test::format_batch(&batch); result.sort_unstable(); assert_eq!( result, vec![ "a,5,3,3,3,3,3,3,3,3", "b,5,4,4,4,4,4,4,4,4", "c,1,1,1,1,1,1,1,1,1", ], ); Ok(()) } #[test] fn aggregate_with_alias() -> Result<()> { let tmp_dir = TempDir::new()?; let ctx = create_ctx(&tmp_dir, 1)?; let schema = Arc::new(Schema::new(vec![ Field::new("c1", DataType::Utf8, false), Field::new("c2", DataType::UInt32, false), ])); let plan = LogicalPlanBuilder::scan_empty("", schema.as_ref(), None)? .aggregate(vec![col("c1")], vec![sum(col("c2"))])? 
.project(vec![col("c1"), col("SUM(c2)").alias("total_salary")])? .build()?; let plan = ctx.optimize(&plan)?; let physical_plan = ctx.create_physical_plan(&Arc::new(plan))?; assert_eq!("c1", physical_plan.schema().field(0).name().as_str()); assert_eq!( "total_salary", physical_plan.schema().field(1).name().as_str() ); Ok(()) } #[tokio::test] async fn write_csv_results() -> Result<()> { // create partitioned input file and context let tmp_dir = TempDir::new()?; let mut ctx = create_ctx(&tmp_dir, 4)?; // execute a simple query and write the results to CSV let out_dir = tmp_dir.as_ref().to_str().unwrap().to_string() + "/out"; write_csv(&mut ctx, "SELECT c1, c2 FROM test", &out_dir).await?; // create a new context and verify that the results were saved to a partitioned csv file let mut ctx = ExecutionContext::new(); let schema = Arc::new(Schema::new(vec![ Field::new("c1", DataType::UInt32, false), Field::new("c2", DataType::UInt64, false), ])); // register each partition as well as the top level dir let csv_read_option = CsvReadOptions::new().schema(&schema); ctx.register_csv("part0", &format!("{}/part-0.csv", out_dir), csv_read_option)?; ctx.register_csv("part1", &format!("{}/part-1.csv", out_dir), csv_read_option)?; ctx.register_csv("part2", &format!("{}/part-2.csv", out_dir), csv_read_option)?; ctx.register_csv("part3", &format!("{}/part-3.csv", out_dir), csv_read_option)?; ctx.register_csv("allparts", &out_dir, csv_read_option)?; let part0 = plan_and_collect(&mut ctx, "SELECT c1, c2 FROM part0").await?; let part1 = plan_and_collect(&mut ctx, "SELECT c1, c2 FROM part1").await?; let part2 = plan_and_collect(&mut ctx, "SELECT c1, c2 FROM part2").await?; let part3 = plan_and_collect(&mut ctx, "SELECT c1, c2 FROM part3").await?; let allparts = plan_and_collect(&mut ctx, "SELECT c1, c2 FROM allparts").await?; let part0_count: usize = part0.iter().map(|batch| batch.num_rows()).sum(); let part1_count: usize = part1.iter().map(|batch| batch.num_rows()).sum(); let part2_count: usize = part2.iter().map(|batch| batch.num_rows()).sum(); let part3_count: usize = part3.iter().map(|batch| batch.num_rows()).sum(); let allparts_count: usize = allparts.iter().map(|batch| batch.num_rows()).sum(); assert_eq!(part0_count, 10); assert_eq!(part1_count, 10); assert_eq!(part2_count, 10); assert_eq!(part3_count, 10); assert_eq!(allparts_count, 40); Ok(()) } #[tokio::test] async fn write_parquet_results() -> Result<()> { // create partitioned input file and context let tmp_dir = TempDir::new()?; let mut ctx = create_ctx(&tmp_dir, 4)?; // execute a simple query and write the results to CSV let out_dir = tmp_dir.as_ref().to_str().unwrap().to_string() + "/out"; write_parquet(&mut ctx, "SELECT c1, c2 FROM test", &out_dir, None).await?; // create a new context and verify that the results were saved to a partitioned csv file let mut ctx = ExecutionContext::new(); // register each partition as well as the top level dir ctx.register_parquet("part0", &format!("{}/part-0.parquet", out_dir))?; ctx.register_parquet("part1", &format!("{}/part-1.parquet", out_dir))?; ctx.register_parquet("part2", &format!("{}/part-2.parquet", out_dir))?; ctx.register_parquet("part3", &format!("{}/part-3.parquet", out_dir))?; ctx.register_parquet("allparts", &out_dir)?; let part0 = plan_and_collect(&mut ctx, "SELECT c1, c2 FROM part0").await?; let part1 = plan_and_collect(&mut ctx, "SELECT c1, c2 FROM part1").await?; let part2 = plan_and_collect(&mut ctx, "SELECT c1, c2 FROM part2").await?; let part3 = plan_and_collect(&mut ctx, "SELECT c1, c2 FROM 
part3").await?; let allparts = plan_and_collect(&mut ctx, "SELECT c1, c2 FROM allparts").await?; let part0_count: usize = part0.iter().map(|batch| batch.num_rows()).sum(); let part1_count: usize = part1.iter().map(|batch| batch.num_rows()).sum(); let part2_count: usize = part2.iter().map(|batch| batch.num_rows()).sum(); let part3_count: usize = part3.iter().map(|batch| batch.num_rows()).sum(); let allparts_count: usize = allparts.iter().map(|batch| batch.num_rows()).sum(); assert_eq!(part0_count, 10); assert_eq!(part1_count, 10); assert_eq!(part2_count, 10); assert_eq!(part3_count, 10); assert_eq!(allparts_count, 40); Ok(()) } #[tokio::test] async fn query_csv_with_custom_partition_extension() -> Result<()> { let tmp_dir = TempDir::new()?; // The main stipulation of this test: use a file extension that isn't .csv. let file_extension = ".tst"; let mut ctx = ExecutionContext::new(); let schema = populate_csv_partitions(&tmp_dir, 2, file_extension)?; ctx.register_csv( "test", tmp_dir.path().to_str().unwrap(), CsvReadOptions::new() .schema(&schema) .file_extension(file_extension), )?; let results = plan_and_collect(&mut ctx, "SELECT SUM(c1), SUM(c2), COUNT(*) FROM test") .await?; assert_eq!(results.len(), 1); assert_eq!(results[0].num_rows(), 1); assert_eq!(test::format_batch(&results[0]), vec!["10,110,20"]); Ok(()) } #[test] fn send_context_to_threads() -> Result<()> { // ensure ExecutionContexts can be used in a multi-threaded // environment. Usecase is for concurrent planing. let tmp_dir = TempDir::new()?; let partition_count = 4; let ctx = Arc::new(Mutex::new(create_ctx(&tmp_dir, partition_count)?)); let threads: Vec<JoinHandle<Result<_>>> = (0..2) .map(|_| ctx.clone()) .map(|ctx_clone| { thread::spawn(move || { let ctx = ctx_clone.lock().expect("Locked context"); // Ensure we can create logical plan code on a separate thread. ctx.create_logical_plan( "SELECT c1, c2 FROM test WHERE c1 > 0 AND c1 < 3", ) }) }) .collect(); for thread in threads { thread.join().expect("Failed to join thread")?; } Ok(()) } #[tokio::test] async fn scalar_udf() -> Result<()> { let schema = Schema::new(vec![ Field::new("a", DataType::Int32, false), Field::new("b", DataType::Int32, false), ]); let batch = RecordBatch::try_new( Arc::new(schema.clone()), vec![ Arc::new(Int32Array::from(vec![1, 10, 10, 100])), Arc::new(Int32Array::from(vec![2, 12, 12, 120])), ], )?; let mut ctx = ExecutionContext::new(); let provider = MemTable::try_new(Arc::new(schema), vec![vec![batch]])?; ctx.register_table("t", Box::new(provider)); let myfunc: ScalarFunctionImplementation = Arc::new(|args: &[ArrayRef]| { let l = &args[0] .as_any() .downcast_ref::<Int32Array>() .expect("cast failed"); let r = &args[1] .as_any() .downcast_ref::<Int32Array>() .expect("cast failed"); Ok(Arc::new(add(l, r)?)) }); ctx.register_udf(create_udf( "my_add", vec![DataType::Int32, DataType::Int32], Arc::new(DataType::Int32), myfunc, )); // from here on, we may be in a different scope. We would still like to be able // to call UDFs. let t = ctx.table("t")?; let plan = LogicalPlanBuilder::from(&t.to_logical_plan()) .project(vec![ col("a"), col("b"), ctx.udf("my_add")?.call(vec![col("a"), col("b")]), ])? 
.build()?; assert_eq!( format!("{:?}", plan), "Projection: #a, #b, my_add(#a, #b)\n TableScan: t projection=None" ); let plan = ctx.optimize(&plan)?; let plan = ctx.create_physical_plan(&plan)?; let result = collect(plan).await?; let batch = &result[0]; assert_eq!(3, batch.num_columns()); assert_eq!(4, batch.num_rows()); assert_eq!(field_names(batch), vec!["a", "b", "my_add(a,b)"]); let a = batch .column(0) .as_any() .downcast_ref::<Int32Array>() .expect("failed to cast a"); let b = batch .column(1) .as_any() .downcast_ref::<Int32Array>() .expect("failed to cast b"); let sum = batch .column(2) .as_any() .downcast_ref::<Int32Array>() .expect("failed to cast sum"); assert_eq!(4, a.len()); assert_eq!(4, b.len()); assert_eq!(4, sum.len()); for i in 0..sum.len() { assert_eq!(a.value(i) + b.value(i), sum.value(i)); } Ok(()) } #[tokio::test] async fn simple_avg() -> Result<()> { let schema = Schema::new(vec![Field::new("a", DataType::Int32, false)]); let batch1 = RecordBatch::try_new( Arc::new(schema.clone()), vec![Arc::new(Int32Array::from(vec![1, 2, 3]))], )?; let batch2 = RecordBatch::try_new( Arc::new(schema.clone()), vec![Arc::new(Int32Array::from(vec![4, 5]))], )?; let mut ctx = ExecutionContext::new(); let provider = MemTable::try_new(Arc::new(schema), vec![vec![batch1], vec![batch2]])?; ctx.register_table("t", Box::new(provider)); let result = plan_and_collect(&mut ctx, "SELECT AVG(a) FROM t").await?; let batch = &result[0]; assert_eq!(1, batch.num_columns()); assert_eq!(1, batch.num_rows()); let values = batch .column(0) .as_any() .downcast_ref::<Float64Array>() .expect("failed to cast version"); assert_eq!(values.len(), 1); // avg(1,2,3,4,5) = 3.0 assert_eq!(values.value(0), 3.0_f64); Ok(()) } /// tests the creation, registration and usage of a UDAF #[tokio::test] async fn simple_udaf() -> Result<()> { let schema = Schema::new(vec![Field::new("a", DataType::Int32, false)]); let batch1 = RecordBatch::try_new( Arc::new(schema.clone()), vec![Arc::new(Int32Array::from(vec![1, 2, 3]))], )?; let batch2 = RecordBatch::try_new( Arc::new(schema.clone()), vec![Arc::new(Int32Array::from(vec![4, 5]))], )?; let mut ctx = ExecutionContext::new(); let provider = MemTable::try_new(Arc::new(schema), vec![vec![batch1], vec![batch2]])?; ctx.register_table("t", Box::new(provider)); // define a udaf, using a DataFusion's accumulator let my_avg = create_udaf( "MY_AVG", DataType::Float64, Arc::new(DataType::Float64), Arc::new(|| Ok(Box::new(AvgAccumulator::try_new(&DataType::Float64)?))), Arc::new(vec![DataType::UInt64, DataType::Float64]), ); ctx.register_udaf(my_avg); let result = plan_and_collect(&mut ctx, "SELECT MY_AVG(a) FROM t").await?; let batch = &result[0]; assert_eq!(1, batch.num_columns()); assert_eq!(1, batch.num_rows()); let values = batch .column(0) .as_any() .downcast_ref::<Float64Array>() .expect("failed to cast version"); assert_eq!(values.len(), 1); // avg(1,2,3,4,5) = 3.0 assert_eq!(values.value(0), 3.0_f64); Ok(()) } #[tokio::test] async fn custom_query_planner() -> Result<()> { let mut ctx = ExecutionContext::with_config( ExecutionConfig::new().with_query_planner(Arc::new(MyQueryPlanner {})), ); let df = ctx.sql("SELECT 1")?; df.collect().await.expect_err("query not supported"); Ok(()) } struct MyPhysicalPlanner {} impl PhysicalPlanner for MyPhysicalPlanner { fn create_physical_plan( &self, _logical_plan: &LogicalPlan, _ctx_state: &ExecutionContextState, ) -> Result<Arc<dyn ExecutionPlan>> { Err(DataFusionError::NotImplemented( "query not supported".to_string(), )) } } struct 
MyQueryPlanner {} impl QueryPlanner for MyQueryPlanner { fn create_physical_plan( &self, logical_plan: &LogicalPlan, ctx_state: &ExecutionContextState, ) -> Result<Arc<dyn ExecutionPlan>> { let physical_planner = MyPhysicalPlanner {}; physical_planner.create_physical_plan(logical_plan, ctx_state) } } /// Execute SQL and return results async fn plan_and_collect( ctx: &mut ExecutionContext, sql: &str, ) -> Result<Vec<RecordBatch>> { let logical_plan = ctx.create_logical_plan(sql)?; let logical_plan = ctx.optimize(&logical_plan)?; let physical_plan = ctx.create_physical_plan(&logical_plan)?; collect(physical_plan).await } fn field_names(result: &RecordBatch) -> Vec<String> { result .schema() .fields() .iter() .map(|x| x.name().clone()) .collect::<Vec<String>>() } /// Execute SQL and return results async fn execute(sql: &str, partition_count: usize) -> Result<Vec<RecordBatch>> { let tmp_dir = TempDir::new()?; let mut ctx = create_ctx(&tmp_dir, partition_count)?; plan_and_collect(&mut ctx, sql).await } /// Execute SQL and write results to partitioned csv files async fn write_csv( ctx: &mut ExecutionContext, sql: &str, out_dir: &str, ) -> Result<()> { let logical_plan = ctx.create_logical_plan(sql)?; let logical_plan = ctx.optimize(&logical_plan)?; let physical_plan = ctx.create_physical_plan(&logical_plan)?; ctx.write_csv(physical_plan, out_dir.to_string()).await } /// Execute SQL and write results to partitioned parquet files async fn write_parquet( ctx: &mut ExecutionContext, sql: &str, out_dir: &str, writer_properties: Option<WriterProperties>, ) -> Result<()> { let logical_plan = ctx.create_logical_plan(sql)?; let logical_plan = ctx.optimize(&logical_plan)?; let physical_plan = ctx.create_physical_plan(&logical_plan)?; ctx.write_parquet(physical_plan, out_dir.to_string(), writer_properties) .await } /// Generate CSV partitions within the supplied directory fn populate_csv_partitions( tmp_dir: &TempDir, partition_count: usize, file_extension: &str, ) -> Result<SchemaRef> { // define schema for data source (csv file) let schema = Arc::new(Schema::new(vec![ Field::new("c1", DataType::UInt32, false), Field::new("c2", DataType::UInt64, false), ])); // generate a partitioned file for partition in 0..partition_count { let filename = format!("partition-{}.{}", partition, file_extension); let file_path = tmp_dir.path().join(&filename); let mut file = File::create(file_path)?; // generate some data for i in 0..=10 { let data = format!("{},{}\n", partition, i); file.write_all(data.as_bytes())?; } } Ok(schema) } /// Generate a partitioned CSV file and register it with an execution context fn create_ctx(tmp_dir: &TempDir, partition_count: usize) -> Result<ExecutionContext> { let mut ctx = ExecutionContext::new(); let schema = populate_csv_partitions(tmp_dir, partition_count, ".csv")?; // register csv file with the execution context ctx.register_csv( "test", tmp_dir.path().to_str().unwrap(), CsvReadOptions::new().schema(&schema), )?; Ok(ctx) } }
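To tie the pieces together, here is a hedged end-to-end sketch adapted from the module's own doc examples; the tokio entry point and the final print are additions, and tests/example.csv is the path the docs themselves use.

use datafusion::prelude::*;

#[tokio::main]
async fn main() -> datafusion::error::Result<()> {
    // Register a CSV file as a SQL-visible table, then query it.
    let mut ctx = ExecutionContext::new();
    ctx.register_csv("example", "tests/example.csv", CsvReadOptions::new())?;
    let df = ctx.sql("SELECT a, MIN(b) FROM example GROUP BY a LIMIT 100")?;
    // collect() runs the optimized physical plan and buffers the result batches.
    let results = df.collect().await?;
    println!("got {} record batches", results.len());
    Ok(())
}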
34.19186
102
0.545996
2931c0cc16665da592bf2f4d0ddc36f03886d4f2
84,848
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. pub fn parse_http_generic_error( response: &http::Response<bytes::Bytes>, ) -> Result<smithy_types::Error, smithy_json::deserialize::Error> { crate::json_errors::parse_generic_error(response.body(), response.headers()) } pub fn deser_structure_crate_error_invalid_parameter_exceptionjson_err( input: &[u8], mut builder: crate::error::invalid_parameter_exception::Builder, ) -> Result<crate::error::invalid_parameter_exception::Builder, smithy_json::deserialize::Error> { let mut tokens_owned = smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(input)) .peekable(); let tokens = &mut tokens_owned; smithy_json::deserialize::token::expect_start_object(tokens.next())?; loop { match tokens.next().transpose()? { Some(smithy_json::deserialize::Token::EndObject { .. }) => break, Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "Message" => { builder = builder.set_message( smithy_json::deserialize::token::expect_string_or_null(tokens.next())? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "ParameterName" => { builder = builder.set_parameter_name( smithy_json::deserialize::token::expect_string_or_null(tokens.next())? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } _ => smithy_json::deserialize::token::skip_value(tokens)?, } } _ => { return Err(smithy_json::deserialize::Error::custom( "expected object key or end object", )) } } } if tokens.next().is_some() { return Err(smithy_json::deserialize::Error::custom( "found more JSON tokens after completing parsing", )); } Ok(builder) } pub fn deser_structure_crate_error_resource_not_found_exceptionjson_err( input: &[u8], mut builder: crate::error::resource_not_found_exception::Builder, ) -> Result<crate::error::resource_not_found_exception::Builder, smithy_json::deserialize::Error> { let mut tokens_owned = smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(input)) .peekable(); let tokens = &mut tokens_owned; smithy_json::deserialize::token::expect_start_object(tokens.next())?; loop { match tokens.next().transpose()? { Some(smithy_json::deserialize::Token::EndObject { .. }) => break, Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "Message" => { builder = builder.set_message( smithy_json::deserialize::token::expect_string_or_null(tokens.next())? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "ResourceType" => { builder = builder.set_resource_type( smithy_json::deserialize::token::expect_string_or_null(tokens.next())? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "ResourceName" => { builder = builder.set_resource_name( smithy_json::deserialize::token::expect_string_or_null(tokens.next())? 
.map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } _ => smithy_json::deserialize::token::skip_value(tokens)?, } } _ => { return Err(smithy_json::deserialize::Error::custom( "expected object key or end object", )) } } } if tokens.next().is_some() { return Err(smithy_json::deserialize::Error::custom( "found more JSON tokens after completing parsing", )); } Ok(builder) } pub fn deser_structure_crate_error_resource_precondition_not_met_exceptionjson_err( input: &[u8], mut builder: crate::error::resource_precondition_not_met_exception::Builder, ) -> Result< crate::error::resource_precondition_not_met_exception::Builder, smithy_json::deserialize::Error, > { let mut tokens_owned = smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(input)) .peekable(); let tokens = &mut tokens_owned; smithy_json::deserialize::token::expect_start_object(tokens.next())?; loop { match tokens.next().transpose()? { Some(smithy_json::deserialize::Token::EndObject { .. }) => break, Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "Message" => { builder = builder.set_message( smithy_json::deserialize::token::expect_string_or_null(tokens.next())? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "ResourceType" => { builder = builder.set_resource_type( smithy_json::deserialize::token::expect_string_or_null(tokens.next())? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "ResourceName" => { builder = builder.set_resource_name( smithy_json::deserialize::token::expect_string_or_null(tokens.next())? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } _ => smithy_json::deserialize::token::skip_value(tokens)?, } } _ => { return Err(smithy_json::deserialize::Error::custom( "expected object key or end object", )) } } } if tokens.next().is_some() { return Err(smithy_json::deserialize::Error::custom( "found more JSON tokens after completing parsing", )); } Ok(builder) } pub fn deser_operation_crate_operation_cancel_journal_kinesis_stream( input: &[u8], mut builder: crate::output::cancel_journal_kinesis_stream_output::Builder, ) -> Result< crate::output::cancel_journal_kinesis_stream_output::Builder, smithy_json::deserialize::Error, > { let mut tokens_owned = smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(input)) .peekable(); let tokens = &mut tokens_owned; smithy_json::deserialize::token::expect_start_object(tokens.next())?; loop { match tokens.next().transpose()? { Some(smithy_json::deserialize::Token::EndObject { .. }) => break, Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "StreamId" => { builder = builder.set_stream_id( smithy_json::deserialize::token::expect_string_or_null(tokens.next())? 
.map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } _ => smithy_json::deserialize::token::skip_value(tokens)?, } } _ => { return Err(smithy_json::deserialize::Error::custom( "expected object key or end object", )) } } } if tokens.next().is_some() { return Err(smithy_json::deserialize::Error::custom( "found more JSON tokens after completing parsing", )); } Ok(builder) } pub fn deser_structure_crate_error_limit_exceeded_exceptionjson_err( input: &[u8], mut builder: crate::error::limit_exceeded_exception::Builder, ) -> Result<crate::error::limit_exceeded_exception::Builder, smithy_json::deserialize::Error> { let mut tokens_owned = smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(input)) .peekable(); let tokens = &mut tokens_owned; smithy_json::deserialize::token::expect_start_object(tokens.next())?; loop { match tokens.next().transpose()? { Some(smithy_json::deserialize::Token::EndObject { .. }) => break, Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "Message" => { builder = builder.set_message( smithy_json::deserialize::token::expect_string_or_null(tokens.next())? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "ResourceType" => { builder = builder.set_resource_type( smithy_json::deserialize::token::expect_string_or_null(tokens.next())? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } _ => smithy_json::deserialize::token::skip_value(tokens)?, } } _ => { return Err(smithy_json::deserialize::Error::custom( "expected object key or end object", )) } } } if tokens.next().is_some() { return Err(smithy_json::deserialize::Error::custom( "found more JSON tokens after completing parsing", )); } Ok(builder) } pub fn deser_structure_crate_error_resource_already_exists_exceptionjson_err( input: &[u8], mut builder: crate::error::resource_already_exists_exception::Builder, ) -> Result<crate::error::resource_already_exists_exception::Builder, smithy_json::deserialize::Error> { let mut tokens_owned = smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(input)) .peekable(); let tokens = &mut tokens_owned; smithy_json::deserialize::token::expect_start_object(tokens.next())?; loop { match tokens.next().transpose()? { Some(smithy_json::deserialize::Token::EndObject { .. }) => break, Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "Message" => { builder = builder.set_message( smithy_json::deserialize::token::expect_string_or_null(tokens.next())? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "ResourceType" => { builder = builder.set_resource_type( smithy_json::deserialize::token::expect_string_or_null(tokens.next())? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "ResourceName" => { builder = builder.set_resource_name( smithy_json::deserialize::token::expect_string_or_null(tokens.next())? 
.map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } _ => smithy_json::deserialize::token::skip_value(tokens)?, } } _ => { return Err(smithy_json::deserialize::Error::custom( "expected object key or end object", )) } } } if tokens.next().is_some() { return Err(smithy_json::deserialize::Error::custom( "found more JSON tokens after completing parsing", )); } Ok(builder) } pub fn deser_structure_crate_error_resource_in_use_exceptionjson_err( input: &[u8], mut builder: crate::error::resource_in_use_exception::Builder, ) -> Result<crate::error::resource_in_use_exception::Builder, smithy_json::deserialize::Error> { let mut tokens_owned = smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(input)) .peekable(); let tokens = &mut tokens_owned; smithy_json::deserialize::token::expect_start_object(tokens.next())?; loop { match tokens.next().transpose()? { Some(smithy_json::deserialize::Token::EndObject { .. }) => break, Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "Message" => { builder = builder.set_message( smithy_json::deserialize::token::expect_string_or_null(tokens.next())? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "ResourceType" => { builder = builder.set_resource_type( smithy_json::deserialize::token::expect_string_or_null(tokens.next())? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "ResourceName" => { builder = builder.set_resource_name( smithy_json::deserialize::token::expect_string_or_null(tokens.next())? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } _ => smithy_json::deserialize::token::skip_value(tokens)?, } } _ => { return Err(smithy_json::deserialize::Error::custom( "expected object key or end object", )) } } } if tokens.next().is_some() { return Err(smithy_json::deserialize::Error::custom( "found more JSON tokens after completing parsing", )); } Ok(builder) } pub fn deser_operation_crate_operation_create_ledger( input: &[u8], mut builder: crate::output::create_ledger_output::Builder, ) -> Result<crate::output::create_ledger_output::Builder, smithy_json::deserialize::Error> { let mut tokens_owned = smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(input)) .peekable(); let tokens = &mut tokens_owned; smithy_json::deserialize::token::expect_start_object(tokens.next())?; loop { match tokens.next().transpose()? { Some(smithy_json::deserialize::Token::EndObject { .. }) => break, Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "Arn" => { builder = builder.set_arn( smithy_json::deserialize::token::expect_string_or_null(tokens.next())? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "CreationDateTime" => { builder = builder.set_creation_date_time( smithy_json::deserialize::token::expect_timestamp_or_null( tokens.next(), smithy_types::instant::Format::EpochSeconds, )?, ); } "DeletionProtection" => { builder = builder.set_deletion_protection( smithy_json::deserialize::token::expect_bool_or_null(tokens.next())?, ); } "KmsKeyArn" => { builder = builder.set_kms_key_arn( smithy_json::deserialize::token::expect_string_or_null(tokens.next())? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "Name" => { builder = builder.set_name( smithy_json::deserialize::token::expect_string_or_null(tokens.next())? 
.map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "PermissionsMode" => { builder = builder.set_permissions_mode( smithy_json::deserialize::token::expect_string_or_null(tokens.next())? .map(|s| { s.to_unescaped() .map(|u| crate::model::PermissionsMode::from(u.as_ref())) }) .transpose()?, ); } "State" => { builder = builder.set_state( smithy_json::deserialize::token::expect_string_or_null(tokens.next())? .map(|s| { s.to_unescaped() .map(|u| crate::model::LedgerState::from(u.as_ref())) }) .transpose()?, ); } _ => smithy_json::deserialize::token::skip_value(tokens)?, } } _ => { return Err(smithy_json::deserialize::Error::custom( "expected object key or end object", )) } } } if tokens.next().is_some() { return Err(smithy_json::deserialize::Error::custom( "found more JSON tokens after completing parsing", )); } Ok(builder) } pub fn deser_operation_crate_operation_describe_journal_kinesis_stream( input: &[u8], mut builder: crate::output::describe_journal_kinesis_stream_output::Builder, ) -> Result< crate::output::describe_journal_kinesis_stream_output::Builder, smithy_json::deserialize::Error, > { let mut tokens_owned = smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(input)) .peekable(); let tokens = &mut tokens_owned; smithy_json::deserialize::token::expect_start_object(tokens.next())?; loop { match tokens.next().transpose()? { Some(smithy_json::deserialize::Token::EndObject { .. }) => break, Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "Stream" => { builder = builder.set_stream( crate::json_deser::deser_structure_crate_model_journal_kinesis_stream_description(tokens)? ); } _ => smithy_json::deserialize::token::skip_value(tokens)?, } } _ => { return Err(smithy_json::deserialize::Error::custom( "expected object key or end object", )) } } } if tokens.next().is_some() { return Err(smithy_json::deserialize::Error::custom( "found more JSON tokens after completing parsing", )); } Ok(builder) } pub fn deser_operation_crate_operation_describe_journal_s3_export( input: &[u8], mut builder: crate::output::describe_journal_s3_export_output::Builder, ) -> Result< crate::output::describe_journal_s3_export_output::Builder, smithy_json::deserialize::Error, > { let mut tokens_owned = smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(input)) .peekable(); let tokens = &mut tokens_owned; smithy_json::deserialize::token::expect_start_object(tokens.next())?; loop { match tokens.next().transpose()? { Some(smithy_json::deserialize::Token::EndObject { .. }) => break, Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "ExportDescription" => { builder = builder.set_export_description( crate::json_deser::deser_structure_crate_model_journal_s3_export_description(tokens)? 
); } _ => smithy_json::deserialize::token::skip_value(tokens)?, } } _ => { return Err(smithy_json::deserialize::Error::custom( "expected object key or end object", )) } } } if tokens.next().is_some() { return Err(smithy_json::deserialize::Error::custom( "found more JSON tokens after completing parsing", )); } Ok(builder) } pub fn deser_operation_crate_operation_describe_ledger( input: &[u8], mut builder: crate::output::describe_ledger_output::Builder, ) -> Result<crate::output::describe_ledger_output::Builder, smithy_json::deserialize::Error> { let mut tokens_owned = smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(input)) .peekable(); let tokens = &mut tokens_owned; smithy_json::deserialize::token::expect_start_object(tokens.next())?; loop { match tokens.next().transpose()? { Some(smithy_json::deserialize::Token::EndObject { .. }) => break, Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "Arn" => { builder = builder.set_arn( smithy_json::deserialize::token::expect_string_or_null(tokens.next())? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "CreationDateTime" => { builder = builder.set_creation_date_time( smithy_json::deserialize::token::expect_timestamp_or_null( tokens.next(), smithy_types::instant::Format::EpochSeconds, )?, ); } "DeletionProtection" => { builder = builder.set_deletion_protection( smithy_json::deserialize::token::expect_bool_or_null(tokens.next())?, ); } "EncryptionDescription" => { builder = builder.set_encryption_description( crate::json_deser::deser_structure_crate_model_ledger_encryption_description(tokens)? ); } "Name" => { builder = builder.set_name( smithy_json::deserialize::token::expect_string_or_null(tokens.next())? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "PermissionsMode" => { builder = builder.set_permissions_mode( smithy_json::deserialize::token::expect_string_or_null(tokens.next())? .map(|s| { s.to_unescaped() .map(|u| crate::model::PermissionsMode::from(u.as_ref())) }) .transpose()?, ); } "State" => { builder = builder.set_state( smithy_json::deserialize::token::expect_string_or_null(tokens.next())? .map(|s| { s.to_unescaped() .map(|u| crate::model::LedgerState::from(u.as_ref())) }) .transpose()?, ); } _ => smithy_json::deserialize::token::skip_value(tokens)?, } } _ => { return Err(smithy_json::deserialize::Error::custom( "expected object key or end object", )) } } } if tokens.next().is_some() { return Err(smithy_json::deserialize::Error::custom( "found more JSON tokens after completing parsing", )); } Ok(builder) } pub fn deser_operation_crate_operation_export_journal_to_s3( input: &[u8], mut builder: crate::output::export_journal_to_s3_output::Builder, ) -> Result<crate::output::export_journal_to_s3_output::Builder, smithy_json::deserialize::Error> { let mut tokens_owned = smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(input)) .peekable(); let tokens = &mut tokens_owned; smithy_json::deserialize::token::expect_start_object(tokens.next())?; loop { match tokens.next().transpose()? { Some(smithy_json::deserialize::Token::EndObject { .. }) => break, Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "ExportId" => { builder = builder.set_export_id( smithy_json::deserialize::token::expect_string_or_null(tokens.next())? 
.map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } _ => smithy_json::deserialize::token::skip_value(tokens)?, } } _ => { return Err(smithy_json::deserialize::Error::custom( "expected object key or end object", )) } } } if tokens.next().is_some() { return Err(smithy_json::deserialize::Error::custom( "found more JSON tokens after completing parsing", )); } Ok(builder) } pub fn deser_operation_crate_operation_get_block( input: &[u8], mut builder: crate::output::get_block_output::Builder, ) -> Result<crate::output::get_block_output::Builder, smithy_json::deserialize::Error> { let mut tokens_owned = smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(input)) .peekable(); let tokens = &mut tokens_owned; smithy_json::deserialize::token::expect_start_object(tokens.next())?; loop { match tokens.next().transpose()? { Some(smithy_json::deserialize::Token::EndObject { .. }) => break, Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "Block" => { builder = builder.set_block( crate::json_deser::deser_structure_crate_model_value_holder(tokens)?, ); } "Proof" => { builder = builder.set_proof( crate::json_deser::deser_structure_crate_model_value_holder(tokens)?, ); } _ => smithy_json::deserialize::token::skip_value(tokens)?, } } _ => { return Err(smithy_json::deserialize::Error::custom( "expected object key or end object", )) } } } if tokens.next().is_some() { return Err(smithy_json::deserialize::Error::custom( "found more JSON tokens after completing parsing", )); } Ok(builder) } pub fn deser_operation_crate_operation_get_digest( input: &[u8], mut builder: crate::output::get_digest_output::Builder, ) -> Result<crate::output::get_digest_output::Builder, smithy_json::deserialize::Error> { let mut tokens_owned = smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(input)) .peekable(); let tokens = &mut tokens_owned; smithy_json::deserialize::token::expect_start_object(tokens.next())?; loop { match tokens.next().transpose()? { Some(smithy_json::deserialize::Token::EndObject { .. }) => break, Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "Digest" => { builder = builder.set_digest( smithy_json::deserialize::token::expect_blob_or_null(tokens.next())?, ); } "DigestTipAddress" => { builder = builder.set_digest_tip_address( crate::json_deser::deser_structure_crate_model_value_holder(tokens)?, ); } _ => smithy_json::deserialize::token::skip_value(tokens)?, } } _ => { return Err(smithy_json::deserialize::Error::custom( "expected object key or end object", )) } } } if tokens.next().is_some() { return Err(smithy_json::deserialize::Error::custom( "found more JSON tokens after completing parsing", )); } Ok(builder) } pub fn deser_operation_crate_operation_get_revision( input: &[u8], mut builder: crate::output::get_revision_output::Builder, ) -> Result<crate::output::get_revision_output::Builder, smithy_json::deserialize::Error> { let mut tokens_owned = smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(input)) .peekable(); let tokens = &mut tokens_owned; smithy_json::deserialize::token::expect_start_object(tokens.next())?; loop { match tokens.next().transpose()? { Some(smithy_json::deserialize::Token::EndObject { .. }) => break, Some(smithy_json::deserialize::Token::ObjectKey { key, .. 
}) => { match key.to_unescaped()?.as_ref() { "Proof" => { builder = builder.set_proof( crate::json_deser::deser_structure_crate_model_value_holder(tokens)?, ); } "Revision" => { builder = builder.set_revision( crate::json_deser::deser_structure_crate_model_value_holder(tokens)?, ); } _ => smithy_json::deserialize::token::skip_value(tokens)?, } } _ => { return Err(smithy_json::deserialize::Error::custom( "expected object key or end object", )) } } } if tokens.next().is_some() { return Err(smithy_json::deserialize::Error::custom( "found more JSON tokens after completing parsing", )); } Ok(builder) } pub fn deser_operation_crate_operation_list_journal_kinesis_streams_for_ledger( input: &[u8], mut builder: crate::output::list_journal_kinesis_streams_for_ledger_output::Builder, ) -> Result< crate::output::list_journal_kinesis_streams_for_ledger_output::Builder, smithy_json::deserialize::Error, > { let mut tokens_owned = smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(input)) .peekable(); let tokens = &mut tokens_owned; smithy_json::deserialize::token::expect_start_object(tokens.next())?; loop { match tokens.next().transpose()? { Some(smithy_json::deserialize::Token::EndObject { .. }) => break, Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "NextToken" => { builder = builder.set_next_token( smithy_json::deserialize::token::expect_string_or_null(tokens.next())? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "Streams" => { builder = builder.set_streams( crate::json_deser::deser_list_com_amazonaws_qldb_journal_kinesis_stream_description_list(tokens)? ); } _ => smithy_json::deserialize::token::skip_value(tokens)?, } } _ => { return Err(smithy_json::deserialize::Error::custom( "expected object key or end object", )) } } } if tokens.next().is_some() { return Err(smithy_json::deserialize::Error::custom( "found more JSON tokens after completing parsing", )); } Ok(builder) } pub fn deser_operation_crate_operation_list_journal_s3_exports( input: &[u8], mut builder: crate::output::list_journal_s3_exports_output::Builder, ) -> Result<crate::output::list_journal_s3_exports_output::Builder, smithy_json::deserialize::Error> { let mut tokens_owned = smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(input)) .peekable(); let tokens = &mut tokens_owned; smithy_json::deserialize::token::expect_start_object(tokens.next())?; loop { match tokens.next().transpose()? { Some(smithy_json::deserialize::Token::EndObject { .. }) => break, Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "JournalS3Exports" => { builder = builder.set_journal_s3_exports( crate::json_deser::deser_list_com_amazonaws_qldb_journal_s3_export_list(tokens)? ); } "NextToken" => { builder = builder.set_next_token( smithy_json::deserialize::token::expect_string_or_null(tokens.next())? 
.map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } _ => smithy_json::deserialize::token::skip_value(tokens)?, } } _ => { return Err(smithy_json::deserialize::Error::custom( "expected object key or end object", )) } } } if tokens.next().is_some() { return Err(smithy_json::deserialize::Error::custom( "found more JSON tokens after completing parsing", )); } Ok(builder) } pub fn deser_operation_crate_operation_list_journal_s3_exports_for_ledger( input: &[u8], mut builder: crate::output::list_journal_s3_exports_for_ledger_output::Builder, ) -> Result< crate::output::list_journal_s3_exports_for_ledger_output::Builder, smithy_json::deserialize::Error, > { let mut tokens_owned = smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(input)) .peekable(); let tokens = &mut tokens_owned; smithy_json::deserialize::token::expect_start_object(tokens.next())?; loop { match tokens.next().transpose()? { Some(smithy_json::deserialize::Token::EndObject { .. }) => break, Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "JournalS3Exports" => { builder = builder.set_journal_s3_exports( crate::json_deser::deser_list_com_amazonaws_qldb_journal_s3_export_list(tokens)? ); } "NextToken" => { builder = builder.set_next_token( smithy_json::deserialize::token::expect_string_or_null(tokens.next())? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } _ => smithy_json::deserialize::token::skip_value(tokens)?, } } _ => { return Err(smithy_json::deserialize::Error::custom( "expected object key or end object", )) } } } if tokens.next().is_some() { return Err(smithy_json::deserialize::Error::custom( "found more JSON tokens after completing parsing", )); } Ok(builder) } pub fn deser_operation_crate_operation_list_ledgers( input: &[u8], mut builder: crate::output::list_ledgers_output::Builder, ) -> Result<crate::output::list_ledgers_output::Builder, smithy_json::deserialize::Error> { let mut tokens_owned = smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(input)) .peekable(); let tokens = &mut tokens_owned; smithy_json::deserialize::token::expect_start_object(tokens.next())?; loop { match tokens.next().transpose()? { Some(smithy_json::deserialize::Token::EndObject { .. }) => break, Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "Ledgers" => { builder = builder.set_ledgers( crate::json_deser::deser_list_com_amazonaws_qldb_ledger_list(tokens)?, ); } "NextToken" => { builder = builder.set_next_token( smithy_json::deserialize::token::expect_string_or_null(tokens.next())? 
.map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } _ => smithy_json::deserialize::token::skip_value(tokens)?, } } _ => { return Err(smithy_json::deserialize::Error::custom( "expected object key or end object", )) } } } if tokens.next().is_some() { return Err(smithy_json::deserialize::Error::custom( "found more JSON tokens after completing parsing", )); } Ok(builder) } pub fn deser_operation_crate_operation_list_tags_for_resource( input: &[u8], mut builder: crate::output::list_tags_for_resource_output::Builder, ) -> Result<crate::output::list_tags_for_resource_output::Builder, smithy_json::deserialize::Error> { let mut tokens_owned = smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(input)) .peekable(); let tokens = &mut tokens_owned; smithy_json::deserialize::token::expect_start_object(tokens.next())?; loop { match tokens.next().transpose()? { Some(smithy_json::deserialize::Token::EndObject { .. }) => break, Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "Tags" => { builder = builder.set_tags( crate::json_deser::deser_map_com_amazonaws_qldb_tags(tokens)?, ); } _ => smithy_json::deserialize::token::skip_value(tokens)?, } } _ => { return Err(smithy_json::deserialize::Error::custom( "expected object key or end object", )) } } } if tokens.next().is_some() { return Err(smithy_json::deserialize::Error::custom( "found more JSON tokens after completing parsing", )); } Ok(builder) } pub fn deser_operation_crate_operation_stream_journal_to_kinesis( input: &[u8], mut builder: crate::output::stream_journal_to_kinesis_output::Builder, ) -> Result<crate::output::stream_journal_to_kinesis_output::Builder, smithy_json::deserialize::Error> { let mut tokens_owned = smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(input)) .peekable(); let tokens = &mut tokens_owned; smithy_json::deserialize::token::expect_start_object(tokens.next())?; loop { match tokens.next().transpose()? { Some(smithy_json::deserialize::Token::EndObject { .. }) => break, Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "StreamId" => { builder = builder.set_stream_id( smithy_json::deserialize::token::expect_string_or_null(tokens.next())? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } _ => smithy_json::deserialize::token::skip_value(tokens)?, } } _ => { return Err(smithy_json::deserialize::Error::custom( "expected object key or end object", )) } } } if tokens.next().is_some() { return Err(smithy_json::deserialize::Error::custom( "found more JSON tokens after completing parsing", )); } Ok(builder) } pub fn deser_operation_crate_operation_update_ledger( input: &[u8], mut builder: crate::output::update_ledger_output::Builder, ) -> Result<crate::output::update_ledger_output::Builder, smithy_json::deserialize::Error> { let mut tokens_owned = smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(input)) .peekable(); let tokens = &mut tokens_owned; smithy_json::deserialize::token::expect_start_object(tokens.next())?; loop { match tokens.next().transpose()? { Some(smithy_json::deserialize::Token::EndObject { .. }) => break, Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "Arn" => { builder = builder.set_arn( smithy_json::deserialize::token::expect_string_or_null(tokens.next())? 
.map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "CreationDateTime" => { builder = builder.set_creation_date_time( smithy_json::deserialize::token::expect_timestamp_or_null( tokens.next(), smithy_types::instant::Format::EpochSeconds, )?, ); } "DeletionProtection" => { builder = builder.set_deletion_protection( smithy_json::deserialize::token::expect_bool_or_null(tokens.next())?, ); } "EncryptionDescription" => { builder = builder.set_encryption_description( crate::json_deser::deser_structure_crate_model_ledger_encryption_description(tokens)? ); } "Name" => { builder = builder.set_name( smithy_json::deserialize::token::expect_string_or_null(tokens.next())? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "State" => { builder = builder.set_state( smithy_json::deserialize::token::expect_string_or_null(tokens.next())? .map(|s| { s.to_unescaped() .map(|u| crate::model::LedgerState::from(u.as_ref())) }) .transpose()?, ); } _ => smithy_json::deserialize::token::skip_value(tokens)?, } } _ => { return Err(smithy_json::deserialize::Error::custom( "expected object key or end object", )) } } } if tokens.next().is_some() { return Err(smithy_json::deserialize::Error::custom( "found more JSON tokens after completing parsing", )); } Ok(builder) } pub fn deser_operation_crate_operation_update_ledger_permissions_mode( input: &[u8], mut builder: crate::output::update_ledger_permissions_mode_output::Builder, ) -> Result< crate::output::update_ledger_permissions_mode_output::Builder, smithy_json::deserialize::Error, > { let mut tokens_owned = smithy_json::deserialize::json_token_iter(crate::json_deser::or_empty_doc(input)) .peekable(); let tokens = &mut tokens_owned; smithy_json::deserialize::token::expect_start_object(tokens.next())?; loop { match tokens.next().transpose()? { Some(smithy_json::deserialize::Token::EndObject { .. }) => break, Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "Arn" => { builder = builder.set_arn( smithy_json::deserialize::token::expect_string_or_null(tokens.next())? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "Name" => { builder = builder.set_name( smithy_json::deserialize::token::expect_string_or_null(tokens.next())? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "PermissionsMode" => { builder = builder.set_permissions_mode( smithy_json::deserialize::token::expect_string_or_null(tokens.next())? .map(|s| { s.to_unescaped() .map(|u| crate::model::PermissionsMode::from(u.as_ref())) }) .transpose()?, ); } _ => smithy_json::deserialize::token::skip_value(tokens)?, } } _ => { return Err(smithy_json::deserialize::Error::custom( "expected object key or end object", )) } } } if tokens.next().is_some() { return Err(smithy_json::deserialize::Error::custom( "found more JSON tokens after completing parsing", )); } Ok(builder) } pub fn or_empty_doc(data: &[u8]) -> &[u8] { if data.is_empty() { b"{}" } else { data } } pub fn deser_structure_crate_model_journal_kinesis_stream_description<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::JournalKinesisStreamDescription>, smithy_json::deserialize::Error> where I: Iterator< Item = Result<smithy_json::deserialize::Token<'a>, smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(smithy_json::deserialize::Token::StartObject { .. 
}) => { #[allow(unused_mut)] let mut builder = crate::model::JournalKinesisStreamDescription::builder(); loop { match tokens.next().transpose()? { Some(smithy_json::deserialize::Token::EndObject { .. }) => break, Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "LedgerName" => { builder = builder.set_ledger_name( smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "CreationTime" => { builder = builder.set_creation_time( smithy_json::deserialize::token::expect_timestamp_or_null( tokens.next(), smithy_types::instant::Format::EpochSeconds, )?, ); } "InclusiveStartTime" => { builder = builder.set_inclusive_start_time( smithy_json::deserialize::token::expect_timestamp_or_null( tokens.next(), smithy_types::instant::Format::EpochSeconds, )?, ); } "ExclusiveEndTime" => { builder = builder.set_exclusive_end_time( smithy_json::deserialize::token::expect_timestamp_or_null( tokens.next(), smithy_types::instant::Format::EpochSeconds, )?, ); } "RoleArn" => { builder = builder.set_role_arn( smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "StreamId" => { builder = builder.set_stream_id( smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "Arn" => { builder = builder.set_arn( smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "Status" => { builder = builder.set_status( smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| { s.to_unescaped() .map(|u| crate::model::StreamStatus::from(u.as_ref())) }) .transpose()?, ); } "KinesisConfiguration" => { builder = builder.set_kinesis_configuration( crate::json_deser::deser_structure_crate_model_kinesis_configuration(tokens)? ); } "ErrorCause" => { builder = builder.set_error_cause( smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| { s.to_unescaped() .map(|u| crate::model::ErrorCause::from(u.as_ref())) }) .transpose()?, ); } "StreamName" => { builder = builder.set_stream_name( smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } _ => smithy_json::deserialize::token::skip_value(tokens)?, } } _ => { return Err(smithy_json::deserialize::Error::custom( "expected object key or end object", )) } } } Ok(Some(builder.build())) } _ => Err(smithy_json::deserialize::Error::custom( "expected start object or null", )), } } pub fn deser_structure_crate_model_journal_s3_export_description<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::JournalS3ExportDescription>, smithy_json::deserialize::Error> where I: Iterator< Item = Result<smithy_json::deserialize::Token<'a>, smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::JournalS3ExportDescription::builder(); loop { match tokens.next().transpose()? { Some(smithy_json::deserialize::Token::EndObject { .. }) => break, Some(smithy_json::deserialize::Token::ObjectKey { key, .. 
}) => { match key.to_unescaped()?.as_ref() { "LedgerName" => { builder = builder.set_ledger_name( smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "ExportId" => { builder = builder.set_export_id( smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "ExportCreationTime" => { builder = builder.set_export_creation_time( smithy_json::deserialize::token::expect_timestamp_or_null( tokens.next(), smithy_types::instant::Format::EpochSeconds, )?, ); } "Status" => { builder = builder.set_status( smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| { s.to_unescaped() .map(|u| crate::model::ExportStatus::from(u.as_ref())) }) .transpose()?, ); } "InclusiveStartTime" => { builder = builder.set_inclusive_start_time( smithy_json::deserialize::token::expect_timestamp_or_null( tokens.next(), smithy_types::instant::Format::EpochSeconds, )?, ); } "ExclusiveEndTime" => { builder = builder.set_exclusive_end_time( smithy_json::deserialize::token::expect_timestamp_or_null( tokens.next(), smithy_types::instant::Format::EpochSeconds, )?, ); } "S3ExportConfiguration" => { builder = builder.set_s3_export_configuration( crate::json_deser::deser_structure_crate_model_s3_export_configuration(tokens)? ); } "RoleArn" => { builder = builder.set_role_arn( smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } _ => smithy_json::deserialize::token::skip_value(tokens)?, } } _ => { return Err(smithy_json::deserialize::Error::custom( "expected object key or end object", )) } } } Ok(Some(builder.build())) } _ => Err(smithy_json::deserialize::Error::custom( "expected start object or null", )), } } pub fn deser_structure_crate_model_ledger_encryption_description<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::LedgerEncryptionDescription>, smithy_json::deserialize::Error> where I: Iterator< Item = Result<smithy_json::deserialize::Token<'a>, smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::LedgerEncryptionDescription::builder(); loop { match tokens.next().transpose()? { Some(smithy_json::deserialize::Token::EndObject { .. }) => break, Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "KmsKeyArn" => { builder = builder.set_kms_key_arn( smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "EncryptionStatus" => { builder = builder.set_encryption_status( smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? 
.map(|s| { s.to_unescaped().map(|u| { crate::model::EncryptionStatus::from(u.as_ref()) }) }) .transpose()?, ); } "InaccessibleKmsKeyDateTime" => { builder = builder.set_inaccessible_kms_key_date_time( smithy_json::deserialize::token::expect_timestamp_or_null( tokens.next(), smithy_types::instant::Format::EpochSeconds, )?, ); } _ => smithy_json::deserialize::token::skip_value(tokens)?, } } _ => { return Err(smithy_json::deserialize::Error::custom( "expected object key or end object", )) } } } Ok(Some(builder.build())) } _ => Err(smithy_json::deserialize::Error::custom( "expected start object or null", )), } } pub fn deser_structure_crate_model_value_holder<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::ValueHolder>, smithy_json::deserialize::Error> where I: Iterator< Item = Result<smithy_json::deserialize::Token<'a>, smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::ValueHolder::builder(); loop { match tokens.next().transpose()? { Some(smithy_json::deserialize::Token::EndObject { .. }) => break, Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "IonText" => { builder = builder.set_ion_text( smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } _ => smithy_json::deserialize::token::skip_value(tokens)?, } } _ => { return Err(smithy_json::deserialize::Error::custom( "expected object key or end object", )) } } } Ok(Some(builder.build())) } _ => Err(smithy_json::deserialize::Error::custom( "expected start object or null", )), } } #[allow(clippy::type_complexity, non_snake_case)] pub fn deser_list_com_amazonaws_qldb_journal_kinesis_stream_description_list<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result< Option<std::vec::Vec<crate::model::JournalKinesisStreamDescription>>, smithy_json::deserialize::Error, > where I: Iterator< Item = Result<smithy_json::deserialize::Token<'a>, smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(smithy_json::deserialize::Token::StartArray { .. }) => { let mut items = Vec::new(); loop { match tokens.peek() { Some(Ok(smithy_json::deserialize::Token::EndArray { .. })) => { tokens.next().transpose().unwrap(); break; } _ => { let value = crate::json_deser::deser_structure_crate_model_journal_kinesis_stream_description(tokens)? ; if let Some(value) = value { items.push(value); } } } } Ok(Some(items)) } _ => Err(smithy_json::deserialize::Error::custom( "expected start array or null", )), } } #[allow(clippy::type_complexity, non_snake_case)] pub fn deser_list_com_amazonaws_qldb_journal_s3_export_list<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result< Option<std::vec::Vec<crate::model::JournalS3ExportDescription>>, smithy_json::deserialize::Error, > where I: Iterator< Item = Result<smithy_json::deserialize::Token<'a>, smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(smithy_json::deserialize::Token::StartArray { .. }) => { let mut items = Vec::new(); loop { match tokens.peek() { Some(Ok(smithy_json::deserialize::Token::EndArray { .. 
})) => { tokens.next().transpose().unwrap(); break; } _ => { let value = crate::json_deser::deser_structure_crate_model_journal_s3_export_description(tokens)? ; if let Some(value) = value { items.push(value); } } } } Ok(Some(items)) } _ => Err(smithy_json::deserialize::Error::custom( "expected start array or null", )), } } #[allow(clippy::type_complexity, non_snake_case)] pub fn deser_list_com_amazonaws_qldb_ledger_list<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<std::vec::Vec<crate::model::LedgerSummary>>, smithy_json::deserialize::Error> where I: Iterator< Item = Result<smithy_json::deserialize::Token<'a>, smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(smithy_json::deserialize::Token::StartArray { .. }) => { let mut items = Vec::new(); loop { match tokens.peek() { Some(Ok(smithy_json::deserialize::Token::EndArray { .. })) => { tokens.next().transpose().unwrap(); break; } _ => { let value = crate::json_deser::deser_structure_crate_model_ledger_summary(tokens)?; if let Some(value) = value { items.push(value); } } } } Ok(Some(items)) } _ => Err(smithy_json::deserialize::Error::custom( "expected start array or null", )), } } #[allow(clippy::type_complexity, non_snake_case)] pub fn deser_map_com_amazonaws_qldb_tags<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result< Option< std::collections::HashMap<std::string::String, std::option::Option<std::string::String>>, >, smithy_json::deserialize::Error, > where I: Iterator< Item = Result<smithy_json::deserialize::Token<'a>, smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(smithy_json::deserialize::Token::StartObject { .. }) => { let mut map = std::collections::HashMap::new(); loop { match tokens.next().transpose()? { Some(smithy_json::deserialize::Token::EndObject { .. }) => break, Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => { let key = key.to_unescaped().map(|u| u.into_owned())?; let value = smithy_json::deserialize::token::expect_string_or_null(tokens.next())? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?; map.insert(key, value); } _ => { return Err(smithy_json::deserialize::Error::custom( "expected object key or end object", )) } } } Ok(Some(map)) } _ => Err(smithy_json::deserialize::Error::custom( "expected start object or null", )), } } pub fn deser_structure_crate_model_kinesis_configuration<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::KinesisConfiguration>, smithy_json::deserialize::Error> where I: Iterator< Item = Result<smithy_json::deserialize::Token<'a>, smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::KinesisConfiguration::builder(); loop { match tokens.next().transpose()? { Some(smithy_json::deserialize::Token::EndObject { .. }) => break, Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "StreamArn" => { builder = builder.set_stream_arn( smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? 
.map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "AggregationEnabled" => { builder = builder.set_aggregation_enabled( smithy_json::deserialize::token::expect_bool_or_null( tokens.next(), )?, ); } _ => smithy_json::deserialize::token::skip_value(tokens)?, } } _ => { return Err(smithy_json::deserialize::Error::custom( "expected object key or end object", )) } } } Ok(Some(builder.build())) } _ => Err(smithy_json::deserialize::Error::custom( "expected start object or null", )), } } pub fn deser_structure_crate_model_s3_export_configuration<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::S3ExportConfiguration>, smithy_json::deserialize::Error> where I: Iterator< Item = Result<smithy_json::deserialize::Token<'a>, smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::S3ExportConfiguration::builder(); loop { match tokens.next().transpose()? { Some(smithy_json::deserialize::Token::EndObject { .. }) => break, Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "Bucket" => { builder = builder.set_bucket( smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "Prefix" => { builder = builder.set_prefix( smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "EncryptionConfiguration" => { builder = builder.set_encryption_configuration( crate::json_deser::deser_structure_crate_model_s3_encryption_configuration(tokens)? ); } _ => smithy_json::deserialize::token::skip_value(tokens)?, } } _ => { return Err(smithy_json::deserialize::Error::custom( "expected object key or end object", )) } } } Ok(Some(builder.build())) } _ => Err(smithy_json::deserialize::Error::custom( "expected start object or null", )), } } pub fn deser_structure_crate_model_ledger_summary<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::LedgerSummary>, smithy_json::deserialize::Error> where I: Iterator< Item = Result<smithy_json::deserialize::Token<'a>, smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::LedgerSummary::builder(); loop { match tokens.next().transpose()? { Some(smithy_json::deserialize::Token::EndObject { .. }) => break, Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "Name" => { builder = builder.set_name( smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } "State" => { builder = builder.set_state( smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? 
.map(|s| { s.to_unescaped() .map(|u| crate::model::LedgerState::from(u.as_ref())) }) .transpose()?, ); } "CreationDateTime" => { builder = builder.set_creation_date_time( smithy_json::deserialize::token::expect_timestamp_or_null( tokens.next(), smithy_types::instant::Format::EpochSeconds, )?, ); } _ => smithy_json::deserialize::token::skip_value(tokens)?, } } _ => { return Err(smithy_json::deserialize::Error::custom( "expected object key or end object", )) } } } Ok(Some(builder.build())) } _ => Err(smithy_json::deserialize::Error::custom( "expected start object or null", )), } } pub fn deser_structure_crate_model_s3_encryption_configuration<'a, I>( tokens: &mut std::iter::Peekable<I>, ) -> Result<Option<crate::model::S3EncryptionConfiguration>, smithy_json::deserialize::Error> where I: Iterator< Item = Result<smithy_json::deserialize::Token<'a>, smithy_json::deserialize::Error>, >, { match tokens.next().transpose()? { Some(smithy_json::deserialize::Token::ValueNull { .. }) => Ok(None), Some(smithy_json::deserialize::Token::StartObject { .. }) => { #[allow(unused_mut)] let mut builder = crate::model::S3EncryptionConfiguration::builder(); loop { match tokens.next().transpose()? { Some(smithy_json::deserialize::Token::EndObject { .. }) => break, Some(smithy_json::deserialize::Token::ObjectKey { key, .. }) => { match key.to_unescaped()?.as_ref() { "ObjectEncryptionType" => { builder = builder.set_object_encryption_type( smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| { s.to_unescaped().map(|u| { crate::model::S3ObjectEncryptionType::from(u.as_ref()) }) }) .transpose()?, ); } "KmsKeyArn" => { builder = builder.set_kms_key_arn( smithy_json::deserialize::token::expect_string_or_null( tokens.next(), )? .map(|s| s.to_unescaped().map(|u| u.into_owned())) .transpose()?, ); } _ => smithy_json::deserialize::token::skip_value(tokens)?, } } _ => { return Err(smithy_json::deserialize::Error::custom( "expected object key or end object", )) } } } Ok(Some(builder.build())) } _ => Err(smithy_json::deserialize::Error::custom( "expected start object or null", )), } }
44.260824
125
0.456793
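Every generated deserializer above follows one shape: pull from a peekable token stream, expect a start object, dispatch on each object key into a builder, skip unknown keys, and reject trailing tokens. Below is a dependency-free sketch of that pattern; `Token`, `MessageBuilder`, and `deser_message` are illustrative stand-ins, not smithy-rs types.

#[derive(Debug)]
enum Token<'a> {
    StartObject,
    EndObject,
    Key(&'a str),
    Str(&'a str),
}

#[derive(Debug, Default)]
struct MessageBuilder {
    message: Option<String>,
}

fn deser_message<'a, I>(tokens: &mut std::iter::Peekable<I>) -> Result<MessageBuilder, String>
where
    I: Iterator<Item = Token<'a>>,
{
    let mut builder = MessageBuilder::default();
    match tokens.next() {
        Some(Token::StartObject) => {}
        other => return Err(format!("expected start object, got {:?}", other)),
    }
    loop {
        match tokens.next() {
            Some(Token::EndObject) => break,
            // Known key: consume its value into the builder.
            Some(Token::Key("Message")) => {
                if let Some(Token::Str(s)) = tokens.next() {
                    builder.message = Some(s.to_owned());
                }
            }
            // Unknown key: skip its value, mirroring token::skip_value above.
            Some(Token::Key(_)) => {
                tokens.next();
            }
            other => {
                return Err(format!("expected object key or end object, got {:?}", other))
            }
        }
    }
    if tokens.next().is_some() {
        return Err("found more tokens after completing parsing".into());
    }
    Ok(builder)
}

Called as, e.g., `deser_message(&mut vec![Token::StartObject, Token::Key("Message"), Token::Str("hi"), Token::EndObject].into_iter().peekable())`.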
290bd3c2e63e9ad7856fe49b0b8f265a3b4477a9
1,941
// Copyright 2018 Stefan Kroboth // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or // http://opensource.org/licenses/MIT>, at your option. This file may not be // copied, modified, or distributed except according to those terms. //! Node use std::f64; use std::sync::Arc; use std::sync::RwLock; use Extent; use NodeRef; #[derive(Debug)] pub struct Node { id: u64, pub pos: Vec<f64>, val: f64, pub children: Vec<NodeRef>, } impl PartialEq for Node { fn eq(&self, other: &Node) -> bool { self.id == other.id } } impl Node { pub fn new(pos: Vec<f64>, id: u64) -> Node { Node { id, pos, val: 0.0, children: vec![], } } pub fn id(&self) -> u64 { self.id } /// *giggles* pub fn push_child(&mut self, node: &NodeRef) -> &mut Self { self.children.push(node.clone()); self } pub fn val(&self) -> f64 { self.val } pub fn as_ref(self) -> NodeRef { Arc::new(RwLock::new(self)) } pub fn inside(&self, pos: Vec<f64>, extent: Extent) -> bool { pos.iter() .zip(self.pos.iter()) .zip(extent.iter()) .map(|((&xn, xk), &l)| (xn >= xk - l) && (xn <= xk + l)) .filter(|x| !x) .count() == 0 } pub fn add(&mut self, pos: Vec<f64>, ext: Extent) -> &mut Self { // println!("{:?} x {:?} x {:?}", self.pos, pos, ext); self.val += self .pos .iter() .zip(pos.iter()) .map(|(a, b)| (a - b).abs()) .zip(ext.iter()) .map(|(d, e)| 1.0 - d / e) .fold(f64::INFINITY, |a, b| a.min(b)); self } pub fn clear(&mut self) -> &mut Self { self.val = 0.0; self } }
22.569767
77
0.48171
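A brief usage sketch of the `Node` type above. It assumes, consistent with `as_ref`, that the crate's `NodeRef` alias is `Arc<RwLock<Node>>`, and that `Extent` is a per-dimension vector of half-widths; both are defined elsewhere in the crate, so this is illustrative only:

fn node_example() {
    let root = Node::new(vec![0.0, 0.0], 0).as_ref();
    let leaf = Node::new(vec![1.0, 1.0], 1).as_ref();
    root.write().unwrap().push_child(&leaf);

    // `inside` requires |x_n - x_k| <= l in every dimension.
    assert!(root.read().unwrap().inside(vec![0.5, -0.5], vec![1.0, 1.0]));

    // `add` accumulates min_i(1 - |d_i| / e_i), a linear falloff capped by
    // the worst dimension: here min(1 - 0.5/1.0, 1 - 0.0/1.0) = 0.5.
    root.write().unwrap().add(vec![0.5, 0.0], vec![1.0, 1.0]);
    assert!((root.read().unwrap().val() - 0.5).abs() < 1e-12);
}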
8f85f248faaac67a5c24eb1dc8dfc99bf4dc7abe
6,873
//! Checks miscellaneous properties of `Timer`. use r3::{ hunk::Hunk, kernel::{self, prelude::*, traits, Cfg, StaticTask, StaticTimer, TimerRef}, time::{Duration, Time}, }; use wyhash::WyHash; use super::Driver; use crate::utils::{ conditional::{KernelBoostPriorityExt, KernelTimeExt}, SeqTracker, }; pub trait SupportedSystem: traits::KernelBase + traits::KernelTimer + traits::KernelStatic + KernelBoostPriorityExt + KernelTimeExt { } impl< T: traits::KernelBase + traits::KernelTimer + traits::KernelStatic + KernelBoostPriorityExt + KernelTimeExt, > SupportedSystem for T { } pub struct App<System: SupportedSystem> { timer1: StaticTimer<System>, timer2: StaticTimer<System>, timer3: StaticTimer<System>, timer4: StaticTimer<System>, task: StaticTask<System>, seq: Hunk<System, SeqTracker>, } impl<System: SupportedSystem> App<System> { pub const fn new<C, D: Driver<Self, System = System>>(b: &mut Cfg<C>) -> Self where C: ~const traits::CfgBase<System = System> + ~const traits::CfgTask + ~const traits::CfgTimer, { let timer1 = StaticTimer::define() .active(true) .delay(Duration::from_millis(200)) .start((42, timer1_body::<System, D>)) .finish(b); let timer2 = StaticTimer::define() .active(true) .delay(Duration::from_millis(100)) .start((52, timer2_body::<System, D>)) .finish(b); let timer3 = StaticTimer::define() .period(Duration::from_millis(0)) .start(unreachable_timer_body::<System, D>) .finish(b); let timer4 = StaticTimer::define() .delay(Duration::from_millis(0)) .period(Duration::from_millis(0)) .start(unreachable_timer_body::<System, D>) .finish(b); let task = StaticTask::define() .active(true) .start(task_body::<System, D>) .priority(1) .finish(b); let seq = Hunk::<_, SeqTracker>::define().finish(b); App { timer1, timer2, timer3, timer4, task, seq, } } } fn task_body<System: SupportedSystem, D: Driver<App<System>>>() { let App { seq, timer2, timer3, timer4, .. } = D::app(); // Start `timer3`. `timer3` is now in the Active state, but it will never // fire because its delay is `None` (infinity). timer3.start().unwrap(); // The same goes for `timer4`. timer4.set_delay(None).unwrap(); timer4.start().unwrap(); // `timer2` is already active, so this is a no-op timer2.start().unwrap(); // `timer2` wake-up time System::park().unwrap(); seq.expect_and_replace(1, 2); if let Some(cap) = System::TIME_CAPABILITY { let now = Time::from_millis(100); let now_got = System::time(cap).unwrap(); log::trace!("time = {:?} (expected {:?})", now_got, now); assert!(now_got.as_micros() >= now.as_micros()); assert!(now_got.as_micros() <= now.as_micros() + 100_000); } // `timer1` wake-up time System::park().unwrap(); seq.expect_and_replace(3, 4); if let Some(cap) = System::TIME_CAPABILITY { let now = Time::from_millis(200); let now_got = System::time(cap).unwrap(); log::trace!("time = {:?} (expected {:?})", now_got, now); assert!(now_got.as_micros() >= now.as_micros()); assert!(now_got.as_micros() <= now.as_micros() + 100_000); } D::success(); } fn timer1_body<System: SupportedSystem, D: Driver<App<System>, System = System>>(param: usize) { let App { timer1, timer2, task, seq, ..
} = D::app(); assert_eq!(param, 42); // Context query assert!(!System::is_task_context()); assert!(System::is_interrupt_context()); assert!(System::is_boot_complete()); // Check `timer1`'s expiration time in `task` // (`System::time` is disallowed in a non-task context) seq.expect_and_replace(2, 3); task.unpark().unwrap(); // `PartialEq` assert_ne!(timer1, timer2); assert_eq!(timer1, timer1); assert_eq!(timer2, timer2); // `Hash` let hash = |x: TimerRef<'_, System>| { use core::hash::{Hash, Hasher}; let mut hasher = WyHash::with_seed(42); x.hash(&mut hasher); hasher.finish() }; assert_eq!(hash(*timer1), hash(*timer1)); assert_eq!(hash(*timer2), hash(*timer2)); // Disallowed in a non-task context if let Some(cap) = System::BOOST_PRIORITY_CAPABILITY { assert_eq!( System::boost_priority(cap), Err(kernel::BoostPriorityError::BadContext), ); } assert_eq!( unsafe { System::exit_task() }, Err(kernel::ExitTaskError::BadContext), ); assert_eq!(System::park(), Err(kernel::ParkError::BadContext)); // Invalid ID if let Some(bad_id) = D::bad_raw_timer_id() { let bad_timer: TimerRef<'_, System> = unsafe { TimerRef::from_id(bad_id) }; assert_eq!( bad_timer.start(), Err(r3::kernel::StartTimerError::NoAccess) ); } // Disallowed with CPU Lock active System::acquire_cpu_lock().unwrap(); assert_eq!(timer1.start(), Err(r3::kernel::StartTimerError::BadContext)); assert_eq!(timer1.stop(), Err(r3::kernel::StopTimerError::BadContext)); assert_eq!( timer1.set_delay(None), Err(r3::kernel::SetTimerDelayError::BadContext) ); assert_eq!( timer1.set_period(None), Err(r3::kernel::SetTimerPeriodError::BadContext) ); unsafe { System::release_cpu_lock().unwrap() }; // Negative duration assert_eq!( timer1.set_delay(Some(Duration::from_micros(-1))), Err(r3::kernel::SetTimerDelayError::BadParam) ); assert_eq!( timer1.set_delay(Some(Duration::MIN)), Err(r3::kernel::SetTimerDelayError::BadParam) ); assert_eq!( timer1.set_period(Some(Duration::from_micros(-1))), Err(r3::kernel::SetTimerPeriodError::BadParam) ); assert_eq!( timer1.set_period(Some(Duration::MIN)), Err(r3::kernel::SetTimerPeriodError::BadParam) ); } fn timer2_body<System: SupportedSystem, D: Driver<App<System>>>(param: usize) { let App { task, seq, .. } = D::app(); assert_eq!(param, 52); // Check `timer2`'s expiration time in `task` // (`System::time` is disallowed in a non-task context) seq.expect_and_replace(0, 1); task.unpark().unwrap(); } fn unreachable_timer_body<System: SupportedSystem, D: Driver<App<System>>>() { unreachable!() }
28.283951
96
0.582569
237faa2689699b0bd73f911461d69a7fdc9d4c23
2,606
//! @ Each entry in |eqtb| is a |memory_word|. Most of these words are of type //! |two_halves|, and subdivided into three fields: //! //! \yskip\hangg 1) The |eq_level| (a quarterword) is the level of grouping at //! which this equivalent was defined. If the level is |level_zero|, the //! equivalent has never been defined; |level_one| refers to the outer level //! (outside of all groups), and this level is also used for global //! definitions that never go away. Higher levels are for equivalents that //! will disappear at the end of their group. @^global definitions@> //! //! \yskip\hangg 2) The |eq_type| (another quarterword) specifies what kind of //! entry this is. There are many types, since each \TeX\ primitive like //! \.{\\hbox}, \.{\\def}, etc., has its own special code. The list of //! command codes above includes all possible settings of the |eq_type| field. //! //! \yskip\hangg 3) The |equiv| (a halfword) is the current equivalent value. //! This may be a font number, a pointer into |mem|, or a variety of other //! things. // // @d eq_level_field(#)==#.hh.b1 pub(crate) macro eq_level_field($val:expr) { $val[crate::section_0113::MEMORY_WORD_HH_B1] } // @d eq_type_field(#)==#.hh.b0 pub(crate) macro eq_type_field($val:expr) { $val[crate::section_0113::MEMORY_WORD_HH_B0] } // @d equiv_field(#)==#.hh.rh pub(crate) macro equiv_field($val:expr) { $val[crate::section_0113::MEMORY_WORD_HH_RH] } // @d eq_level(#)==eq_level_field(eqtb[#]) {level of definition} /// level of definition #[allow(unused_macros)] pub(crate) macro eq_level($globals:expr, $val:expr) { crate::section_0221::eq_level_field!($globals.eqtb[$val as crate::section_0115::pointer]) } // @d eq_type(#)==eq_type_field(eqtb[#]) {command code for equivalent} /// command code for equivalent #[allow(unused_macros)] pub(crate) macro eq_type($globals:expr, $val:expr) { crate::section_0221::eq_type_field!($globals.eqtb[$val as crate::section_0115::pointer]) } // @d equiv(#)==equiv_field(eqtb[#]) {equivalent value} /// equivalent value pub(crate) macro equiv($globals:expr, $val:expr) { crate::section_0221::equiv_field!($globals.eqtb[$val as crate::section_0115::pointer]) } // @d level_zero=min_quarterword {level for undefined quantities} /// level for undefined quantities pub(crate) const level_zero: quarterword = min_quarterword; // @d level_one=level_zero+1 {outermost level for defined quantities} /// outermost level for defined quantities pub(crate) const level_one: quarterword = level_zero + 1; use crate::section_0110::min_quarterword; use crate::section_0113::quarterword;
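// --- Added example (not part of the WEB section above) ---
// A hypothetical, self-contained illustration of the three-field subdivision
// described above: each `eqtb` entry packs `eq_level` and `eq_type`
// (quarterwords) plus `equiv` (a halfword) into one two-halves memory word.
// The field names `b0`/`b1`/`rh` mirror the macros above; the concrete types
// below are stand-ins, not the crate's real definitions.
type Quarterword = u8; // stand-in for crate::section_0113::quarterword
type Halfword = u16; // stand-in for the `rh` half of a two_halves word

#[derive(Clone, Copy, Default)]
struct TwoHalves {
    b0: Quarterword, // eq_type_field: command code for the equivalent
    b1: Quarterword, // eq_level_field: grouping level of the definition
    rh: Halfword,    // equiv_field: the current equivalent value
}

const LEVEL_ZERO: Quarterword = 0; // level for undefined quantities
const LEVEL_ONE: Quarterword = LEVEL_ZERO + 1; // outermost defined level

fn main() {
    let mut eqtb = [TwoHalves::default(); 8];
    // A global definition at level_one never goes away.
    eqtb[3] = TwoHalves { b0: 42, b1: LEVEL_ONE, rh: 7 };
    assert_eq!(eqtb[3].b1, LEVEL_ONE); // eq_level(3)
    assert_eq!(eqtb[3].b0, 42); // eq_type(3)
    assert_eq!(eqtb[3].rh, 7); // equiv(3)
    assert_eq!(eqtb[0].b1, LEVEL_ZERO); // entry 0 was never defined
}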
44.931034
93
0.719877
61d534c2bff9d0f8bcc2e1a9301d642884c6d2df
5,760
use crate::access::{Aligned, MaybeUnaligned}; use crate::private::get_api; use crate::sys; use crate::VariantArray; use std::fmt; /// A reference-counted vector of bytes that uses Godot's pool allocator. pub struct ByteArray(pub(crate) sys::godot_pool_byte_array); pub type Read<'a> = Aligned<ReadGuard<'a>>; pub type Write<'a> = Aligned<WriteGuard<'a>>; impl ByteArray { /// Creates an empty array. pub fn new() -> Self { ByteArray::default() } /// Creates an array by trying to convert each variant. /// /// When no viable conversion exists, the default value `0` is pushed. pub fn from_variant_array(array: &VariantArray) -> Self { unsafe { let mut result = sys::godot_pool_byte_array::default(); (get_api().godot_pool_byte_array_new_with_array)(&mut result, &array.0); ByteArray(result) } } /// Appends a byte to the end of the array. pub fn push(&mut self, byte: u8) { unsafe { (get_api().godot_pool_byte_array_append)(&mut self.0, byte); } } /// Appends each byte to the end of the array. pub fn push_array(&mut self, bytes: &ByteArray) { unsafe { (get_api().godot_pool_byte_array_append_array)(&mut self.0, &bytes.0); } } // TODO(error handling) /// Inserts a byte at the given offset. /// /// Returns `true` if the insertion failed (the underlying call returned a status other than `GODOT_OK`). pub fn insert(&mut self, offset: i32, byte: u8) -> bool { unsafe { let status = (get_api().godot_pool_byte_array_insert)(&mut self.0, offset, byte); status != sys::godot_error_GODOT_OK } } /// Inverts the order of the elements in the array. pub fn invert(&mut self) { unsafe { (get_api().godot_pool_byte_array_invert)(&mut self.0) } } /// Removes an element at the given offset. pub fn remove(&mut self, idx: i32) { unsafe { (get_api().godot_pool_byte_array_remove)(&mut self.0, idx); } } /// Changes the size of the array, possibly removing elements or pushing default values. pub fn resize(&mut self, size: i32) { unsafe { (get_api().godot_pool_byte_array_resize)(&mut self.0, size); } } /// Returns a copy of the byte at the given offset. pub fn get(&self, idx: i32) -> u8 { unsafe { (get_api().godot_pool_byte_array_get)(&self.0, idx) } } /// Sets the value of the byte at the given offset. pub fn set(&mut self, idx: i32, byte: u8) { unsafe { (get_api().godot_pool_byte_array_set)(&mut self.0, idx, byte); } } /// Returns the number of elements in the array. pub fn len(&self) -> i32 { unsafe { (get_api().godot_pool_byte_array_size)(&self.0) } } /// Returns a scoped, aligned read-only view into this array. pub fn read<'a>(&'a self) -> Read<'a> { unsafe { MaybeUnaligned::new(ReadGuard::new(self.sys())) .try_into_aligned() .expect("Pool array access should be aligned. This indicates a bug in Godot") } } /// Returns a scoped, aligned read-write view into this array. pub fn write<'a>(&'a mut self) -> Write<'a> { unsafe { MaybeUnaligned::new(WriteGuard::new(self.sys() as *mut _)) .try_into_aligned() .expect("Pool array access should be aligned. This indicates a bug in Godot") } } #[doc(hidden)] pub fn sys(&self) -> *const sys::godot_pool_byte_array { &self.0 } #[doc(hidden)] pub fn from_sys(sys: sys::godot_pool_byte_array) -> Self { ByteArray(sys) } impl_common_methods! { pub fn new_ref(& self) -> ByteArray : godot_pool_byte_array_new_copy; } } impl fmt::Debug for ByteArray { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self.read().iter()).finish() } } impl_basic_traits!( for ByteArray as godot_pool_byte_array { Drop => godot_pool_byte_array_destroy; Default => godot_pool_byte_array_new; } ); define_access_guard!
{ pub struct ReadGuard<'a> : sys::godot_pool_byte_array_read_access { access = godot_pool_byte_array_read(*const sys::godot_pool_byte_array), len = godot_pool_byte_array_size, } Guard<Target=u8> => godot_pool_byte_array_read_access_ptr -> *const u8; Drop => godot_pool_byte_array_read_access_destroy; Clone => godot_pool_byte_array_read_access_copy; } define_access_guard! { pub struct WriteGuard<'a> : sys::godot_pool_byte_array_write_access { access = godot_pool_byte_array_write(*mut sys::godot_pool_byte_array), len = godot_pool_byte_array_size, } Guard<Target=u8> + WritePtr => godot_pool_byte_array_write_access_ptr -> *mut u8; Drop => godot_pool_byte_array_write_access_destroy; } godot_test!( test_byte_array_access { let mut arr = ByteArray::new(); for i in 0..8 { arr.push(i); } let original_read = { let read = arr.read(); assert_eq!(&[0, 1, 2, 3, 4, 5, 6, 7], read.as_slice()); read.clone() }; let mut cow_arr = arr.new_ref(); { let mut write = cow_arr.write(); assert_eq!(8, write.len()); for i in write.as_mut_slice() { *i *= 2; } } for i in 0..8 { assert_eq!(i * 2, cow_arr.get(i as i32)); } // the write shouldn't have affected the original array assert_eq!(&[0, 1, 2, 3, 4, 5, 6, 7], original_read.as_slice()); } ); godot_test!( test_byte_array_debug { let mut arr = ByteArray::new(); for i in 0..8 { arr.push(i); } assert_eq!(format!("{:?}", arr), "[0, 1, 2, 3, 4, 5, 6, 7]"); } );
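// --- Added example (not part of the original bindings above) ---
// A hypothetical, Godot-free sketch of the copy-on-write behaviour that the
// `test_byte_array_access` test above relies on: `new_ref` hands out a cheap
// reference to the shared pool buffer, and write access detaches it so other
// references keep seeing the old contents. Godot implements this in C++;
// `Rc::make_mut` here is only a stand-in for illustration.
use std::rc::Rc;

#[derive(Clone)]
struct PoolByteArraySketch(Rc<Vec<u8>>);

impl PoolByteArraySketch {
    fn new_ref(&self) -> Self {
        Self(Rc::clone(&self.0)) // shares the same backing buffer
    }
    fn read(&self) -> &[u8] {
        &self.0
    }
    fn write(&mut self) -> &mut Vec<u8> {
        Rc::make_mut(&mut self.0) // clones the buffer iff it is shared
    }
}

fn main() {
    let arr = PoolByteArraySketch(Rc::new((0u8..8).collect()));
    let mut cow = arr.new_ref();
    for b in cow.write().iter_mut() {
        *b *= 2;
    }
    // The write detached `cow`; the original buffer is unaffected.
    assert_eq!(arr.read(), &[0, 1, 2, 3, 4, 5, 6, 7]);
    assert_eq!(cow.read(), &[0, 2, 4, 6, 8, 10, 12, 14]);
}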
29.538462
93
0.588368
9b42a0f6a0a396fdaf7646408b90b3dc4794e850
149,618
#![allow(unused_parens)] //! # Machine Learning //! //! The Machine Learning Library (MLL) is a set of classes and functions for statistical //! classification, regression, and clustering of data. //! //! Most of the classification and regression algorithms are implemented as C++ classes. As the //! algorithms have different sets of features (like an ability to handle missing measurements or //! categorical input variables), there is a little common ground between the classes. This common //! ground is defined by the class cv::ml::StatModel that all the other ML classes are derived from. //! //! See detailed overview here: @ref ml_intro. use crate::{mod_prelude::*, core, sys, types}; pub mod prelude { pub use { super::ParamGridTrait, super::TrainData, super::StatModel, super::NormalBayesClassifier, super::KNearest, super::SVM_Kernel, super::SVM, super::EM, super::DTrees_NodeTrait, super::DTrees_SplitTrait, super::DTrees, super::RTrees, super::Boost, super::ANN_MLP, super::LogisticRegression, super::SVMSGD }; } /// each training sample occupies a column of samples pub const COL_SAMPLE: i32 = 1; pub const EM_DEFAULT_MAX_ITERS: i32 = 100; pub const EM_DEFAULT_NCLUSTERS: i32 = 5; pub const EM_START_AUTO_STEP: i32 = 0; pub const EM_START_E_STEP: i32 = 1; pub const EM_START_M_STEP: i32 = 2; /// each training sample is a row of samples pub const ROW_SAMPLE: i32 = 0; pub const TEST_ERROR: i32 = 0; pub const TRAIN_ERROR: i32 = 1; /// categorical variables pub const VAR_CATEGORICAL: i32 = 1; /// same as VAR_ORDERED pub const VAR_NUMERICAL: i32 = 0; /// ordered variables pub const VAR_ORDERED: i32 = 0; /// possible activation functions #[repr(C)] #[derive(Copy, Clone, Debug, PartialEq)] pub enum ANN_MLP_ActivationFunctions { /// Identity function: ![inline formula](https://latex.codecogs.com/png.latex?f%28x%29%3Dx) IDENTITY = 0, /// Symmetrical sigmoid: ![inline formula](https://latex.codecogs.com/png.latex?f%28x%29%3D%5Cbeta%2A%281%2De%5E%7B%2D%5Calpha%20x%7D%29%2F%281%2Be%5E%7B%2D%5Calpha%20x%7D%29) /// /// Note: /// If you are using the default sigmoid activation function with the default parameter values /// fparam1=0 and fparam2=0 then the function used is y = 1.7159\*tanh(2/3 \* x), so the output /// will range from [-1.7159, 1.7159], instead of [0,1]. SIGMOID_SYM = 1, /// Gaussian function: ![inline formula](https://latex.codecogs.com/png.latex?f%28x%29%3D%5Cbeta%20e%5E%7B%2D%5Calpha%20x%2Ax%7D) GAUSSIAN = 2, /// ReLU function: ![inline formula](https://latex.codecogs.com/png.latex?f%28x%29%3Dmax%280%2Cx%29) RELU = 3, /// Leaky ReLU function: for x>0 ![inline formula](https://latex.codecogs.com/png.latex?f%28x%29%3Dx%20) and x<=0 ![inline formula](https://latex.codecogs.com/png.latex?f%28x%29%3D%5Calpha%20x%20) LEAKYRELU = 4, } opencv_type_enum! { crate::ml::ANN_MLP_ActivationFunctions } /// Train options #[repr(C)] #[derive(Copy, Clone, Debug, PartialEq)] pub enum ANN_MLP_TrainFlags { /// Update the network weights, rather than compute them from scratch. In the latter case /// the weights are initialized using the Nguyen-Widrow algorithm. UPDATE_WEIGHTS = 1, /// Do not normalize the input vectors. If this flag is not set, the training algorithm /// normalizes each input feature independently, shifting its mean value to 0 and making the /// standard deviation equal to 1. If the network is assumed to be updated frequently, the new /// training data could be much different from original one. In this case, you should take care /// of proper normalization. 
NO_INPUT_SCALE = 2, /// Do not normalize the output vectors. If the flag is not set, the training algorithm /// normalizes each output feature independently, by transforming it to the certain range /// depending on the used activation function. NO_OUTPUT_SCALE = 4, } opencv_type_enum! { crate::ml::ANN_MLP_TrainFlags } /// Available training methods #[repr(C)] #[derive(Copy, Clone, Debug, PartialEq)] pub enum ANN_MLP_TrainingMethods { /// The back-propagation algorithm. BACKPROP = 0, /// The RPROP algorithm. See [RPROP93](https://docs.opencv.org/4.3.0/d0/de3/citelist.html#CITEREF_RPROP93) for details. RPROP = 1, /// The simulated annealing algorithm. See [Kirkpatrick83](https://docs.opencv.org/4.3.0/d0/de3/citelist.html#CITEREF_Kirkpatrick83) for details. ANNEAL = 2, } opencv_type_enum! { crate::ml::ANN_MLP_TrainingMethods } /// Boosting type. /// Gentle AdaBoost and Real AdaBoost are often the preferable choices. #[repr(C)] #[derive(Copy, Clone, Debug, PartialEq)] pub enum Boost_Types { /// Discrete AdaBoost. DISCRETE = 0, /// Real AdaBoost. It is a technique that utilizes confidence-rated predictions /// and works well with categorical data. REAL = 1, /// LogitBoost. It can produce good regression fits. LOGIT = 2, /// Gentle AdaBoost. It puts less weight on outlier data points and for that /// reason is often good with regression data. GENTLE = 3, } opencv_type_enum! { crate::ml::Boost_Types } /// Predict options #[repr(C)] #[derive(Copy, Clone, Debug, PartialEq)] pub enum DTrees_Flags { PREDICT_AUTO = 0, PREDICT_SUM = 256, PREDICT_MAX_VOTE = 512, PREDICT_MASK = 768, } opencv_type_enum! { crate::ml::DTrees_Flags } /// Type of covariation matrices #[repr(C)] #[derive(Copy, Clone, Debug, PartialEq)] pub enum EM_Types { /// A scaled identity matrix ![inline formula](https://latex.codecogs.com/png.latex?%5Cmu%5Fk%20%2A%20I). There is the only /// parameter ![inline formula](https://latex.codecogs.com/png.latex?%5Cmu%5Fk) to be estimated for each matrix. The option may be used in special cases, /// when the constraint is relevant, or as a first step in the optimization (for example in case /// when the data is preprocessed with PCA). The results of such preliminary estimation may be /// passed again to the optimization procedure, this time with /// covMatType=EM::COV_MAT_DIAGONAL. COV_MAT_SPHERICAL = 0, /// A diagonal matrix with positive diagonal elements. The number of /// free parameters is d for each matrix. This is most commonly used option yielding good /// estimation results. COV_MAT_DIAGONAL = 1, /// A symmetric positively defined matrix. The number of free /// parameters in each matrix is about ![inline formula](https://latex.codecogs.com/png.latex?d%5E2%2F2). It is not recommended to use this option, unless /// there is pretty accurate initial estimation of the parameters and/or a huge number of /// training samples. COV_MAT_GENERIC = 2, // A symmetric positively defined matrix. The number of free // parameters in each matrix is about ![inline formula](https://latex.codecogs.com/png.latex?d%5E2%2F2). It is not recommended to use this option, unless // there is pretty accurate initial estimation of the parameters and/or a huge number of // training samples. // COV_MAT_DEFAULT = 1 as isize, // duplicate discriminant } opencv_type_enum! { crate::ml::EM_Types } /// Error types #[repr(C)] #[derive(Copy, Clone, Debug, PartialEq)] pub enum ErrorTypes { TEST_ERROR = 0, TRAIN_ERROR = 1, } opencv_type_enum!
{ crate::ml::ErrorTypes } /// Implementations of KNearest algorithm #[repr(C)] #[derive(Copy, Clone, Debug, PartialEq)] pub enum KNearest_Types { BRUTE_FORCE = 1, KDTREE = 2, } opencv_type_enum! { crate::ml::KNearest_Types } /// Training methods #[repr(C)] #[derive(Copy, Clone, Debug, PartialEq)] pub enum LogisticRegression_Methods { BATCH = 0, /// Set MiniBatchSize to a positive integer when using this method. MINI_BATCH = 1, } opencv_type_enum! { crate::ml::LogisticRegression_Methods } /// Regularization kinds #[repr(C)] #[derive(Copy, Clone, Debug, PartialEq)] pub enum LogisticRegression_RegKinds { /// Regularization disabled REG_DISABLE = -1, /// L1 norm REG_L1 = 0, /// L2 norm REG_L2 = 1, } opencv_type_enum! { crate::ml::LogisticRegression_RegKinds } /// Margin type. #[repr(C)] #[derive(Copy, Clone, Debug, PartialEq)] pub enum SVMSGD_MarginType { /// General case, suits to the case of non-linearly separable sets, allows outliers. SOFT_MARGIN = 0, /// More accurate for the case of linearly separable sets. HARD_MARGIN = 1, } opencv_type_enum! { crate::ml::SVMSGD_MarginType } /// SVMSGD type. /// ASGD is often the preferable choice. #[repr(C)] #[derive(Copy, Clone, Debug, PartialEq)] pub enum SVMSGD_SvmsgdType { /// Stochastic Gradient Descent SGD = 0, /// Average Stochastic Gradient Descent ASGD = 1, } opencv_type_enum! { crate::ml::SVMSGD_SvmsgdType } /// SVM kernel type /// /// A comparison of different kernels on the following 2D test case with four classes. Four /// SVM::C_SVC SVMs have been trained (one against rest) with auto_train. Evaluation on three /// different kernels (SVM::CHI2, SVM::INTER, SVM::RBF). The color depicts the class with max score. /// Bright means max-score \> 0, dark means max-score \< 0. /// ![image](https://docs.opencv.org/4.3.0/SVM_Comparison.png) #[repr(C)] #[derive(Copy, Clone, Debug, PartialEq)] pub enum SVM_KernelTypes { /// Returned by SVM::getKernelType in case when custom kernel has been set CUSTOM = -1, /// Linear kernel. No mapping is done, linear discrimination (or regression) is /// done in the original feature space. It is the fastest option. ![inline formula](https://latex.codecogs.com/png.latex?K%28x%5Fi%2C%20x%5Fj%29%20%3D%20x%5Fi%5ET%20x%5Fj). LINEAR = 0, /// Polynomial kernel: /// ![inline formula](https://latex.codecogs.com/png.latex?K%28x%5Fi%2C%20x%5Fj%29%20%3D%20%28%5Cgamma%20x%5Fi%5ET%20x%5Fj%20%2B%20coef0%29%5E%7Bdegree%7D%2C%20%5Cgamma%20%3E%200). POLY = 1, /// Radial basis function (RBF), a good choice in most cases. /// ![inline formula](https://latex.codecogs.com/png.latex?K%28x%5Fi%2C%20x%5Fj%29%20%3D%20e%5E%7B%2D%5Cgamma%20%7C%7Cx%5Fi%20%2D%20x%5Fj%7C%7C%5E2%7D%2C%20%5Cgamma%20%3E%200). RBF = 2, /// Sigmoid kernel: ![inline formula](https://latex.codecogs.com/png.latex?K%28x%5Fi%2C%20x%5Fj%29%20%3D%20%5Ctanh%28%5Cgamma%20x%5Fi%5ET%20x%5Fj%20%2B%20coef0%29). SIGMOID = 3, /// Exponential Chi2 kernel, similar to the RBF kernel: /// ![inline formula](https://latex.codecogs.com/png.latex?K%28x%5Fi%2C%20x%5Fj%29%20%3D%20e%5E%7B%2D%5Cgamma%20%5Cchi%5E2%28x%5Fi%2Cx%5Fj%29%7D%2C%20%5Cchi%5E2%28x%5Fi%2Cx%5Fj%29%20%3D%20%28x%5Fi%2Dx%5Fj%29%5E2%2F%28x%5Fi%2Bx%5Fj%29%2C%20%5Cgamma%20%3E%200). CHI2 = 4, /// Histogram intersection kernel. A fast kernel. ![inline formula](https://latex.codecogs.com/png.latex?K%28x%5Fi%2C%20x%5Fj%29%20%3D%20min%28x%5Fi%2Cx%5Fj%29). INTER = 5, } opencv_type_enum!
{ crate::ml::SVM_KernelTypes } /// SVM params type #[repr(C)] #[derive(Copy, Clone, Debug, PartialEq)] pub enum SVM_ParamTypes { C = 0, GAMMA = 1, P = 2, NU = 3, COEF = 4, DEGREE = 5, } opencv_type_enum! { crate::ml::SVM_ParamTypes } /// SVM type #[repr(C)] #[derive(Copy, Clone, Debug, PartialEq)] pub enum SVM_Types { /// C-Support Vector Classification. n-class classification (n ![inline formula](https://latex.codecogs.com/png.latex?%5Cgeq) 2), allows /// imperfect separation of classes with penalty multiplier C for outliers. C_SVC = 100, /// ![inline formula](https://latex.codecogs.com/png.latex?%5Cnu)-Support Vector Classification. n-class classification with possible /// imperfect separation. Parameter ![inline formula](https://latex.codecogs.com/png.latex?%5Cnu) (in the range 0..1, the larger the value, the smoother /// the decision boundary) is used instead of C. NU_SVC = 101, /// Distribution Estimation (One-class SVM). All the training data are from /// the same class, SVM builds a boundary that separates the class from the rest of the feature /// space. ONE_CLASS = 102, /// ![inline formula](https://latex.codecogs.com/png.latex?%5Cepsilon)-Support Vector Regression. The distance between feature vectors /// from the training set and the fitting hyper-plane must be less than p. For outliers the /// penalty multiplier C is used. EPS_SVR = 103, /// ![inline formula](https://latex.codecogs.com/png.latex?%5Cnu)-Support Vector Regression. ![inline formula](https://latex.codecogs.com/png.latex?%5Cnu) is used instead of p. /// See [LibSVM](https://docs.opencv.org/4.3.0/d0/de3/citelist.html#CITEREF_LibSVM) for details. NU_SVR = 104, } opencv_type_enum! { crate::ml::SVM_Types } /// Sample types #[repr(C)] #[derive(Copy, Clone, Debug, PartialEq)] pub enum SampleTypes { /// each training sample is a row of samples ROW_SAMPLE = 0, /// each training sample occupies a column of samples COL_SAMPLE = 1, } opencv_type_enum! { crate::ml::SampleTypes } /// Predict options #[repr(C)] #[derive(Copy, Clone, Debug, PartialEq)] pub enum StatModel_Flags { UPDATE_MODEL = 1, // makes the method return the raw results (the sum), not the class label // RAW_OUTPUT = 1 as isize, // duplicate discriminant COMPRESSED_INPUT = 2, PREPROCESSED_INPUT = 4, } opencv_type_enum! { crate::ml::StatModel_Flags } /// Variable types #[repr(C)] #[derive(Copy, Clone, Debug, PartialEq)] pub enum VariableTypes { /// same as VAR_ORDERED VAR_NUMERICAL = 0, // ordered variables // VAR_ORDERED = 0 as isize, // duplicate discriminant /// categorical variables VAR_CATEGORICAL = 1, } opencv_type_enum!
{ crate::ml::VariableTypes } pub type ANN_MLP_ANNEAL = dyn crate::ml::ANN_MLP; /// Creates test set pub fn create_concentric_spheres_test_set(nsamples: i32, nfeatures: i32, nclasses: i32, samples: &mut dyn core::ToOutputArray, responses: &mut dyn core::ToOutputArray) -> Result<()> { output_array_arg!(samples); output_array_arg!(responses); unsafe { sys::cv_ml_createConcentricSpheresTestSet_int_int_int_const__OutputArrayR_const__OutputArrayR(nsamples, nfeatures, nclasses, samples.as_raw__OutputArray(), responses.as_raw__OutputArray()) }.into_result() } /// Generates _sample_ from multivariate normal distribution /// /// ## Parameters /// * mean: an average row vector /// * cov: symmetric covariation matrix /// * nsamples: returned samples count /// * samples: returned samples array pub fn rand_mv_normal(mean: &dyn core::ToInputArray, cov: &dyn core::ToInputArray, nsamples: i32, samples: &mut dyn core::ToOutputArray) -> Result<()> { input_array_arg!(mean); input_array_arg!(cov); output_array_arg!(samples); unsafe { sys::cv_ml_randMVNormal_const__InputArrayR_const__InputArrayR_int_const__OutputArrayR(mean.as_raw__InputArray(), cov.as_raw__InputArray(), nsamples, samples.as_raw__OutputArray()) }.into_result() } /// Artificial Neural Networks - Multi-Layer Perceptrons. /// /// Unlike many other models in ML that are constructed and trained at once, in the MLP model these /// steps are separated. First, a network with the specified topology is created using the non-default /// constructor or the method ANN_MLP::create. All the weights are set to zeros. Then, the network is /// trained using a set of input and output vectors. The training procedure can be repeated more than /// once, that is, the weights can be adjusted based on the new training data. /// /// Additional flags for StatModel::train are available: ANN_MLP::TrainFlags. /// ## See also /// @ref ml_intro_ann pub trait ANN_MLP: crate::ml::StatModel { fn as_raw_ANN_MLP(&self) -> *const c_void; fn as_raw_mut_ANN_MLP(&mut self) -> *mut c_void; /// Sets training method and common parameters. /// ## Parameters /// * method: Default value is ANN_MLP::RPROP. See ANN_MLP::TrainingMethods. /// * param1: passed to setRpropDW0 for ANN_MLP::RPROP and to setBackpropWeightScale for ANN_MLP::BACKPROP and to initialT for ANN_MLP::ANNEAL. /// * param2: passed to setRpropDWMin for ANN_MLP::RPROP and to setBackpropMomentumScale for ANN_MLP::BACKPROP and to finalT for ANN_MLP::ANNEAL. /// /// ## C++ default parameters /// * param1: 0 /// * param2: 0 fn set_train_method(&mut self, method: i32, param1: f64, param2: f64) -> Result<()> { unsafe { sys::cv_ml_ANN_MLP_setTrainMethod_int_double_double(self.as_raw_mut_ANN_MLP(), method, param1, param2) }.into_result() } /// Returns current training method fn get_train_method(&self) -> Result<i32> { unsafe { sys::cv_ml_ANN_MLP_getTrainMethod_const(self.as_raw_ANN_MLP()) }.into_result() } /// Initialize the activation function for each neuron. /// Currently the default and the only fully supported activation function is ANN_MLP::SIGMOID_SYM. /// ## Parameters /// * type: The type of activation function. See ANN_MLP::ActivationFunctions. /// * param1: The first parameter of the activation function, ![inline formula](https://latex.codecogs.com/png.latex?%5Calpha). Default value is 0. /// * param2: The second parameter of the activation function, ![inline formula](https://latex.codecogs.com/png.latex?%5Cbeta). Default value is 0. 
/// /// ## C++ default parameters /// * param1: 0 /// * param2: 0 fn set_activation_function(&mut self, typ: i32, param1: f64, param2: f64) -> Result<()> { unsafe { sys::cv_ml_ANN_MLP_setActivationFunction_int_double_double(self.as_raw_mut_ANN_MLP(), typ, param1, param2) }.into_result() } /// Integer vector specifying the number of neurons in each layer including the input and output layers. /// The very first element specifies the number of elements in the input layer. /// The last element - number of elements in the output layer. Default value is empty Mat. /// ## See also /// getLayerSizes fn set_layer_sizes(&mut self, _layer_sizes: &dyn core::ToInputArray) -> Result<()> { input_array_arg!(_layer_sizes); unsafe { sys::cv_ml_ANN_MLP_setLayerSizes_const__InputArrayR(self.as_raw_mut_ANN_MLP(), _layer_sizes.as_raw__InputArray()) }.into_result() } /// Integer vector specifying the number of neurons in each layer including the input and output layers. /// The very first element specifies the number of elements in the input layer. /// The last element - number of elements in the output layer. /// ## See also /// setLayerSizes fn get_layer_sizes(&self) -> Result<core::Mat> { unsafe { sys::cv_ml_ANN_MLP_getLayerSizes_const(self.as_raw_ANN_MLP()) }.into_result().map(|r| unsafe { core::Mat::opencv_from_extern(r) } ) } /// Termination criteria of the training algorithm. /// You can specify the maximum number of iterations (maxCount) and/or how much the error could /// change between the iterations to make the algorithm continue (epsilon). Default value is /// TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 1000, 0.01). /// ## See also /// setTermCriteria fn get_term_criteria(&self) -> Result<core::TermCriteria> { unsafe { sys::cv_ml_ANN_MLP_getTermCriteria_const(self.as_raw_ANN_MLP()) }.into_result() } /// Termination criteria of the training algorithm. /// You can specify the maximum number of iterations (maxCount) and/or how much the error could /// change between the iterations to make the algorithm continue (epsilon). Default value is /// TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 1000, 0.01). /// ## See also /// setTermCriteria getTermCriteria fn set_term_criteria(&mut self, val: core::TermCriteria) -> Result<()> { unsafe { sys::cv_ml_ANN_MLP_setTermCriteria_TermCriteria(self.as_raw_mut_ANN_MLP(), val.opencv_as_extern()) }.into_result() } /// BPROP: Strength of the weight gradient term. /// The recommended value is about 0.1. Default value is 0.1. /// ## See also /// setBackpropWeightScale fn get_backprop_weight_scale(&self) -> Result<f64> { unsafe { sys::cv_ml_ANN_MLP_getBackpropWeightScale_const(self.as_raw_ANN_MLP()) }.into_result() } /// BPROP: Strength of the weight gradient term. /// The recommended value is about 0.1. Default value is 0.1. /// ## See also /// setBackpropWeightScale getBackpropWeightScale fn set_backprop_weight_scale(&mut self, val: f64) -> Result<()> { unsafe { sys::cv_ml_ANN_MLP_setBackpropWeightScale_double(self.as_raw_mut_ANN_MLP(), val) }.into_result() } /// BPROP: Strength of the momentum term (the difference between weights on the 2 previous iterations). /// This parameter provides some inertia to smooth the random fluctuations of the weights. It can /// vary from 0 (the feature is disabled) to 1 and beyond. The value 0.1 or so is good enough. /// Default value is 0.1. 
/// ## See also /// setBackpropMomentumScale fn get_backprop_momentum_scale(&self) -> Result<f64> { unsafe { sys::cv_ml_ANN_MLP_getBackpropMomentumScale_const(self.as_raw_ANN_MLP()) }.into_result() } /// BPROP: Strength of the momentum term (the difference between weights on the 2 previous iterations). /// This parameter provides some inertia to smooth the random fluctuations of the weights. It can /// vary from 0 (the feature is disabled) to 1 and beyond. The value 0.1 or so is good enough. /// Default value is 0.1. /// ## See also /// setBackpropMomentumScale getBackpropMomentumScale fn set_backprop_momentum_scale(&mut self, val: f64) -> Result<()> { unsafe { sys::cv_ml_ANN_MLP_setBackpropMomentumScale_double(self.as_raw_mut_ANN_MLP(), val) }.into_result() } /// RPROP: Initial value ![inline formula](https://latex.codecogs.com/png.latex?%5CDelta%5F0) of update-values ![inline formula](https://latex.codecogs.com/png.latex?%5CDelta%5F%7Bij%7D). /// Default value is 0.1. /// ## See also /// setRpropDW0 fn get_rprop_dw0(&self) -> Result<f64> { unsafe { sys::cv_ml_ANN_MLP_getRpropDW0_const(self.as_raw_ANN_MLP()) }.into_result() } /// RPROP: Initial value ![inline formula](https://latex.codecogs.com/png.latex?%5CDelta%5F0) of update-values ![inline formula](https://latex.codecogs.com/png.latex?%5CDelta%5F%7Bij%7D). /// Default value is 0.1. /// ## See also /// setRpropDW0 getRpropDW0 fn set_rprop_dw0(&mut self, val: f64) -> Result<()> { unsafe { sys::cv_ml_ANN_MLP_setRpropDW0_double(self.as_raw_mut_ANN_MLP(), val) }.into_result() } /// RPROP: Increase factor ![inline formula](https://latex.codecogs.com/png.latex?%5Ceta%5E%2B). /// It must be \>1. Default value is 1.2. /// ## See also /// setRpropDWPlus fn get_rprop_dw_plus(&self) -> Result<f64> { unsafe { sys::cv_ml_ANN_MLP_getRpropDWPlus_const(self.as_raw_ANN_MLP()) }.into_result() } /// RPROP: Increase factor ![inline formula](https://latex.codecogs.com/png.latex?%5Ceta%5E%2B). /// It must be \>1. Default value is 1.2. /// ## See also /// setRpropDWPlus getRpropDWPlus fn set_rprop_dw_plus(&mut self, val: f64) -> Result<()> { unsafe { sys::cv_ml_ANN_MLP_setRpropDWPlus_double(self.as_raw_mut_ANN_MLP(), val) }.into_result() } /// RPROP: Decrease factor ![inline formula](https://latex.codecogs.com/png.latex?%5Ceta%5E%2D). /// It must be \<1. Default value is 0.5. /// ## See also /// setRpropDWMinus fn get_rprop_dw_minus(&self) -> Result<f64> { unsafe { sys::cv_ml_ANN_MLP_getRpropDWMinus_const(self.as_raw_ANN_MLP()) }.into_result() } /// RPROP: Decrease factor ![inline formula](https://latex.codecogs.com/png.latex?%5Ceta%5E%2D). /// It must be \<1. Default value is 0.5. /// ## See also /// setRpropDWMinus getRpropDWMinus fn set_rprop_dw_minus(&mut self, val: f64) -> Result<()> { unsafe { sys::cv_ml_ANN_MLP_setRpropDWMinus_double(self.as_raw_mut_ANN_MLP(), val) }.into_result() } /// RPROP: Update-values lower limit ![inline formula](https://latex.codecogs.com/png.latex?%5CDelta%5F%7Bmin%7D). /// It must be positive. Default value is FLT_EPSILON. /// ## See also /// setRpropDWMin fn get_rprop_dw_min(&self) -> Result<f64> { unsafe { sys::cv_ml_ANN_MLP_getRpropDWMin_const(self.as_raw_ANN_MLP()) }.into_result() } /// RPROP: Update-values lower limit ![inline formula](https://latex.codecogs.com/png.latex?%5CDelta%5F%7Bmin%7D). /// It must be positive. Default value is FLT_EPSILON. 
/// ## See also /// setRpropDWMin getRpropDWMin fn set_rprop_dw_min(&mut self, val: f64) -> Result<()> { unsafe { sys::cv_ml_ANN_MLP_setRpropDWMin_double(self.as_raw_mut_ANN_MLP(), val) }.into_result() } /// RPROP: Update-values upper limit ![inline formula](https://latex.codecogs.com/png.latex?%5CDelta%5F%7Bmax%7D). /// It must be \>1. Default value is 50. /// ## See also /// setRpropDWMax fn get_rprop_dw_max(&self) -> Result<f64> { unsafe { sys::cv_ml_ANN_MLP_getRpropDWMax_const(self.as_raw_ANN_MLP()) }.into_result() } /// RPROP: Update-values upper limit ![inline formula](https://latex.codecogs.com/png.latex?%5CDelta%5F%7Bmax%7D). /// It must be \>1. Default value is 50. /// ## See also /// setRpropDWMax getRpropDWMax fn set_rprop_dw_max(&mut self, val: f64) -> Result<()> { unsafe { sys::cv_ml_ANN_MLP_setRpropDWMax_double(self.as_raw_mut_ANN_MLP(), val) }.into_result() } /// ANNEAL: Update initial temperature. /// It must be \>=0. Default value is 10. /// ## See also /// setAnnealInitialT fn get_anneal_initial_t(&self) -> Result<f64> { unsafe { sys::cv_ml_ANN_MLP_getAnnealInitialT_const(self.as_raw_ANN_MLP()) }.into_result() } /// ANNEAL: Update initial temperature. /// It must be \>=0. Default value is 10. /// ## See also /// setAnnealInitialT getAnnealInitialT fn set_anneal_initial_t(&mut self, val: f64) -> Result<()> { unsafe { sys::cv_ml_ANN_MLP_setAnnealInitialT_double(self.as_raw_mut_ANN_MLP(), val) }.into_result() } /// ANNEAL: Update final temperature. /// It must be \>=0 and less than initialT. Default value is 0.1. /// ## See also /// setAnnealFinalT fn get_anneal_final_t(&self) -> Result<f64> { unsafe { sys::cv_ml_ANN_MLP_getAnnealFinalT_const(self.as_raw_ANN_MLP()) }.into_result() } /// ANNEAL: Update final temperature. /// It must be \>=0 and less than initialT. Default value is 0.1. /// ## See also /// setAnnealFinalT getAnnealFinalT fn set_anneal_final_t(&mut self, val: f64) -> Result<()> { unsafe { sys::cv_ml_ANN_MLP_setAnnealFinalT_double(self.as_raw_mut_ANN_MLP(), val) }.into_result() } /// ANNEAL: Update cooling ratio. /// It must be \>0 and less than 1. Default value is 0.95. /// ## See also /// setAnnealCoolingRatio fn get_anneal_cooling_ratio(&self) -> Result<f64> { unsafe { sys::cv_ml_ANN_MLP_getAnnealCoolingRatio_const(self.as_raw_ANN_MLP()) }.into_result() } /// ANNEAL: Update cooling ratio. /// It must be \>0 and less than 1. Default value is 0.95. /// ## See also /// setAnnealCoolingRatio getAnnealCoolingRatio fn set_anneal_cooling_ratio(&mut self, val: f64) -> Result<()> { unsafe { sys::cv_ml_ANN_MLP_setAnnealCoolingRatio_double(self.as_raw_mut_ANN_MLP(), val) }.into_result() } /// ANNEAL: Update iteration per step. /// It must be \>0 . Default value is 10. /// ## See also /// setAnnealItePerStep fn get_anneal_ite_per_step(&self) -> Result<i32> { unsafe { sys::cv_ml_ANN_MLP_getAnnealItePerStep_const(self.as_raw_ANN_MLP()) }.into_result() } /// ANNEAL: Update iteration per step. /// It must be \>0 . Default value is 10. 
/// ## See also /// setAnnealItePerStep getAnnealItePerStep fn set_anneal_ite_per_step(&mut self, val: i32) -> Result<()> { unsafe { sys::cv_ml_ANN_MLP_setAnnealItePerStep_int(self.as_raw_mut_ANN_MLP(), val) }.into_result() } /// Set/initialize anneal RNG fn set_anneal_energy_rng(&mut self, rng: &core::RNG) -> Result<()> { unsafe { sys::cv_ml_ANN_MLP_setAnnealEnergyRNG_const_RNGR(self.as_raw_mut_ANN_MLP(), rng.as_raw_RNG()) }.into_result() } fn get_weights(&self, layer_idx: i32) -> Result<core::Mat> { unsafe { sys::cv_ml_ANN_MLP_getWeights_const_int(self.as_raw_ANN_MLP(), layer_idx) }.into_result().map(|r| unsafe { core::Mat::opencv_from_extern(r) } ) } } impl dyn ANN_MLP + '_ { /// Creates empty model /// /// Use StatModel::train to train the model, Algorithm::load\<ANN_MLP\>(filename) to load the pre-trained model. /// Note that the train method has optional flags: ANN_MLP::TrainFlags. pub fn create() -> Result<core::Ptr::<dyn crate::ml::ANN_MLP>> { unsafe { sys::cv_ml_ANN_MLP_create() }.into_result().map(|r| unsafe { core::Ptr::<dyn crate::ml::ANN_MLP>::opencv_from_extern(r) } ) } /// Loads and creates a serialized ANN from a file /// /// Use ANN::save to serialize and store an ANN to disk. /// Load the ANN from this file again, by calling this function with the path to the file. /// /// ## Parameters /// * filepath: path to serialized ANN pub fn load(filepath: &str) -> Result<core::Ptr::<dyn crate::ml::ANN_MLP>> { extern_container_arg!(filepath); unsafe { sys::cv_ml_ANN_MLP_load_const_StringR(filepath.opencv_as_extern()) }.into_result().map(|r| unsafe { core::Ptr::<dyn crate::ml::ANN_MLP>::opencv_from_extern(r) } ) } } /// Boosted tree classifier derived from DTrees /// ## See also /// @ref ml_intro_boost pub trait Boost: crate::ml::DTrees { fn as_raw_Boost(&self) -> *const c_void; fn as_raw_mut_Boost(&mut self) -> *mut c_void; /// Type of the boosting algorithm. /// See Boost::Types. Default value is Boost::REAL. /// ## See also /// setBoostType fn get_boost_type(&self) -> Result<i32> { unsafe { sys::cv_ml_Boost_getBoostType_const(self.as_raw_Boost()) }.into_result() } /// Type of the boosting algorithm. /// See Boost::Types. Default value is Boost::REAL. /// ## See also /// setBoostType getBoostType fn set_boost_type(&mut self, val: i32) -> Result<()> { unsafe { sys::cv_ml_Boost_setBoostType_int(self.as_raw_mut_Boost(), val) }.into_result() } /// The number of weak classifiers. /// Default value is 100. /// ## See also /// setWeakCount fn get_weak_count(&self) -> Result<i32> { unsafe { sys::cv_ml_Boost_getWeakCount_const(self.as_raw_Boost()) }.into_result() } /// The number of weak classifiers. /// Default value is 100. /// ## See also /// setWeakCount getWeakCount fn set_weak_count(&mut self, val: i32) -> Result<()> { unsafe { sys::cv_ml_Boost_setWeakCount_int(self.as_raw_mut_Boost(), val) }.into_result() } /// A threshold between 0 and 1 used to save computational time. /// Samples with summary weight ![inline formula](https://latex.codecogs.com/png.latex?%5Cleq%201%20%2D%20weight%5Ftrim%5Frate) do not participate in the *next* /// iteration of training. Set this parameter to 0 to turn off this functionality. Default value is 0.95. /// ## See also /// setWeightTrimRate fn get_weight_trim_rate(&self) -> Result<f64> { unsafe { sys::cv_ml_Boost_getWeightTrimRate_const(self.as_raw_Boost()) }.into_result() } /// A threshold between 0 and 1 used to save computational time. 
/// Samples with summary weight ![inline formula](https://latex.codecogs.com/png.latex?%5Cleq%201%20%2D%20weight%5Ftrim%5Frate) do not participate in the *next* /// iteration of training. Set this parameter to 0 to turn off this functionality. Default value is 0.95. /// ## See also /// setWeightTrimRate getWeightTrimRate fn set_weight_trim_rate(&mut self, val: f64) -> Result<()> { unsafe { sys::cv_ml_Boost_setWeightTrimRate_double(self.as_raw_mut_Boost(), val) }.into_result() } } impl dyn Boost + '_ { /// Creates the empty model. /// Use StatModel::train to train the model, Algorithm::load\<Boost\>(filename) to load the pre-trained model. pub fn create() -> Result<core::Ptr::<dyn crate::ml::Boost>> { unsafe { sys::cv_ml_Boost_create() }.into_result().map(|r| unsafe { core::Ptr::<dyn crate::ml::Boost>::opencv_from_extern(r) } ) } /// Loads and creates a serialized Boost from a file /// /// Use Boost::save to serialize and store an RTree to disk. /// Load the Boost from this file again, by calling this function with the path to the file. /// Optionally specify the node for the file containing the classifier /// /// ## Parameters /// * filepath: path to serialized Boost /// * nodeName: name of node containing the classifier /// /// ## C++ default parameters /// * node_name: String() pub fn load(filepath: &str, node_name: &str) -> Result<core::Ptr::<dyn crate::ml::Boost>> { extern_container_arg!(filepath); extern_container_arg!(node_name); unsafe { sys::cv_ml_Boost_load_const_StringR_const_StringR(filepath.opencv_as_extern(), node_name.opencv_as_extern()) }.into_result().map(|r| unsafe { core::Ptr::<dyn crate::ml::Boost>::opencv_from_extern(r) } ) } } /// The class represents a single decision tree or a collection of decision trees. /// /// The current public interface of the class allows user to train only a single decision tree, however /// the class is capable of storing multiple decision trees and using them for prediction (by summing /// responses or using a voting schemes), and the derived from DTrees classes (such as RTrees and Boost) /// use this capability to implement decision tree ensembles. /// ## See also /// @ref ml_intro_trees pub trait DTrees: crate::ml::StatModel { fn as_raw_DTrees(&self) -> *const c_void; fn as_raw_mut_DTrees(&mut self) -> *mut c_void; /// Cluster possible values of a categorical variable into K\<=maxCategories clusters to /// find a suboptimal split. /// If a discrete variable, on which the training procedure tries to make a split, takes more than /// maxCategories values, the precise best subset estimation may take a very long time because the /// algorithm is exponential. Instead, many decision trees engines (including our implementation) /// try to find sub-optimal split in this case by clustering all the samples into maxCategories /// clusters that is some categories are merged together. The clustering is applied only in n \> /// 2-class classification problems for categorical variables with N \> max_categories possible /// values. In case of regression and 2-class classification the optimal split can be found /// efficiently without employing clustering, thus the parameter is not used in these cases. /// Default value is 10. /// ## See also /// setMaxCategories fn get_max_categories(&self) -> Result<i32> { unsafe { sys::cv_ml_DTrees_getMaxCategories_const(self.as_raw_DTrees()) }.into_result() } /// Cluster possible values of a categorical variable into K\<=maxCategories clusters to /// find a suboptimal split. 
/// If a discrete variable, on which the training procedure tries to make a split, takes more than /// maxCategories values, the precise best subset estimation may take a very long time because the /// algorithm is exponential. Instead, many decision trees engines (including our implementation) /// try to find sub-optimal split in this case by clustering all the samples into maxCategories /// clusters that is some categories are merged together. The clustering is applied only in n \> /// 2-class classification problems for categorical variables with N \> max_categories possible /// values. In case of regression and 2-class classification the optimal split can be found /// efficiently without employing clustering, thus the parameter is not used in these cases. /// Default value is 10. /// ## See also /// setMaxCategories getMaxCategories fn set_max_categories(&mut self, val: i32) -> Result<()> { unsafe { sys::cv_ml_DTrees_setMaxCategories_int(self.as_raw_mut_DTrees(), val) }.into_result() } /// The maximum possible depth of the tree. /// That is the training algorithms attempts to split a node while its depth is less than maxDepth. /// The root node has zero depth. The actual depth may be smaller if the other termination criteria /// are met (see the outline of the training procedure @ref ml_intro_trees "here"), and/or if the /// tree is pruned. Default value is INT_MAX. /// ## See also /// setMaxDepth fn get_max_depth(&self) -> Result<i32> { unsafe { sys::cv_ml_DTrees_getMaxDepth_const(self.as_raw_DTrees()) }.into_result() } /// The maximum possible depth of the tree. /// That is the training algorithms attempts to split a node while its depth is less than maxDepth. /// The root node has zero depth. The actual depth may be smaller if the other termination criteria /// are met (see the outline of the training procedure @ref ml_intro_trees "here"), and/or if the /// tree is pruned. Default value is INT_MAX. /// ## See also /// setMaxDepth getMaxDepth fn set_max_depth(&mut self, val: i32) -> Result<()> { unsafe { sys::cv_ml_DTrees_setMaxDepth_int(self.as_raw_mut_DTrees(), val) }.into_result() } /// If the number of samples in a node is less than this parameter then the node will not be split. /// /// Default value is 10. /// ## See also /// setMinSampleCount fn get_min_sample_count(&self) -> Result<i32> { unsafe { sys::cv_ml_DTrees_getMinSampleCount_const(self.as_raw_DTrees()) }.into_result() } /// If the number of samples in a node is less than this parameter then the node will not be split. /// /// Default value is 10. /// ## See also /// setMinSampleCount getMinSampleCount fn set_min_sample_count(&mut self, val: i32) -> Result<()> { unsafe { sys::cv_ml_DTrees_setMinSampleCount_int(self.as_raw_mut_DTrees(), val) }.into_result() } /// If CVFolds \> 1 then algorithms prunes the built decision tree using K-fold /// cross-validation procedure where K is equal to CVFolds. /// Default value is 10. /// ## See also /// setCVFolds fn get_cv_folds(&self) -> Result<i32> { unsafe { sys::cv_ml_DTrees_getCVFolds_const(self.as_raw_DTrees()) }.into_result() } /// If CVFolds \> 1 then algorithms prunes the built decision tree using K-fold /// cross-validation procedure where K is equal to CVFolds. /// Default value is 10. /// ## See also /// setCVFolds getCVFolds fn set_cv_folds(&mut self, val: i32) -> Result<()> { unsafe { sys::cv_ml_DTrees_setCVFolds_int(self.as_raw_mut_DTrees(), val) }.into_result() } /// If true then surrogate splits will be built. 
/// These splits allow to work with missing data and compute variable importance correctly. /// Default value is false. /// /// Note: currently it's not implemented. /// ## See also /// setUseSurrogates fn get_use_surrogates(&self) -> Result<bool> { unsafe { sys::cv_ml_DTrees_getUseSurrogates_const(self.as_raw_DTrees()) }.into_result() } /// If true then surrogate splits will be built. /// These splits allow to work with missing data and compute variable importance correctly. /// Default value is false. /// /// Note: currently it's not implemented. /// ## See also /// setUseSurrogates getUseSurrogates fn set_use_surrogates(&mut self, val: bool) -> Result<()> { unsafe { sys::cv_ml_DTrees_setUseSurrogates_bool(self.as_raw_mut_DTrees(), val) }.into_result() } /// If true then a pruning will be harsher. /// This will make a tree more compact and more resistant to the training data noise but a bit less /// accurate. Default value is true. /// ## See also /// setUse1SERule fn get_use1_se_rule(&self) -> Result<bool> { unsafe { sys::cv_ml_DTrees_getUse1SERule_const(self.as_raw_DTrees()) }.into_result() } /// If true then a pruning will be harsher. /// This will make a tree more compact and more resistant to the training data noise but a bit less /// accurate. Default value is true. /// ## See also /// setUse1SERule getUse1SERule fn set_use1_se_rule(&mut self, val: bool) -> Result<()> { unsafe { sys::cv_ml_DTrees_setUse1SERule_bool(self.as_raw_mut_DTrees(), val) }.into_result() } /// If true then pruned branches are physically removed from the tree. /// Otherwise they are retained and it is possible to get results from the original unpruned (or /// pruned less aggressively) tree. Default value is true. /// ## See also /// setTruncatePrunedTree fn get_truncate_pruned_tree(&self) -> Result<bool> { unsafe { sys::cv_ml_DTrees_getTruncatePrunedTree_const(self.as_raw_DTrees()) }.into_result() } /// If true then pruned branches are physically removed from the tree. /// Otherwise they are retained and it is possible to get results from the original unpruned (or /// pruned less aggressively) tree. Default value is true. /// ## See also /// setTruncatePrunedTree getTruncatePrunedTree fn set_truncate_pruned_tree(&mut self, val: bool) -> Result<()> { unsafe { sys::cv_ml_DTrees_setTruncatePrunedTree_bool(self.as_raw_mut_DTrees(), val) }.into_result() } /// Termination criteria for regression trees. /// If all absolute differences between an estimated value in a node and values of train samples /// in this node are less than this parameter then the node will not be split further. Default /// value is 0.01f /// ## See also /// setRegressionAccuracy fn get_regression_accuracy(&self) -> Result<f32> { unsafe { sys::cv_ml_DTrees_getRegressionAccuracy_const(self.as_raw_DTrees()) }.into_result() } /// Termination criteria for regression trees. /// If all absolute differences between an estimated value in a node and values of train samples /// in this node are less than this parameter then the node will not be split further. Default /// value is 0.01f /// ## See also /// setRegressionAccuracy getRegressionAccuracy fn set_regression_accuracy(&mut self, val: f32) -> Result<()> { unsafe { sys::cv_ml_DTrees_setRegressionAccuracy_float(self.as_raw_mut_DTrees(), val) }.into_result() } /// The array of a priori class probabilities, sorted by the class label value. /// /// The parameter can be used to tune the decision tree preferences toward a certain class. 
For /// example, if you want to detect some rare anomaly occurrence, the training base will likely /// contain much more normal cases than anomalies, so a very good classification performance /// will be achieved just by considering every case as normal. To avoid this, the priors can be /// specified, where the anomaly probability is artificially increased (up to 0.5 or even /// greater), so the weight of the misclassified anomalies becomes much bigger, and the tree is /// adjusted properly. /// /// You can also think about this parameter as weights of prediction categories which determine /// relative weights that you give to misclassification. That is, if the weight of the first /// category is 1 and the weight of the second category is 10, then each mistake in predicting /// the second category is equivalent to making 10 mistakes in predicting the first category. /// Default value is empty Mat. /// ## See also /// setPriors fn get_priors(&self) -> Result<core::Mat> { unsafe { sys::cv_ml_DTrees_getPriors_const(self.as_raw_DTrees()) }.into_result().map(|r| unsafe { core::Mat::opencv_from_extern(r) } ) } /// The array of a priori class probabilities, sorted by the class label value. /// /// The parameter can be used to tune the decision tree preferences toward a certain class. For /// example, if you want to detect some rare anomaly occurrence, the training base will likely /// contain much more normal cases than anomalies, so a very good classification performance /// will be achieved just by considering every case as normal. To avoid this, the priors can be /// specified, where the anomaly probability is artificially increased (up to 0.5 or even /// greater), so the weight of the misclassified anomalies becomes much bigger, and the tree is /// adjusted properly. /// /// You can also think about this parameter as weights of prediction categories which determine /// relative weights that you give to misclassification. That is, if the weight of the first /// category is 1 and the weight of the second category is 10, then each mistake in predicting /// the second category is equivalent to making 10 mistakes in predicting the first category. /// Default value is empty Mat. 
/// ## See also /// setPriors getPriors fn set_priors(&mut self, val: &core::Mat) -> Result<()> { unsafe { sys::cv_ml_DTrees_setPriors_const_MatR(self.as_raw_mut_DTrees(), val.as_raw_Mat()) }.into_result() } /// Returns indices of root nodes fn get_roots(&self) -> Result<core::Vector::<i32>> { unsafe { sys::cv_ml_DTrees_getRoots_const(self.as_raw_DTrees()) }.into_result().map(|r| unsafe { core::Vector::<i32>::opencv_from_extern(r) } ) } /// Returns all the nodes /// /// all the node indices are indices in the returned vector fn get_nodes(&self) -> Result<core::Vector::<crate::ml::DTrees_Node>> { unsafe { sys::cv_ml_DTrees_getNodes_const(self.as_raw_DTrees()) }.into_result().map(|r| unsafe { core::Vector::<crate::ml::DTrees_Node>::opencv_from_extern(r) } ) } /// Returns all the splits /// /// all the split indices are indices in the returned vector fn get_splits(&self) -> Result<core::Vector::<crate::ml::DTrees_Split>> { unsafe { sys::cv_ml_DTrees_getSplits_const(self.as_raw_DTrees()) }.into_result().map(|r| unsafe { core::Vector::<crate::ml::DTrees_Split>::opencv_from_extern(r) } ) } /// Returns all the bitsets for categorical splits /// /// Split::subsetOfs is an offset in the returned vector fn get_subsets(&self) -> Result<core::Vector::<i32>> { unsafe { sys::cv_ml_DTrees_getSubsets_const(self.as_raw_DTrees()) }.into_result().map(|r| unsafe { core::Vector::<i32>::opencv_from_extern(r) } ) } } impl dyn DTrees + '_ { /// Creates the empty model /// /// The static method creates empty decision tree with the specified parameters. It should be then /// trained using train method (see StatModel::train). Alternatively, you can load the model from /// file using Algorithm::load\<DTrees\>(filename). pub fn create() -> Result<core::Ptr::<dyn crate::ml::DTrees>> { unsafe { sys::cv_ml_DTrees_create() }.into_result().map(|r| unsafe { core::Ptr::<dyn crate::ml::DTrees>::opencv_from_extern(r) } ) } /// Loads and creates a serialized DTrees from a file /// /// Use DTree::save to serialize and store an DTree to disk. /// Load the DTree from this file again, by calling this function with the path to the file. /// Optionally specify the node for the file containing the classifier /// /// ## Parameters /// * filepath: path to serialized DTree /// * nodeName: name of node containing the classifier /// /// ## C++ default parameters /// * node_name: String() pub fn load(filepath: &str, node_name: &str) -> Result<core::Ptr::<dyn crate::ml::DTrees>> { extern_container_arg!(filepath); extern_container_arg!(node_name); unsafe { sys::cv_ml_DTrees_load_const_StringR_const_StringR(filepath.opencv_as_extern(), node_name.opencv_as_extern()) }.into_result().map(|r| unsafe { core::Ptr::<dyn crate::ml::DTrees>::opencv_from_extern(r) } ) } } /// The class represents a decision tree node. pub trait DTrees_NodeTrait { fn as_raw_DTrees_Node(&self) -> *const c_void; fn as_raw_mut_DTrees_Node(&mut self) -> *mut c_void; /// Value at the node: a class label in case of classification or estimated /// function value in case of regression. fn value(&self) -> f64 { unsafe { sys::cv_ml_DTrees_Node_getPropValue_const(self.as_raw_DTrees_Node()) }.into_result().expect("Infallible function failed: value") } /// Value at the node: a class label in case of classification or estimated /// function value in case of regression. 
fn set_value(&mut self, val: f64) -> () { unsafe { sys::cv_ml_DTrees_Node_setPropValue_double(self.as_raw_mut_DTrees_Node(), val) }.into_result().expect("Infallible function failed: set_value") } /// Class index normalized to 0..class_count-1 range and assigned to the /// node. It is used internally in classification trees and tree ensembles. fn class_idx(&self) -> i32 { unsafe { sys::cv_ml_DTrees_Node_getPropClassIdx_const(self.as_raw_DTrees_Node()) }.into_result().expect("Infallible function failed: class_idx") } /// Class index normalized to 0..class_count-1 range and assigned to the /// node. It is used internally in classification trees and tree ensembles. fn set_class_idx(&mut self, val: i32) -> () { unsafe { sys::cv_ml_DTrees_Node_setPropClassIdx_int(self.as_raw_mut_DTrees_Node(), val) }.into_result().expect("Infallible function failed: set_class_idx") } /// Index of the parent node fn parent(&self) -> i32 { unsafe { sys::cv_ml_DTrees_Node_getPropParent_const(self.as_raw_DTrees_Node()) }.into_result().expect("Infallible function failed: parent") } /// Index of the parent node fn set_parent(&mut self, val: i32) -> () { unsafe { sys::cv_ml_DTrees_Node_setPropParent_int(self.as_raw_mut_DTrees_Node(), val) }.into_result().expect("Infallible function failed: set_parent") } /// Index of the left child node fn left(&self) -> i32 { unsafe { sys::cv_ml_DTrees_Node_getPropLeft_const(self.as_raw_DTrees_Node()) }.into_result().expect("Infallible function failed: left") } /// Index of the left child node fn set_left(&mut self, val: i32) -> () { unsafe { sys::cv_ml_DTrees_Node_setPropLeft_int(self.as_raw_mut_DTrees_Node(), val) }.into_result().expect("Infallible function failed: set_left") } /// Index of right child node fn right(&self) -> i32 { unsafe { sys::cv_ml_DTrees_Node_getPropRight_const(self.as_raw_DTrees_Node()) }.into_result().expect("Infallible function failed: right") } /// Index of right child node fn set_right(&mut self, val: i32) -> () { unsafe { sys::cv_ml_DTrees_Node_setPropRight_int(self.as_raw_mut_DTrees_Node(), val) }.into_result().expect("Infallible function failed: set_right") } /// Default direction where to go (-1: left or +1: right). It helps in the /// case of missing values. fn default_dir(&self) -> i32 { unsafe { sys::cv_ml_DTrees_Node_getPropDefaultDir_const(self.as_raw_DTrees_Node()) }.into_result().expect("Infallible function failed: default_dir") } /// Default direction where to go (-1: left or +1: right). It helps in the /// case of missing values. fn set_default_dir(&mut self, val: i32) -> () { unsafe { sys::cv_ml_DTrees_Node_setPropDefaultDir_int(self.as_raw_mut_DTrees_Node(), val) }.into_result().expect("Infallible function failed: set_default_dir") } /// Index of the first split fn split(&self) -> i32 { unsafe { sys::cv_ml_DTrees_Node_getPropSplit_const(self.as_raw_DTrees_Node()) }.into_result().expect("Infallible function failed: split") } /// Index of the first split fn set_split(&mut self, val: i32) -> () { unsafe { sys::cv_ml_DTrees_Node_setPropSplit_int(self.as_raw_mut_DTrees_Node(), val) }.into_result().expect("Infallible function failed: set_split") } } /// The class represents a decision tree node. pub struct DTrees_Node { ptr: *mut c_void } opencv_type_boxed! 
{ DTrees_Node }

impl Drop for DTrees_Node {
	fn drop(&mut self) {
		extern "C" { fn cv_DTrees_Node_delete(instance: *mut c_void); }
		unsafe { cv_DTrees_Node_delete(self.as_raw_mut_DTrees_Node()) };
	}
}

impl DTrees_Node {
	#[inline] pub fn as_raw_DTrees_Node(&self) -> *const c_void { self.as_raw() }
	#[inline] pub fn as_raw_mut_DTrees_Node(&mut self) -> *mut c_void { self.as_raw_mut() }
}

unsafe impl Send for DTrees_Node {}

impl crate::ml::DTrees_NodeTrait for DTrees_Node {
	#[inline] fn as_raw_DTrees_Node(&self) -> *const c_void { self.as_raw() }
	#[inline] fn as_raw_mut_DTrees_Node(&mut self) -> *mut c_void { self.as_raw_mut() }
}

impl DTrees_Node {
	pub fn default() -> Result<crate::ml::DTrees_Node> {
		unsafe { sys::cv_ml_DTrees_Node_Node() }.into_result().map(|r| unsafe { crate::ml::DTrees_Node::opencv_from_extern(r) } )
	}
}

/// The class represents a split in a decision tree.
pub trait DTrees_SplitTrait {
	fn as_raw_DTrees_Split(&self) -> *const c_void;
	fn as_raw_mut_DTrees_Split(&mut self) -> *mut c_void;

	/// Index of the variable on which the split is created.
	fn var_idx(&self) -> i32 {
		unsafe { sys::cv_ml_DTrees_Split_getPropVarIdx_const(self.as_raw_DTrees_Split()) }.into_result().expect("Infallible function failed: var_idx")
	}

	/// Index of the variable on which the split is created.
	fn set_var_idx(&mut self, val: i32) -> () {
		unsafe { sys::cv_ml_DTrees_Split_setPropVarIdx_int(self.as_raw_mut_DTrees_Split(), val) }.into_result().expect("Infallible function failed: set_var_idx")
	}

	/// If true, then the inverse split rule is used (i.e. left and right
	/// branches are exchanged in the rule expressions below).
	fn inversed(&self) -> bool {
		unsafe { sys::cv_ml_DTrees_Split_getPropInversed_const(self.as_raw_DTrees_Split()) }.into_result().expect("Infallible function failed: inversed")
	}

	/// If true, then the inverse split rule is used (i.e. left and right
	/// branches are exchanged in the rule expressions below).
	fn set_inversed(&mut self, val: bool) -> () {
		unsafe { sys::cv_ml_DTrees_Split_setPropInversed_bool(self.as_raw_mut_DTrees_Split(), val) }.into_result().expect("Infallible function failed: set_inversed")
	}

	/// The split quality, a positive number. It is used to choose the best split.
	fn quality(&self) -> f32 {
		unsafe { sys::cv_ml_DTrees_Split_getPropQuality_const(self.as_raw_DTrees_Split()) }.into_result().expect("Infallible function failed: quality")
	}

	/// The split quality, a positive number. It is used to choose the best split.
	fn set_quality(&mut self, val: f32) -> () {
		unsafe { sys::cv_ml_DTrees_Split_setPropQuality_float(self.as_raw_mut_DTrees_Split(), val) }.into_result().expect("Infallible function failed: set_quality")
	}

	/// Index of the next split in the list of splits for the node
	fn next(&self) -> i32 {
		unsafe { sys::cv_ml_DTrees_Split_getPropNext_const(self.as_raw_DTrees_Split()) }.into_result().expect("Infallible function failed: next")
	}

	/// Index of the next split in the list of splits for the node
	fn set_next(&mut self, val: i32) -> () {
		unsafe { sys::cv_ml_DTrees_Split_setPropNext_int(self.as_raw_mut_DTrees_Split(), val) }.into_result().expect("Infallible function failed: set_next")
	}

	/// The threshold value in case of a split on an ordered variable.
	/// The rule is:
	/// ```ignore
	/// if var_value < c
	/// then next_node <- left
	/// else next_node <- right
	/// ```
	///
	fn c(&self) -> f32 {
		unsafe { sys::cv_ml_DTrees_Split_getPropC_const(self.as_raw_DTrees_Split()) }.into_result().expect("Infallible function failed: c")
	}

	/// The threshold value in case of a split on an ordered variable.
	/// The rule is:
	/// ```ignore
	/// if var_value < c
	/// then next_node <- left
	/// else next_node <- right
	/// ```
	///
	fn set_c(&mut self, val: f32) -> () {
		unsafe { sys::cv_ml_DTrees_Split_setPropC_float(self.as_raw_mut_DTrees_Split(), val) }.into_result().expect("Infallible function failed: set_c")
	}

	/// Offset of the bitset used by the split on a categorical variable.
	/// The rule is:
	/// ```ignore
	/// if bitset[var_value] == 1
	/// then next_node <- left
	/// else next_node <- right
	/// ```
	///
	fn subset_ofs(&self) -> i32 {
		unsafe { sys::cv_ml_DTrees_Split_getPropSubsetOfs_const(self.as_raw_DTrees_Split()) }.into_result().expect("Infallible function failed: subset_ofs")
	}

	/// Offset of the bitset used by the split on a categorical variable.
	/// The rule is:
	/// ```ignore
	/// if bitset[var_value] == 1
	/// then next_node <- left
	/// else next_node <- right
	/// ```
	///
	fn set_subset_ofs(&mut self, val: i32) -> () {
		unsafe { sys::cv_ml_DTrees_Split_setPropSubsetOfs_int(self.as_raw_mut_DTrees_Split(), val) }.into_result().expect("Infallible function failed: set_subset_ofs")
	}
}

/// The class represents a split in a decision tree.
pub struct DTrees_Split {
	ptr: *mut c_void
}

opencv_type_boxed! { DTrees_Split }

impl Drop for DTrees_Split {
	fn drop(&mut self) {
		extern "C" { fn cv_DTrees_Split_delete(instance: *mut c_void); }
		unsafe { cv_DTrees_Split_delete(self.as_raw_mut_DTrees_Split()) };
	}
}

impl DTrees_Split {
	#[inline] pub fn as_raw_DTrees_Split(&self) -> *const c_void { self.as_raw() }
	#[inline] pub fn as_raw_mut_DTrees_Split(&mut self) -> *mut c_void { self.as_raw_mut() }
}

unsafe impl Send for DTrees_Split {}

impl crate::ml::DTrees_SplitTrait for DTrees_Split {
	#[inline] fn as_raw_DTrees_Split(&self) -> *const c_void { self.as_raw() }
	#[inline] fn as_raw_mut_DTrees_Split(&mut self) -> *mut c_void { self.as_raw_mut() }
}

impl DTrees_Split {
	pub fn default() -> Result<crate::ml::DTrees_Split> {
		unsafe { sys::cv_ml_DTrees_Split_Split() }.into_result().map(|r| unsafe { crate::ml::DTrees_Split::opencv_from_extern(r) } )
	}
}

/// The class implements the Expectation Maximization algorithm.
/// ## See also
/// @ref ml_intro_em
pub trait EM: crate::ml::StatModel {
	fn as_raw_EM(&self) -> *const c_void;
	fn as_raw_mut_EM(&mut self) -> *mut c_void;

	/// The number of mixture components in the Gaussian mixture model.
	/// Default value of the parameter is EM::DEFAULT_NCLUSTERS=5. Some %EM implementations can
	/// determine the optimal number of mixtures within a specified value range, but that is not the
	/// case in ML yet.
	/// ## See also
	/// setClustersNumber
	fn get_clusters_number(&self) -> Result<i32> {
		unsafe { sys::cv_ml_EM_getClustersNumber_const(self.as_raw_EM()) }.into_result()
	}

	/// The number of mixture components in the Gaussian mixture model.
	/// Default value of the parameter is EM::DEFAULT_NCLUSTERS=5. Some %EM implementations can
	/// determine the optimal number of mixtures within a specified value range, but that is not the
	/// case in ML yet.
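	///
	/// A minimal hedged sketch of configuring this property before training
	/// (error handling elided; `create` is defined on `dyn EM` further below):
	/// ```ignore
	/// let mut em = <dyn EM>::create()?;
	/// em.set_clusters_number(3)?; // fit a 3-component Gaussian mixture
	/// ```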
/// ## See also /// setClustersNumber getClustersNumber fn set_clusters_number(&mut self, val: i32) -> Result<()> { unsafe { sys::cv_ml_EM_setClustersNumber_int(self.as_raw_mut_EM(), val) }.into_result() } /// Constraint on covariance matrices which defines type of matrices. /// See EM::Types. /// ## See also /// setCovarianceMatrixType fn get_covariance_matrix_type(&self) -> Result<i32> { unsafe { sys::cv_ml_EM_getCovarianceMatrixType_const(self.as_raw_EM()) }.into_result() } /// Constraint on covariance matrices which defines type of matrices. /// See EM::Types. /// ## See also /// setCovarianceMatrixType getCovarianceMatrixType fn set_covariance_matrix_type(&mut self, val: i32) -> Result<()> { unsafe { sys::cv_ml_EM_setCovarianceMatrixType_int(self.as_raw_mut_EM(), val) }.into_result() } /// The termination criteria of the %EM algorithm. /// The %EM algorithm can be terminated by the number of iterations termCrit.maxCount (number of /// M-steps) or when relative change of likelihood logarithm is less than termCrit.epsilon. Default /// maximum number of iterations is EM::DEFAULT_MAX_ITERS=100. /// ## See also /// setTermCriteria fn get_term_criteria(&self) -> Result<core::TermCriteria> { unsafe { sys::cv_ml_EM_getTermCriteria_const(self.as_raw_EM()) }.into_result() } /// The termination criteria of the %EM algorithm. /// The %EM algorithm can be terminated by the number of iterations termCrit.maxCount (number of /// M-steps) or when relative change of likelihood logarithm is less than termCrit.epsilon. Default /// maximum number of iterations is EM::DEFAULT_MAX_ITERS=100. /// ## See also /// setTermCriteria getTermCriteria fn set_term_criteria(&mut self, val: core::TermCriteria) -> Result<()> { unsafe { sys::cv_ml_EM_setTermCriteria_const_TermCriteriaR(self.as_raw_mut_EM(), &val) }.into_result() } /// Returns weights of the mixtures /// /// Returns vector with the number of elements equal to the number of mixtures. fn get_weights(&self) -> Result<core::Mat> { unsafe { sys::cv_ml_EM_getWeights_const(self.as_raw_EM()) }.into_result().map(|r| unsafe { core::Mat::opencv_from_extern(r) } ) } /// Returns the cluster centers (means of the Gaussian mixture) /// /// Returns matrix with the number of rows equal to the number of mixtures and number of columns /// equal to the space dimensionality. fn get_means(&self) -> Result<core::Mat> { unsafe { sys::cv_ml_EM_getMeans_const(self.as_raw_EM()) }.into_result().map(|r| unsafe { core::Mat::opencv_from_extern(r) } ) } /// Returns covariation matrices /// /// Returns vector of covariation matrices. Number of matrices is the number of gaussian mixtures, /// each matrix is a square floating-point matrix NxN, where N is the space dimensionality. fn get_covs(&self, covs: &mut core::Vector::<core::Mat>) -> Result<()> { unsafe { sys::cv_ml_EM_getCovs_const_vector_Mat_R(self.as_raw_EM(), covs.as_raw_mut_VectorOfMat()) }.into_result() } /// Returns posterior probabilities for the provided samples /// /// ## Parameters /// * samples: The input samples, floating-point matrix /// * results: The optional output ![inline formula](https://latex.codecogs.com/png.latex?%20nSamples%20%5Ctimes%20nClusters) matrix of results. 
It contains /// posterior probabilities for each sample from the input /// * flags: This parameter will be ignored /// /// ## C++ default parameters /// * results: noArray() /// * flags: 0 fn predict(&self, samples: &dyn core::ToInputArray, results: &mut dyn core::ToOutputArray, flags: i32) -> Result<f32> { input_array_arg!(samples); output_array_arg!(results); unsafe { sys::cv_ml_EM_predict_const_const__InputArrayR_const__OutputArrayR_int(self.as_raw_EM(), samples.as_raw__InputArray(), results.as_raw__OutputArray(), flags) }.into_result() } /// Returns a likelihood logarithm value and an index of the most probable mixture component /// for the given sample. /// /// ## Parameters /// * sample: A sample for classification. It should be a one-channel matrix of /// ![inline formula](https://latex.codecogs.com/png.latex?1%20%5Ctimes%20dims) or ![inline formula](https://latex.codecogs.com/png.latex?dims%20%5Ctimes%201) size. /// * probs: Optional output matrix that contains posterior probabilities of each component /// given the sample. It has ![inline formula](https://latex.codecogs.com/png.latex?1%20%5Ctimes%20nclusters) size and CV_64FC1 type. /// /// The method returns a two-element double vector. Zero element is a likelihood logarithm value for /// the sample. First element is an index of the most probable mixture component for the given /// sample. fn predict2(&self, sample: &dyn core::ToInputArray, probs: &mut dyn core::ToOutputArray) -> Result<core::Vec2d> { input_array_arg!(sample); output_array_arg!(probs); unsafe { sys::cv_ml_EM_predict2_const_const__InputArrayR_const__OutputArrayR(self.as_raw_EM(), sample.as_raw__InputArray(), probs.as_raw__OutputArray()) }.into_result() } /// Estimate the Gaussian mixture parameters from a samples set. /// /// This variation starts with Expectation step. Initial values of the model parameters will be /// estimated by the k-means algorithm. /// /// Unlike many of the ML models, %EM is an unsupervised learning algorithm and it does not take /// responses (class labels or function values) as input. Instead, it computes the *Maximum /// Likelihood Estimate* of the Gaussian mixture parameters from an input sample set, stores all the /// parameters inside the structure: ![inline formula](https://latex.codecogs.com/png.latex?p%5F%7Bi%2Ck%7D) in probs, ![inline formula](https://latex.codecogs.com/png.latex?a%5Fk) in means , ![inline formula](https://latex.codecogs.com/png.latex?S%5Fk) in /// covs[k], ![inline formula](https://latex.codecogs.com/png.latex?%5Cpi%5Fk) in weights , and optionally computes the output "class label" for each /// sample: ![inline formula](https://latex.codecogs.com/png.latex?%5Ctexttt%7Blabels%7D%5Fi%3D%5Ctexttt%7Barg%20max%7D%5Fk%28p%5F%7Bi%2Ck%7D%29%2C%20i%3D1%2E%2EN) (indices of the most /// probable mixture component for each sample). /// /// The trained model can be used further for prediction, just like any other classifier. The /// trained model is similar to the NormalBayesClassifier. /// /// ## Parameters /// * samples: Samples from which the Gaussian mixture model will be estimated. It should be a /// one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type /// it will be converted to the inner matrix of such type for the further computing. /// * logLikelihoods: The optional output matrix that contains a likelihood logarithm value for /// each sample. It has ![inline formula](https://latex.codecogs.com/png.latex?nsamples%20%5Ctimes%201) size and CV_64FC1 type. 
/// * labels: The optional output "class label" for each sample: /// ![inline formula](https://latex.codecogs.com/png.latex?%5Ctexttt%7Blabels%7D%5Fi%3D%5Ctexttt%7Barg%20max%7D%5Fk%28p%5F%7Bi%2Ck%7D%29%2C%20i%3D1%2E%2EN) (indices of the most probable /// mixture component for each sample). It has ![inline formula](https://latex.codecogs.com/png.latex?nsamples%20%5Ctimes%201) size and CV_32SC1 type. /// * probs: The optional output matrix that contains posterior probabilities of each Gaussian /// mixture component given the each sample. It has ![inline formula](https://latex.codecogs.com/png.latex?nsamples%20%5Ctimes%20nclusters) size and /// CV_64FC1 type. /// /// ## C++ default parameters /// * log_likelihoods: noArray() /// * labels: noArray() /// * probs: noArray() fn train_em(&mut self, samples: &dyn core::ToInputArray, log_likelihoods: &mut dyn core::ToOutputArray, labels: &mut dyn core::ToOutputArray, probs: &mut dyn core::ToOutputArray) -> Result<bool> { input_array_arg!(samples); output_array_arg!(log_likelihoods); output_array_arg!(labels); output_array_arg!(probs); unsafe { sys::cv_ml_EM_trainEM_const__InputArrayR_const__OutputArrayR_const__OutputArrayR_const__OutputArrayR(self.as_raw_mut_EM(), samples.as_raw__InputArray(), log_likelihoods.as_raw__OutputArray(), labels.as_raw__OutputArray(), probs.as_raw__OutputArray()) }.into_result() } /// Estimate the Gaussian mixture parameters from a samples set. /// /// This variation starts with Expectation step. You need to provide initial means ![inline formula](https://latex.codecogs.com/png.latex?a%5Fk) of /// mixture components. Optionally you can pass initial weights ![inline formula](https://latex.codecogs.com/png.latex?%5Cpi%5Fk) and covariance matrices /// ![inline formula](https://latex.codecogs.com/png.latex?S%5Fk) of mixture components. /// /// ## Parameters /// * samples: Samples from which the Gaussian mixture model will be estimated. It should be a /// one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type /// it will be converted to the inner matrix of such type for the further computing. /// * means0: Initial means ![inline formula](https://latex.codecogs.com/png.latex?a%5Fk) of mixture components. It is a one-channel matrix of /// ![inline formula](https://latex.codecogs.com/png.latex?nclusters%20%5Ctimes%20dims) size. If the matrix does not have CV_64F type it will be /// converted to the inner matrix of such type for the further computing. /// * covs0: The vector of initial covariance matrices ![inline formula](https://latex.codecogs.com/png.latex?S%5Fk) of mixture components. Each of /// covariance matrices is a one-channel matrix of ![inline formula](https://latex.codecogs.com/png.latex?dims%20%5Ctimes%20dims) size. If the matrices /// do not have CV_64F type they will be converted to the inner matrices of such type for the /// further computing. /// * weights0: Initial weights ![inline formula](https://latex.codecogs.com/png.latex?%5Cpi%5Fk) of mixture components. It should be a one-channel /// floating-point matrix with ![inline formula](https://latex.codecogs.com/png.latex?1%20%5Ctimes%20nclusters) or ![inline formula](https://latex.codecogs.com/png.latex?nclusters%20%5Ctimes%201) size. /// * logLikelihoods: The optional output matrix that contains a likelihood logarithm value for /// each sample. It has ![inline formula](https://latex.codecogs.com/png.latex?nsamples%20%5Ctimes%201) size and CV_64FC1 type. 
/// * labels: The optional output "class label" for each sample: /// ![inline formula](https://latex.codecogs.com/png.latex?%5Ctexttt%7Blabels%7D%5Fi%3D%5Ctexttt%7Barg%20max%7D%5Fk%28p%5F%7Bi%2Ck%7D%29%2C%20i%3D1%2E%2EN) (indices of the most probable /// mixture component for each sample). It has ![inline formula](https://latex.codecogs.com/png.latex?nsamples%20%5Ctimes%201) size and CV_32SC1 type. /// * probs: The optional output matrix that contains posterior probabilities of each Gaussian /// mixture component given the each sample. It has ![inline formula](https://latex.codecogs.com/png.latex?nsamples%20%5Ctimes%20nclusters) size and /// CV_64FC1 type. /// /// ## C++ default parameters /// * covs0: noArray() /// * weights0: noArray() /// * log_likelihoods: noArray() /// * labels: noArray() /// * probs: noArray() fn train_e(&mut self, samples: &dyn core::ToInputArray, means0: &dyn core::ToInputArray, covs0: &dyn core::ToInputArray, weights0: &dyn core::ToInputArray, log_likelihoods: &mut dyn core::ToOutputArray, labels: &mut dyn core::ToOutputArray, probs: &mut dyn core::ToOutputArray) -> Result<bool> { input_array_arg!(samples); input_array_arg!(means0); input_array_arg!(covs0); input_array_arg!(weights0); output_array_arg!(log_likelihoods); output_array_arg!(labels); output_array_arg!(probs); unsafe { sys::cv_ml_EM_trainE_const__InputArrayR_const__InputArrayR_const__InputArrayR_const__InputArrayR_const__OutputArrayR_const__OutputArrayR_const__OutputArrayR(self.as_raw_mut_EM(), samples.as_raw__InputArray(), means0.as_raw__InputArray(), covs0.as_raw__InputArray(), weights0.as_raw__InputArray(), log_likelihoods.as_raw__OutputArray(), labels.as_raw__OutputArray(), probs.as_raw__OutputArray()) }.into_result() } /// Estimate the Gaussian mixture parameters from a samples set. /// /// This variation starts with Maximization step. You need to provide initial probabilities /// ![inline formula](https://latex.codecogs.com/png.latex?p%5F%7Bi%2Ck%7D) to use this option. /// /// ## Parameters /// * samples: Samples from which the Gaussian mixture model will be estimated. It should be a /// one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type /// it will be converted to the inner matrix of such type for the further computing. /// * probs0: the probabilities /// * logLikelihoods: The optional output matrix that contains a likelihood logarithm value for /// each sample. It has ![inline formula](https://latex.codecogs.com/png.latex?nsamples%20%5Ctimes%201) size and CV_64FC1 type. /// * labels: The optional output "class label" for each sample: /// ![inline formula](https://latex.codecogs.com/png.latex?%5Ctexttt%7Blabels%7D%5Fi%3D%5Ctexttt%7Barg%20max%7D%5Fk%28p%5F%7Bi%2Ck%7D%29%2C%20i%3D1%2E%2EN) (indices of the most probable /// mixture component for each sample). It has ![inline formula](https://latex.codecogs.com/png.latex?nsamples%20%5Ctimes%201) size and CV_32SC1 type. /// * probs: The optional output matrix that contains posterior probabilities of each Gaussian /// mixture component given the each sample. It has ![inline formula](https://latex.codecogs.com/png.latex?nsamples%20%5Ctimes%20nclusters) size and /// CV_64FC1 type. 
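	///
	/// As a hedged illustration of the train\* family as a whole (the matrix
	/// names and shapes below are assumptions for the sketch, and `Mat::default()`
	/// is assumed fallible in this crate version):
	/// ```ignore
	/// let mut em = <dyn EM>::create()?;
	/// em.set_clusters_number(2)?;
	/// // `samples` is assumed to be a one-channel Mat with one sample per row
	/// let mut log_likelihoods = Mat::default()?;
	/// let mut labels = Mat::default()?;
	/// let mut probs = Mat::default()?;
	/// em.train_em(&samples, &mut log_likelihoods, &mut labels, &mut probs)?;
	/// // query one sample: Vec2d of [likelihood logarithm, component index]
	/// let lk_and_idx = em.predict2(&sample, &mut Mat::default()?)?;
	/// ```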
/// /// ## C++ default parameters /// * log_likelihoods: noArray() /// * labels: noArray() /// * probs: noArray() fn train_m(&mut self, samples: &dyn core::ToInputArray, probs0: &dyn core::ToInputArray, log_likelihoods: &mut dyn core::ToOutputArray, labels: &mut dyn core::ToOutputArray, probs: &mut dyn core::ToOutputArray) -> Result<bool> { input_array_arg!(samples); input_array_arg!(probs0); output_array_arg!(log_likelihoods); output_array_arg!(labels); output_array_arg!(probs); unsafe { sys::cv_ml_EM_trainM_const__InputArrayR_const__InputArrayR_const__OutputArrayR_const__OutputArrayR_const__OutputArrayR(self.as_raw_mut_EM(), samples.as_raw__InputArray(), probs0.as_raw__InputArray(), log_likelihoods.as_raw__OutputArray(), labels.as_raw__OutputArray(), probs.as_raw__OutputArray()) }.into_result() } } impl dyn EM + '_ { /// Creates empty %EM model. /// The model should be trained then using StatModel::train(traindata, flags) method. Alternatively, you /// can use one of the EM::train\* methods or load it from file using Algorithm::load\<EM\>(filename). pub fn create() -> Result<core::Ptr::<dyn crate::ml::EM>> { unsafe { sys::cv_ml_EM_create() }.into_result().map(|r| unsafe { core::Ptr::<dyn crate::ml::EM>::opencv_from_extern(r) } ) } /// Loads and creates a serialized EM from a file /// /// Use EM::save to serialize and store an EM to disk. /// Load the EM from this file again, by calling this function with the path to the file. /// Optionally specify the node for the file containing the classifier /// /// ## Parameters /// * filepath: path to serialized EM /// * nodeName: name of node containing the classifier /// /// ## C++ default parameters /// * node_name: String() pub fn load(filepath: &str, node_name: &str) -> Result<core::Ptr::<dyn crate::ml::EM>> { extern_container_arg!(filepath); extern_container_arg!(node_name); unsafe { sys::cv_ml_EM_load_const_StringR_const_StringR(filepath.opencv_as_extern(), node_name.opencv_as_extern()) }.into_result().map(|r| unsafe { core::Ptr::<dyn crate::ml::EM>::opencv_from_extern(r) } ) } } /// The class implements K-Nearest Neighbors model /// ## See also /// @ref ml_intro_knn pub trait KNearest: crate::ml::StatModel { fn as_raw_KNearest(&self) -> *const c_void; fn as_raw_mut_KNearest(&mut self) -> *mut c_void; /// Default number of neighbors to use in predict method. /// ## See also /// setDefaultK fn get_default_k(&self) -> Result<i32> { unsafe { sys::cv_ml_KNearest_getDefaultK_const(self.as_raw_KNearest()) }.into_result() } /// Default number of neighbors to use in predict method. /// ## See also /// setDefaultK getDefaultK fn set_default_k(&mut self, val: i32) -> Result<()> { unsafe { sys::cv_ml_KNearest_setDefaultK_int(self.as_raw_mut_KNearest(), val) }.into_result() } /// Whether classification or regression model should be trained. /// ## See also /// setIsClassifier fn get_is_classifier(&self) -> Result<bool> { unsafe { sys::cv_ml_KNearest_getIsClassifier_const(self.as_raw_KNearest()) }.into_result() } /// Whether classification or regression model should be trained. /// ## See also /// setIsClassifier getIsClassifier fn set_is_classifier(&mut self, val: bool) -> Result<()> { unsafe { sys::cv_ml_KNearest_setIsClassifier_bool(self.as_raw_mut_KNearest(), val) }.into_result() } /// Parameter for KDTree implementation. /// ## See also /// setEmax fn get_emax(&self) -> Result<i32> { unsafe { sys::cv_ml_KNearest_getEmax_const(self.as_raw_KNearest()) }.into_result() } /// Parameter for KDTree implementation. 
/// ## See also /// setEmax getEmax fn set_emax(&mut self, val: i32) -> Result<()> { unsafe { sys::cv_ml_KNearest_setEmax_int(self.as_raw_mut_KNearest(), val) }.into_result() } /// %Algorithm type, one of KNearest::Types. /// ## See also /// setAlgorithmType fn get_algorithm_type(&self) -> Result<i32> { unsafe { sys::cv_ml_KNearest_getAlgorithmType_const(self.as_raw_KNearest()) }.into_result() } /// %Algorithm type, one of KNearest::Types. /// ## See also /// setAlgorithmType getAlgorithmType fn set_algorithm_type(&mut self, val: i32) -> Result<()> { unsafe { sys::cv_ml_KNearest_setAlgorithmType_int(self.as_raw_mut_KNearest(), val) }.into_result() } /// Finds the neighbors and predicts responses for input vectors. /// /// ## Parameters /// * samples: Input samples stored by rows. It is a single-precision floating-point matrix of /// `<number_of_samples> * k` size. /// * k: Number of used nearest neighbors. Should be greater than 1. /// * results: Vector with results of prediction (regression or classification) for each input /// sample. It is a single-precision floating-point vector with `<number_of_samples>` elements. /// * neighborResponses: Optional output values for corresponding neighbors. It is a single- /// precision floating-point matrix of `<number_of_samples> * k` size. /// * dist: Optional output distances from the input vectors to the corresponding neighbors. It /// is a single-precision floating-point matrix of `<number_of_samples> * k` size. /// /// For each input vector (a row of the matrix samples), the method finds the k nearest neighbors. /// In case of regression, the predicted result is a mean value of the particular vector's neighbor /// responses. In case of classification, the class is determined by voting. /// /// For each input vector, the neighbors are sorted by their distances to the vector. /// /// In case of C++ interface you can use output pointers to empty matrices and the function will /// allocate memory itself. /// /// If only a single input vector is passed, all output matrices are optional and the predicted /// value is returned by the method. /// /// The function is parallelized with the TBB library. /// /// ## C++ default parameters /// * neighbor_responses: noArray() /// * dist: noArray() fn find_nearest(&self, samples: &dyn core::ToInputArray, k: i32, results: &mut dyn core::ToOutputArray, neighbor_responses: &mut dyn core::ToOutputArray, dist: &mut dyn core::ToOutputArray) -> Result<f32> { input_array_arg!(samples); output_array_arg!(results); output_array_arg!(neighbor_responses); output_array_arg!(dist); unsafe { sys::cv_ml_KNearest_findNearest_const_const__InputArrayR_int_const__OutputArrayR_const__OutputArrayR_const__OutputArrayR(self.as_raw_KNearest(), samples.as_raw__InputArray(), k, results.as_raw__OutputArray(), neighbor_responses.as_raw__OutputArray(), dist.as_raw__OutputArray()) }.into_result() } } impl dyn KNearest + '_ { /// Creates the empty model /// /// The static method creates empty %KNearest classifier. It should be then trained using StatModel::train method. pub fn create() -> Result<core::Ptr::<dyn crate::ml::KNearest>> { unsafe { sys::cv_ml_KNearest_create() }.into_result().map(|r| unsafe { core::Ptr::<dyn crate::ml::KNearest>::opencv_from_extern(r) } ) } /// Loads and creates a serialized knearest from a file /// /// Use KNearest::save to serialize and store an KNearest to disk. /// Load the KNearest from this file again, by calling this function with the path to the file. 
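	///
	/// A hedged sketch of the save/load round trip (the file name is
	/// illustrative, and `save` is assumed to come from the Algorithm base):
	/// ```ignore
	/// // `knn` is assumed to be a trained model
	/// knn.save("knn.xml")?;                        // persist it to disk
	/// let knn2 = <dyn KNearest>::load("knn.xml")?; // restore it later
	/// ```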
/// /// ## Parameters /// * filepath: path to serialized KNearest pub fn load(filepath: &str) -> Result<core::Ptr::<dyn crate::ml::KNearest>> { extern_container_arg!(filepath); unsafe { sys::cv_ml_KNearest_load_const_StringR(filepath.opencv_as_extern()) }.into_result().map(|r| unsafe { core::Ptr::<dyn crate::ml::KNearest>::opencv_from_extern(r) } ) } } /// Implements Logistic Regression classifier. /// ## See also /// @ref ml_intro_lr pub trait LogisticRegression: crate::ml::StatModel { fn as_raw_LogisticRegression(&self) -> *const c_void; fn as_raw_mut_LogisticRegression(&mut self) -> *mut c_void; /// Learning rate. /// ## See also /// setLearningRate fn get_learning_rate(&self) -> Result<f64> { unsafe { sys::cv_ml_LogisticRegression_getLearningRate_const(self.as_raw_LogisticRegression()) }.into_result() } /// Learning rate. /// ## See also /// setLearningRate getLearningRate fn set_learning_rate(&mut self, val: f64) -> Result<()> { unsafe { sys::cv_ml_LogisticRegression_setLearningRate_double(self.as_raw_mut_LogisticRegression(), val) }.into_result() } /// Number of iterations. /// ## See also /// setIterations fn get_iterations(&self) -> Result<i32> { unsafe { sys::cv_ml_LogisticRegression_getIterations_const(self.as_raw_LogisticRegression()) }.into_result() } /// Number of iterations. /// ## See also /// setIterations getIterations fn set_iterations(&mut self, val: i32) -> Result<()> { unsafe { sys::cv_ml_LogisticRegression_setIterations_int(self.as_raw_mut_LogisticRegression(), val) }.into_result() } /// Kind of regularization to be applied. See LogisticRegression::RegKinds. /// ## See also /// setRegularization fn get_regularization(&self) -> Result<i32> { unsafe { sys::cv_ml_LogisticRegression_getRegularization_const(self.as_raw_LogisticRegression()) }.into_result() } /// Kind of regularization to be applied. See LogisticRegression::RegKinds. /// ## See also /// setRegularization getRegularization fn set_regularization(&mut self, val: i32) -> Result<()> { unsafe { sys::cv_ml_LogisticRegression_setRegularization_int(self.as_raw_mut_LogisticRegression(), val) }.into_result() } /// Kind of training method used. See LogisticRegression::Methods. /// ## See also /// setTrainMethod fn get_train_method(&self) -> Result<i32> { unsafe { sys::cv_ml_LogisticRegression_getTrainMethod_const(self.as_raw_LogisticRegression()) }.into_result() } /// Kind of training method used. See LogisticRegression::Methods. /// ## See also /// setTrainMethod getTrainMethod fn set_train_method(&mut self, val: i32) -> Result<()> { unsafe { sys::cv_ml_LogisticRegression_setTrainMethod_int(self.as_raw_mut_LogisticRegression(), val) }.into_result() } /// Specifies the number of training samples taken in each step of Mini-Batch Gradient /// Descent. Will only be used if using LogisticRegression::MINI_BATCH training algorithm. It /// has to take values less than the total number of training samples. /// ## See also /// setMiniBatchSize fn get_mini_batch_size(&self) -> Result<i32> { unsafe { sys::cv_ml_LogisticRegression_getMiniBatchSize_const(self.as_raw_LogisticRegression()) }.into_result() } /// Specifies the number of training samples taken in each step of Mini-Batch Gradient /// Descent. Will only be used if using LogisticRegression::MINI_BATCH training algorithm. It /// has to take values less than the total number of training samples. 
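	///
	/// A hedged configuration sketch (the constant path for the mini-batch
	/// training method is an assumption here):
	/// ```ignore
	/// let mut lr = <dyn LogisticRegression>::create()?;
	/// lr.set_train_method(LogisticRegression_Methods::MINI_BATCH as i32)?; // assumed path
	/// lr.set_mini_batch_size(32)?; // must stay below the number of training samples
	/// ```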
/// ## See also /// setMiniBatchSize getMiniBatchSize fn set_mini_batch_size(&mut self, val: i32) -> Result<()> { unsafe { sys::cv_ml_LogisticRegression_setMiniBatchSize_int(self.as_raw_mut_LogisticRegression(), val) }.into_result() } /// Termination criteria of the algorithm. /// ## See also /// setTermCriteria fn get_term_criteria(&self) -> Result<core::TermCriteria> { unsafe { sys::cv_ml_LogisticRegression_getTermCriteria_const(self.as_raw_LogisticRegression()) }.into_result() } /// Termination criteria of the algorithm. /// ## See also /// setTermCriteria getTermCriteria fn set_term_criteria(&mut self, val: core::TermCriteria) -> Result<()> { unsafe { sys::cv_ml_LogisticRegression_setTermCriteria_TermCriteria(self.as_raw_mut_LogisticRegression(), val.opencv_as_extern()) }.into_result() } /// Predicts responses for input samples and returns a float type. /// /// ## Parameters /// * samples: The input data for the prediction algorithm. Matrix [m x n], where each row /// contains variables (features) of one object being classified. Should have data type CV_32F. /// * results: Predicted labels as a column matrix of type CV_32S. /// * flags: Not used. /// /// ## C++ default parameters /// * results: noArray() /// * flags: 0 fn predict(&self, samples: &dyn core::ToInputArray, results: &mut dyn core::ToOutputArray, flags: i32) -> Result<f32> { input_array_arg!(samples); output_array_arg!(results); unsafe { sys::cv_ml_LogisticRegression_predict_const_const__InputArrayR_const__OutputArrayR_int(self.as_raw_LogisticRegression(), samples.as_raw__InputArray(), results.as_raw__OutputArray(), flags) }.into_result() } /// This function returns the trained parameters arranged across rows. /// /// For a two class classification problem, it returns a row matrix. It returns learnt parameters of /// the Logistic Regression as a matrix of type CV_32F. fn get_learnt_thetas(&self) -> Result<core::Mat> { unsafe { sys::cv_ml_LogisticRegression_get_learnt_thetas_const(self.as_raw_LogisticRegression()) }.into_result().map(|r| unsafe { core::Mat::opencv_from_extern(r) } ) } } impl dyn LogisticRegression + '_ { /// Creates empty model. /// /// Creates Logistic Regression model with parameters given. pub fn create() -> Result<core::Ptr::<dyn crate::ml::LogisticRegression>> { unsafe { sys::cv_ml_LogisticRegression_create() }.into_result().map(|r| unsafe { core::Ptr::<dyn crate::ml::LogisticRegression>::opencv_from_extern(r) } ) } /// Loads and creates a serialized LogisticRegression from a file /// /// Use LogisticRegression::save to serialize and store an LogisticRegression to disk. /// Load the LogisticRegression from this file again, by calling this function with the path to the file. /// Optionally specify the node for the file containing the classifier /// /// ## Parameters /// * filepath: path to serialized LogisticRegression /// * nodeName: name of node containing the classifier /// /// ## C++ default parameters /// * node_name: String() pub fn load(filepath: &str, node_name: &str) -> Result<core::Ptr::<dyn crate::ml::LogisticRegression>> { extern_container_arg!(filepath); extern_container_arg!(node_name); unsafe { sys::cv_ml_LogisticRegression_load_const_StringR_const_StringR(filepath.opencv_as_extern(), node_name.opencv_as_extern()) }.into_result().map(|r| unsafe { core::Ptr::<dyn crate::ml::LogisticRegression>::opencv_from_extern(r) } ) } } /// Bayes classifier for normally distributed data. 
/// ## See also /// @ref ml_intro_bayes pub trait NormalBayesClassifier: crate::ml::StatModel { fn as_raw_NormalBayesClassifier(&self) -> *const c_void; fn as_raw_mut_NormalBayesClassifier(&mut self) -> *mut c_void; /// Predicts the response for sample(s). /// /// The method estimates the most probable classes for input vectors. Input vectors (one or more) /// are stored as rows of the matrix inputs. In case of multiple input vectors, there should be one /// output vector outputs. The predicted class for a single input vector is returned by the method. /// The vector outputProbs contains the output probabilities corresponding to each element of /// result. /// /// ## C++ default parameters /// * flags: 0 fn predict_prob(&self, inputs: &dyn core::ToInputArray, outputs: &mut dyn core::ToOutputArray, output_probs: &mut dyn core::ToOutputArray, flags: i32) -> Result<f32> { input_array_arg!(inputs); output_array_arg!(outputs); output_array_arg!(output_probs); unsafe { sys::cv_ml_NormalBayesClassifier_predictProb_const_const__InputArrayR_const__OutputArrayR_const__OutputArrayR_int(self.as_raw_NormalBayesClassifier(), inputs.as_raw__InputArray(), outputs.as_raw__OutputArray(), output_probs.as_raw__OutputArray(), flags) }.into_result() } } impl dyn NormalBayesClassifier + '_ { /// Creates empty model /// Use StatModel::train to train the model after creation. pub fn create() -> Result<core::Ptr::<dyn crate::ml::NormalBayesClassifier>> { unsafe { sys::cv_ml_NormalBayesClassifier_create() }.into_result().map(|r| unsafe { core::Ptr::<dyn crate::ml::NormalBayesClassifier>::opencv_from_extern(r) } ) } /// Loads and creates a serialized NormalBayesClassifier from a file /// /// Use NormalBayesClassifier::save to serialize and store an NormalBayesClassifier to disk. /// Load the NormalBayesClassifier from this file again, by calling this function with the path to the file. /// Optionally specify the node for the file containing the classifier /// /// ## Parameters /// * filepath: path to serialized NormalBayesClassifier /// * nodeName: name of node containing the classifier /// /// ## C++ default parameters /// * node_name: String() pub fn load(filepath: &str, node_name: &str) -> Result<core::Ptr::<dyn crate::ml::NormalBayesClassifier>> { extern_container_arg!(filepath); extern_container_arg!(node_name); unsafe { sys::cv_ml_NormalBayesClassifier_load_const_StringR_const_StringR(filepath.opencv_as_extern(), node_name.opencv_as_extern()) }.into_result().map(|r| unsafe { core::Ptr::<dyn crate::ml::NormalBayesClassifier>::opencv_from_extern(r) } ) } } /// The structure represents the logarithmic grid range of statmodel parameters. /// /// It is used for optimizing statmodel accuracy by varying model parameters, the accuracy estimate /// being computed by cross-validation. pub trait ParamGridTrait { fn as_raw_ParamGrid(&self) -> *const c_void; fn as_raw_mut_ParamGrid(&mut self) -> *mut c_void; /// Minimum value of the statmodel parameter. Default value is 0. fn min_val(&self) -> f64 { unsafe { sys::cv_ml_ParamGrid_getPropMinVal_const(self.as_raw_ParamGrid()) }.into_result().expect("Infallible function failed: min_val") } /// Minimum value of the statmodel parameter. Default value is 0. fn set_min_val(&mut self, val: f64) -> () { unsafe { sys::cv_ml_ParamGrid_setPropMinVal_double(self.as_raw_mut_ParamGrid(), val) }.into_result().expect("Infallible function failed: set_min_val") } /// Maximum value of the statmodel parameter. Default value is 0. 
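	///
	/// As a hedged illustration of the grid semantics: with `minVal = 0.1`,
	/// `maxVal = 100` and `logStep = 10`, the iterated values are 0.1, 1 and 10,
	/// because 0.1 * 10^3 = 100 is not strictly below `maxVal`. Using the
	/// `for_range` constructor defined below:
	/// ```ignore
	/// let c_grid = ParamGrid::for_range(0.1, 100., 10.)?;
	/// ```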
fn max_val(&self) -> f64 { unsafe { sys::cv_ml_ParamGrid_getPropMaxVal_const(self.as_raw_ParamGrid()) }.into_result().expect("Infallible function failed: max_val") } /// Maximum value of the statmodel parameter. Default value is 0. fn set_max_val(&mut self, val: f64) -> () { unsafe { sys::cv_ml_ParamGrid_setPropMaxVal_double(self.as_raw_mut_ParamGrid(), val) }.into_result().expect("Infallible function failed: set_max_val") } /// Logarithmic step for iterating the statmodel parameter. /// /// The grid determines the following iteration sequence of the statmodel parameter values: /// ![block formula](https://latex.codecogs.com/png.latex?%28minVal%2C%20minVal%2Astep%2C%20minVal%2A%7Bstep%7D%5E2%2C%20%5Cdots%2C%20%20minVal%2A%7BlogStep%7D%5En%29%2C) /// where ![inline formula](https://latex.codecogs.com/png.latex?n) is the maximal index satisfying /// ![block formula](https://latex.codecogs.com/png.latex?%5Ctexttt%7BminVal%7D%20%2A%20%5Ctexttt%7BlogStep%7D%20%5En%20%3C%20%20%5Ctexttt%7BmaxVal%7D) /// The grid is logarithmic, so logStep must always be greater than 1. Default value is 1. fn log_step(&self) -> f64 { unsafe { sys::cv_ml_ParamGrid_getPropLogStep_const(self.as_raw_ParamGrid()) }.into_result().expect("Infallible function failed: log_step") } /// Logarithmic step for iterating the statmodel parameter. /// /// The grid determines the following iteration sequence of the statmodel parameter values: /// ![block formula](https://latex.codecogs.com/png.latex?%28minVal%2C%20minVal%2Astep%2C%20minVal%2A%7Bstep%7D%5E2%2C%20%5Cdots%2C%20%20minVal%2A%7BlogStep%7D%5En%29%2C) /// where ![inline formula](https://latex.codecogs.com/png.latex?n) is the maximal index satisfying /// ![block formula](https://latex.codecogs.com/png.latex?%5Ctexttt%7BminVal%7D%20%2A%20%5Ctexttt%7BlogStep%7D%20%5En%20%3C%20%20%5Ctexttt%7BmaxVal%7D) /// The grid is logarithmic, so logStep must always be greater than 1. Default value is 1. fn set_log_step(&mut self, val: f64) -> () { unsafe { sys::cv_ml_ParamGrid_setPropLogStep_double(self.as_raw_mut_ParamGrid(), val) }.into_result().expect("Infallible function failed: set_log_step") } } /// The structure represents the logarithmic grid range of statmodel parameters. /// /// It is used for optimizing statmodel accuracy by varying model parameters, the accuracy estimate /// being computed by cross-validation. pub struct ParamGrid { ptr: *mut c_void } opencv_type_boxed! 
{ ParamGrid } impl Drop for ParamGrid { fn drop(&mut self) { extern "C" { fn cv_ParamGrid_delete(instance: *mut c_void); } unsafe { cv_ParamGrid_delete(self.as_raw_mut_ParamGrid()) }; } } impl ParamGrid { #[inline] pub fn as_raw_ParamGrid(&self) -> *const c_void { self.as_raw() } #[inline] pub fn as_raw_mut_ParamGrid(&mut self) -> *mut c_void { self.as_raw_mut() } } unsafe impl Send for ParamGrid {} impl crate::ml::ParamGridTrait for ParamGrid { #[inline] fn as_raw_ParamGrid(&self) -> *const c_void { self.as_raw() } #[inline] fn as_raw_mut_ParamGrid(&mut self) -> *mut c_void { self.as_raw_mut() } } impl ParamGrid { /// Default constructor pub fn default() -> Result<crate::ml::ParamGrid> { unsafe { sys::cv_ml_ParamGrid_ParamGrid() }.into_result().map(|r| unsafe { crate::ml::ParamGrid::opencv_from_extern(r) } ) } /// Constructor with parameters pub fn for_range(_min_val: f64, _max_val: f64, _log_step: f64) -> Result<crate::ml::ParamGrid> { unsafe { sys::cv_ml_ParamGrid_ParamGrid_double_double_double(_min_val, _max_val, _log_step) }.into_result().map(|r| unsafe { crate::ml::ParamGrid::opencv_from_extern(r) } ) } /// Creates a ParamGrid Ptr that can be given to the %SVM::trainAuto method /// /// ## Parameters /// * minVal: minimum value of the parameter grid /// * maxVal: maximum value of the parameter grid /// * logstep: Logarithmic step for iterating the statmodel parameter /// /// ## C++ default parameters /// * min_val: 0. /// * max_val: 0. /// * logstep: 1. pub fn create(min_val: f64, max_val: f64, logstep: f64) -> Result<core::Ptr::<crate::ml::ParamGrid>> { unsafe { sys::cv_ml_ParamGrid_create_double_double_double(min_val, max_val, logstep) }.into_result().map(|r| unsafe { core::Ptr::<crate::ml::ParamGrid>::opencv_from_extern(r) } ) } } /// The class implements the random forest predictor. /// ## See also /// @ref ml_intro_rtrees pub trait RTrees: crate::ml::DTrees { fn as_raw_RTrees(&self) -> *const c_void; fn as_raw_mut_RTrees(&mut self) -> *mut c_void; /// If true then variable importance will be calculated and then it can be retrieved by RTrees::getVarImportance. /// Default value is false. /// ## See also /// setCalculateVarImportance fn get_calculate_var_importance(&self) -> Result<bool> { unsafe { sys::cv_ml_RTrees_getCalculateVarImportance_const(self.as_raw_RTrees()) }.into_result() } /// If true then variable importance will be calculated and then it can be retrieved by RTrees::getVarImportance. /// Default value is false. /// ## See also /// setCalculateVarImportance getCalculateVarImportance fn set_calculate_var_importance(&mut self, val: bool) -> Result<()> { unsafe { sys::cv_ml_RTrees_setCalculateVarImportance_bool(self.as_raw_mut_RTrees(), val) }.into_result() } /// The size of the randomly selected subset of features at each tree node and that are used /// to find the best split(s). /// If you set it to 0 then the size will be set to the square root of the total number of /// features. Default value is 0. /// ## See also /// setActiveVarCount fn get_active_var_count(&self) -> Result<i32> { unsafe { sys::cv_ml_RTrees_getActiveVarCount_const(self.as_raw_RTrees()) }.into_result() } /// The size of the randomly selected subset of features at each tree node and that are used /// to find the best split(s). /// If you set it to 0 then the size will be set to the square root of the total number of /// features. Default value is 0. 
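	///
	/// A hedged configuration sketch (`create` is defined on `dyn RTrees`
	/// further below; error handling elided):
	/// ```ignore
	/// let mut rf = <dyn RTrees>::create()?;
	/// rf.set_active_var_count(0)?; // 0 => sqrt(total feature count) per node
	/// rf.set_calculate_var_importance(true)?;
	/// ```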
	/// ## See also
	/// setActiveVarCount getActiveVarCount
	fn set_active_var_count(&mut self, val: i32) -> Result<()> {
		unsafe { sys::cv_ml_RTrees_setActiveVarCount_int(self.as_raw_mut_RTrees(), val) }.into_result()
	}

	/// The termination criteria that specifies when the training algorithm stops.
	/// Either when the specified number of trees is trained and added to the ensemble or when
	/// sufficient accuracy (measured as OOB error) is achieved. Typically, the more trees you have,
	/// the better the accuracy. However, the improvement in accuracy generally diminishes and levels
	/// off past a certain number of trees. Also keep in mind that the number of trees increases the
	/// prediction time linearly. Default value is TermCriteria(TermCriteria::MAX_ITERS +
	/// TermCriteria::EPS, 50, 0.1)
	/// ## See also
	/// setTermCriteria
	fn get_term_criteria(&self) -> Result<core::TermCriteria> {
		unsafe { sys::cv_ml_RTrees_getTermCriteria_const(self.as_raw_RTrees()) }.into_result()
	}

	/// The termination criteria that specifies when the training algorithm stops.
	/// Either when the specified number of trees is trained and added to the ensemble or when
	/// sufficient accuracy (measured as OOB error) is achieved. Typically, the more trees you have,
	/// the better the accuracy. However, the improvement in accuracy generally diminishes and levels
	/// off past a certain number of trees. Also keep in mind that the number of trees increases the
	/// prediction time linearly. Default value is TermCriteria(TermCriteria::MAX_ITERS +
	/// TermCriteria::EPS, 50, 0.1)
	/// ## See also
	/// setTermCriteria getTermCriteria
	fn set_term_criteria(&mut self, val: core::TermCriteria) -> Result<()> {
		unsafe { sys::cv_ml_RTrees_setTermCriteria_const_TermCriteriaR(self.as_raw_mut_RTrees(), &val) }.into_result()
	}

	/// Returns the variable importance array.
	/// The method returns the variable importance vector, computed at the training stage when
	/// CalculateVarImportance is set to true. If this flag was set to false, an empty matrix is
	/// returned.
	fn get_var_importance(&self) -> Result<core::Mat> {
		unsafe { sys::cv_ml_RTrees_getVarImportance_const(self.as_raw_RTrees()) }.into_result().map(|r| unsafe { core::Mat::opencv_from_extern(r) } )
	}

	/// Returns the result of each individual tree in the forest.
	/// In case the model is a regression problem, the method will return each of the trees'
	/// results for each of the sample cases. If the model is a classifier, it will return
	/// a Mat with samples + 1 rows, where the first row gives the class number and the
	/// following rows return the votes each class had for each sample.
	/// ## Parameters
	/// * samples: Array containing the samples for which votes will be calculated.
	/// * results: Array where the result of the calculation will be written.
	/// * flags: Flags for defining the type of RTrees.
	fn get_votes(&self, samples: &dyn core::ToInputArray, results: &mut dyn core::ToOutputArray, flags: i32) -> Result<()> {
		input_array_arg!(samples);
		output_array_arg!(results);
		unsafe { sys::cv_ml_RTrees_getVotes_const_const__InputArrayR_const__OutputArrayR_int(self.as_raw_RTrees(), samples.as_raw__InputArray(), results.as_raw__OutputArray(), flags) }.into_result()
	}

	fn get_oob_error(&self) -> Result<f64> {
		unsafe { sys::cv_ml_RTrees_getOOBError_const(self.as_raw_RTrees()) }.into_result()
	}
}

impl dyn RTrees + '_ {
	/// Creates the empty model.
	/// Use StatModel::train to train the model, StatModel::train to create and train the model, or
	/// Algorithm::load to load a pre-trained model.
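	///
	/// A hedged end-to-end sketch (the constant paths and the shape of the
	/// inherited `train` call are assumptions; data construction is elided):
	/// ```ignore
	/// let mut rf = <dyn RTrees>::create()?;
	/// rf.set_term_criteria(TermCriteria::new(
	///     TermCriteria_Type::COUNT as i32 + TermCriteria_Type::EPS as i32, // assumed paths
	///     50, 0.1)?)?;
	/// rf.train(&samples, SampleTypes::ROW_SAMPLE as i32, &responses)?; // StatModel::train
	/// let importance = rf.get_var_importance()?; // per-feature importance Mat
	/// ```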
pub fn create() -> Result<core::Ptr::<dyn crate::ml::RTrees>> { unsafe { sys::cv_ml_RTrees_create() }.into_result().map(|r| unsafe { core::Ptr::<dyn crate::ml::RTrees>::opencv_from_extern(r) } ) } /// Loads and creates a serialized RTree from a file /// /// Use RTree::save to serialize and store an RTree to disk. /// Load the RTree from this file again, by calling this function with the path to the file. /// Optionally specify the node for the file containing the classifier /// /// ## Parameters /// * filepath: path to serialized RTree /// * nodeName: name of node containing the classifier /// /// ## C++ default parameters /// * node_name: String() pub fn load(filepath: &str, node_name: &str) -> Result<core::Ptr::<dyn crate::ml::RTrees>> { extern_container_arg!(filepath); extern_container_arg!(node_name); unsafe { sys::cv_ml_RTrees_load_const_StringR_const_StringR(filepath.opencv_as_extern(), node_name.opencv_as_extern()) }.into_result().map(|r| unsafe { core::Ptr::<dyn crate::ml::RTrees>::opencv_from_extern(r) } ) } } /// Support Vector Machines. /// ## See also /// @ref ml_intro_svm pub trait SVM: crate::ml::StatModel { fn as_raw_SVM(&self) -> *const c_void; fn as_raw_mut_SVM(&mut self) -> *mut c_void; /// Type of a %SVM formulation. /// See SVM::Types. Default value is SVM::C_SVC. /// ## See also /// setType fn get_type(&self) -> Result<i32> { unsafe { sys::cv_ml_SVM_getType_const(self.as_raw_SVM()) }.into_result() } /// Type of a %SVM formulation. /// See SVM::Types. Default value is SVM::C_SVC. /// ## See also /// setType getType fn set_type(&mut self, val: i32) -> Result<()> { unsafe { sys::cv_ml_SVM_setType_int(self.as_raw_mut_SVM(), val) }.into_result() } /// Parameter ![inline formula](https://latex.codecogs.com/png.latex?%5Cgamma) of a kernel function. /// For SVM::POLY, SVM::RBF, SVM::SIGMOID or SVM::CHI2. Default value is 1. /// ## See also /// setGamma fn get_gamma(&self) -> Result<f64> { unsafe { sys::cv_ml_SVM_getGamma_const(self.as_raw_SVM()) }.into_result() } /// Parameter ![inline formula](https://latex.codecogs.com/png.latex?%5Cgamma) of a kernel function. /// For SVM::POLY, SVM::RBF, SVM::SIGMOID or SVM::CHI2. Default value is 1. /// ## See also /// setGamma getGamma fn set_gamma(&mut self, val: f64) -> Result<()> { unsafe { sys::cv_ml_SVM_setGamma_double(self.as_raw_mut_SVM(), val) }.into_result() } /// Parameter _coef0_ of a kernel function. /// For SVM::POLY or SVM::SIGMOID. Default value is 0. /// ## See also /// setCoef0 fn get_coef0(&self) -> Result<f64> { unsafe { sys::cv_ml_SVM_getCoef0_const(self.as_raw_SVM()) }.into_result() } /// Parameter _coef0_ of a kernel function. /// For SVM::POLY or SVM::SIGMOID. Default value is 0. /// ## See also /// setCoef0 getCoef0 fn set_coef0(&mut self, val: f64) -> Result<()> { unsafe { sys::cv_ml_SVM_setCoef0_double(self.as_raw_mut_SVM(), val) }.into_result() } /// Parameter _degree_ of a kernel function. /// For SVM::POLY. Default value is 0. /// ## See also /// setDegree fn get_degree(&self) -> Result<f64> { unsafe { sys::cv_ml_SVM_getDegree_const(self.as_raw_SVM()) }.into_result() } /// Parameter _degree_ of a kernel function. /// For SVM::POLY. Default value is 0. /// ## See also /// setDegree getDegree fn set_degree(&mut self, val: f64) -> Result<()> { unsafe { sys::cv_ml_SVM_setDegree_double(self.as_raw_mut_SVM(), val) }.into_result() } /// Parameter _C_ of a %SVM optimization problem. /// For SVM::C_SVC, SVM::EPS_SVR or SVM::NU_SVR. Default value is 0. 
/// ## See also /// setC fn get_c(&self) -> Result<f64> { unsafe { sys::cv_ml_SVM_getC_const(self.as_raw_SVM()) }.into_result() } /// Parameter _C_ of a %SVM optimization problem. /// For SVM::C_SVC, SVM::EPS_SVR or SVM::NU_SVR. Default value is 0. /// ## See also /// setC getC fn set_c(&mut self, val: f64) -> Result<()> { unsafe { sys::cv_ml_SVM_setC_double(self.as_raw_mut_SVM(), val) }.into_result() } /// Parameter ![inline formula](https://latex.codecogs.com/png.latex?%5Cnu) of a %SVM optimization problem. /// For SVM::NU_SVC, SVM::ONE_CLASS or SVM::NU_SVR. Default value is 0. /// ## See also /// setNu fn get_nu(&self) -> Result<f64> { unsafe { sys::cv_ml_SVM_getNu_const(self.as_raw_SVM()) }.into_result() } /// Parameter ![inline formula](https://latex.codecogs.com/png.latex?%5Cnu) of a %SVM optimization problem. /// For SVM::NU_SVC, SVM::ONE_CLASS or SVM::NU_SVR. Default value is 0. /// ## See also /// setNu getNu fn set_nu(&mut self, val: f64) -> Result<()> { unsafe { sys::cv_ml_SVM_setNu_double(self.as_raw_mut_SVM(), val) }.into_result() } /// Parameter ![inline formula](https://latex.codecogs.com/png.latex?%5Cepsilon) of a %SVM optimization problem. /// For SVM::EPS_SVR. Default value is 0. /// ## See also /// setP fn get_p(&self) -> Result<f64> { unsafe { sys::cv_ml_SVM_getP_const(self.as_raw_SVM()) }.into_result() } /// Parameter ![inline formula](https://latex.codecogs.com/png.latex?%5Cepsilon) of a %SVM optimization problem. /// For SVM::EPS_SVR. Default value is 0. /// ## See also /// setP getP fn set_p(&mut self, val: f64) -> Result<()> { unsafe { sys::cv_ml_SVM_setP_double(self.as_raw_mut_SVM(), val) }.into_result() } /// Optional weights in the SVM::C_SVC problem, assigned to particular classes. /// They are multiplied by _C_ so the parameter _C_ of class _i_ becomes `classWeights(i) * C`. Thus /// these weights affect the misclassification penalty for different classes. The larger weight, /// the larger penalty on misclassification of data from the corresponding class. Default value is /// empty Mat. /// ## See also /// setClassWeights fn get_class_weights(&self) -> Result<core::Mat> { unsafe { sys::cv_ml_SVM_getClassWeights_const(self.as_raw_SVM()) }.into_result().map(|r| unsafe { core::Mat::opencv_from_extern(r) } ) } /// Optional weights in the SVM::C_SVC problem, assigned to particular classes. /// They are multiplied by _C_ so the parameter _C_ of class _i_ becomes `classWeights(i) * C`. Thus /// these weights affect the misclassification penalty for different classes. The larger weight, /// the larger penalty on misclassification of data from the corresponding class. Default value is /// empty Mat. /// ## See also /// setClassWeights getClassWeights fn set_class_weights(&mut self, val: &core::Mat) -> Result<()> { unsafe { sys::cv_ml_SVM_setClassWeights_const_MatR(self.as_raw_mut_SVM(), val.as_raw_Mat()) }.into_result() } /// Termination criteria of the iterative %SVM training procedure which solves a partial /// case of constrained quadratic optimization problem. /// You can specify tolerance and/or the maximum number of iterations. Default value is /// `TermCriteria( TermCriteria::MAX_ITER + TermCriteria::EPS, 1000, FLT_EPSILON )`; /// ## See also /// setTermCriteria fn get_term_criteria(&self) -> Result<core::TermCriteria> { unsafe { sys::cv_ml_SVM_getTermCriteria_const(self.as_raw_SVM()) }.into_result() } /// Termination criteria of the iterative %SVM training procedure which solves a partial /// case of constrained quadratic optimization problem. 
	/// You can specify tolerance and/or the maximum number of iterations. Default value is
	/// `TermCriteria( TermCriteria::MAX_ITER + TermCriteria::EPS, 1000, FLT_EPSILON )`.
	/// ## See also
	/// setTermCriteria getTermCriteria
	fn set_term_criteria(&mut self, val: core::TermCriteria) -> Result<()> {
		unsafe { sys::cv_ml_SVM_setTermCriteria_const_TermCriteriaR(self.as_raw_mut_SVM(), &val) }.into_result()
	}

	/// Type of a %SVM kernel.
	/// See SVM::KernelTypes. Default value is SVM::RBF.
	fn get_kernel_type(&self) -> Result<i32> {
		unsafe { sys::cv_ml_SVM_getKernelType_const(self.as_raw_SVM()) }.into_result()
	}

	/// Initialize with one of the predefined kernels.
	/// See SVM::KernelTypes.
	fn set_kernel(&mut self, kernel_type: i32) -> Result<()> {
		unsafe { sys::cv_ml_SVM_setKernel_int(self.as_raw_mut_SVM(), kernel_type) }.into_result()
	}

	/// Initialize with a custom kernel.
	/// See the SVM::Kernel class for implementation details.
	fn set_custom_kernel(&mut self, _kernel: &core::Ptr::<dyn crate::ml::SVM_Kernel>) -> Result<()> {
		unsafe { sys::cv_ml_SVM_setCustomKernel_const_Ptr_Kernel_R(self.as_raw_mut_SVM(), _kernel.as_raw_PtrOfSVM_Kernel()) }.into_result()
	}

	/// Trains an %SVM with optimal parameters.
	///
	/// ## Parameters
	/// * data: the training data that can be constructed using TrainData::create or
	/// TrainData::loadFromCSV.
	/// * kFold: Cross-validation parameter. The training set is divided into kFold subsets. One
	/// subset is used to test the model, the others form the train set. So, the %SVM algorithm is
	/// executed kFold times.
	/// * Cgrid: grid for C
	/// * gammaGrid: grid for gamma
	/// * pGrid: grid for p
	/// * nuGrid: grid for nu
	/// * coeffGrid: grid for coeff
	/// * degreeGrid: grid for degree
	/// * balanced: If true and the problem is 2-class classification then the method creates more
	/// balanced cross-validation subsets, that is, proportions between classes in the subsets are
	/// close to such proportion in the whole train dataset.
	///
	/// The method trains the %SVM model automatically by choosing the optimal parameters C, gamma, p,
	/// nu, coef0, degree. Parameters are considered optimal when the cross-validation
	/// estimate of the test set error is minimal.
	///
	/// If there is no need to optimize a parameter, the corresponding grid step should be set to any
	/// value less than or equal to 1. For example, to avoid optimization in gamma, set `gammaGrid.step
	/// = 0`, and set `gammaGrid.minVal` and `gammaGrid.maxVal` to arbitrary numbers. In this case, the
	/// current value of `gamma` is used.
	///
	/// And, finally, if optimization of a parameter is required but the corresponding grid is
	/// unknown, you may call the function SVM::getDefaultGrid. To generate a grid, for example, for
	/// gamma, call `SVM::getDefaultGrid(SVM::GAMMA)`.
	///
	/// This function works for the classification (SVM::C_SVC or SVM::NU_SVC) as well as for the
	/// regression (SVM::EPS_SVR or SVM::NU_SVR). If it is SVM::ONE_CLASS, no optimization is made and
	/// the usual %SVM with parameters specified in params is executed.
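	///
	/// A hedged usage sketch (`data` is a `Ptr<TrainData>` built elsewhere; the
	/// param-type constant paths below are assumptions):
	/// ```ignore
	/// let mut svm = <dyn SVM>::create()?;
	/// let ok = svm.train_auto(
	///     &data, 10,
	///     <dyn SVM>::get_default_grid(SVM_ParamTypes::C as i32)?,
	///     <dyn SVM>::get_default_grid(SVM_ParamTypes::GAMMA as i32)?,
	///     <dyn SVM>::get_default_grid(SVM_ParamTypes::P as i32)?,
	///     <dyn SVM>::get_default_grid(SVM_ParamTypes::NU as i32)?,
	///     <dyn SVM>::get_default_grid(SVM_ParamTypes::COEF as i32)?,
	///     <dyn SVM>::get_default_grid(SVM_ParamTypes::DEGREE as i32)?,
	///     false,
	/// )?;
	/// ```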
	///
	/// ## C++ default parameters
	/// * k_fold: 10
	/// * cgrid: getDefaultGrid(C)
	/// * gamma_grid: getDefaultGrid(GAMMA)
	/// * p_grid: getDefaultGrid(P)
	/// * nu_grid: getDefaultGrid(NU)
	/// * coeff_grid: getDefaultGrid(COEF)
	/// * degree_grid: getDefaultGrid(DEGREE)
	/// * balanced: false
	fn train_auto(&mut self, data: &core::Ptr::<dyn crate::ml::TrainData>, k_fold: i32, mut cgrid: crate::ml::ParamGrid, mut gamma_grid: crate::ml::ParamGrid, mut p_grid: crate::ml::ParamGrid, mut nu_grid: crate::ml::ParamGrid, mut coeff_grid: crate::ml::ParamGrid, mut degree_grid: crate::ml::ParamGrid, balanced: bool) -> Result<bool> {
		unsafe { sys::cv_ml_SVM_trainAuto_const_Ptr_TrainData_R_int_ParamGrid_ParamGrid_ParamGrid_ParamGrid_ParamGrid_ParamGrid_bool(self.as_raw_mut_SVM(), data.as_raw_PtrOfTrainData(), k_fold, cgrid.as_raw_mut_ParamGrid(), gamma_grid.as_raw_mut_ParamGrid(), p_grid.as_raw_mut_ParamGrid(), nu_grid.as_raw_mut_ParamGrid(), coeff_grid.as_raw_mut_ParamGrid(), degree_grid.as_raw_mut_ParamGrid(), balanced) }.into_result()
	}

	/// Trains an %SVM with optimal parameters.
	///
	/// ## Parameters
	/// * samples: training samples
	/// * layout: See ml::SampleTypes.
	/// * responses: vector of responses associated with the training samples.
	/// * kFold: Cross-validation parameter. The training set is divided into kFold subsets. One
	/// subset is used to test the model, the others form the train set. So, the %SVM algorithm is
	/// executed kFold times.
	/// * Cgrid: grid for C
	/// * gammaGrid: grid for gamma
	/// * pGrid: grid for p
	/// * nuGrid: grid for nu
	/// * coeffGrid: grid for coeff
	/// * degreeGrid: grid for degree
	/// * balanced: If true and the problem is 2-class classification then the method creates more
	/// balanced cross-validation subsets, that is, proportions between classes in the subsets are
	/// close to such proportion in the whole train dataset.
	///
	/// The method trains the %SVM model automatically by choosing the optimal parameters C, gamma, p,
	/// nu, coef0, degree. Parameters are considered optimal when the cross-validation
	/// estimate of the test set error is minimal.
	///
	/// This function only makes use of SVM::getDefaultGrid for parameter optimization and thus only
	/// offers rudimentary parameter options.
	///
	/// This function works for the classification (SVM::C_SVC or SVM::NU_SVC) as well as for the
	/// regression (SVM::EPS_SVR or SVM::NU_SVR). If it is SVM::ONE_CLASS, no optimization is made and
	/// the usual %SVM with parameters specified in params is executed.
/// /// ## C++ default parameters /// * k_fold: 10 /// * cgrid: SVM::getDefaultGridPtr(SVM::C) /// * gamma_grid: SVM::getDefaultGridPtr(SVM::GAMMA) /// * p_grid: SVM::getDefaultGridPtr(SVM::P) /// * nu_grid: SVM::getDefaultGridPtr(SVM::NU) /// * coeff_grid: SVM::getDefaultGridPtr(SVM::COEF) /// * degree_grid: SVM::getDefaultGridPtr(SVM::DEGREE) /// * balanced: false fn train_auto_with_data(&mut self, samples: &dyn core::ToInputArray, layout: i32, responses: &dyn core::ToInputArray, k_fold: i32, mut cgrid: core::Ptr::<crate::ml::ParamGrid>, mut gamma_grid: core::Ptr::<crate::ml::ParamGrid>, mut p_grid: core::Ptr::<crate::ml::ParamGrid>, mut nu_grid: core::Ptr::<crate::ml::ParamGrid>, mut coeff_grid: core::Ptr::<crate::ml::ParamGrid>, mut degree_grid: core::Ptr::<crate::ml::ParamGrid>, balanced: bool) -> Result<bool> { input_array_arg!(samples); input_array_arg!(responses); unsafe { sys::cv_ml_SVM_trainAuto_const__InputArrayR_int_const__InputArrayR_int_Ptr_ParamGrid__Ptr_ParamGrid__Ptr_ParamGrid__Ptr_ParamGrid__Ptr_ParamGrid__Ptr_ParamGrid__bool(self.as_raw_mut_SVM(), samples.as_raw__InputArray(), layout, responses.as_raw__InputArray(), k_fold, cgrid.as_raw_mut_PtrOfParamGrid(), gamma_grid.as_raw_mut_PtrOfParamGrid(), p_grid.as_raw_mut_PtrOfParamGrid(), nu_grid.as_raw_mut_PtrOfParamGrid(), coeff_grid.as_raw_mut_PtrOfParamGrid(), degree_grid.as_raw_mut_PtrOfParamGrid(), balanced) }.into_result() } /// Retrieves all the support vectors /// /// The method returns all the support vectors as a floating-point matrix, where support vectors are /// stored as matrix rows. fn get_support_vectors(&self) -> Result<core::Mat> { unsafe { sys::cv_ml_SVM_getSupportVectors_const(self.as_raw_SVM()) }.into_result().map(|r| unsafe { core::Mat::opencv_from_extern(r) } ) } /// Retrieves all the uncompressed support vectors of a linear %SVM /// /// The method returns all the uncompressed support vectors of a linear %SVM that the compressed /// support vector, used for prediction, was derived from. They are returned in a floating-point /// matrix, where the support vectors are stored as matrix rows. fn get_uncompressed_support_vectors(&self) -> Result<core::Mat> { unsafe { sys::cv_ml_SVM_getUncompressedSupportVectors_const(self.as_raw_SVM()) }.into_result().map(|r| unsafe { core::Mat::opencv_from_extern(r) } ) } /// Retrieves the decision function /// /// ## Parameters /// * i: the index of the decision function. If the problem solved is regression, 1-class or /// 2-class classification, then there will be just one decision function and the index should /// always be 0. Otherwise, in the case of N-class classification, there will be ![inline formula](https://latex.codecogs.com/png.latex?N%28N%2D1%29%2F2) /// decision functions. /// * alpha: the optional output vector for weights, corresponding to different support vectors. /// In the case of linear %SVM all the alpha's will be 1's. /// * svidx: the optional output vector of indices of support vectors within the matrix of /// support vectors (which can be retrieved by SVM::getSupportVectors). In the case of linear /// %SVM each decision function consists of a single "compressed" support vector. /// /// The method returns rho parameter of the decision function, a scalar subtracted from the weighted /// sum of kernel responses. 
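	///
	/// A hedged retrieval sketch for a trained two-class model (`Mat::default()`
	/// is assumed fallible in this crate version):
	/// ```ignore
	/// let mut alpha = Mat::default()?;
	/// let mut svidx = Mat::default()?;
	/// let rho = svm.get_decision_function(0, &mut alpha, &mut svidx)?;
	/// ```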
fn get_decision_function(&self, i: i32, alpha: &mut dyn core::ToOutputArray, svidx: &mut dyn core::ToOutputArray) -> Result<f64> { output_array_arg!(alpha); output_array_arg!(svidx); unsafe { sys::cv_ml_SVM_getDecisionFunction_const_int_const__OutputArrayR_const__OutputArrayR(self.as_raw_SVM(), i, alpha.as_raw__OutputArray(), svidx.as_raw__OutputArray()) }.into_result() } } impl dyn SVM + '_ { /// Generates a grid for %SVM parameters. /// /// ## Parameters /// * param_id: %SVM parameters IDs that must be one of the SVM::ParamTypes. The grid is /// generated for the parameter with this ID. /// /// The function generates a grid for the specified parameter of the %SVM algorithm. The grid may be /// passed to the function SVM::trainAuto. pub fn get_default_grid(param_id: i32) -> Result<crate::ml::ParamGrid> { unsafe { sys::cv_ml_SVM_getDefaultGrid_int(param_id) }.into_result().map(|r| unsafe { crate::ml::ParamGrid::opencv_from_extern(r) } ) } /// Generates a grid for %SVM parameters. /// /// ## Parameters /// * param_id: %SVM parameters IDs that must be one of the SVM::ParamTypes. The grid is /// generated for the parameter with this ID. /// /// The function generates a grid pointer for the specified parameter of the %SVM algorithm. /// The grid may be passed to the function SVM::trainAuto. pub fn get_default_grid_ptr(param_id: i32) -> Result<core::Ptr::<crate::ml::ParamGrid>> { unsafe { sys::cv_ml_SVM_getDefaultGridPtr_int(param_id) }.into_result().map(|r| unsafe { core::Ptr::<crate::ml::ParamGrid>::opencv_from_extern(r) } ) } /// Creates empty model. /// Use StatModel::train to train the model. Since %SVM has several parameters, you may want to /// find the best parameters for your problem, it can be done with SVM::trainAuto. pub fn create() -> Result<core::Ptr::<dyn crate::ml::SVM>> { unsafe { sys::cv_ml_SVM_create() }.into_result().map(|r| unsafe { core::Ptr::<dyn crate::ml::SVM>::opencv_from_extern(r) } ) } /// Loads and creates a serialized svm from a file /// /// Use SVM::save to serialize and store an SVM to disk. /// Load the SVM from this file again, by calling this function with the path to the file. /// /// ## Parameters /// * filepath: path to serialized svm pub fn load(filepath: &str) -> Result<core::Ptr::<dyn crate::ml::SVM>> { extern_container_arg!(filepath); unsafe { sys::cv_ml_SVM_load_const_StringR(filepath.opencv_as_extern()) }.into_result().map(|r| unsafe { core::Ptr::<dyn crate::ml::SVM>::opencv_from_extern(r) } ) } } pub trait SVM_Kernel: core::AlgorithmTrait { fn as_raw_SVM_Kernel(&self) -> *const c_void; fn as_raw_mut_SVM_Kernel(&mut self) -> *mut c_void; fn get_type(&self) -> Result<i32> { unsafe { sys::cv_ml_SVM_Kernel_getType_const(self.as_raw_SVM_Kernel()) }.into_result() } fn calc(&mut self, vcount: i32, n: i32, vecs: &f32, another: &f32, results: &mut f32) -> Result<()> { unsafe { sys::cv_ml_SVM_Kernel_calc_int_int_const_floatX_const_floatX_floatX(self.as_raw_mut_SVM_Kernel(), vcount, n, vecs, another, results) }.into_result() } } /// ! /// Stochastic Gradient Descent SVM classifier /// /// SVMSGD provides a fast and easy-to-use implementation of the SVM classifier using the Stochastic Gradient Descent approach, /// as presented in [bottou2010large](https://docs.opencv.org/4.3.0/d0/de3/citelist.html#CITEREF_bottou2010large). 
/// /// The classifier has following parameters: /// - model type, /// - margin type, /// - margin regularization (![inline formula](https://latex.codecogs.com/png.latex?%5Clambda)), /// - initial step size (![inline formula](https://latex.codecogs.com/png.latex?%5Cgamma%5F0)), /// - step decreasing power (![inline formula](https://latex.codecogs.com/png.latex?c)), /// - and termination criteria. /// /// The model type may have one of the following values: \ref SGD and \ref ASGD. /// /// - \ref SGD is the classic version of SVMSGD classifier: every next step is calculated by the formula /// ![block formula](https://latex.codecogs.com/png.latex?w%5F%7Bt%2B1%7D%20%3D%20w%5Ft%20%2D%20%5Cgamma%28t%29%20%5Cfrac%7BdQ%5Fi%7D%7Bdw%7D%20%7C%5F%7Bw%20%3D%20w%5Ft%7D) /// where /// - ![inline formula](https://latex.codecogs.com/png.latex?w%5Ft) is the weights vector for decision function at step ![inline formula](https://latex.codecogs.com/png.latex?t), /// - ![inline formula](https://latex.codecogs.com/png.latex?%5Cgamma%28t%29) is the step size of model parameters at the iteration ![inline formula](https://latex.codecogs.com/png.latex?t), it is decreased on each step by the formula /// ![inline formula](https://latex.codecogs.com/png.latex?%5Cgamma%28t%29%20%3D%20%5Cgamma%5F0%20%20%281%20%2B%20%5Clambda%20%20%5Cgamma%5F0%20t%29%20%5E%20%7B%2Dc%7D) /// - ![inline formula](https://latex.codecogs.com/png.latex?Q%5Fi) is the target functional from SVM task for sample with number ![inline formula](https://latex.codecogs.com/png.latex?i), this sample is chosen stochastically on each step of the algorithm. /// /// - \ref ASGD is Average Stochastic Gradient Descent SVM Classifier. ASGD classifier averages weights vector on each step of algorithm by the formula /// ![inline formula](https://latex.codecogs.com/png.latex?%5Cwidehat%7Bw%7D%5F%7Bt%2B1%7D%20%3D%20%5Cfrac%7Bt%7D%7B1%2Bt%7D%5Cwidehat%7Bw%7D%5F%7Bt%7D%20%2B%20%5Cfrac%7B1%7D%7B1%2Bt%7Dw%5F%7Bt%2B1%7D) /// /// The recommended model type is ASGD (following [bottou2010large](https://docs.opencv.org/4.3.0/d0/de3/citelist.html#CITEREF_bottou2010large)). /// /// The margin type may have one of the following values: \ref SOFT_MARGIN or \ref HARD_MARGIN. /// /// - You should use \ref HARD_MARGIN type, if you have linearly separable sets. /// - You should use \ref SOFT_MARGIN type, if you have non-linearly separable sets or sets with outliers. /// - In the general case (if you know nothing about linear separability of your sets), use SOFT_MARGIN. /// /// The other parameters may be described as follows: /// - Margin regularization parameter is responsible for weights decreasing at each step and for the strength of restrictions on outliers /// (the less the parameter, the less probability that an outlier will be ignored). /// Recommended value for SGD model is 0.0001, for ASGD model is 0.00001. /// /// - Initial step size parameter is the initial value for the step size ![inline formula](https://latex.codecogs.com/png.latex?%5Cgamma%28t%29). /// You will have to find the best initial step for your problem. /// /// - Step decreasing power is the power parameter for ![inline formula](https://latex.codecogs.com/png.latex?%5Cgamma%28t%29) decreasing by the formula, mentioned above. /// Recommended value for SGD model is 1, for ASGD model is 0.75. /// /// - Termination criteria can be TermCriteria::COUNT, TermCriteria::EPS or TermCriteria::COUNT + TermCriteria::EPS. /// You will have to find the best termination criteria for your problem. 
/// /// Note that the parameters margin regularization, initial step size, and step decreasing power should be positive. /// /// To use SVMSGD algorithm do as follows: /// /// - first, create the SVMSGD object. The algorithm will set optimal parameters by default, but you can set your own parameters via functions setSvmsgdType(), /// setMarginType(), setMarginRegularization(), setInitialStepSize(), and setStepDecreasingPower(). /// /// - then the SVM model can be trained using the train features and the correspondent labels by the method train(). /// /// - after that, the label of a new feature vector can be predicted using the method predict(). /// /// ```ignore /// // Create empty object /// cv::Ptr<SVMSGD> svmsgd = SVMSGD::create(); /// /// // Train the Stochastic Gradient Descent SVM /// svmsgd->train(trainData); /// /// // Predict labels for the new samples /// svmsgd->predict(samples, responses); /// ``` /// pub trait SVMSGD: crate::ml::StatModel { fn as_raw_SVMSGD(&self) -> *const c_void; fn as_raw_mut_SVMSGD(&mut self) -> *mut c_void; /// ## Returns /// the weights of the trained model (decision function f(x) = weights * x + shift). fn get_weights(&mut self) -> Result<core::Mat> { unsafe { sys::cv_ml_SVMSGD_getWeights(self.as_raw_mut_SVMSGD()) }.into_result().map(|r| unsafe { core::Mat::opencv_from_extern(r) } ) } /// ## Returns /// the shift of the trained model (decision function f(x) = weights * x + shift). fn get_shift(&mut self) -> Result<f32> { unsafe { sys::cv_ml_SVMSGD_getShift(self.as_raw_mut_SVMSGD()) }.into_result() } /// Function sets optimal parameters values for chosen SVM SGD model. /// ## Parameters /// * svmsgdType: is the type of SVMSGD classifier. /// * marginType: is the type of margin constraint. /// /// ## C++ default parameters /// * svmsgd_type: SVMSGD::ASGD /// * margin_type: SVMSGD::SOFT_MARGIN fn set_optimal_parameters(&mut self, svmsgd_type: i32, margin_type: i32) -> Result<()> { unsafe { sys::cv_ml_SVMSGD_setOptimalParameters_int_int(self.as_raw_mut_SVMSGD(), svmsgd_type, margin_type) }.into_result() } /// %Algorithm type, one of SVMSGD::SvmsgdType. /// ## See also /// setSvmsgdType fn get_svmsgd_type(&self) -> Result<i32> { unsafe { sys::cv_ml_SVMSGD_getSvmsgdType_const(self.as_raw_SVMSGD()) }.into_result() } /// %Algorithm type, one of SVMSGD::SvmsgdType. /// ## See also /// setSvmsgdType getSvmsgdType fn set_svmsgd_type(&mut self, svmsgd_type: i32) -> Result<()> { unsafe { sys::cv_ml_SVMSGD_setSvmsgdType_int(self.as_raw_mut_SVMSGD(), svmsgd_type) }.into_result() } /// %Margin type, one of SVMSGD::MarginType. /// ## See also /// setMarginType fn get_margin_type(&self) -> Result<i32> { unsafe { sys::cv_ml_SVMSGD_getMarginType_const(self.as_raw_SVMSGD()) }.into_result() } /// %Margin type, one of SVMSGD::MarginType. /// ## See also /// setMarginType getMarginType fn set_margin_type(&mut self, margin_type: i32) -> Result<()> { unsafe { sys::cv_ml_SVMSGD_setMarginType_int(self.as_raw_mut_SVMSGD(), margin_type) }.into_result() } /// Parameter marginRegularization of a %SVMSGD optimization problem. /// ## See also /// setMarginRegularization fn get_margin_regularization(&self) -> Result<f32> { unsafe { sys::cv_ml_SVMSGD_getMarginRegularization_const(self.as_raw_SVMSGD()) }.into_result() } /// Parameter marginRegularization of a %SVMSGD optimization problem. 
/// ## See also /// setMarginRegularization getMarginRegularization fn set_margin_regularization(&mut self, margin_regularization: f32) -> Result<()> { unsafe { sys::cv_ml_SVMSGD_setMarginRegularization_float(self.as_raw_mut_SVMSGD(), margin_regularization) }.into_result() } /// Parameter initialStepSize of a %SVMSGD optimization problem. /// ## See also /// setInitialStepSize fn get_initial_step_size(&self) -> Result<f32> { unsafe { sys::cv_ml_SVMSGD_getInitialStepSize_const(self.as_raw_SVMSGD()) }.into_result() } /// Parameter initialStepSize of a %SVMSGD optimization problem. /// ## See also /// setInitialStepSize getInitialStepSize fn set_initial_step_size(&mut self, initial_step_size: f32) -> Result<()> { unsafe { sys::cv_ml_SVMSGD_setInitialStepSize_float(self.as_raw_mut_SVMSGD(), initial_step_size) }.into_result() } /// Parameter stepDecreasingPower of a %SVMSGD optimization problem. /// ## See also /// setStepDecreasingPower fn get_step_decreasing_power(&self) -> Result<f32> { unsafe { sys::cv_ml_SVMSGD_getStepDecreasingPower_const(self.as_raw_SVMSGD()) }.into_result() } /// Parameter stepDecreasingPower of a %SVMSGD optimization problem. /// ## See also /// setStepDecreasingPower getStepDecreasingPower fn set_step_decreasing_power(&mut self, step_decreasing_power: f32) -> Result<()> { unsafe { sys::cv_ml_SVMSGD_setStepDecreasingPower_float(self.as_raw_mut_SVMSGD(), step_decreasing_power) }.into_result() } /// Termination criteria of the training algorithm. /// You can specify the maximum number of iterations (maxCount) and/or how much the error could /// change between the iterations to make the algorithm continue (epsilon). /// ## See also /// setTermCriteria fn get_term_criteria(&self) -> Result<core::TermCriteria> { unsafe { sys::cv_ml_SVMSGD_getTermCriteria_const(self.as_raw_SVMSGD()) }.into_result() } /// Termination criteria of the training algorithm. /// You can specify the maximum number of iterations (maxCount) and/or how much the error could /// change between the iterations to make the algorithm continue (epsilon). /// ## See also /// setTermCriteria getTermCriteria fn set_term_criteria(&mut self, val: core::TermCriteria) -> Result<()> { unsafe { sys::cv_ml_SVMSGD_setTermCriteria_const_TermCriteriaR(self.as_raw_mut_SVMSGD(), &val) }.into_result() } } impl dyn SVMSGD + '_ { /// Creates empty model. /// Use StatModel::train to train the model. Since %SVMSGD has several parameters, you may want to /// find the best parameters for your problem or use setOptimalParameters() to set some default parameters. pub fn create() -> Result<core::Ptr::<dyn crate::ml::SVMSGD>> { unsafe { sys::cv_ml_SVMSGD_create() }.into_result().map(|r| unsafe { core::Ptr::<dyn crate::ml::SVMSGD>::opencv_from_extern(r) } ) } /// Loads and creates a serialized SVMSGD from a file /// /// Use SVMSGD::save to serialize and store an SVMSGD to disk. /// Load the SVMSGD from this file again, by calling this function with the path to the file. 
/// Optionally specify the node for the file containing the classifier /// /// ## Parameters /// * filepath: path to serialized SVMSGD /// * nodeName: name of node containing the classifier /// /// ## C++ default parameters /// * node_name: String() pub fn load(filepath: &str, node_name: &str) -> Result<core::Ptr::<dyn crate::ml::SVMSGD>> { extern_container_arg!(filepath); extern_container_arg!(node_name); unsafe { sys::cv_ml_SVMSGD_load_const_StringR_const_StringR(filepath.opencv_as_extern(), node_name.opencv_as_extern()) }.into_result().map(|r| unsafe { core::Ptr::<dyn crate::ml::SVMSGD>::opencv_from_extern(r) } ) } } /// Base class for statistical models in OpenCV ML. pub trait StatModel: core::AlgorithmTrait { fn as_raw_StatModel(&self) -> *const c_void; fn as_raw_mut_StatModel(&mut self) -> *mut c_void; /// Returns the number of variables in training samples fn get_var_count(&self) -> Result<i32> { unsafe { sys::cv_ml_StatModel_getVarCount_const(self.as_raw_StatModel()) }.into_result() } fn empty(&self) -> Result<bool> { unsafe { sys::cv_ml_StatModel_empty_const(self.as_raw_StatModel()) }.into_result() } /// Returns true if the model is trained fn is_trained(&self) -> Result<bool> { unsafe { sys::cv_ml_StatModel_isTrained_const(self.as_raw_StatModel()) }.into_result() } /// Returns true if the model is classifier fn is_classifier(&self) -> Result<bool> { unsafe { sys::cv_ml_StatModel_isClassifier_const(self.as_raw_StatModel()) }.into_result() } /// Trains the statistical model /// /// ## Parameters /// * trainData: training data that can be loaded from file using TrainData::loadFromCSV or /// created with TrainData::create. /// * flags: optional flags, depending on the model. Some of the models can be updated with the /// new training samples, not completely overwritten (such as NormalBayesClassifier or ANN_MLP). /// /// ## C++ default parameters /// * flags: 0 fn train_with_data(&mut self, train_data: &core::Ptr::<dyn crate::ml::TrainData>, flags: i32) -> Result<bool> { unsafe { sys::cv_ml_StatModel_train_const_Ptr_TrainData_R_int(self.as_raw_mut_StatModel(), train_data.as_raw_PtrOfTrainData(), flags) }.into_result() } /// Trains the statistical model /// /// ## Parameters /// * samples: training samples /// * layout: See ml::SampleTypes. /// * responses: vector of responses associated with the training samples. fn train(&mut self, samples: &dyn core::ToInputArray, layout: i32, responses: &dyn core::ToInputArray) -> Result<bool> { input_array_arg!(samples); input_array_arg!(responses); unsafe { sys::cv_ml_StatModel_train_const__InputArrayR_int_const__InputArrayR(self.as_raw_mut_StatModel(), samples.as_raw__InputArray(), layout, responses.as_raw__InputArray()) }.into_result() } /// Computes error on the training or test dataset /// /// ## Parameters /// * data: the training data /// * test: if true, the error is computed over the test subset of the data, otherwise it's /// computed over the training subset of the data. Please note that if you loaded a completely /// different dataset to evaluate already trained classifier, you will probably want not to set /// the test subset at all with TrainData::setTrainTestSplitRatio and specify test=false, so /// that the error is computed for the whole new set. Yes, this sounds a bit confusing. /// * resp: the optional output responses. /// /// The method uses StatModel::predict to compute the error. For regression models the error is /// computed as RMS, for classifiers - as a percent of missclassified samples (0%-100%). 
fn calc_error(&self, data: &core::Ptr::<dyn crate::ml::TrainData>, test: bool, resp: &mut dyn core::ToOutputArray) -> Result<f32> { output_array_arg!(resp); unsafe { sys::cv_ml_StatModel_calcError_const_const_Ptr_TrainData_R_bool_const__OutputArrayR(self.as_raw_StatModel(), data.as_raw_PtrOfTrainData(), test, resp.as_raw__OutputArray()) }.into_result() } /// Predicts response(s) for the provided sample(s) /// /// ## Parameters /// * samples: The input samples, floating-point matrix /// * results: The optional output matrix of results. /// * flags: The optional flags, model-dependent. See cv::ml::StatModel::Flags. /// /// ## C++ default parameters /// * results: noArray() /// * flags: 0 fn predict(&self, samples: &dyn core::ToInputArray, results: &mut dyn core::ToOutputArray, flags: i32) -> Result<f32> { input_array_arg!(samples); output_array_arg!(results); unsafe { sys::cv_ml_StatModel_predict_const_const__InputArrayR_const__OutputArrayR_int(self.as_raw_StatModel(), samples.as_raw__InputArray(), results.as_raw__OutputArray(), flags) }.into_result() } } /// Class encapsulating training data. /// /// Please note that the class only specifies the interface of training data, but not implementation. /// All the statistical model classes in _ml_ module accepts Ptr\<TrainData\> as parameter. In other /// words, you can create your own class derived from TrainData and pass smart pointer to the instance /// of this class into StatModel::train. /// ## See also /// @ref ml_intro_data pub trait TrainData { fn as_raw_TrainData(&self) -> *const c_void; fn as_raw_mut_TrainData(&mut self) -> *mut c_void; fn get_layout(&self) -> Result<i32> { unsafe { sys::cv_ml_TrainData_getLayout_const(self.as_raw_TrainData()) }.into_result() } fn get_n_train_samples(&self) -> Result<i32> { unsafe { sys::cv_ml_TrainData_getNTrainSamples_const(self.as_raw_TrainData()) }.into_result() } fn get_n_test_samples(&self) -> Result<i32> { unsafe { sys::cv_ml_TrainData_getNTestSamples_const(self.as_raw_TrainData()) }.into_result() } fn get_n_samples(&self) -> Result<i32> { unsafe { sys::cv_ml_TrainData_getNSamples_const(self.as_raw_TrainData()) }.into_result() } fn get_n_vars(&self) -> Result<i32> { unsafe { sys::cv_ml_TrainData_getNVars_const(self.as_raw_TrainData()) }.into_result() } fn get_n_all_vars(&self) -> Result<i32> { unsafe { sys::cv_ml_TrainData_getNAllVars_const(self.as_raw_TrainData()) }.into_result() } fn get_sample(&self, var_idx: &dyn core::ToInputArray, sidx: i32, buf: &mut f32) -> Result<()> { input_array_arg!(var_idx); unsafe { sys::cv_ml_TrainData_getSample_const_const__InputArrayR_int_floatX(self.as_raw_TrainData(), var_idx.as_raw__InputArray(), sidx, buf) }.into_result() } fn get_samples(&self) -> Result<core::Mat> { unsafe { sys::cv_ml_TrainData_getSamples_const(self.as_raw_TrainData()) }.into_result().map(|r| unsafe { core::Mat::opencv_from_extern(r) } ) } fn get_missing(&self) -> Result<core::Mat> { unsafe { sys::cv_ml_TrainData_getMissing_const(self.as_raw_TrainData()) }.into_result().map(|r| unsafe { core::Mat::opencv_from_extern(r) } ) } /// Returns matrix of train samples /// /// ## Parameters /// * layout: The requested layout. If it's different from the initial one, the matrix is /// transposed. See ml::SampleTypes. /// * compressSamples: if true, the function returns only the training samples (specified by /// sampleIdx) /// * compressVars: if true, the function returns the shorter training samples, containing only /// the active variables. 
/// /// In current implementation the function tries to avoid physical data copying and returns the /// matrix stored inside TrainData (unless the transposition or compression is needed). /// /// ## C++ default parameters /// * layout: ROW_SAMPLE /// * compress_samples: true /// * compress_vars: true fn get_train_samples(&self, layout: i32, compress_samples: bool, compress_vars: bool) -> Result<core::Mat> { unsafe { sys::cv_ml_TrainData_getTrainSamples_const_int_bool_bool(self.as_raw_TrainData(), layout, compress_samples, compress_vars) }.into_result().map(|r| unsafe { core::Mat::opencv_from_extern(r) } ) } /// Returns the vector of responses /// /// The function returns ordered or the original categorical responses. Usually it's used in /// regression algorithms. fn get_train_responses(&self) -> Result<core::Mat> { unsafe { sys::cv_ml_TrainData_getTrainResponses_const(self.as_raw_TrainData()) }.into_result().map(|r| unsafe { core::Mat::opencv_from_extern(r) } ) } /// Returns the vector of normalized categorical responses /// /// The function returns vector of responses. Each response is integer from `0` to `<number of /// classes>-1`. The actual label value can be retrieved then from the class label vector, see /// TrainData::getClassLabels. fn get_train_norm_cat_responses(&self) -> Result<core::Mat> { unsafe { sys::cv_ml_TrainData_getTrainNormCatResponses_const(self.as_raw_TrainData()) }.into_result().map(|r| unsafe { core::Mat::opencv_from_extern(r) } ) } fn get_test_responses(&self) -> Result<core::Mat> { unsafe { sys::cv_ml_TrainData_getTestResponses_const(self.as_raw_TrainData()) }.into_result().map(|r| unsafe { core::Mat::opencv_from_extern(r) } ) } fn get_test_norm_cat_responses(&self) -> Result<core::Mat> { unsafe { sys::cv_ml_TrainData_getTestNormCatResponses_const(self.as_raw_TrainData()) }.into_result().map(|r| unsafe { core::Mat::opencv_from_extern(r) } ) } fn get_responses(&self) -> Result<core::Mat> { unsafe { sys::cv_ml_TrainData_getResponses_const(self.as_raw_TrainData()) }.into_result().map(|r| unsafe { core::Mat::opencv_from_extern(r) } ) } fn get_norm_cat_responses(&self) -> Result<core::Mat> { unsafe { sys::cv_ml_TrainData_getNormCatResponses_const(self.as_raw_TrainData()) }.into_result().map(|r| unsafe { core::Mat::opencv_from_extern(r) } ) } fn get_sample_weights(&self) -> Result<core::Mat> { unsafe { sys::cv_ml_TrainData_getSampleWeights_const(self.as_raw_TrainData()) }.into_result().map(|r| unsafe { core::Mat::opencv_from_extern(r) } ) } fn get_train_sample_weights(&self) -> Result<core::Mat> { unsafe { sys::cv_ml_TrainData_getTrainSampleWeights_const(self.as_raw_TrainData()) }.into_result().map(|r| unsafe { core::Mat::opencv_from_extern(r) } ) } fn get_test_sample_weights(&self) -> Result<core::Mat> { unsafe { sys::cv_ml_TrainData_getTestSampleWeights_const(self.as_raw_TrainData()) }.into_result().map(|r| unsafe { core::Mat::opencv_from_extern(r) } ) } fn get_var_idx(&self) -> Result<core::Mat> { unsafe { sys::cv_ml_TrainData_getVarIdx_const(self.as_raw_TrainData()) }.into_result().map(|r| unsafe { core::Mat::opencv_from_extern(r) } ) } fn get_var_type(&self) -> Result<core::Mat> { unsafe { sys::cv_ml_TrainData_getVarType_const(self.as_raw_TrainData()) }.into_result().map(|r| unsafe { core::Mat::opencv_from_extern(r) } ) } fn get_var_symbol_flags(&self) -> Result<core::Mat> { unsafe { sys::cv_ml_TrainData_getVarSymbolFlags_const(self.as_raw_TrainData()) }.into_result().map(|r| unsafe { core::Mat::opencv_from_extern(r) } ) } fn get_response_type(&self) -> 
Result<i32> { unsafe { sys::cv_ml_TrainData_getResponseType_const(self.as_raw_TrainData()) }.into_result() } fn get_train_sample_idx(&self) -> Result<core::Mat> { unsafe { sys::cv_ml_TrainData_getTrainSampleIdx_const(self.as_raw_TrainData()) }.into_result().map(|r| unsafe { core::Mat::opencv_from_extern(r) } ) } fn get_test_sample_idx(&self) -> Result<core::Mat> { unsafe { sys::cv_ml_TrainData_getTestSampleIdx_const(self.as_raw_TrainData()) }.into_result().map(|r| unsafe { core::Mat::opencv_from_extern(r) } ) } fn get_values(&self, vi: i32, sidx: &dyn core::ToInputArray, values: &mut f32) -> Result<()> { input_array_arg!(sidx); unsafe { sys::cv_ml_TrainData_getValues_const_int_const__InputArrayR_floatX(self.as_raw_TrainData(), vi, sidx.as_raw__InputArray(), values) }.into_result() } fn get_norm_cat_values(&self, vi: i32, sidx: &dyn core::ToInputArray, values: &mut i32) -> Result<()> { input_array_arg!(sidx); unsafe { sys::cv_ml_TrainData_getNormCatValues_const_int_const__InputArrayR_intX(self.as_raw_TrainData(), vi, sidx.as_raw__InputArray(), values) }.into_result() } fn get_default_subst_values(&self) -> Result<core::Mat> { unsafe { sys::cv_ml_TrainData_getDefaultSubstValues_const(self.as_raw_TrainData()) }.into_result().map(|r| unsafe { core::Mat::opencv_from_extern(r) } ) } fn get_cat_count(&self, vi: i32) -> Result<i32> { unsafe { sys::cv_ml_TrainData_getCatCount_const_int(self.as_raw_TrainData(), vi) }.into_result() } /// Returns the vector of class labels /// /// The function returns vector of unique labels occurred in the responses. fn get_class_labels(&self) -> Result<core::Mat> { unsafe { sys::cv_ml_TrainData_getClassLabels_const(self.as_raw_TrainData()) }.into_result().map(|r| unsafe { core::Mat::opencv_from_extern(r) } ) } fn get_cat_ofs(&self) -> Result<core::Mat> { unsafe { sys::cv_ml_TrainData_getCatOfs_const(self.as_raw_TrainData()) }.into_result().map(|r| unsafe { core::Mat::opencv_from_extern(r) } ) } fn get_cat_map(&self) -> Result<core::Mat> { unsafe { sys::cv_ml_TrainData_getCatMap_const(self.as_raw_TrainData()) }.into_result().map(|r| unsafe { core::Mat::opencv_from_extern(r) } ) } /// Splits the training data into the training and test parts /// ## See also /// TrainData::setTrainTestSplitRatio /// /// ## C++ default parameters /// * shuffle: true fn set_train_test_split(&mut self, count: i32, shuffle: bool) -> Result<()> { unsafe { sys::cv_ml_TrainData_setTrainTestSplit_int_bool(self.as_raw_mut_TrainData(), count, shuffle) }.into_result() } /// Splits the training data into the training and test parts /// /// The function selects a subset of specified relative size and then returns it as the training /// set. If the function is not called, all the data is used for training. Please, note that for /// each of TrainData::getTrain\* there is corresponding TrainData::getTest\*, so that the test /// subset can be retrieved and processed as well. 
/// ## See also /// TrainData::setTrainTestSplit /// /// ## C++ default parameters /// * shuffle: true fn set_train_test_split_ratio(&mut self, ratio: f64, shuffle: bool) -> Result<()> { unsafe { sys::cv_ml_TrainData_setTrainTestSplitRatio_double_bool(self.as_raw_mut_TrainData(), ratio, shuffle) }.into_result() } fn shuffle_train_test(&mut self) -> Result<()> { unsafe { sys::cv_ml_TrainData_shuffleTrainTest(self.as_raw_mut_TrainData()) }.into_result() } /// Returns matrix of test samples fn get_test_samples(&self) -> Result<core::Mat> { unsafe { sys::cv_ml_TrainData_getTestSamples_const(self.as_raw_TrainData()) }.into_result().map(|r| unsafe { core::Mat::opencv_from_extern(r) } ) } /// Returns vector of symbolic names captured in loadFromCSV() fn get_names(&self, names: &mut core::Vector::<String>) -> Result<()> { unsafe { sys::cv_ml_TrainData_getNames_const_vector_String_R(self.as_raw_TrainData(), names.as_raw_mut_VectorOfString()) }.into_result() } } impl dyn TrainData + '_ { pub fn missing_value() -> Result<f32> { unsafe { sys::cv_ml_TrainData_missingValue() }.into_result() } /// Extract from 1D vector elements specified by passed indexes. /// ## Parameters /// * vec: input vector (supported types: CV_32S, CV_32F, CV_64F) /// * idx: 1D index vector pub fn get_sub_vector(vec: &core::Mat, idx: &core::Mat) -> Result<core::Mat> { unsafe { sys::cv_ml_TrainData_getSubVector_const_MatR_const_MatR(vec.as_raw_Mat(), idx.as_raw_Mat()) }.into_result().map(|r| unsafe { core::Mat::opencv_from_extern(r) } ) } /// Extract from matrix rows/cols specified by passed indexes. /// ## Parameters /// * matrix: input matrix (supported types: CV_32S, CV_32F, CV_64F) /// * idx: 1D index vector /// * layout: specifies to extract rows (cv::ml::ROW_SAMPLES) or to extract columns (cv::ml::COL_SAMPLES) pub fn get_sub_matrix(matrix: &core::Mat, idx: &core::Mat, layout: i32) -> Result<core::Mat> { unsafe { sys::cv_ml_TrainData_getSubMatrix_const_MatR_const_MatR_int(matrix.as_raw_Mat(), idx.as_raw_Mat(), layout) }.into_result().map(|r| unsafe { core::Mat::opencv_from_extern(r) } ) } /// Reads the dataset from a .csv file and returns the ready-to-use training data. /// /// ## Parameters /// * filename: The input file name /// * headerLineCount: The number of lines in the beginning to skip; besides the header, the /// function also skips empty lines and lines staring with `#` /// * responseStartIdx: Index of the first output variable. If -1, the function considers the /// last variable as the response /// * responseEndIdx: Index of the last output variable + 1. If -1, then there is single /// response variable at responseStartIdx. /// * varTypeSpec: The optional text string that specifies the variables' types. It has the /// format `ord[n1-n2,n3,n4-n5,...]cat[n6,n7-n8,...]`. That is, variables from `n1 to n2` /// (inclusive range), `n3`, `n4 to n5` ... are considered ordered and `n6`, `n7 to n8` ... are /// considered as categorical. The range `[n1..n2] + [n3] + [n4..n5] + ... + [n6] + [n7..n8]` /// should cover all the variables. If varTypeSpec is not specified, then algorithm uses the /// following rules: /// - all input variables are considered ordered by default. If some column contains has non- /// numerical values, e.g. 'apple', 'pear', 'apple', 'apple', 'mango', the corresponding /// variable is considered categorical. /// - if there are several output variables, they are all considered as ordered. Error is /// reported when non-numerical values are used. 
/// - if there is a single output variable, then if its values are non-numerical or are all /// integers, then it's considered categorical. Otherwise, it's considered ordered. /// * delimiter: The character used to separate values in each line. /// * missch: The character used to specify missing measurements. It should not be a digit. /// Although it's a non-numerical value, it surely does not affect the decision of whether the /// variable ordered or categorical. /// /// Note: If the dataset only contains input variables and no responses, use responseStartIdx = -2 /// and responseEndIdx = 0. The output variables vector will just contain zeros. /// /// ## C++ default parameters /// * response_start_idx: -1 /// * response_end_idx: -1 /// * var_type_spec: String() /// * delimiter: ',' /// * missch: '?' pub fn load_from_csv(filename: &str, header_line_count: i32, response_start_idx: i32, response_end_idx: i32, var_type_spec: &str, delimiter: i8, missch: i8) -> Result<core::Ptr::<dyn crate::ml::TrainData>> { extern_container_arg!(filename); extern_container_arg!(var_type_spec); unsafe { sys::cv_ml_TrainData_loadFromCSV_const_StringR_int_int_int_const_StringR_char_char(filename.opencv_as_extern(), header_line_count, response_start_idx, response_end_idx, var_type_spec.opencv_as_extern(), delimiter, missch) }.into_result().map(|r| unsafe { core::Ptr::<dyn crate::ml::TrainData>::opencv_from_extern(r) } ) } /// Creates training data from in-memory arrays. /// /// ## Parameters /// * samples: matrix of samples. It should have CV_32F type. /// * layout: see ml::SampleTypes. /// * responses: matrix of responses. If the responses are scalar, they should be stored as a /// single row or as a single column. The matrix should have type CV_32F or CV_32S (in the /// former case the responses are considered as ordered by default; in the latter case - as /// categorical) /// * varIdx: vector specifying which variables to use for training. It can be an integer vector /// (CV_32S) containing 0-based variable indices or byte vector (CV_8U) containing a mask of /// active variables. /// * sampleIdx: vector specifying which samples to use for training. It can be an integer /// vector (CV_32S) containing 0-based sample indices or byte vector (CV_8U) containing a mask /// of training samples. /// * sampleWeights: optional vector with weights for each sample. It should have CV_32F type. /// * varType: optional vector of type CV_8U and size `<number_of_variables_in_samples> + /// <number_of_variables_in_responses>`, containing types of each input and output variable. See /// ml::VariableTypes. 
/// /// ## C++ default parameters /// * var_idx: noArray() /// * sample_idx: noArray() /// * sample_weights: noArray() /// * var_type: noArray() pub fn create(samples: &dyn core::ToInputArray, layout: i32, responses: &dyn core::ToInputArray, var_idx: &dyn core::ToInputArray, sample_idx: &dyn core::ToInputArray, sample_weights: &dyn core::ToInputArray, var_type: &dyn core::ToInputArray) -> Result<core::Ptr::<dyn crate::ml::TrainData>> { input_array_arg!(samples); input_array_arg!(responses); input_array_arg!(var_idx); input_array_arg!(sample_idx); input_array_arg!(sample_weights); input_array_arg!(var_type); unsafe { sys::cv_ml_TrainData_create_const__InputArrayR_int_const__InputArrayR_const__InputArrayR_const__InputArrayR_const__InputArrayR_const__InputArrayR(samples.as_raw__InputArray(), layout, responses.as_raw__InputArray(), var_idx.as_raw__InputArray(), sample_idx.as_raw__InputArray(), sample_weights.as_raw__InputArray(), var_type.as_raw__InputArray()) }.into_result().map(|r| unsafe { core::Ptr::<dyn crate::ml::TrainData>::opencv_from_extern(r) } ) } }
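// A minimal, hedged usage sketch of the traits above: create an empty %SVM,
// train it on row-major samples, then predict. It assumes the crate's usual
// pieces (`core::Mat`, the `ml::ROW_SAMPLE` layout constant, and the generated
// trait impls for `core::Ptr<dyn SVM>`); the function name and flow are
// illustrative only, not part of the generated bindings.
#[allow(dead_code)]
fn example_svm_roundtrip(samples: &core::Mat, responses: &core::Mat) -> Result<f32> {
	// `create` returns an empty model behind a smart pointer; `train` and
	// `predict` come from the `StatModel` super-trait.
	let mut svm = <dyn crate::ml::SVM>::create()?;
	svm.train(samples, crate::ml::ROW_SAMPLE, responses)?;

	// Predict on the training samples themselves, collecting the per-sample
	// outputs into `results`.
	let mut results = core::Mat::default();
	svm.predict(samples, &mut results, 0)
}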
48.59305
532
0.722219
ed269f5f0bd28d2899ec2af6ffbc42d0a92a5289
24,540
/// # Building a Function's CFG ([`Control-Flow-Graph`])
///
/// ## Motivation
///
/// The motivation is that once we have a CFG of a function we can analyze its execution paths.
/// We want that in order to be able to give Gas pricing for different execution paths.
/// By having that, we can pick the most expensive execution path and set it as the function price.
///
/// The Gas pricing is possible since building the CFG may take place only after the validation of the Wasm program has succeeded.
/// (As a reminder - the validation makes sure there are no `loop / call_indirect` instructions nor `recursions / call-cycles` in the code).
///
/// ## Terminology
///
/// 1. `CFG` - The term is short for [`Control-Flow-Graph`]. In our context we refer to building a CFG for a single function.
/// For more info - see the `Motivation` section above.
///
/// 2. `Block` - usually it's called a `Basic Block` but we'll just use the name `Block` here.
/// A `Block` is essentially a `Node` in the `CFG`, and it has:
///
/// * `id` - A unique `id` (see `BlockNum` in the code).
/// * `ops` - A list of code instructions. The instructions are guaranteed not to be branching instructions.
/// Branching instructions are the reason for stopping appending instructions to the current `Block`
/// and creating a new `Block`.
/// * `edges` - Connections between the Blocks in the `CFG`. Each Block keeps a list of its incoming edges and another for its outgoing ones.
/// There are two types of edges: `continuations` and `jumps`. For more info see `Edge`, `Continuation` and `Jump` below (bullets 5, 6 and 7 respectively).
///
/// 3. `Scope` - Since Wasm has structured control-flow (there is no `goto`), each instruction lives under its parent Scope.
/// The start and end of each Scope can be determined by a simple static analysis.
/// Scopes, like in high-level programming languages, can be nested. So Scope `A` can have a child Scope `B` that may have other child `Scope`s and so on.
///
/// 4. `Depth` - The nesting-level of a `Scope`.
/// If Scope `A` starts at `depth = D` and at some point starts a child Scope `B` - then `B` starts at `depth = D + 1`.
/// The Scope with `depth = 0` is reserved for the function itself. This is a design decision that simplifies implementing the `return` and `unreachable` instructions
/// with the same mechanism as real branching instructions (`br / br_if / br_table`).
///
/// 5. `Edge` - A connection between `Block`s. There are two kinds of `Edge`s: `Continuation` and `Jump`.
///
/// 6. `Continuation` - While constructing the CFG, we create new `Block`s and connect them to the previously closed ones.
/// A continuation-edge (or `cont` for short) between Blocks `A` and `B` essentially says there exists a valid execution path from `A` to `B`.
/// In that execution path, after we're finished executing the instructions of Block `A`, we are allowed to proceed to Block `B`.
/// Each continuation is accompanied by a `kind` - its main purpose is to help tell the story of why the continuation was added.
/// (When debugging an emitted CFG, it's very handy to have more information).
///
/// 7. `Jump` - In Wasm there are a couple of branching instructions: `br / br_if / br_table`.
/// A branch instruction can result in `jump`-ing to other locations in the code. Without going into the nuances of each branching instruction,
/// we want to draw `Jump`-edges in the CFG between possible jumps. We are able to do that since there are no arbitrary `goto`s in Wasm.
/// The control-flow is structured and we can determine the targets of each branch.
///
/// Note: we treat `return` and `unreachable` the same as a branch. We look at it as "jumping" out of the function.
/// This plays well with our design, and that's the reason why we reserve `Scope #0` for the function's entry.
///
///
/// ## Algorithm
///
/// 1. Scan the function's instructions one at a time.
/// 1.1 Mark `op` as the current scanned instruction.
/// 1.2 If `op` isn't `IF / ELSE / END` or a `jump` instruction:
/// 1.2.1 Append `op` to the `current block` instructions
///
/// 2. If `op` is a `jump` instruction:
/// 2.1 Mark the current block as `old current`
/// 2.2 Create a new block and mark it as `new current`
/// 2.3 For each possible jump (`br_table` may contain multiple `labels`):
/// 2.3.1 `target_depth` <- jump target `Depth`
/// 2.3.2 Add an unresolved jump with `origin = old current` and associate it with `depth = target_depth`
/// 2.4 If `op` is a conditional-jump (in other words, `br_if`):
/// 2.4.1 Add a `default` continuation-edge with `origin = old current` and `target = new current`
///
/// 3. If `op` is an `IF` instruction:
/// 3.1 Mark the current block as `old current`
/// 3.2 Create a new block and mark it as `new current`
/// 3.3 Add an `on-if-true` continuation-edge from `old current` to `new current`
/// 3.4 Add unresolved-continuations associated with `depth = current_depth`:
/// 3.4.1 Add an unresolved continuation of kind `after-if` with `origin = old current`
/// 3.4.2 Add an unresolved continuation of kind `on-if-false` with `origin = old current`
///
/// Note: At this stage we don't know whether the `IF`-block will have an `ELSE` block - so we prepare for either case.
/// 3.5 Increment the `current depth`
///
/// 4. If `op` is an `ELSE` instruction:
/// 4.1 Mark the current block as `old current`
/// 4.2 Create a new block and mark it as `new current`
/// 4.3 `parent_depth <- current_depth - 1`
/// 4.4 Extract the `origin` of the `on-if-false` unresolved continuation associated with `depth = parent_depth`
/// Note: such an unresolved continuation exists (see `3.4.2` above).
/// 4.5 Add unresolved-continuations associated with `depth = current_depth`:
/// 4.5.1 Add an unresolved continuation of kind `after-then` with `origin = old current`
/// 4.6 Resolve the unresolved-continuation of kind `on-if-false` (created at `3.4.2`)
/// 4.6.1 Assign it `target = new current`.
/// 4.6.2 Add a continuation-edge with the `origin` and `target` of the resolved continuation of 4.6.1
///
/// 5. If `op` is an `END` instruction:
/// 5.1 `parent_depth <- current_depth - 1`
/// 5.2 If we're ending an existing `ELSE` block:
/// 5.2.1 Mark the current block as `old current`
/// 5.2.2 Create a new block and mark it as `new current`
/// 5.2.3 Extract the `after-then` block under `unresolved-cont[parent_depth]`
/// Note: the `after-then` block has been assigned previously when we visited the
/// `ELSE` instruction associated with the current `END` (see `4.5` above).
/// 5.2.4 Add an `after-else` continuation-edge from `old current` to `new current`.
/// 5.2.5 Add an `after-then` continuation-edge from the `after-then` block to `new current`.
/// 5.2.6 goto 5.5
/// 5.3 If we're ending a `THEN` block:
/// 5.3.1 Mark the current block as `old current`
/// 5.3.2 Create a new block and mark it as `new current`
/// 5.3.3 Extract the `after-if` block under `unresolved-cont[parent_depth]`
/// 5.3.4 Add an `after-then` continuation-edge from `old current` to `new current`.
/// 5.3.5 Add `on-if-false` continuation-edge between `after-if` block to `new current`. /// 5.3.6 goto 5.5 /// 5.4 Else (i.e `END`ing a general `Block` but not an `ELSE` or an `THEN`) /// 5.4.1 goto 5.5 /// 5.5 Decrement the `current depth` /// 5.6 Resolve `unresolved-jumps` associated with the `current depth` (after the decrement): /// 5.6.1 If we landed here after 5.4 (i.e NOT 5.2 or 5.3) /// * Mark the current block as `old current` /// * Create a new block and mark it as `new current` /// * Add `default` continuation between `old current` to `new current`. /// 5.6.2 For each unresolved jump `jump`: /// * `target <- current block` /// * Add a `jump` edge between the `jump`'s `origin` to the `target` /// /// 6. If `op` is a `BLOCK` instruction: /// 6.1 Increment the `current depth` /// /// [`Control-Flow-Graph`]: https://en.wikipedia.org/wiki/Control-flow_graph /// /// /// ### Functions CFG Illustrations /// /// In order to make it easier to grasp the above algorithm - here a couple of illustrations /// of how different instructions are translated when building the CFG. /// /// These illustrations are not exhaustive - there are other interesting cases (for example nested `if-then` blocks). /// But understanding them should give a good background when trying to understand the code of the CFG building. /// /// Moreover, there are tests under `tests/cfg.rs` that compare against CFGs described under `tests/graphs. /// Each file under `tests/graphs` is easy to draw as a graph on paper when debugging tests. /// /// a) Illustration for handling an `if-then` (without `else`): /// /// +----------------------+ /// | block | depth = `d` /// +----------------------+ /// || || ***************** /// || || * `on-if-false` * /// || ||_______________ *****************_____ /// || |_____________________________________ | /// || || /// || **************** || /// || * `on-if-true` * || /// || **************** || /// || || /// \/ || /// +---------------------+ || /// | first `then` block | depth = `d + 1` || /// +---------------------+ || /// || || /// || || /// \/ || /// .... (more blocks) || /// || || /// || || /// \/ || /// +--------------------+ || /// | last `then` block | depth = `d + 1` || /// +--------------------+ || /// || || /// || || /// || **************** || /// || * `after-then` * || /// || **************** || /// || _______________________________________|| /// || | ______________________________________| /// || || /// || || /// \/ \/ /// +-------------------------------+ /// | `after-if` block | depth = `d` /// +-------------------------------+ /// /// /// b) Illustration for handling an `if-then-else`: /// /// +----------------------+ /// | block | depth = `d` /// +----------------------+ /// || || ***************** /// || || * `on-if-false` * /// || ||_______________ *****************______________ /// || |______________________________________________ | /// || || /// || || /// || || /// || || /// || **************** || /// || * `on-if-true` * || /// || **************** || /// || || /// \/ \/ /// +---------------------+ +---------------------+ /// | first `then` block | depth = `d + 1` | first `else` block | depth = `d + 1` /// +---------------------+ +---------------------+ /// || || /// || || /// \/ \/ /// .... (more blocks) .... 
(more blocks) /// || || /// || || /// \/ \/ /// +--------------------+ +---------------------+ /// | last `then` block | depth = `d + 1` | last `else` block | depth = `d + 1` /// +--------------------+ +---------------------+ /// || || /// || || /// || || /// || **************** || **************** /// || * `after-then` * || * `after-else` * /// || **************** || **************** /// || || /// || ________________________________________________|| /// || | _______________________________________________| /// || || /// || || /// || || /// || || /// || || /// || || /// \/ \/ /// +---------------------------+ /// | `after-if` block | depth = `d` /// +---------------------------+ /// /// /// /// c) Illustration for handling `br_if 0`: /// /// +----------------------+ /// | block | depth = `d` /// +----------------------+ /// || || /// || || ******************* /// || || * `default` cont. * /// || ||___________*******************________________ /// || |______________________________________________ | /// || || /// || || /// || ********** || /// || * `jump` * +------------+ /// || ********** | block | depth = `d` /// || +------------+ /// || ________________________________________________|| /// || | _______________________________________________| /// || || /// || || /// || || /// \/ \/ /// +-----------------------------------+ /// | `branch target` block | depth = `d - 1` /// +-----------------------------------+ /// /// /// d) illustration for handling `br 0`: /// /// +----------------------+ /// | block | depth = `d` /// +----------------------+ /// || /// || /// || /// || ********** /// || * `jump` * +------------+ /// || ********** | block | depth = `d` /// || +------------+ (unreachable) /// || || /// || || /// || || /// ||_____________________ ____________________|| /// |____________________ | | ___________________| /// || || ******************* /// || || * `default` cont. 
* /// || || ******************* /// || || /// || || /// || || /// \/ \/ /// +-----------------------------------+ /// | `branch target` block | depth = `d - 1` /// +-----------------------------------+ /// /// /// use parity_wasm::elements::Instruction; use std::fmt::{self, Debug}; use svm_program::{Function, Op}; use crate::{CallGraph, Gas}; mod block; mod builder; mod cont; mod depth; mod edge; mod jump; pub use block::{Block, BlockBuilder, BlockNum, BlockRef}; pub use builder::CFGBuilder; pub use cont::{Cont, ContKind, DepthUnresolvedCont}; pub use depth::Depth; pub use edge::Edge; pub use jump::{Jump, UnresolvedJump, WasmJump}; /// This is the API that should be used externally when we want to feed with a `Function` and get back its `CFG` pub fn build_func_cfg<'f>(func: &'f Function<'f>) -> CFG<'f> { // println!("Starting to build CFG for function #{:?}", func.index().0); let mut builder = CFGBuilder::new(); debug_assert_eq!(builder.current_block(), BlockNum(1)); debug_assert_eq!(builder.current_depth(), Depth(1)); for op in func.iter() { match op_kind(&op) { OpKind::Jump => on_jump(op, &mut builder), OpKind::If => on_if(op, &mut builder), OpKind::Else => on_else(op, &mut builder), OpKind::ScopeStart => on_scope_start(op, &mut builder), OpKind::ScopeEnd => on_scope_end(op, &mut builder), OpKind::Other => on_general_op(op, &mut builder), } } // println!( // "Finalizing building the CFG for function #{:?}", // func.index().0 // ); builder.build() } enum OpKind { Jump, ScopeStart, ScopeEnd, If, Else, Other, } /// The `on_branch` is getting called upon hitting a Wasm branch instruction (can be one of: `br / br_if / br_table`). /// /// There are two exceptions - the `return` and `unreachable` Wasm instruction (immediately returning / halting the current function). /// The `return / unreachable` are kind of branching instructions - we can view them as **jumping out** of the function. /// /// In order to simplify the design, we count the `scope depth` starting at 1, when the function's itself is considered to have `depth = 0` /// This will play well with unifying the `branch`-ing and `return` Wasm instructions under a single flow. /// /// This is the flow for handling a branch (or `return / unreachable` as explained above.): /// /// 1. Extract the `target depth` out of the Wasm branch. /// We compute `target_depth = current_depth - branch-label - 1` /// /// For `br_table` we compute a `target depth` for each `table item` and for the `default` as well. /// For `return / unreachable` instructions we will always set `target depth = 0` /// /// 2. We add an `unresolved jump` with `origin = current_block` and `depth = target_depth` (derived at 2.) /// Then we associate the `unresolved jump` with the `current depth - 1` /// /// Once we'll unwind back to the target scope depth, we'll have a chance to resolve the jumps. /// The reason we can't right away resolve a `jump` (and that's why we name it `unresolved jump`) /// is because we don't know the target `block` at that point. /// /// 3. 
Create a new block and make it the new `current block`
///
fn on_jump<'f>(op: Op<'f>, builder: &mut CFGBuilder<'f>) {
    fn target_depth(current: Depth, label: u32) -> Depth {
        debug_assert!(current > Depth(label));

        current - label - 1u32
    }

    let depth = builder.current_depth();
    let origin = builder.current_block();
    let origin_next = builder.create_block();

    let branch: WasmJump = op.raw().into();

    match branch {
        WasmJump::Return | WasmJump::Unreachable => {
            // For `return` and `unreachable` we explicitly set `target_depth = 0` (i.e. jumping off the function)
            add_jump(origin, Depth(0), builder);
        }
        WasmJump::Br(label) => {
            add_jump(origin, target_depth(depth, label), builder);
        }
        WasmJump::BrIf(label) => {
            add_jump(origin, target_depth(depth, label), builder);
            add_continuation(origin, origin_next, builder);
        }
        WasmJump::BrTable(table) => {
            // Adding an unresolved jump for the `default` label
            add_jump(origin, target_depth(depth, table.default()), builder);

            // Adding an unresolved jump for each `label`
            for label in table.iter() {
                add_jump(origin, target_depth(depth, *label), builder);
            }
        }
    }
}

#[inline]
fn add_jump<'f>(origin: BlockNum, target_depth: Depth, builder: &mut CFGBuilder<'f>) {
    builder.add_jump(origin, target_depth);
}

#[inline]
fn add_continuation(origin: BlockNum, target: BlockNum, builder: &mut CFGBuilder) {
    builder.add_cont(origin, target, ContKind::Default);
}

#[inline]
fn on_if<'f>(op: Op<'f>, builder: &mut CFGBuilder<'f>) {
    builder.enter_if(op);
}

#[inline]
fn on_else<'f>(op: Op<'f>, builder: &mut CFGBuilder<'f>) {
    builder.enter_else(op);
}

#[inline]
fn on_scope_start<'f>(op: Op<'f>, builder: &mut CFGBuilder<'f>) {
    builder.enter_scope(Some(op));
}

#[inline]
fn on_scope_end<'f>(op: Op<'f>, builder: &mut CFGBuilder<'f>) {
    builder.exit_scope(op);
}

/// Should be called as a fallback handler when the last read Wasm instruction didn't match any of the above rules.
/// In other words, when we read neither a `branch` instruction nor a new-scope/end-scope instruction, we arrive here.
#[inline]
fn on_general_op<'f>(op: Op<'f>, builder: &mut CFGBuilder<'f>) {
    builder.append(op);
}

#[inline]
fn op_kind(op: &Op) -> OpKind {
    let op = op.raw();

    if is_jump(op) {
        OpKind::Jump
    } else if is_if(op) {
        OpKind::If
    } else if is_else(op) {
        OpKind::Else
    } else if is_scope_start(op) {
        OpKind::ScopeStart
    } else if is_scope_end(op) {
        OpKind::ScopeEnd
    } else {
        OpKind::Other
    }
}

#[inline]
fn is_jump(op: &Instruction) -> bool {
    matches!(
        op,
        Instruction::Br(..)
            | Instruction::BrIf(..)
            | Instruction::BrTable(..)
            | Instruction::Return
            | Instruction::Unreachable
    )
}

#[inline]
fn is_scope_start(op: &Instruction) -> bool {
    matches!(op, Instruction::Block(..))
}

#[inline]
fn is_if(op: &Instruction) -> bool {
    matches!(op, Instruction::If(..))
}

#[inline]
fn is_scope_end(op: &Instruction) -> bool {
    matches!(op, Instruction::End)
}

#[inline]
fn is_else(op: &Instruction) -> bool {
    matches!(op, Instruction::Else)
}

/// Since a `Block` is self-contained in the sense that it has all its relevant data,
/// the `CFG` is merely a container of its `Block`s.
#[derive(PartialEq)]
pub struct CFG<'f> {
    /// The `Block`s of the `CFG`
    pub blocks: Vec<Block<'f>>,
}

impl<'f> CFG<'f> {
    /// Returns a slice of the `CFG` blocks
    pub fn blocks(&self) -> &[Block] {
        &self.blocks
    }

    /// Borrows the `Block` with the specified `block_num`
    pub fn get_block(&self, block_num: BlockNum) -> &Block {
        let num = block_num.0 as usize;

        &self.blocks[num]
    }

    /// The `BlockNum` of the entry `Node` starting each flow
    pub fn start(&self) -> BlockNum {
        BlockNum(0)
    }

    /// The `BlockNum` of the last created `Node` ending each flow
    pub fn end(&self) -> BlockNum {
        let len = self.blocks().len();
        debug_assert!(len > 0);

        let end = len - 1;
        BlockNum(end)
    }
}

impl<'f> Debug for CFG<'f> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        for block in self.blocks.iter() {
            block.fmt(f)?;
            writeln!(f)?;
        }

        Ok(())
    }
}
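// A small, hedged sketch of the branch-target rule used by `on_jump` above
// (`target_depth = current_depth - label - 1`). It assumes `Depth` compares
// and prints the way its use above suggests (`Sub<u32>`, `PartialEq`, `Debug`);
// the module and test names are illustrative.
#[cfg(test)]
mod depth_rule_sketch {
    use super::Depth;

    #[test]
    fn branch_target_depth() {
        let current = Depth(3);

        // `br 0` exits the innermost scope: 3 - 0 - 1 = 2.
        assert_eq!(current - 0u32 - 1u32, Depth(2));

        // `br 2` unwinds three nesting levels, landing at the function's own
        // reserved scope: 3 - 2 - 1 = 0.
        assert_eq!(current - 2u32 - 1u32, Depth(0));
    }
}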
44.37613
173
0.479707
2331e3f08a716deaf2743e6fe6587095bbd80490
409
use std::time::{Duration, Instant};

pub struct Timer {
    start: Instant,
    expires_at: Instant,
}

impl Timer {
    pub fn new(time_out_millis: u64) -> Self {
        let now = Instant::now();
        Timer {
            start: now,
            expires_at: now + Duration::from_millis(time_out_millis),
        }
    }

    pub fn is_done(&self) -> bool {
        // The timer is done once the deadline has passed; the original
        // comparison was inverted and reported done while still running.
        Instant::now() >= self.expires_at
    }
}
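// A minimal usage sketch for `Timer`: a zero-millisecond timer reports done
// right away, while a generous one is still running. The test name is
// illustrative.
#[cfg(test)]
mod tests {
    use super::Timer;

    #[test]
    fn reports_expiry() {
        // `expires_at` equals the creation instant, so the deadline has
        // already passed by the time we ask.
        assert!(Timer::new(0).is_done());

        // A one-minute timer cannot be done straight after creation.
        assert!(!Timer::new(60_000).is_done());
    }
}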
20.45
69
0.555012
3a1feda9733a174054c623283ee8e7c108cac04e
527
const STARTING_MISSILES: i32 = 8;
const READY_AMOUNT: i32 = 2;

// fn main() {
//     let missiles = STARTING_MISSILES;
//     let ready = READY_AMOUNT;
//     println!("Firing {} of my {} missiles", ready, missiles);
//     let missiles = missiles - ready;
//     println!("{} missiles left", missiles);
// }

//EXTRA CHALLENGE
fn main() {
    let (missiles, ready): (i32, i32) = (STARTING_MISSILES, READY_AMOUNT);
    println!("Firing {} of my {} missiles", ready, missiles);
    println!("{} missiles left", missiles - ready);
}
31
72
0.622391
8a38d453c5d1186f7eb4053a1af18bae42107027
311
#[macro_use] extern crate afl; use sailfish::runtime as sf; use sf::Render; fn main() { fuzz!(|data: &[u8]| { // HTML escaping if let Ok(feed) = std::str::from_utf8(data) { let mut buf = sf::Buffer::new(); let _ = feed.render_escaped(&mut buf); } }); }
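// A hedged invocation sketch for the harness above, following the usual
// cargo-afl workflow (the corpus directories and harness binary name are
// illustrative):
//
//     cargo afl build
//     cargo afl fuzz -i in -o out target/debug/<harness-binary>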
19.4375
53
0.5209
1a82ebbd72b382a3409438febff796885523a983
617
/* * Copyright 2022. gudaoxuri * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ pub mod cache_client;
34.277778
75
0.73906
b9799aa17790e3d6459431ca0569415e8b01d0d6
617
use serde::Deserialize;
use std::collections::HashSet;

#[derive(Deserialize)]
pub struct Criteria {
    pub user: Vec<toml::Value>,
    #[serde(skip)]
    pub userid: HashSet<u32>,
    name: HashSet<String>,
    pub cmd: HashSet<String>,
}

impl Criteria {
    pub fn meets(&self, ruid: &u32, name: &String, cmd: &String) -> bool {
        // An empty filter set acts as a wildcard; a non-empty one must match.
        if (!self.userid.is_empty() && !self.userid.contains(ruid))
            || (!self.name.is_empty() && !self.name.contains(name))
        {
            return false;
        }
        if !self.cmd.is_empty() {
            // `cmd` entries match as substrings of the full command line.
            return self.cmd.iter().any(|cmdl| cmd.contains(cmdl.as_str()));
        }
        true
    }
}
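// A hedged sketch of the matching rules above: empty filter sets act as
// wildcards, and `cmd` entries match as substrings of the command line. The
// field values are illustrative.
#[cfg(test)]
mod tests {
    use super::Criteria;
    use std::collections::HashSet;

    #[test]
    fn substring_cmd_matching() {
        let crit = Criteria {
            user: Vec::new(),
            userid: HashSet::from([1000]),
            name: HashSet::new(),
            cmd: HashSet::from(["ssh".to_string()]),
        };

        // The uid matches and "ssh" occurs inside the command line.
        assert!(crit.meets(&1000, &"sshd".to_string(), &"/usr/sbin/sshd -D".to_string()));

        // A different uid fails before the command line is even inspected.
        assert!(!crit.meets(&1001, &"sshd".to_string(), &"/usr/sbin/sshd -D".to_string()));
    }
}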
18.147059
71
0.625608
722fe601ee003b1e8ba14de6bd3f9ee3f48f723a
942
use objr::bindings::*; use crate::MTLPixelFormat; objc_class! { pub struct MTLRenderPipelineColorAttachmentDescriptor { @class(MTLRenderPipelineColorAttachmentDescriptor) } } #[allow(non_snake_case)] impl MTLRenderPipelineColorAttachmentDescriptor { pub fn set_pixelFormat(&mut self, pixelFormat: MTLPixelFormat,pool: &ActiveAutoreleasePool) { unsafe { use crate::mtltexturedescriptor::MTLTextureDescriptorSelectors; Self::perform_primitive(self, Sel::setPixelFormat_(), pool, (pixelFormat.field(),)) } } } #[test] fn smoke_test() { autoreleasepool(|pool| { let mut d = unsafe{ MTLRenderPipelineColorAttachmentDescriptor::class().alloc_init(pool).assume_mut() }; d.set_pixelFormat( MTLPixelFormat::A8Unorm,pool); let description = d.description(pool).to_str(pool).to_owned(); assert!(description.contains("MTLPixelFormatA8Unorm")); }) }
36.230769
112
0.708068
e42ce94d94467a2c5b2f6003b252f22b3693151d
8,283
#[doc = "Reader of register INVERT"] pub type R = crate::R<u32, super::INVERT>; #[doc = "Writer for register INVERT"] pub type W = crate::W<u32, super::INVERT>; #[doc = "Register INVERT `reset()`'s with value 0"] impl crate::ResetValue for super::INVERT { type Type = u32; #[inline(always)] fn reset_value() -> Self::Type { 0 } } #[doc = "Reader of field `PWM0INV`"] pub type PWM0INV_R = crate::R<bool, bool>; #[doc = "Write proxy for field `PWM0INV`"] pub struct PWM0INV_W<'a> { w: &'a mut W, } impl<'a> PWM0INV_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !0x01) | ((value as u32) & 0x01); self.w } } #[doc = "Reader of field `PWM1INV`"] pub type PWM1INV_R = crate::R<bool, bool>; #[doc = "Write proxy for field `PWM1INV`"] pub struct PWM1INV_W<'a> { w: &'a mut W, } impl<'a> PWM1INV_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 1)) | (((value as u32) & 0x01) << 1); self.w } } #[doc = "Reader of field `PWM2INV`"] pub type PWM2INV_R = crate::R<bool, bool>; #[doc = "Write proxy for field `PWM2INV`"] pub struct PWM2INV_W<'a> { w: &'a mut W, } impl<'a> PWM2INV_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 2)) | (((value as u32) & 0x01) << 2); self.w } } #[doc = "Reader of field `PWM3INV`"] pub type PWM3INV_R = crate::R<bool, bool>; #[doc = "Write proxy for field `PWM3INV`"] pub struct PWM3INV_W<'a> { w: &'a mut W, } impl<'a> PWM3INV_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 3)) | (((value as u32) & 0x01) << 3); self.w } } #[doc = "Reader of field `PWM4INV`"] pub type PWM4INV_R = crate::R<bool, bool>; #[doc = "Write proxy for field `PWM4INV`"] pub struct PWM4INV_W<'a> { w: &'a mut W, } impl<'a> PWM4INV_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 4)) | (((value as u32) & 0x01) << 4); self.w } } #[doc = "Reader of field `PWM5INV`"] pub type PWM5INV_R = crate::R<bool, bool>; #[doc = "Write proxy for field `PWM5INV`"] pub struct PWM5INV_W<'a> { w: &'a mut W, } impl<'a> PWM5INV_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { 
self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 5)) | (((value as u32) & 0x01) << 5); self.w } } #[doc = "Reader of field `PWM6INV`"] pub type PWM6INV_R = crate::R<bool, bool>; #[doc = "Write proxy for field `PWM6INV`"] pub struct PWM6INV_W<'a> { w: &'a mut W, } impl<'a> PWM6INV_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 6)) | (((value as u32) & 0x01) << 6); self.w } } #[doc = "Reader of field `PWM7INV`"] pub type PWM7INV_R = crate::R<bool, bool>; #[doc = "Write proxy for field `PWM7INV`"] pub struct PWM7INV_W<'a> { w: &'a mut W, } impl<'a> PWM7INV_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 7)) | (((value as u32) & 0x01) << 7); self.w } } impl R { #[doc = "Bit 0 - Invert MnPWM0 Signal"] #[inline(always)] pub fn pwm0inv(&self) -> PWM0INV_R { PWM0INV_R::new((self.bits & 0x01) != 0) } #[doc = "Bit 1 - Invert MnPWM1 Signal"] #[inline(always)] pub fn pwm1inv(&self) -> PWM1INV_R { PWM1INV_R::new(((self.bits >> 1) & 0x01) != 0) } #[doc = "Bit 2 - Invert MnPWM2 Signal"] #[inline(always)] pub fn pwm2inv(&self) -> PWM2INV_R { PWM2INV_R::new(((self.bits >> 2) & 0x01) != 0) } #[doc = "Bit 3 - Invert MnPWM3 Signal"] #[inline(always)] pub fn pwm3inv(&self) -> PWM3INV_R { PWM3INV_R::new(((self.bits >> 3) & 0x01) != 0) } #[doc = "Bit 4 - Invert MnPWM4 Signal"] #[inline(always)] pub fn pwm4inv(&self) -> PWM4INV_R { PWM4INV_R::new(((self.bits >> 4) & 0x01) != 0) } #[doc = "Bit 5 - Invert MnPWM5 Signal"] #[inline(always)] pub fn pwm5inv(&self) -> PWM5INV_R { PWM5INV_R::new(((self.bits >> 5) & 0x01) != 0) } #[doc = "Bit 6 - Invert MnPWM6 Signal"] #[inline(always)] pub fn pwm6inv(&self) -> PWM6INV_R { PWM6INV_R::new(((self.bits >> 6) & 0x01) != 0) } #[doc = "Bit 7 - Invert MnPWM7 Signal"] #[inline(always)] pub fn pwm7inv(&self) -> PWM7INV_R { PWM7INV_R::new(((self.bits >> 7) & 0x01) != 0) } } impl W { #[doc = "Bit 0 - Invert MnPWM0 Signal"] #[inline(always)] pub fn pwm0inv(&mut self) -> PWM0INV_W { PWM0INV_W { w: self } } #[doc = "Bit 1 - Invert MnPWM1 Signal"] #[inline(always)] pub fn pwm1inv(&mut self) -> PWM1INV_W { PWM1INV_W { w: self } } #[doc = "Bit 2 - Invert MnPWM2 Signal"] #[inline(always)] pub fn pwm2inv(&mut self) -> PWM2INV_W { PWM2INV_W { w: self } } #[doc = "Bit 3 - Invert MnPWM3 Signal"] #[inline(always)] pub fn pwm3inv(&mut self) -> PWM3INV_W { PWM3INV_W { w: self } } #[doc = "Bit 4 - Invert MnPWM4 Signal"] #[inline(always)] pub fn pwm4inv(&mut self) -> PWM4INV_W { PWM4INV_W { w: self } } #[doc = "Bit 5 - Invert MnPWM5 Signal"] #[inline(always)] pub fn pwm5inv(&mut self) -> PWM5INV_W { PWM5INV_W { w: self } } #[doc = "Bit 6 - Invert MnPWM6 Signal"] #[inline(always)] pub fn pwm6inv(&mut self) -> PWM6INV_W { PWM6INV_W { w: self } } #[doc = "Bit 7 - Invert 
MnPWM7 Signal"] #[inline(always)] pub fn pwm7inv(&mut self) -> PWM7INV_W { PWM7INV_W { w: self } } }
28.6609
84
0.536158
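// Usage sketch (illustrative; not part of the original record above). It
// shows how the generated INVERT reader/writer API above is typically driven
// from application code. The peripheral binding `pwm` and the register-block
// path are assumptions: the concrete names depend on the device crate this
// file was generated for (the MnPWMn signal names suggest a TI Tiva/TM4C-style
// PWM module, but the source does not confirm this).
fn configure_inversion(pwm: &pwm0::RegisterBlock) {
    // `modify` performs a read-modify-write: only the addressed bits change.
    // Chaining works because each `set_bit()`/`clear_bit()` returns `&mut W`.
    pwm.invert
        .modify(|_, w| w.pwm0inv().set_bit().pwm3inv().set_bit());

    // Reading back a single field through the generated reader proxy.
    let pwm0_is_inverted: bool = pwm.invert.read().pwm0inv().bit_is_set();
    let _ = pwm0_is_inverted;
}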